diff --git a/.github/workflows/draft-new-release.yml b/.github/workflows/draft-new-release.yml
index 0756a498..6f2abd1b 100644
--- a/.github/workflows/draft-new-release.yml
+++ b/.github/workflows/draft-new-release.yml
@@ -57,7 +57,7 @@ jobs:
         id: make-commit
         env:
           DPRINT_VERSION: "0.50.0"
-          RUST_TOOLCHAIN: "1.82"
+          RUST_TOOLCHAIN: "1.85"
         run: |
           rustup component add rustfmt --toolchain "$RUST_TOOLCHAIN-x86_64-unknown-linux-gnu"
           curl -fsSL https://dprint.dev/install.sh | sh -s $DPRINT_VERSION
diff --git a/.gitignore b/.gitignore
index c4a42ab4..1a98b3be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,8 @@ target/
 .claude/settings.local.json
 .DS_Store
 build/
+release-build.sh
+cn_macos
+target-check
+monero-rpc-pool/temp_db.sqlite
+monero-rpc-pool/temp.db
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 39b6d747..8dcc918c 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -69,6 +69,13 @@
     "unordered_set": "cpp",
     "variant": "cpp",
     "algorithm": "cpp",
-    "*.rs": "rust"
+    "*.rs": "rust",
+    "shared_mutex": "cpp",
+    "source_location": "cpp",
+    "strstream": "cpp",
+    "typeindex": "cpp"
+  },
+  "rust-analyzer.cargo.extraEnv": {
+    "CARGO_TARGET_DIR": "target-check"
   }
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d5f5bdd7..c1dee8c0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,7 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
-## [2.2.0-beta] - 2025-06-17
+- ASB + GUI + CLI: We now cache fee estimates for the Bitcoin wallet for up to 2 minutes. This improves the speed of fee estimation and reduces the number of requests to the Electrum servers.
+
+## [2.3.0-beta.1] - 2025-06-19
+
+- ASB + CLI + GUI: Introduced a load-balancing proxy for Monero RPC nodes that automatically discovers healthy nodes and routes requests to improve connection reliability.
+- ASB: Added a `monero_node_pool` boolean option to the ASB config. When enabled, the ASB uses the internal Monero RPC pool instead of connecting directly to a single daemon URL, providing improved reliability and automatic failover across multiple Monero nodes.
+
+## [2.2.0-beta.2] - 2025-06-17
 
 - We now call Monero functions directly (via FFI bindings) instead of using `monero-wallet-rpc`.
 - ASB: Since we don't communicate with `monero-wallet-rpc` anymore, the Monero wallet will no longer be accessible by connecting to it.
If you are using the asb-docker-compose setup, run this command to migrate the wallet files from the volume of the monero-wallet-rpc container to the volume of the asb container: diff --git a/Cargo.lock b/Cargo.lock index e715d286..c11ac6ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -178,9 +178,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -193,33 +193,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.8" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", @@ -255,7 +255,7 @@ dependencies = [ "objc2-core-foundation", "objc2-core-graphics", "objc2-foundation 0.3.1", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "percent-encoding", "windows-sys 0.59.0", "wl-clipboard-rs", @@ -400,7 +400,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "synstructure 0.13.2", ] @@ -423,7 +423,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -483,9 +483,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" dependencies = [ "flate2", "futures-core", @@ -510,9 +510,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" dependencies = [ "async-lock", "cfg-if", @@ -521,7 +521,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 1.0.7", "slab", "tracing", "windows-sys 0.59.0", @@ -540,9 +540,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" +checksum = "cde3f4e40e6021d7acffc90095cbd6dc54cb593903d1de5832f435eb274b85dc" dependencies = [ "async-channel", "async-io", @@ -553,7 +553,7 @@ dependencies = [ "cfg-if", "event-listener", "futures-lite", - "rustix 0.38.44", + "rustix 1.0.7", "tracing", ] @@ -565,14 +565,14 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "async-signal" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +checksum = "d7605a4e50d4b06df3898d5a70bf5fde51ed9059b0434b73105193bc27acce0d" dependencies = [ "async-io", "async-lock", @@ -580,12 +580,34 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.44", + "rustix 1.0.7", "signal-hook-registry", "slab", "windows-sys 0.59.0", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.103", +] + [[package]] name = "async-task" version = "4.7.1" @@ -600,7 +622,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -684,9 +706,9 @@ checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" [[package]] name = "atomic" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" dependencies = [ "bytemuck", ] @@ -721,9 +743,76 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "axum-macros", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "itoa 1.0.15", + "matchit", + "memchr", + "mime", + "percent-encoding", + 
"pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.103", +] [[package]] name = "backoff" @@ -811,9 +900,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bdk" @@ -1115,7 +1204,7 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -1207,7 +1296,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -1280,9 +1369,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "by_address" @@ -1314,9 +1403,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.23.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" [[package]] name = "byteorder" @@ -1395,18 +1484,18 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" dependencies = [ "serde", ] [[package]] name = "caret" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5440e59387a6f8291f2696a875656873e9d51e9fb7b38af81a25772a5f81b33" +checksum = "887ce2cf6dd0aacf15ce11190546d4348dbebce576cc11b57010a023907f7aa9" [[package]] name = "cargo-platform" @@ -1452,9 +1541,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.24" +version = "1.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16595d3be041c03b09d08d0858631facccee9221e579704070e6e9e4915d3bc7" +checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "jobserver", "libc", @@ -1490,9 +1579,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = 
"1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1567,18 +1656,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", + "clap_derive", ] [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstream", "anstyle", @@ -1587,10 +1677,22 @@ dependencies = [ ] [[package]] -name = "clap_lex" -version = "0.7.4" +name = "clap_derive" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.103", +] + +[[package]] +name = "clap_lex" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "clipboard-win" @@ -1629,14 +1731,14 @@ checksum = "fe6d2e5af09e8c8ad56c969f2157a3d4238cebc7c55f0a517728c38f7b200f81" dependencies = [ "serde", "termcolor", - "unicode-width 0.2.0", + "unicode-width 0.2.1", ] [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -1665,7 +1767,7 @@ checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a" dependencies = [ "crossterm", "unicode-segmentation", - "unicode-width 0.2.0", + "unicode-width 0.2.1", ] [[package]] @@ -1713,7 +1815,7 @@ dependencies = [ "encode_unicode", "libc", "once_cell", - "unicode-width 0.2.0", + "unicode-width 0.2.1", "windows-sys 0.59.0", ] @@ -1784,9 +1886,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1805,7 +1907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-graphics-types", "foreign-types 0.5.0", "libc", @@ -1818,7 +1920,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "libc", ] @@ -1915,7 +2017,7 @@ checksum = 
"829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.9.1", "crossterm_winapi", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "rustix 0.38.44", "winapi", ] @@ -1982,7 +2084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -1992,7 +2094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2041,7 +2143,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2083,7 +2185,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2092,11 +2194,11 @@ version = "1.0.158" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f44296c8693e9ea226a48f6a122727f77aa9e9e338380cb021accaeeb7ee279" dependencies = [ - "clap 4.5.38", + "clap 4.5.40", "codespan-reporting", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2114,7 +2216,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2186,7 +2288,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2219,7 +2321,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2245,7 +2347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2328,7 +2430,7 @@ dependencies = [ "quote", "sha3", "strum 0.27.1", - "syn 2.0.101", + "syn 2.0.103", "void", ] @@ -2340,7 +2442,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2361,7 +2463,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2392,7 +2494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2415,7 +2517,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2445,7 +2547,7 @@ dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "unicode-xid", ] @@ -2458,7 +2560,7 @@ dependencies = [ "convert_case 0.7.1", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "unicode-xid", ] @@ -2554,7 +2656,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.0", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2604,7 +2706,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2621,13 +2723,13 @@ dependencies = [ [[package]] name = "dlopen2_derive" -version = "0.4.0" +version = 
"0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b99bf03862d7f545ebc28ddd33a665b50865f4dfd84031a393823879bd4c54" +checksum = "788160fb30de9cdd857af31c6a2675904b16ece8fc2737b2c7127ba368c9d0f4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2802,13 +2904,27 @@ dependencies = [ "byteorder", "libc", "log", - "rustls 0.23.27", + "rustls 0.23.28", "serde", "serde_json", "webpki-roots 0.25.4", "winapi", ] +[[package]] +name = "electrum-pool" +version = "0.1.0" +dependencies = [ + "backoff", + "bdk_electrum", + "bitcoin 0.32.6", + "futures", + "once_cell", + "serde_json", + "tokio", + "tracing", +] + [[package]] name = "elliptic-curve" version = "0.13.8" @@ -2830,16 +2946,16 @@ dependencies = [ [[package]] name = "embed-resource" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbc6e0d8e0c03a655b53ca813f0463d2c956bc4db8138dbc89f120b066551e3" +checksum = "0963f530273dc3022ab2bdc3fcd6d488e850256f2284a82b7413cb9481ee85dd" dependencies = [ "cc", "memchr", "rustc_version", "toml", "vswhom", - "winreg 0.52.0", + "winreg 0.55.0", ] [[package]] @@ -2878,7 +2994,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2891,14 +3007,14 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "enumflags2" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2f4b465f5318854c6f8dd686ede6c0a9dc67d4b1ac241cf0eb51521a309147" +checksum = "1027f7680c853e056ebcec683615fb6fbbc07dbaa13b4d5d9442b146ded4ecef" dependencies = [ "enumflags2_derive", "serde", @@ -2906,13 +3022,13 @@ dependencies = [ [[package]] name = "enumflags2_derive" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4caf64a58d7a6d65ab00639b046ff54399a39f5f2554728895ace4b297cd79" +checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2933,12 +3049,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -3038,7 +3154,7 @@ version = "0.10.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ - "atomic 0.6.0", + "atomic 0.6.1", "serde", "toml", "uncased", @@ -3077,9 +3193,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "libz-rs-sys", @@ -3142,7 +3258,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3214,9 +3330,9 @@ dependencies 
= [ [[package]] name = "fslock-guard" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd65ae40b736ed57be8f11668c12ef6689e2f8609b36da22ff8f4a863a954d3" +checksum = "96e83653bb011605793e687636bd1d24f1918f3c410f20e1d9173a37451cd351" dependencies = [ "fslock-arti-fork", "thiserror 2.0.12", @@ -3300,7 +3416,7 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot 0.12.3", + "parking_lot 0.12.4", ] [[package]] @@ -3330,7 +3446,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3350,7 +3466,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", ] @@ -3520,7 +3636,7 @@ dependencies = [ "libc", "log", "rustversion", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -3571,7 +3687,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -3684,7 +3800,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3792,7 +3908,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3860,9 +3976,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -3884,7 +4000,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -3929,15 +4045,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -4015,7 +4125,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "rand 0.8.5", "resolv-conf", "smallvec", @@ -4208,20 +4318,20 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", "hyper 1.6.0", "hyper-util", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", "tower-service", - "webpki-roots 1.0.0", + "webpki-roots 1.0.1", ] [[package]] @@ -4242,22 +4352,28 @@ 
dependencies = [ [[package]] name = "hyper-util" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9f1e950e0d9d1d3c47184416723cf29c0d1f93bd8cccf37e4beb6b44f31710" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", "hyper 1.6.0", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -4490,7 +4606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -4577,6 +4693,16 @@ dependencies = [ "nom", ] +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-docker" version = "0.2.0" @@ -4866,9 +4992,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libgit2-sys" @@ -4894,18 +5020,18 @@ dependencies = [ [[package]] name = "liblzma" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66352d7a8ac12d4877b6e6ea5a9b7650ee094257dc40889955bea5bc5b08c1d0" +checksum = "0791ab7e08ccc8e0ce893f6906eb2703ed8739d8e89b57c0714e71bad09024c8" dependencies = [ "liblzma-sys", ] [[package]] name = "liblzma-sys" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5839bad90c3cc2e0b8c4ed8296b80e86040240f81d46b9c0e9bc8dd51ddd3af1" +checksum = "01b9596486f6d60c3bbe644c0e1be1aa6ccc472ad630fe8927b456973d7cb736" dependencies = [ "cc", "libc", @@ -4971,7 +5097,7 @@ dependencies = [ [[package]] name = "libp2p-community-tor" version = "0.5.0" -source = "git+https://github.com/umgefahren/libp2p-tor?branch=main#8215104570a550cb3909a15c33af203387e88274" +source = "git+https://github.com/umgefahren/libp2p-tor?rev=e6b913e0f1ac1fc90b3ee4dd31b5511140c4a9af#e6b913e0f1ac1fc90b3ee4dd31b5511140c4a9af" dependencies = [ "anyhow", "arti-client", @@ -5014,7 +5140,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project", "quick-protobuf", "rand 0.8.5", @@ -5039,7 +5165,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "smallvec", "tracing", ] @@ -5245,11 +5371,11 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-tls 0.4.1", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "quinn", "rand 0.8.5", "ring 0.17.14", - "rustls 0.23.27", + "rustls 0.23.28", "socket2", "thiserror 1.0.69", "tokio", @@ -5336,7 +5462,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5387,7 +5513,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.14", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-webpki 0.101.7", 
"thiserror 1.0.69", "x509-parser 0.16.0", @@ -5433,7 +5559,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.9.1", "libc", - "redox_syscall 0.5.12", + "redox_syscall 0.5.13", ] [[package]] @@ -5449,9 +5575,9 @@ dependencies = [ [[package]] name = "libz-rs-sys" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6489ca9bd760fe9642d7644e827b0c9add07df89857b0416ee15c1cc1a3b8c5a" +checksum = "172a788537a2221661b480fee8dc5f96c580eb34fa88764d3205dc356c7e4221" dependencies = [ "zlib-rs", ] @@ -5503,9 +5629,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -5536,7 +5662,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -5589,6 +5715,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "md-5" version = "0.10.6" @@ -5601,9 +5733,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memmap2" @@ -5686,9 +5818,9 @@ checksum = "e856fdd13623a2f5f2f54676a4ee49502a96a80ef4a62bcedd23d52427c44d43" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "simd-adler32", @@ -5696,14 +5828,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -5743,7 +5875,7 @@ dependencies = [ "event-listener", "futures-util", "loom", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "portable-atomic", "rustc_version", "smallvec", @@ -5815,6 +5947,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "monero-rpc-pool" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "chrono", + "clap 4.5.40", + "dirs 5.0.1", + "futures", + "monero", + "monero-rpc", + "rand 0.8.5", + "regex", + "reqwest", + "serde", + "serde_json", + "sqlx", + "tokio", + "tokio-test", + 
"tower 0.4.13", + "tower-http 0.5.2", + "tracing", + "tracing-subscriber", + "typeshare", + "url", + "uuid", +] + [[package]] name = "monero-sys" version = "0.1.0" @@ -6184,11 +6345,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] @@ -6210,7 +6371,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6489,9 +6650,9 @@ checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "oneshot-fused-workaround" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2f833c92b3bb159ddee62e27d611e056cd89373b4ba7ba6df8bcd00acdf1b5" +checksum = "eaa302cded54727eedb3a0d246e14146fd2a36f4744e886169457b05ef3254e8" dependencies = [ "futures", ] @@ -6537,7 +6698,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6704,12 +6865,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.11", ] [[package]] @@ -6728,13 +6889,13 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.12", + "redox_syscall 0.5.13", "smallvec", "windows-targets 0.52.6", ] @@ -6788,9 +6949,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", "thiserror 2.0.12", @@ -6799,9 +6960,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" +checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" dependencies = [ "pest", "pest_generator", @@ -6809,24 +6970,23 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" +checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "pest_meta" -version = "2.8.0" +version = "2.8.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" +checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" dependencies = [ - "once_cell", "pest", "sha2 0.10.9", ] @@ -6945,7 +7105,7 @@ dependencies = [ "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6992,7 +7152,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -7047,13 +7207,13 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "plist" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac26e981c03a6e53e0aee43c113e3202f5581d5360dae7bd2c70e800dd0451d" +checksum = "3d77244ce2d584cd84f6a15f86195b8c9b2a0dfbfd817c09e0464244091a58ed" dependencies = [ "base64 0.22.1", "indexmap 2.9.0", - "quick-xml 0.32.0", + "quick-xml", "serde", "time 0.3.41", ] @@ -7073,15 +7233,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 0.38.44", + "rustix 1.0.7", "tracing", "windows-sys 0.59.0", ] @@ -7111,9 +7271,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "postage" @@ -7124,7 +7284,7 @@ dependencies = [ "atomic 0.5.3", "crossbeam-queue", "futures", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project", "static_assertions", "thiserror 1.0.69", @@ -7181,9 +7341,9 @@ dependencies = [ [[package]] name = "priority-queue" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef08705fa1589a1a59aa924ad77d14722cb0cd97b67dd5004ed5f4a4873fce8d" +checksum = "5676d703dda103cbb035b653a9f11448c0a7216c7926bd35fcb5865475d0c970" dependencies = [ "autocfg", "equivalent", @@ -7215,7 +7375,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml_edit 0.22.26", + "toml_edit 0.22.27", ] [[package]] @@ -7265,7 +7425,7 @@ checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa 1.0.15", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "prometheus-client-derive-encode", ] @@ -7277,22 +7437,22 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", 
"bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand 0.9.1", + "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -7382,15 +7542,6 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "quick-xml" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3a6e5838b60e0e8fa7a43f22ade549a37d61f8bdbe636d0d7816191de969c2" -dependencies = [ - "memchr", -] - [[package]] name = "quick-xml" version = "0.37.5" @@ -7413,7 +7564,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.27", + "rustls 0.23.28", "socket2", "thiserror 2.0.12", "tokio", @@ -7433,7 +7584,7 @@ dependencies = [ "rand 0.9.1", "ring 0.17.14", "rustc-hash", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "slab", "thiserror 2.0.12", @@ -7444,9 +7595,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", @@ -7467,9 +7618,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -7589,11 +7740,11 @@ dependencies = [ [[package]] name = "rand_xorshift" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.6.4", + "rand_core 0.9.3", ] [[package]] @@ -7654,9 +7805,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ "bitflags 2.9.1", ] @@ -7683,6 +7834,26 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "ref-cast" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.103", +] + [[package]] name = "regex" version = "1.11.1" @@ -7738,9 +7909,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" dependencies = [ "base64 0.22.1", "bytes", @@ -7755,38 +7926,33 @@ dependencies = [ "hyper-rustls", "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", 
"quinn", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs 0.8.1", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls 0.26.2", - "tokio-socks", "tokio-util", "tower 0.5.2", + "tower-http 0.6.6", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.11", - "windows-registry", + "webpki-roots 1.0.1", ] [[package]] @@ -7797,9 +7963,9 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "retry-error" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd5db9deeb62617010191df02a0887c96cc15d91514d32c208d6b8f76b9f20e" +checksum = "322bb522344455926af0c63db415ef6dd70599ad3f69e847ab95dc4d5537d728" [[package]] name = "rfc6979" @@ -7950,9 +8116,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.37.1" +version = "1.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50" +checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d" dependencies = [ "arrayvec", "borsh", @@ -7971,14 +8137,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6268b74858287e1a062271b988a0c534bf85bbeb567fe09331bf40ed78113d5" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -8075,9 +8241,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "log", "once_cell", @@ -8112,15 +8278,6 @@ dependencies = [ "security-framework 3.2.0", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -8189,9 +8346,9 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safelog" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba05ad561772e139a16a49088b2d332f659ef49953d56e09cf0f726784e5fdd" +checksum = "f6ff608a21ec1d97035331c8944f01e3b57a083393cbb77b5a86b344aa02c827" dependencies = [ "derive_more 2.0.1", "educe", @@ -8251,6 +8408,18 @@ dependencies = [ "uuid", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schemars_derive" version = "0.8.22" @@ -8260,7 +8429,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] 
@@ -8453,7 +8622,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -8564,7 +8733,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8575,7 +8744,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8599,6 +8768,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +dependencies = [ + "itoa 1.0.15", + "serde", +] + [[package]] name = "serde_repr" version = "0.1.20" @@ -8607,14 +8786,14 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -8643,19 +8822,20 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", "indexmap 2.9.0", + "schemars 0.9.0", "serde", "serde_derive", "serde_json", - "serde_with_macros 3.12.0", + "serde_with_macros 3.13.0", "time 0.3.41", ] @@ -8673,14 +8853,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8692,7 +8872,7 @@ dependencies = [ "futures", "log", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "scc", "serial_test_derive", ] @@ -8705,7 +8885,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8809,12 +8989,13 @@ dependencies = [ [[package]] name = "shared_child" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e297bd52991bbe0686c086957bee142f13df85d1e79b0b21630a99d374ae9dc" +checksum = "c2778001df1384cf20b6dc5a5a90f48da35539885edaaefd887f8d744e939c0b" dependencies = [ "libc", - "windows-sys 0.59.0", + "sigchld", + "windows-sys 0.60.2", ] [[package]] @@ -8840,6 +9021,17 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "sigchld" 
+version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1219ef50fc0fdb04fcc243e6aa27f855553434ffafe4fa26554efb78b5b4bf89" +dependencies = [ + "libc", + "os_pipe", + "signal-hook", +] + [[package]] name = "sigma_fun" version = "0.7.0" @@ -8854,6 +9046,16 @@ dependencies = [ "serde", ] +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + [[package]] name = "signal-hook-registry" version = "1.4.5" @@ -8911,12 +9113,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "sled" @@ -8946,9 +9145,9 @@ dependencies = [ [[package]] name = "slotmap-careful" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186e34c0f5a636bb33bf53ca356933c525a7758ddddb8d93f98eff866db966d5" +checksum = "70f1f2df70e13b0bad4cd34fdf0c6091bf3cbe3e24c11d4b62ebbd523c7be716" dependencies = [ "paste", "serde", @@ -8959,9 +9158,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "serde", ] @@ -8985,9 +9184,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -9009,7 +9208,7 @@ dependencies = [ "objc2-foundation 0.2.2", "objc2-quartz-core 0.2.2", "raw-window-handle", - "redox_syscall 0.5.12", + "redox_syscall 0.5.13", "wasm-bindgen", "web-sys", "windows-sys 0.59.0", @@ -9087,6 +9286,7 @@ checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ "base64 0.22.1", "bytes", + "chrono", "crc", "crossbeam-queue", "either", @@ -9095,14 +9295,14 @@ dependencies = [ "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "hashlink 0.10.0", "indexmap 2.9.0", "log", "memchr", "once_cell", "percent-encoding", - "rustls 0.23.27", + "rustls 0.23.28", "serde", "serde_json", "sha2 0.10.9", @@ -9125,7 +9325,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9148,7 +9348,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.101", + "syn 2.0.103", "tokio", "url", ] @@ -9164,6 +9364,7 @@ dependencies = [ "bitflags 2.9.1", "byteorder", "bytes", + "chrono", "crc", "digest 0.10.7", "dotenvy", @@ -9205,6 +9406,7 @@ dependencies = [ "base64 0.22.1", "bitflags 2.9.1", "byteorder", + "chrono", "crc", "dotenvy", "etcetera", @@ -9239,6 +9441,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" dependencies = [ "atoi", + "chrono", "flume", "futures-channel", "futures-core", @@ -9316,7 +9519,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "phf_shared 0.11.3", "precomputed-hash", "serde", @@ -9415,7 +9618,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9428,7 +9631,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9445,7 +9648,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "swap" -version = "2.2.0-beta" +version = "2.3.0-beta.1" dependencies = [ "anyhow", "arti-client", @@ -9473,6 +9676,7 @@ dependencies = [ "directories-next", "ecdsa_fun", "ed25519-dalek 1.0.1", + "electrum-pool", "futures", "get-port", "hex", @@ -9483,6 +9687,7 @@ dependencies = [ "monero", "monero-harness", "monero-rpc", + "monero-rpc-pool", "monero-sys", "once_cell", "pem", @@ -9494,7 +9699,7 @@ dependencies = [ "reqwest", "rust_decimal", "rust_decimal_macros", - "rustls 0.23.27", + "rustls 0.23.28", "semver", "serde", "serde_cbor", @@ -9518,7 +9723,7 @@ dependencies = [ "toml", "tor-rtcompat", "tower 0.4.13", - "tower-http", + "tower-http 0.3.5", "tracing", "tracing-appender", "tracing-subscriber", @@ -9555,9 +9760,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", @@ -9593,7 +9798,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9657,7 +9862,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e59c1f38e657351a2e822eadf40d6a2ad4627b9c25557bc1180ec1b3295ef82" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-graphics", "crossbeam-channel", "dispatch", @@ -9677,13 +9882,13 @@ dependencies = [ "objc2-app-kit", "objc2-foundation 0.3.1", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "raw-window-handle", "scopeguard", "tao-macros", "unicode-segmentation", "url", - "windows 0.61.1", + "windows 0.61.3", "windows-core 0.61.2", "windows-version", "x11-dl", @@ -9697,7 +9902,7 @@ checksum = "f4e16beb8b2ac17db28eab8bca40e62dbfbb34c0fcdc6d9826b11b7b5d047dfd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9771,7 +9976,7 @@ dependencies = [ "webkit2gtk", "webview2-com", "window-vibrancy", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -9786,7 +9991,7 @@ dependencies = [ "glob", "heck 0.5.0", "json-patch", - "schemars", + "schemars 0.8.22", "semver", "serde", "serde_json", @@ -9814,7 +10019,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.9", - "syn 2.0.101", + "syn 2.0.103", "tauri-utils", "thiserror 2.0.12", "time 0.3.41", @@ -9832,7 +10037,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "tauri-codegen", "tauri-utils", ] @@ -9846,7 +10051,7 @@ dependencies = [ "anyhow", 
"glob", "plist", - "schemars", + "schemars 0.8.22", "serde", "serde_json", "tauri-utils", @@ -9856,11 +10061,11 @@ dependencies = [ [[package]] name = "tauri-plugin-cli" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5458ae16eac81bdbe8d9da2a9f3e01e8cdedbc381cc1727c01127542c8a61c5" +checksum = "096abcf7c913b19f9f26f6a3a1d48b84e46b0274d6b6d164dcaa753f0d7a5700" dependencies = [ - "clap 4.5.38", + "clap 4.5.40", "log", "serde", "serde_json", @@ -9871,9 +10076,9 @@ dependencies = [ [[package]] name = "tauri-plugin-clipboard-manager" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab4cb42fdf745229b768802e9180920a4be63122cf87ed1c879103f7609d98e" +checksum = "11fa4f17a6d380490597f7632aca40b65d379cb374cb92bd9d80f333309b7fd7" dependencies = [ "arboard", "log", @@ -9912,7 +10117,7 @@ dependencies = [ "dunce", "glob", "percent-encoding", - "schemars", + "schemars 0.8.22", "serde", "serde_json", "serde_repr", @@ -9926,31 +10131,31 @@ dependencies = [ [[package]] name = "tauri-plugin-opener" -version = "2.2.7" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66644b71a31ec1a8a52c4a16575edd28cf763c87cf4a7da24c884122b5c77097" +checksum = "2c8983f50326d34437142a6d560b5c3426e91324297519b6eeb32ed0a1d1e0f2" dependencies = [ "dunce", "glob", "objc2-app-kit", "objc2-foundation 0.3.1", "open", - "schemars", + "schemars 0.8.22", "serde", "serde_json", "tauri", "tauri-plugin", "thiserror 2.0.12", "url", - "windows 0.61.1", + "windows 0.61.3", "zbus", ] [[package]] name = "tauri-plugin-process" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57da5888533e802b6206b9685091f8714aa1f5266dc80051a82388449558b773" +checksum = "4d870adae9408be585abd56eade2b5def2660339512b7c8de5ddf21238b67a34" dependencies = [ "tauri", "tauri-plugin", @@ -9958,16 +10163,16 @@ dependencies = [ [[package]] name = "tauri-plugin-shell" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d5eb3368b959937ad2aeaf6ef9a8f5d11e01ffe03629d3530707bbcb27ff5d" +checksum = "d34e525a448b80ad5d906fcbd93838ac3ba37985b29ac699a045b5da9b0a1a22" dependencies = [ "encoding_rs", "log", "open", "os_pipe", "regex", - "schemars", + "schemars 0.8.22", "serde", "serde_json", "shared_child", @@ -9994,9 +10199,9 @@ dependencies = [ [[package]] name = "tauri-plugin-store" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c0c08fae6995909f5e9a0da6038273b750221319f2c0f3b526d6de1cde21505" +checksum = "ada7e7aeea472dec9b8d09d25301e59fe3e8330dc11dbcf903d6388126cb3722" dependencies = [ "dunce", "serde", @@ -10010,9 +10215,9 @@ dependencies = [ [[package]] name = "tauri-plugin-updater" -version = "2.7.1" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f05c38afd77a4b8fd98e8fb6f1cdbb5fbb8a46ba181eb2758b05321e3c6209" +checksum = "b068673e9037376ca9906f99b00ae5f9e6eb62f456f900b4435c38d57cfa73e4" dependencies = [ "base64 0.22.1", "dirs 6.0.0", @@ -10036,8 +10241,8 @@ dependencies = [ "time 0.3.41", "tokio", "url", - "windows-sys 0.59.0", - "zip 2.4.2", + "windows-sys 0.60.2", + "zip 4.1.0", ] [[package]] @@ -10059,7 +10264,7 @@ dependencies = [ "tauri-utils", "thiserror 2.0.12", "url", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -10085,7 
+10290,7 @@ dependencies = [ "url", "webkit2gtk", "webview2-com", - "windows 0.61.1", + "windows 0.61.3", "wry", ] @@ -10113,12 +10318,12 @@ dependencies = [ "proc-macro2", "quote", "regex", - "schemars", + "schemars 0.8.22", "semver", "serde", "serde-untagged", "serde_json", - "serde_with 3.12.0", + "serde_with 3.13.0", "swift-rs", "thiserror 2.0.12", "toml", @@ -10230,7 +10435,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10241,17 +10446,16 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -10345,15 +10549,15 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" dependencies = [ "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project-lite", "signal-hook-registry", "socket2", @@ -10370,7 +10574,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10400,19 +10604,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", - "tokio", -] - -[[package]] -name = "tokio-socks" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" -dependencies = [ - "either", - "futures-util", - "thiserror 1.0.69", + "rustls 0.23.28", "tokio", ] @@ -10442,6 +10634,19 @@ dependencies = [ "xattr", ] +[[package]] +name = "tokio-test" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + [[package]] name = "tokio-tungstenite" version = "0.15.0" @@ -10475,21 +10680,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.26", + "toml_edit 0.22.27", ] [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] @@ -10518,23 
+10723,23 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", "toml_write", - "winnow 0.7.10", + "winnow 0.7.11", ] [[package]] name = "toml_write" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tor-async-utils" @@ -10794,7 +10999,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f4b1eec6c4cd0dbb682982ef3db87d0da030bff5d7903604529e8562eaacb45" dependencies = [ - "async-compression 0.4.23", + "async-compression 0.4.25", "base64ct", "derive_more 1.0.0", "futures", @@ -11040,7 +11245,7 @@ dependencies = [ "retry-error", "safelog", "serde", - "serde_with 3.12.0", + "serde_with 3.13.0", "strum 0.26.3", "thiserror 2.0.12", "tor-async-utils", @@ -11140,7 +11345,7 @@ dependencies = [ "itertools 0.13.0", "safelog", "serde", - "serde_with 3.12.0", + "serde_with 3.13.0", "strum 0.26.3", "thiserror 2.0.12", "tor-basic-utils", @@ -11283,7 +11488,7 @@ dependencies = [ "phf 0.11.3", "rand 0.8.5", "serde", - "serde_with 3.12.0", + "serde_with 3.13.0", "signature 2.2.0", "smallvec", "subtle", @@ -11528,6 +11733,7 @@ dependencies = [ "tokio", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -11546,7 +11752,7 @@ dependencies = [ "http-body 0.4.6", "http-range-header", "httpdate", - "iri-string", + "iri-string 0.4.1", "mime", "mime_guess", "percent-encoding", @@ -11560,6 +11766,40 @@ dependencies = [ "uuid", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "iri-string 0.7.8", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -11598,20 +11838,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -11678,7 
+11918,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11781,7 +12021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a615d6c2764852a2e88a4f16e9ce1ea49bb776b5872956309e170d63a042a34f" dependencies = [ "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11916,9 +12156,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "unicode-xid" @@ -11959,10 +12199,11 @@ dependencies = [ [[package]] name = "unstoppableswap-gui-rs" -version = "2.2.0-beta" +version = "2.3.0-beta.1" dependencies = [ "anyhow", - "rustls 0.23.27", + "monero-rpc-pool", + "rustls 0.23.28", "serde", "serde_json", "swap", @@ -11980,7 +12221,7 @@ dependencies = [ "tauri-plugin-updater", "tracing", "uuid", - "zip 4.0.0", + "zip 4.1.0", ] [[package]] @@ -12112,7 +12353,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12183,9 +12424,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -12208,7 +12449,7 @@ version = "0.12.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1fbb4ef9bbca0c1170e0b00dd28abc9e3b68669821600cad1caaed606583c6d" dependencies = [ - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] @@ -12233,7 +12474,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "wasm-bindgen-shared", ] @@ -12268,7 +12509,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -12352,7 +12593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "896fdafd5d28145fce7958917d69f2fd44469b1d4e861cb5961bcbeebc6d1484" dependencies = [ "proc-macro2", - "quick-xml 0.37.5", + "quick-xml", "quote", ] @@ -12485,14 +12726,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.0", + "webpki-roots 1.0.1", ] [[package]] name = "webpki-roots" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] @@ -12505,7 +12746,7 @@ checksum = "b542b5cfbd9618c46c2784e4d41ba218c336ac70d44c55e47b251033e7d85601" dependencies = [ 
"webview2-com-macros", "webview2-com-sys", - "windows 0.61.1", + "windows 0.61.3", "windows-core 0.61.2", "windows-implement 0.60.0", "windows-interface 0.59.1", @@ -12519,7 +12760,7 @@ checksum = "1d228f15bba3b9d56dde8bddbee66fa24545bd17b48d5128ccf4a8742b18e431" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12529,7 +12770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae2d11c4a686e4409659d7891791254cf9286d3cfe0eef54df1523533d22295" dependencies = [ "thiserror 2.0.12", - "windows 0.61.1", + "windows 0.61.3", "windows-core 0.61.2", ] @@ -12545,7 +12786,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" dependencies = [ - "redox_syscall 0.5.12", + "redox_syscall 0.5.13", "wasite", ] @@ -12623,9 +12864,9 @@ dependencies = [ [[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core 0.61.2", @@ -12675,7 +12916,7 @@ dependencies = [ "windows-interface 0.59.1", "windows-link", "windows-result 0.3.4", - "windows-strings 0.4.2", + "windows-strings", ] [[package]] @@ -12697,7 +12938,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12708,7 +12949,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12719,7 +12960,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12730,14 +12971,14 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -12751,13 +12992,13 @@ dependencies = [ [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" dependencies = [ + "windows-link", "windows-result 0.3.4", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] @@ -12778,15 +13019,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.4.2" @@ -12832,6 +13064,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -12880,9 +13121,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -13103,9 +13344,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -13122,12 +13363,12 @@ dependencies = [ [[package]] name = "winreg" -version = "0.52.0" +version = "0.55.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +checksum = "cb5a765337c50e9ec252c2069be9bf91c7df47afb103b642ba3a53bf8101be97" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -13202,7 +13443,7 @@ dependencies = [ "webkit2gtk", "webkit2gtk-sys", "webview2-com", - "windows 0.61.1", + "windows 0.61.3", "windows-core 0.61.2", "windows-version", "x11-dl", @@ -13351,7 +13592,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project", "rand 0.8.5", "static_assertions", @@ -13366,7 +13607,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project", "rand 0.9.1", "static_assertions", @@ -13402,7 +13643,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "synstructure 0.13.2", ] @@ -13434,7 +13675,7 @@ dependencies = [ "tracing", "uds_windows", "windows-sys 0.59.0", - "winnow 0.7.10", + "winnow 0.7.11", "zbus_macros", "zbus_names", "zvariant", @@ -13449,7 +13690,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "zbus_names", "zvariant", "zvariant_utils", @@ -13463,28 +13704,28 @@ checksum = "7be68e64bf6ce8db94f63e72f0c7eb9a60d733f7e0499e628dfab0f84d6bcb97" dependencies = [ "serde", "static_assertions", - "winnow 0.7.10", + "winnow 0.7.11", "zvariant", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13504,7 +13745,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "synstructure 0.13.2", ] @@ -13525,7 +13766,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13558,7 +13799,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13577,24 +13818,9 @@ dependencies = [ [[package]] name = "zip" -version = "2.4.2" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabe6324e908f85a1c52063ce7aa26b68dcb7eb6dbc83a2d148403c9bc3eba50" -dependencies = [ - "arbitrary", - "crc32fast", - "crossbeam-utils", - "displaydoc", - "indexmap 2.9.0", - "memchr", - "thiserror 2.0.12", -] - -[[package]] -name = "zip" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "153a6fff49d264c4babdcfa6b4d534747f520e56e8f0f384f3b808c4b64cc1fd" +checksum = "af7dcdb4229c0e79c2531a24de7726a0e980417a74fb4d030a35f535665439a0" dependencies = [ "aes", "arbitrary", @@ -13618,9 +13844,9 @@ dependencies = [ [[package]] name = "zlib-rs" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868b928d7949e09af2f6086dfc1e01936064cc7a819253bce650d4e2a2d63ba8" +checksum = "626bd9fa9734751fc50d6060752170984d7053f5a39061f524cda68023d4db8a" [[package]] name = "zopfli" @@ -13672,7 +13898,7 @@ dependencies = [ "enumflags2", "serde", "url", - "winnow 0.7.10", + "winnow 0.7.11", "zvariant_derive", "zvariant_utils", ] @@ -13686,7 +13912,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "zvariant_utils", ] @@ -13700,6 +13926,6 @@ dependencies = [ "quote", "serde", "static_assertions", - "syn 2.0.101", - "winnow 0.7.10", + "syn 2.0.103", + "winnow 0.7.11", ] diff --git a/Cargo.toml b/Cargo.toml index af23192b..26fad62d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [workspace] resolver = "2" -members = ["monero-rpc", "monero-sys", "src-tauri", "swap"] +members = ["monero-rpc", "monero-rpc-pool", "monero-sys", "src-tauri", "swap", "electrum-pool"] [profile.release] opt-level = 0 diff --git a/Dockerfile b/Dockerfile index 9ee784a8..28f23abc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,18 +36,23 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# Install Rust 1.82 -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.82.0 +# Install Rust 1.85 +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.85.0 ENV PATH="/root/.cargo/bin:${PATH}" COPY . . -# Update submodules recursively -# Force update to handle any local changes in submodules -RUN git submodule sync --recursive && git submodule update --init --recursive --force +# Check that submodules are present (they should be initialized before building) +RUN if [ ! -f "monero-sys/monero/CMakeLists.txt" ]; then \ + echo "ERROR: Submodules not initialized. 
Run 'git submodule update --init --recursive' before building Docker image."; \ + exit 1; \ + fi WORKDIR /build/swap +# Act as if we are in a GitHub Actions environment +ENV DOCKER_BUILD=true + RUN cargo build -vv --bin=asb FROM ubuntu:24.04 diff --git a/dev_scripts/bump-version.sh b/dev_scripts/bump-version.sh new file mode 100755 index 00000000..cc4088b2 --- /dev/null +++ b/dev_scripts/bump-version.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -eu + +if [ "$#" -ne 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +VERSION=$1 +TODAY=$(date +%Y-%m-%d) +echo "Bumping version to $VERSION" + +# Using sed and assuming GNU sed syntax as this is for the github workflow. + +# Update version in tauri.conf.json +sed -i 's/"version": "[^"]*"/"version": "'"$VERSION"'"/' src-tauri/tauri.conf.json + +# Update version in Cargo.toml files +sed -i -E 's/^version = "[0-9]+\.[0-9]+\.[0-9]+"/version = "'"$VERSION"'"/' swap/Cargo.toml src-tauri/Cargo.toml + +# Update changelog +sed -i "s/^## \\[Unreleased\\]/## [$VERSION] - $TODAY/" CHANGELOG.md +# Add a new [Unreleased] section at the top +sed -i '3i## [Unreleased]\n' CHANGELOG.md + +echo "Updated all files to version $VERSION." \ No newline at end of file diff --git a/electrum-pool/Cargo.toml b/electrum-pool/Cargo.toml new file mode 100644 index 00000000..00560789 --- /dev/null +++ b/electrum-pool/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "electrum-pool" +version = "0.1.0" +edition = "2021" +authors = ["UnstoppableSwap Team "] + +[dependencies] +backoff = { version = "0.4", features = ["tokio"] } +bdk_electrum = { version = "0.19", default-features = false, features = ["use-rustls-ring"] } +bitcoin = { version = "0.32", features = ["rand", "serde"] } +futures = { version = "0.3", default-features = false, features = ["std"] } +once_cell = "1.19" +tokio = { version = "1", features = ["rt-multi-thread", "time", "macros", "sync"] } +tracing = { version = "0.1", features = ["attributes"] } + +[dev-dependencies] +serde_json = "1" diff --git a/swap/src/bitcoin/electrum_balancer.rs b/electrum-pool/src/lib.rs similarity index 99% rename from swap/src/bitcoin/electrum_balancer.rs rename to electrum-pool/src/lib.rs index dba73b7a..94f2d62b 100644 --- a/swap/src/bitcoin/electrum_balancer.rs +++ b/electrum-pool/src/lib.rs @@ -197,7 +197,7 @@ where &self, kind: &str, f: F, - ) -> Result + ) -> Result where F: Fn(&C) -> Result + Send + Sync + Clone + 'static, T: Send + 'static, diff --git a/justfile b/justfile index ed27ced4..7c30e612 100644 --- a/justfile +++ b/justfile @@ -100,4 +100,9 @@ docker-prune-network: # Install dependencies required for building monero-sys prepare_mac_os_brew_dependencies: - cd dev_scripts && chmod +x ./brew_dependencies_install.sh && ./brew_dependencies_install.sh \ No newline at end of file + cd dev_scripts && chmod +x ./brew_dependencies_install.sh && ./brew_dependencies_install.sh + +# Takes a crate (e.g monero-rpc-pool) and uses code2prompt to copy to clipboard +# E.g code2prompt . --exclude "*.lock" --exclude ".sqlx/*" --exclude "target" +code2prompt_single_crate crate: + cd {{crate}} && code2prompt . 
--exclude "*.lock" --exclude ".sqlx/*" --exclude "target" \ No newline at end of file diff --git a/monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json b/monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json new file mode 100644 index 00000000..d208a702 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json @@ -0,0 +1,48 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n id as \"id!: i64\",\n scheme,\n host,\n port,\n full_url,\n network as \"network!: String\",\n first_seen_at\n FROM monero_nodes \n ORDER BY id\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network!: String", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [false, false, false, false, false, false, false] + }, + "hash": "3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23" +} diff --git a/monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json b/monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json new file mode 100644 index 00000000..7381e04d --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as 
success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC,\n stats.avg_latency_ms ASC\n LIMIT ?\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 3 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6" +} diff --git a/monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json b/monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json new file mode 100644 index 00000000..ab6db76f --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms)\n VALUES (?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 4 + }, + "nullable": [] + }, + 
"hash": "56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d" +} diff --git a/monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json b/monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json new file mode 100644 index 00000000..d7a0eb31 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n UPDATE monero_nodes \n SET network = ?, updated_at = ?\n WHERE full_url = ?\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c" +} diff --git a/monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json b/monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json new file mode 100644 index 00000000..3e52d837 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json @@ -0,0 +1,18 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT(full_url) DO UPDATE SET\n network = excluded.network,\n updated_at = excluded.updated_at\n RETURNING id\n ", + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 7 + }, + "nullable": [false] + }, + "hash": "5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821" +} diff --git a/monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json b/monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json new file mode 100644 index 00000000..1d10eb38 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM 
health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 3 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec" +} diff --git a/monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json b/monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json new file mode 100644 index 00000000..825adc0c --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n 
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? 
AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 2 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d" +} diff --git a/monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json b/monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json new file mode 100644 index 00000000..be9ea1c3 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(1 AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as 
min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b" +} diff --git a/monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json b/monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json new file mode 100644 index 00000000..9f0cd24a --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: 
f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? 
AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 3 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14" +} diff --git a/monero-rpc-pool/.sqlx/query-d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9.json b/monero-rpc-pool/.sqlx/query-d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9.json new file mode 100644 index 00000000..b7cd990e --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9.json @@ -0,0 +1,23 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"successful!: i64\",\n CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"unsuccessful!: i64\"\n FROM (\n SELECT hc.was_successful\n FROM health_checks hc\n JOIN monero_nodes n ON hc.node_id = n.id\n WHERE n.network = ?\n ORDER BY hc.timestamp DESC\n LIMIT 100\n ) hc\n ", + "describe": { + "columns": [ + { + "name": "successful!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "unsuccessful!: i64", + "ordinal": 1, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [true, true] + }, + "hash": "d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9" +} diff --git a/monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json b/monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json new file mode 100644 index 00000000..af454eda --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json @@ -0,0 +1,18 @@ +{ + "db_name": "SQLite", + "query": "SELECT id FROM monero_nodes WHERE full_url = ?", + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + 
"type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [true] + }, + "hash": "e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781" +} diff --git a/monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json b/monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json new file mode 100644 index 00000000..199c59c9 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json @@ -0,0 +1,116 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? 
AND stats.success_count > 0\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ", + "describe": { + "columns": [ + { + "name": "id!: i64", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "scheme", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "host", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "port", + "ordinal": 3, + "type_info": "Integer" + }, + { + "name": "full_url", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "network", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "first_seen_at", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "success_count!: i64", + "ordinal": 7, + "type_info": "Null" + }, + { + "name": "failure_count!: i64", + "ordinal": 8, + "type_info": "Null" + }, + { + "name": "last_success?: String", + "ordinal": 9, + "type_info": "Null" + }, + { + "name": "last_failure?: String", + "ordinal": 10, + "type_info": "Null" + }, + { + "name": "last_checked?: String", + "ordinal": 11, + "type_info": "Null" + }, + { + "name": "is_reliable!: i64", + "ordinal": 12, + "type_info": "Null" + }, + { + "name": "avg_latency_ms?: f64", + "ordinal": 13, + "type_info": "Null" + }, + { + "name": "min_latency_ms?: f64", + "ordinal": 14, + "type_info": "Null" + }, + { + "name": "max_latency_ms?: f64", + "ordinal": 15, + "type_info": "Null" + }, + { + "name": "last_latency_ms?: f64", + "ordinal": 16, + "type_info": "Float" + } + ], + "parameters": { + "Right": 2 + }, + "nullable": [ + true, + false, + false, + false, + false, + false, + false, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true + ] + }, + "hash": "fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864" +} diff --git a/monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json b/monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json new file mode 100644 index 00000000..0ab81591 --- /dev/null +++ b/monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json @@ -0,0 +1,28 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT \n COUNT(*) as total,\n CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as \"reachable!: i64\",\n CAST((SELECT COUNT(*) FROM (\n SELECT n2.id\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? 
AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n )) AS INTEGER) as \"reliable!: i64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ?\n ", + "describe": { + "columns": [ + { + "name": "total", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "reachable!: i64", + "ordinal": 1, + "type_info": "Integer" + }, + { + "name": "reliable!: i64", + "ordinal": 2, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 2 + }, + "nullable": [false, true, false] + }, + "hash": "ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8" +} diff --git a/monero-rpc-pool/Cargo.toml b/monero-rpc-pool/Cargo.toml new file mode 100644 index 00000000..aefd139b --- /dev/null +++ b/monero-rpc-pool/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "monero-rpc-pool" +version = "0.1.0" +authors = ["UnstoppableSwap Team "] +edition = "2021" + +[[bin]] +name = "monero-rpc-pool" +path = "src/main.rs" + +[dependencies] +anyhow = "1" +axum = { version = "0.7", features = ["macros"] } +chrono = { version = "0.4", features = ["serde"] } +clap = { version = "4.0", features = ["derive"] } +dirs = "5.0" +futures = "0.3" +monero = { version = "0.12", features = ["serde_support"] } +monero-rpc = { path = "../monero-rpc" } +rand = "0.8" +regex = "1.0" +reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "migrate"] } +tokio = { version = "1", features = ["full"] } +tower = "0.4" +tower-http = { version = "0.5", features = ["cors"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +typeshare = "1.0.3" +url = "2.0" +uuid = { version = "1.0", features = ["v4"] } + +[dev-dependencies] +tokio-test = "0.4" diff --git a/monero-rpc-pool/migrations/20250618212026_initial_schema.sql b/monero-rpc-pool/migrations/20250618212026_initial_schema.sql new file mode 100644 index 00000000..6ec14d9b --- /dev/null +++ b/monero-rpc-pool/migrations/20250618212026_initial_schema.sql @@ -0,0 +1,30 @@ +-- Add migration script here + +-- Create monero_nodes table - stores node identity and current state +CREATE TABLE IF NOT EXISTS monero_nodes ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + scheme TEXT NOT NULL, + host TEXT NOT NULL, + port INTEGER NOT NULL, + full_url TEXT NOT NULL UNIQUE, + network TEXT NOT NULL, -- mainnet/stagenet/testnet - always known at insertion time + first_seen_at TEXT NOT NULL, + created_at TEXT NOT NULL DEFAULT (datetime('now')), + updated_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +-- Create health_checks table - stores raw event data +CREATE TABLE IF NOT EXISTS health_checks ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + node_id INTEGER NOT NULL, + timestamp TEXT NOT NULL, + was_successful BOOLEAN NOT NULL, + latency_ms REAL, + FOREIGN KEY (node_id) 
REFERENCES monero_nodes(id) ON DELETE CASCADE +); + +-- Create indexes for performance +CREATE INDEX IF NOT EXISTS idx_nodes_full_url ON monero_nodes(full_url); +CREATE INDEX IF NOT EXISTS idx_nodes_network ON monero_nodes(network); +CREATE INDEX IF NOT EXISTS idx_health_checks_node_id ON health_checks(node_id); +CREATE INDEX IF NOT EXISTS idx_health_checks_timestamp ON health_checks(timestamp); diff --git a/monero-rpc-pool/migrations/20250618212059_insert_default_nodes.sql b/monero-rpc-pool/migrations/20250618212059_insert_default_nodes.sql new file mode 100644 index 00000000..8f4d065f --- /dev/null +++ b/monero-rpc-pool/migrations/20250618212059_insert_default_nodes.sql @@ -0,0 +1,31 @@ +-- Insert default mainnet bootstrap nodes +INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES + ('http', 'node.supportxmr.com', 18081, 'http://node.supportxmr.com:18081', 'mainnet', datetime('now')), + ('http', 'nodes.hashvault.pro', 18081, 'http://nodes.hashvault.pro:18081', 'mainnet', datetime('now')), + ('http', 'xmr-node.cakewallet.com', 18081, 'http://xmr-node.cakewallet.com:18081', 'mainnet', datetime('now')), + ('http', 'node.xmr.to', 18081, 'http://node.xmr.to:18081', 'mainnet', datetime('now')), + ('https', 'opennode.xmr-tw.org', 18089, 'https://opennode.xmr-tw.org:18089', 'mainnet', datetime('now')), + ('https', 'monero.stackwallet.com', 18081, 'https://monero.stackwallet.com:18081', 'mainnet', datetime('now')), + ('https', 'node.sethforprivacy.com', 18089, 'https://node.sethforprivacy.com:18089', 'mainnet', datetime('now')), + ('https', 'node.monero.net', 18081, 'https://node.monero.net:18081', 'mainnet', datetime('now')), + ('https', 'moneronode.org', 18081, 'https://moneronode.org:18081', 'mainnet', datetime('now')), + ('http', 'node.majesticbank.at', 18089, 'http://node.majesticbank.at:18089', 'mainnet', datetime('now')), + ('http', 'node.majesticbank.is', 18089, 'http://node.majesticbank.is:18089', 'mainnet', datetime('now')), + ('https', 'xmr.cryptostorm.is', 18081, 'https://xmr.cryptostorm.is:18081', 'mainnet', datetime('now')), + ('https', 'xmr.privex.io', 18081, 'https://xmr.privex.io:18081', 'mainnet', datetime('now')), + ('https', 'nodes.hashvault.pro', 18081, 'https://nodes.hashvault.pro:18081', 'mainnet', datetime('now')), + ('http', 'hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion', 18081, 'http://hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion:18081', 'mainnet', datetime('now')), + ('https', 'plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion', 18089, 'https://plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion:18089', 'mainnet', datetime('now')), + ('http', 'moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion', 18089, 'http://moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion:18089', 'mainnet', datetime('now')), + ('https', 'yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion', 18081, 'https://yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion:18081', 'mainnet', datetime('now')); + +-- Insert default stagenet bootstrap nodes +INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES + ('http', 'stagenet.xmr-tw.org', 38081, 'http://stagenet.xmr-tw.org:38081', 'stagenet', datetime('now')), + ('https', 'node.monerodevs.org', 38089, 'https://node.monerodevs.org:38089', 'stagenet', datetime('now')), + ('https', 'node2.monerodevs.org', 38089, 'https://node2.monerodevs.org:38089', 'stagenet', 
datetime('now')), + ('https', 'node3.monerodevs.org', 38089, 'https://node3.monerodevs.org:38089', 'stagenet', datetime('now')), + ('https', 'xmr-lux.boldsuck.org', 38081, 'https://xmr-lux.boldsuck.org:38081', 'stagenet', datetime('now')), + ('http', 'plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion', 38089, 'http://plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion:38089', 'stagenet', datetime('now')), + ('http', 'plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion', 38089, 'http://plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion:38089', 'stagenet', datetime('now')), + ('https', 'stagenet.xmr.ditatompel.com', 38081, 'https://stagenet.xmr.ditatompel.com:38081', 'stagenet', datetime('now')); \ No newline at end of file diff --git a/monero-rpc-pool/regenerate_sqlx_cache.sh b/monero-rpc-pool/regenerate_sqlx_cache.sh new file mode 100755 index 00000000..be4dd69f --- /dev/null +++ b/monero-rpc-pool/regenerate_sqlx_cache.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# regenerate_sqlx_cache.sh +# +# Script to regenerate SQLx query cache for monero-rpc-pool +# +# This script: +# 1. Creates a temporary SQLite database in a temp directory +# 2. Runs all database migrations to set up the schema +# 3. Regenerates the SQLx query cache (.sqlx directory) +# 4. Cleans up temporary files automatically +# +# Usage: +# ./regenerate_sqlx_cache.sh +# +# Requirements: +# - cargo and sqlx-cli must be installed +# - Must be run from the monero-rpc-pool directory +# - migrations/ directory must exist with valid migration files +# +# The generated .sqlx directory should be committed to version control +# to enable offline compilation without requiring DATABASE_URL. + +set -e # Exit on any error + +echo "🔄 Regenerating SQLx query cache..." + +# Create a temporary directory for the database +TEMP_DIR=$(mktemp -d) +TEMP_DB="$TEMP_DIR/temp_sqlx_cache.sqlite" +DATABASE_URL="sqlite:$TEMP_DB" + +echo "📁 Using temporary database: $TEMP_DB" + +# Function to cleanup on exit +cleanup() { + echo "🧹 Cleaning up temporary files..." + rm -rf "$TEMP_DIR" +} +trap cleanup EXIT + +# Export DATABASE_URL for sqlx commands +export DATABASE_URL + +echo "🗄️ Creating database..." +cargo sqlx database create + +echo "🔄 Running migrations..." +cargo sqlx migrate run + +echo "⚡ Preparing SQLx query cache..." +cargo sqlx prepare + +echo "✅ SQLx query cache regenerated successfully!" +echo "📝 The .sqlx directory has been updated with the latest query metadata." +echo "💡 Make sure to commit the .sqlx directory to version control." 
\ No newline at end of file diff --git a/monero-rpc-pool/src/config.rs b/monero-rpc-pool/src/config.rs new file mode 100644 index 00000000..fda354ff --- /dev/null +++ b/monero-rpc-pool/src/config.rs @@ -0,0 +1,27 @@ +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub host: String, + pub port: u16, + pub data_dir: PathBuf, +} + +impl Config { + pub fn new_with_port(host: String, port: u16, data_dir: PathBuf) -> Self { + Self { + host, + port, + data_dir, + } + } + + pub fn new_random_port(host: String, data_dir: PathBuf) -> Self { + Self { + host, + port: 0, + data_dir, + } + } +}
diff --git a/monero-rpc-pool/src/database.rs b/monero-rpc-pool/src/database.rs new file mode 100644 index 00000000..630d689f --- /dev/null +++ b/monero-rpc-pool/src/database.rs @@ -0,0 +1,952 @@ +use std::path::PathBuf; + +use anyhow::Result; +use dirs::data_dir; +use serde::{Deserialize, Serialize}; +use sqlx::SqlitePool; +use tracing::{debug, info, warn}; +
+#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] +pub struct MoneroNode { + pub id: Option<i64>, + pub scheme: String, // http or https + pub host: String, + pub port: i64, + pub full_url: String, + pub network: String, // mainnet, stagenet, or testnet - always known at insertion time + pub first_seen_at: String, // ISO 8601 timestamp when first discovered + // Computed fields from health_checks (not stored in monero_nodes table) + #[sqlx(default)] + pub success_count: i64, + #[sqlx(default)] + pub failure_count: i64, + #[sqlx(default)] + pub last_success: Option<String>, + #[sqlx(default)] + pub last_failure: Option<String>, + #[sqlx(default)] + pub last_checked: Option<String>, + #[sqlx(default)] + pub is_reliable: bool, + #[sqlx(default)] + pub avg_latency_ms: Option<f64>, + #[sqlx(default)] + pub min_latency_ms: Option<f64>, + #[sqlx(default)] + pub max_latency_ms: Option<f64>, + #[sqlx(default)] + pub last_latency_ms: Option<f64>, +} + +#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] +pub struct HealthCheck { + pub id: Option<i64>, + pub node_id: i64, + pub timestamp: String, // ISO 8601 timestamp + pub was_successful: bool, + pub latency_ms: Option<f64>, +} +
+impl MoneroNode { + pub fn new(scheme: String, host: String, port: i64, network: String) -> Self { + let full_url = format!("{}://{}:{}", scheme, host, port); + let now = chrono::Utc::now().to_rfc3339(); + Self { + id: None, + scheme, + host, + port, + full_url, + network, + first_seen_at: now, + // These are computed from health_checks + success_count: 0, + failure_count: 0, + last_success: None, + last_failure: None, + last_checked: None, + is_reliable: false, + avg_latency_ms: None, + min_latency_ms: None, + max_latency_ms: None, + last_latency_ms: None, + } + } + + pub fn success_rate(&self) -> f64 { + let total = self.success_count + self.failure_count; + if total == 0 { + 0.0 + } else { + self.success_count as f64 / total as f64 + } + } + + pub fn reliability_score(&self) -> f64 { + let success_rate = self.success_rate(); + let total_requests = self.success_count + self.failure_count; + + // Weight success rate by total requests (more requests = more reliable data) + let request_weight = (total_requests as f64).min(200.0) / 200.0; + let mut score = success_rate * request_weight; + + // Factor in latency - lower latency = higher score + if let Some(avg_latency) = self.avg_latency_ms { + // Normalize latency to 0-1 range (assuming 0-2000ms range) + let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0); + score = score * 0.8 + latency_factor * 0.2; // 80% weight on success rate, 20% on latency + } + + score + } +} +
+#[derive(Clone)] +pub struct Database { + pub pool: SqlitePool, +} + +impl Database { + pub async fn new() -> Result<Self> { + let app_data_dir = get_app_data_dir()?; + Self::new_with_data_dir(app_data_dir).await + } + + pub async fn new_with_data_dir(data_dir: PathBuf) -> Result<Self> { + if !data_dir.exists() { + std::fs::create_dir_all(&data_dir)?; + info!("Created application data directory: {}", data_dir.display()); + } + + let db_path = data_dir.join("nodes.db"); + info!("Using database at: {}", db_path.display()); + + let database_url = format!("sqlite:{}?mode=rwc", db_path.display()); + let pool = SqlitePool::connect(&database_url).await?; + + let db = Self { pool }; + db.migrate().await?; + + Ok(db) + } + + async fn migrate(&self) -> Result<()> { + // Run sqlx migrations + sqlx::migrate!("./migrations").run(&self.pool).await?; + + info!("Database migration completed"); + Ok(()) + } +
+ /// Insert a node if it doesn't exist, return the node_id + pub async fn upsert_node( + &self, + scheme: &str, + host: &str, + port: i64, + network: &str, + ) -> Result<i64> { + let full_url = format!("{}://{}:{}", scheme, host, port); + let now = chrono::Utc::now().to_rfc3339(); + + let result = sqlx::query!( + r#" + INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(full_url) DO UPDATE SET + network = excluded.network, + updated_at = excluded.updated_at + RETURNING id + "#, + scheme, + host, + port, + full_url, + network, + now, + now + ) + .fetch_one(&self.pool) + .await?; + + Ok(result.id) + } +
+ /// Update a node's network after it has been identified + pub async fn update_node_network(&self, url: &str, network: &str) -> Result<()> { + let now = chrono::Utc::now().to_rfc3339(); + + let result = sqlx::query!( + r#" + UPDATE monero_nodes + SET network = ?, updated_at = ? + WHERE full_url = ? + "#, + network, + now, + url + ) + .execute(&self.pool) + .await?; + + if result.rows_affected() > 0 { + debug!("Updated network for node {} to {}", url, network); + } else { + warn!("Failed to update network for node {}: not found", url); + } + + Ok(()) + } +
+ /// Record a health check event + pub async fn record_health_check( + &self, + url: &str, + was_successful: bool, + latency_ms: Option<f64>, + ) -> Result<()> { + let now = chrono::Utc::now().to_rfc3339(); + + // First get the node_id + let node_row = sqlx::query!("SELECT id FROM monero_nodes WHERE full_url = ?", url) + .fetch_optional(&self.pool) + .await?; + + let node_id = match node_row { + Some(row) => row.id, + None => { + warn!("Cannot record health check for unknown node: {}", url); + return Ok(()); + } + }; + + sqlx::query!( + r#" + INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms) + VALUES (?, ?, ?, ?) 
+ "#, + node_id, + now, + was_successful, + latency_ms + ) + .execute(&self.pool) + .await?; + + Ok(()) + } + + /// Get nodes that have been identified (have network set) + pub async fn get_identified_nodes(&self, network: &str) -> Result> { + let rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + LEFT JOIN ( + SELECT DISTINCT node_id FROM ( + SELECT + n2.id as node_id, + COALESCE(s2.success_count, 0) as success_count, + COALESCE(s2.failure_count, 0) as failure_count, + s2.avg_latency_ms, + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END as reliability_score + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY reliability_score DESC + LIMIT 4 + ) + ) reliable_nodes ON n.id = reliable_nodes.node_id + WHERE n.network = ? 
+ ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC + "#, + network, + network + ) + .fetch_all(&self.pool) + .await?; + + let nodes: Vec = rows + .into_iter() + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: row.is_reliable != 0, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect(); + + debug!( + "Retrieved {} identified nodes for network {}", + nodes.len(), + network + ); + Ok(nodes) + } + + /// Get reliable nodes (top 4 by reliability score) + pub async fn get_reliable_nodes(&self, network: &str) -> Result> { + let rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(1 AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + WHERE n.network = ? 
AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0 + ORDER BY + (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) * + (MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END DESC + LIMIT 4 + "#, + network + ) + .fetch_all(&self.pool) + .await?; + + let nodes = rows + .into_iter() + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: true, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect(); + + Ok(nodes) + } + + /// Get node statistics for a network + pub async fn get_node_stats(&self, network: &str) -> Result<(i64, i64, i64)> { + let row = sqlx::query!( + r#" + SELECT + COUNT(*) as total, + CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as "reachable!: i64", + CAST((SELECT COUNT(*) FROM ( + SELECT n2.id + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END DESC + LIMIT 4 + )) AS INTEGER) as "reliable!: i64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + WHERE n.network = ? + "#, + network, + network + ) + .fetch_one(&self.pool) + .await?; + + let total = row.total; + let reachable = row.reachable; + let reliable = row.reliable; + + Ok((total, reachable, reliable)) + } + + /// Get health check statistics for a network + pub async fn get_health_check_stats(&self, network: &str) -> Result<(u64, u64)> { + let row = sqlx::query!( + r#" + SELECT + CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "successful!: i64", + CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "unsuccessful!: i64" + FROM ( + SELECT hc.was_successful + FROM health_checks hc + JOIN monero_nodes n ON hc.node_id = n.id + WHERE n.network = ? 
+ ORDER BY hc.timestamp DESC + LIMIT 100 + ) hc + "#, + network + ) + .fetch_one(&self.pool) + .await?; + + let successful = row.successful as u64; + let unsuccessful = row.unsuccessful as u64; + + Ok((successful, unsuccessful)) + } + + /// Get top nodes based on recent success rate and latency + pub async fn get_top_nodes_by_recent_success( + &self, + network: &str, + _recent_checks_limit: i64, + limit: i64, + ) -> Result> { + let rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + LEFT JOIN ( + SELECT DISTINCT node_id FROM ( + SELECT + n2.id as node_id, + COALESCE(s2.success_count, 0) as success_count, + COALESCE(s2.failure_count, 0) as failure_count, + s2.avg_latency_ms, + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END as reliability_score + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY reliability_score DESC + LIMIT 4 + ) + ) reliable_nodes ON n.id = reliable_nodes.node_id + WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0 + ORDER BY + (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC, + stats.avg_latency_ms ASC + LIMIT ? 
+ "#, + network, + network, + limit + ) + .fetch_all(&self.pool) + .await?; + + let nodes = rows + .into_iter() + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: row.is_reliable != 0, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect(); + + Ok(nodes) + } + + /// Get identified nodes that have at least one successful health check + pub async fn get_identified_nodes_with_success( + &self, + network: &str, + ) -> Result> { + let rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + LEFT JOIN ( + SELECT DISTINCT node_id FROM ( + SELECT + n2.id as node_id, + COALESCE(s2.success_count, 0) as success_count, + COALESCE(s2.failure_count, 0) as failure_count, + s2.avg_latency_ms, + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END as reliability_score + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? 
AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY reliability_score DESC + LIMIT 4 + ) + ) reliable_nodes ON n.id = reliable_nodes.node_id + WHERE n.network = ? AND stats.success_count > 0 + ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC + "#, + network, + network + ) + .fetch_all(&self.pool) + .await?; + + let nodes: Vec = rows + .into_iter() + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: row.is_reliable != 0, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect(); + + debug!( + "Retrieved {} identified nodes with success for network {}", + nodes.len(), + network + ); + Ok(nodes) + } + + /// Get random nodes for the specified network, excluding specific IDs + pub async fn get_random_nodes( + &self, + network: &str, + limit: i64, + exclude_ids: &[i64], + ) -> Result> { + if exclude_ids.is_empty() { + let rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + LEFT JOIN ( + SELECT DISTINCT node_id FROM ( + SELECT + n2.id as node_id, + COALESCE(s2.success_count, 0) as success_count, + COALESCE(s2.failure_count, 0) as failure_count, + s2.avg_latency_ms, + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END as reliability_score + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + 
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY reliability_score DESC + LIMIT 4 + ) + ) reliable_nodes ON n.id = reliable_nodes.node_id + WHERE n.network = ? + ORDER BY RANDOM() + LIMIT ? + "#, + network, + network, + limit + ) + .fetch_all(&self.pool) + .await?; + + return Ok(rows + .into_iter() + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: row.is_reliable != 0, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect()); + } + + // If exclude_ids is not empty, we need to handle it differently + // For now, get all nodes and filter in Rust (can be optimized with dynamic SQL) + let fetch_limit = limit + exclude_ids.len() as i64 + 10; // Get extra to account for exclusions + let all_rows = sqlx::query!( + r#" + SELECT + n.id as "id!: i64", + n.scheme, + n.host, + n.port, + n.full_url, + n.network, + n.first_seen_at, + CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64", + CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64", + stats.last_success as "last_success?: String", + stats.last_failure as "last_failure?: String", + stats.last_checked as "last_checked?: String", + CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64", + stats.avg_latency_ms as "avg_latency_ms?: f64", + stats.min_latency_ms as "min_latency_ms?: f64", + stats.max_latency_ms as "max_latency_ms?: f64", + stats.last_latency_ms as "last_latency_ms?: f64" + FROM monero_nodes n + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + MAX(CASE WHEN was_successful THEN timestamp END) as last_success, + MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure, + MAX(timestamp) as last_checked, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms, + MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms, + MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms, + (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms + FROM health_checks + GROUP BY node_id + ) stats ON n.id = stats.node_id + LEFT JOIN ( + SELECT DISTINCT node_id FROM ( + SELECT + n2.id as node_id, + COALESCE(s2.success_count, 0) as success_count, + COALESCE(s2.failure_count, 0) as failure_count, + s2.avg_latency_ms, + (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * + (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 + + CASE + WHEN 
s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2 + ELSE 0.0 + END as reliability_score + FROM monero_nodes n2 + LEFT JOIN ( + SELECT + node_id, + SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count, + SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count, + AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms + FROM health_checks + GROUP BY node_id + ) s2 ON n2.id = s2.node_id + WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0 + ORDER BY reliability_score DESC + LIMIT 4 + ) + ) reliable_nodes ON n.id = reliable_nodes.node_id + WHERE n.network = ? + ORDER BY RANDOM() + LIMIT ? + "#, + network, + network, + fetch_limit + ) + .fetch_all(&self.pool) + .await?; + + // Convert exclude_ids to a HashSet for O(1) lookup + let exclude_set: std::collections::HashSet = exclude_ids.iter().cloned().collect(); + + let nodes: Vec = all_rows + .into_iter() + .filter(|row| !exclude_set.contains(&row.id)) + .take(limit as usize) + .map(|row| MoneroNode { + id: Some(row.id), + scheme: row.scheme, + host: row.host, + port: row.port, + full_url: row.full_url, + network: row.network, + first_seen_at: row.first_seen_at, + success_count: row.success_count, + failure_count: row.failure_count, + last_success: row.last_success, + last_failure: row.last_failure, + last_checked: row.last_checked, + is_reliable: row.is_reliable != 0, + avg_latency_ms: row.avg_latency_ms, + min_latency_ms: row.min_latency_ms, + max_latency_ms: row.max_latency_ms, + last_latency_ms: row.last_latency_ms, + }) + .collect(); + + Ok(nodes) + } +} + +pub fn get_app_data_dir() -> Result { + let base_dir = + data_dir().ok_or_else(|| anyhow::anyhow!("Could not determine system data directory"))?; + + let app_dir = base_dir.join("monero-rpc-pool"); + + if !app_dir.exists() { + std::fs::create_dir_all(&app_dir)?; + info!("Created application data directory: {}", app_dir.display()); + } + + Ok(app_dir) +} diff --git a/monero-rpc-pool/src/discovery.rs b/monero-rpc-pool/src/discovery.rs new file mode 100644 index 00000000..4222070c --- /dev/null +++ b/monero-rpc-pool/src/discovery.rs @@ -0,0 +1,383 @@ +use std::collections::HashSet; +use std::time::{Duration, Instant}; + +use anyhow::Result; +use monero::Network; +use rand::seq::SliceRandom; +use reqwest::Client; +use serde::Deserialize; +use serde_json::Value; +use tracing::{error, info, warn}; +use url; + +use crate::database::Database; + +#[derive(Debug, Deserialize)] +struct MoneroFailResponse { + monero: MoneroNodes, +} + +#[derive(Debug, Deserialize)] +struct MoneroNodes { + clear: Vec, + #[serde(default)] + web_compatible: Vec, +} + +#[derive(Debug)] +pub struct HealthCheckOutcome { + pub was_successful: bool, + pub latency: Duration, + pub discovered_network: Option, +} + +#[derive(Clone)] +pub struct NodeDiscovery { + client: Client, + db: Database, +} + +fn network_to_string(network: &Network) -> String { + match network { + Network::Mainnet => "mainnet".to_string(), + Network::Stagenet => "stagenet".to_string(), + Network::Testnet => "testnet".to_string(), + } +} + +impl NodeDiscovery { + pub fn new(db: Database) -> Result { + let client = Client::builder() + .timeout(Duration::from_secs(10)) + .user_agent("monero-rpc-pool/1.0") + .build() + .map_err(|e| anyhow::anyhow!("Failed to build HTTP client: {}", e))?; + + Ok(Self { client, db }) + } + + /// Fetch nodes from monero.fail API + pub async fn fetch_mainnet_nodes_from_api(&self) -> 
Result> { + let url = "https://monero.fail/nodes.json?chain=monero"; + + let response = self + .client + .get(url) + .timeout(Duration::from_secs(30)) + .send() + .await?; + + if !response.status().is_success() { + return Err(anyhow::anyhow!("HTTP error: {}", response.status())); + } + + let monero_fail_response: MoneroFailResponse = response.json().await?; + + // Combine clear and web_compatible nodes + let mut nodes = monero_fail_response.monero.web_compatible; + nodes.extend(monero_fail_response.monero.clear); + + // Remove duplicates using HashSet for O(n) complexity + let mut seen = HashSet::new(); + let mut unique_nodes = Vec::new(); + for node in nodes { + if seen.insert(node.clone()) { + unique_nodes.push(node); + } + } + + // Shuffle nodes in random order + let mut rng = rand::thread_rng(); + unique_nodes.shuffle(&mut rng); + + info!( + "Fetched {} mainnet nodes from monero.fail API", + unique_nodes.len() + ); + Ok(unique_nodes) + } + + /// Fetch nodes from monero.fail API and discover from other sources + pub async fn discover_nodes_from_sources(&self, target_network: Network) -> Result<()> { + // Only fetch from external sources for mainnet to avoid polluting test networks + if target_network == Network::Mainnet { + match self.fetch_mainnet_nodes_from_api().await { + Ok(nodes) => { + self.discover_and_insert_nodes(target_network, nodes) + .await?; + } + Err(e) => { + warn!("Failed to fetch nodes from monero.fail API: {}", e); + } + } + } + + Ok(()) + } + + /// Enhanced health check that detects network and validates node identity + pub async fn check_node_health(&self, url: &str) -> Result { + let start_time = Instant::now(); + + let rpc_request = serde_json::json!({ + "jsonrpc": "2.0", + "id": "0", + "method": "get_info" + }); + + let full_url = format!("{}/json_rpc", url); + let response = self.client.post(&full_url).json(&rpc_request).send().await; + + let latency = start_time.elapsed(); + + match response { + Ok(resp) => { + if resp.status().is_success() { + match resp.json::().await { + Ok(json) => { + if let Some(result) = json.get("result") { + // Extract network information from get_info response + let discovered_network = self.extract_network_from_info(result); + + Ok(HealthCheckOutcome { + was_successful: true, + latency, + discovered_network, + }) + } else { + Ok(HealthCheckOutcome { + was_successful: false, + latency, + discovered_network: None, + }) + } + } + Err(_e) => Ok(HealthCheckOutcome { + was_successful: false, + latency, + discovered_network: None, + }), + } + } else { + Ok(HealthCheckOutcome { + was_successful: false, + latency, + discovered_network: None, + }) + } + } + Err(_e) => Ok(HealthCheckOutcome { + was_successful: false, + latency, + discovered_network: None, + }), + } + } + + /// Extract network type from get_info response + fn extract_network_from_info(&self, info_result: &Value) -> Option { + // Check nettype field (0 = mainnet, 1 = testnet, 2 = stagenet) + if let Some(nettype) = info_result.get("nettype").and_then(|v| v.as_u64()) { + return match nettype { + 0 => Some(Network::Mainnet), + 1 => Some(Network::Testnet), + 2 => Some(Network::Stagenet), + _ => None, + }; + } + + // Fallback: check if testnet or stagenet is mentioned in fields + if let Some(testnet) = info_result.get("testnet").and_then(|v| v.as_bool()) { + return if testnet { + Some(Network::Testnet) + } else { + Some(Network::Mainnet) + }; + } + + // Additional heuristics could be added here + None + } + + /// Updated health check workflow with identification and validation logic + 
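/// For each stored node this sends a get_info probe, records a row in health_checks (latency only on success), and corrects the stored network when the daemon reports a different nettype; a short delay between probes avoids hammering nodes. +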
pub async fn health_check_all_nodes(&self, target_network: Network) -> Result<()> { + info!( + "Starting health check for all nodes targeting network: {}", + network_to_string(&target_network) + ); + + // Get all nodes from database with proper field mapping + let all_nodes = sqlx::query!( + r#" + SELECT + id as "id!: i64", + scheme, + host, + port, + full_url, + network as "network!: String", + first_seen_at + FROM monero_nodes + ORDER BY id + "# + ) + .fetch_all(&self.db.pool) + .await?; + + let mut checked_count = 0; + let mut healthy_count = 0; + let mut corrected_count = 0; + + for node in all_nodes { + match self.check_node_health(&node.full_url).await { + Ok(outcome) => { + // Always record the health check + self.db + .record_health_check( + &node.full_url, + outcome.was_successful, + if outcome.was_successful { + Some(outcome.latency.as_millis() as f64) + } else { + None + }, + ) + .await?; + + if outcome.was_successful { + healthy_count += 1; + + // Validate network consistency + if let Some(discovered_network) = outcome.discovered_network { + let discovered_network_str = network_to_string(&discovered_network); + if node.network != discovered_network_str { + warn!("Network mismatch detected for node {}: stored={}, discovered={}. Correcting...", + node.full_url, node.network, discovered_network_str); + self.db + .update_node_network(&node.full_url, &discovered_network_str) + .await?; + corrected_count += 1; + } + } + } + checked_count += 1; + } + Err(_e) => { + self.db + .record_health_check(&node.full_url, false, None) + .await?; + } + } + + // Small delay to avoid hammering nodes + tokio::time::sleep(Duration::from_secs(2)).await; + } + + info!( + "Health check completed: {}/{} nodes healthy, {} corrected", + healthy_count, checked_count, corrected_count + ); + + Ok(()) + } + + /// Periodic discovery task with improved error handling + pub async fn periodic_discovery_task(&self, target_network: Network) -> Result<()> { + let mut interval = tokio::time::interval(Duration::from_secs(3600)); // Every hour + + loop { + interval.tick().await; + + info!( + "Running periodic node discovery for network: {}", + network_to_string(&target_network) + ); + + // Discover new nodes from sources + if let Err(e) = self.discover_nodes_from_sources(target_network).await { + error!("Failed to discover nodes: {}", e); + } + + // Health check all nodes (will identify networks automatically) + if let Err(e) = self.health_check_all_nodes(target_network).await { + error!("Failed to perform health check: {}", e); + } + + // Log stats for all networks + for network in &[Network::Mainnet, Network::Stagenet, Network::Testnet] { + let network_str = network_to_string(network); + if let Ok((total, reachable, reliable)) = self.db.get_node_stats(&network_str).await + { + if total > 0 { + info!( + "Node stats for {}: {} total, {} reachable, {} reliable", + network_str, total, reachable, reliable + ); + } + } + } + } + } + + /// Insert configured nodes for a specific network + pub async fn discover_and_insert_nodes( + &self, + target_network: Network, + nodes: Vec, + ) -> Result<()> { + let mut success_count = 0; + let mut error_count = 0; + let target_network_str = network_to_string(&target_network); + + for node_url in nodes.iter() { + if let Ok(url) = url::Url::parse(node_url) { + let scheme = url.scheme(); + + // Validate scheme - must be http or https + if !matches!(scheme, "http" | "https") { + continue; + } + + // Validate host - must be non-empty + let Some(host) = url.host_str() else { + continue; + }; + 
if host.is_empty() { + continue; + } + + // Validate port - must be present + let Some(port) = url.port() else { + continue; + }; + let port = port as i64; + + match self + .db + .upsert_node(scheme, host, port, &target_network_str) + .await + { + Ok(_) => { + success_count += 1; + } + Err(e) => { + error_count += 1; + error!( + "Failed to insert configured node {}://{}:{}: {}", + scheme, host, port, e + ); + } + } + } else { + error_count += 1; + error!("Failed to parse node URL: {}", node_url); + } + } + + info!( + "Configured node insertion complete: {} successful, {} errors", + success_count, error_count + ); + Ok(()) + } +} diff --git a/monero-rpc-pool/src/lib.rs b/monero-rpc-pool/src/lib.rs new file mode 100644 index 00000000..f03dd1f3 --- /dev/null +++ b/monero-rpc-pool/src/lib.rs @@ -0,0 +1,228 @@ +use std::sync::Arc; + +use anyhow::Result; +use axum::{ + routing::{any, get}, + Router, +}; +use monero::Network; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use tower_http::cors::CorsLayer; +use tracing::{error, info}; + +fn network_to_string(network: &Network) -> String { + match network { + Network::Mainnet => "mainnet".to_string(), + Network::Stagenet => "stagenet".to_string(), + Network::Testnet => "testnet".to_string(), + } +} + +pub mod config; +pub mod database; +pub mod discovery; +pub mod pool; +pub mod simple_handlers; + +use config::Config; +use database::Database; +use discovery::NodeDiscovery; +use pool::{NodePool, PoolStatus}; +use simple_handlers::{simple_proxy_handler, simple_stats_handler}; + +#[derive(Clone)] +pub struct AppState { + pub node_pool: Arc>, +} + +/// Manages background tasks for the RPC pool +pub struct PoolHandle { + pub status_update_handle: JoinHandle<()>, + pub discovery_handle: JoinHandle<()>, +} + +impl Drop for PoolHandle { + fn drop(&mut self) { + self.status_update_handle.abort(); + self.discovery_handle.abort(); + } +} + +/// Information about a running RPC pool server +#[derive(Debug, Clone)] +pub struct ServerInfo { + pub port: u16, + pub host: String, +} + +async fn create_app_with_receiver( + config: Config, + network: Network, +) -> Result<( + Router, + tokio::sync::broadcast::Receiver, + PoolHandle, +)> { + // Initialize database + let db = Database::new_with_data_dir(config.data_dir.clone()).await?; + + // Initialize node pool with network + let network_str = network_to_string(&network); + let (node_pool, status_receiver) = NodePool::new(db.clone(), network_str.clone()); + let node_pool = Arc::new(RwLock::new(node_pool)); + + // Initialize discovery service + let discovery = NodeDiscovery::new(db.clone())?; + + // Publish initial status immediately to ensure first event is sent + { + let pool_guard = node_pool.read().await; + if let Err(e) = pool_guard.publish_status_update().await { + error!("Failed to publish initial status update: {}", e); + } + } + + // Start background tasks + let node_pool_for_health_check = node_pool.clone(); + let status_update_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(std::time::Duration::from_secs(10)); + + loop { + interval.tick().await; + + // Publish status update + let pool_guard = node_pool_for_health_check.read().await; + if let Err(e) = pool_guard.publish_status_update().await { + error!("Failed to publish status update: {}", e); + } + } + }); + + // Start periodic discovery task + let discovery_clone = discovery.clone(); + let network_clone = network; + let discovery_handle = tokio::spawn(async move { + if let Err(e) = 
discovery_clone.periodic_discovery_task(network_clone).await { + error!( + "Periodic discovery task failed for network {}: {}", + network_to_string(&network_clone), + e + ); + } + }); + + let pool_handle = PoolHandle { + status_update_handle, + discovery_handle, + }; + + let app_state = AppState { node_pool }; + + // Build the app + let app = Router::new() + .route("/stats", get(simple_stats_handler)) + .route("/*path", any(simple_proxy_handler)) + .layer(CorsLayer::permissive()) + .with_state(app_state); + + Ok((app, status_receiver, pool_handle)) +} + +pub async fn create_app(config: Config, network: Network) -> Result { + let (app, _, _pool_handle) = create_app_with_receiver(config, network).await?; + // Note: pool_handle is dropped here, so tasks will be aborted when this function returns + // This is intentional for the simple create_app use case + Ok(app) +} + +/// Create an app with a custom data directory for the database +pub async fn create_app_with_data_dir( + config: Config, + network: Network, + data_dir: std::path::PathBuf, +) -> Result { + let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir); + create_app(config_with_data_dir, network).await +} + +pub async fn run_server(config: Config, network: Network) -> Result<()> { + let app = create_app(config.clone(), network).await?; + + let bind_address = format!("{}:{}", config.host, config.port); + info!("Starting server on {}", bind_address); + + let listener = tokio::net::TcpListener::bind(&bind_address).await?; + info!("Server listening on {}", bind_address); + + axum::serve(listener, app).await?; + Ok(()) +} + +/// Run a server with a custom data directory +pub async fn run_server_with_data_dir( + config: Config, + network: Network, + data_dir: std::path::PathBuf, +) -> Result<()> { + let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir); + run_server(config_with_data_dir, network).await +} + +/// Start a server with a random port for library usage +/// Returns the server info with the actual port used, a receiver for pool status updates, and pool handle +pub async fn start_server_with_random_port( + config: Config, + network: Network, +) -> Result<( + ServerInfo, + tokio::sync::broadcast::Receiver, + PoolHandle, +)> { + // Clone the host before moving config + let host = config.host.clone(); + + // If port is 0, the system will assign a random available port + let config_with_random_port = Config::new_random_port(config.host, config.data_dir); + + let (app, status_receiver, pool_handle) = + create_app_with_receiver(config_with_random_port, network).await?; + + // Bind to port 0 to get a random available port + let listener = tokio::net::TcpListener::bind(format!("{}:0", host)).await?; + let actual_addr = listener.local_addr()?; + + let server_info = ServerInfo { + port: actual_addr.port(), + host: host.clone(), + }; + + info!( + "Started server on {}:{} (random port)", + server_info.host, server_info.port + ); + + // Start the server in a background task + tokio::spawn(async move { + if let Err(e) = axum::serve(listener, app).await { + error!("Server error: {}", e); + } + }); + + Ok((server_info, status_receiver, pool_handle)) +} + +/// Start a server with a random port and custom data directory for library usage +/// Returns the server info with the actual port used, a receiver for pool status updates, and pool handle +pub async fn start_server_with_random_port_and_data_dir( + config: Config, + network: Network, + data_dir: std::path::PathBuf, +) -> Result<( + 
ServerInfo, + tokio::sync::broadcast::Receiver, + PoolHandle, +)> { + let config_with_data_dir = Config::new_random_port(config.host, data_dir); + start_server_with_random_port(config_with_data_dir, network).await +} diff --git a/monero-rpc-pool/src/main.rs b/monero-rpc-pool/src/main.rs new file mode 100644 index 00000000..9326c8bd --- /dev/null +++ b/monero-rpc-pool/src/main.rs @@ -0,0 +1,177 @@ +use clap::Parser; +use tracing::{info, warn}; +use tracing_subscriber::{self, EnvFilter}; + +use monero_rpc_pool::database::Database; +use monero_rpc_pool::discovery::NodeDiscovery; +use monero_rpc_pool::{config::Config, run_server}; + +use monero::Network; + +fn parse_network(s: &str) -> Result { + match s.to_lowercase().as_str() { + "mainnet" => Ok(Network::Mainnet), + "stagenet" => Ok(Network::Stagenet), + "testnet" => Ok(Network::Testnet), + _ => Err(format!( + "Invalid network: {}. Must be mainnet, stagenet, or testnet", + s + )), + } +} + +fn network_to_string(network: &Network) -> String { + match network { + Network::Mainnet => "mainnet".to_string(), + Network::Stagenet => "stagenet".to_string(), + Network::Testnet => "testnet".to_string(), + } +} + +#[derive(Parser)] +#[command(name = "monero-rpc-pool")] +#[command(about = "A load-balancing HTTP proxy for Monero RPC nodes")] +#[command(version)] +struct Args { + #[arg(long, default_value = "127.0.0.1")] + #[arg(help = "Host address to bind the server to")] + host: String, + + #[arg(short, long, default_value = "18081")] + #[arg(help = "Port to bind the server to")] + port: u16, + + #[arg(long, value_delimiter = ',')] + #[arg(help = "Comma-separated list of Monero node URLs (overrides network-based discovery)")] + nodes: Option>, + + #[arg(short, long, default_value = "mainnet")] + #[arg(help = "Network to use for automatic node discovery")] + #[arg(value_parser = parse_network)] + network: Network, + + #[arg(short, long)] + #[arg(help = "Enable verbose logging")] + verbose: bool, +} + +// Custom filter function that overrides log levels for our crate +fn create_level_override_filter(base_filter: &str) -> EnvFilter { + // Parse the base filter and modify it to treat all monero_rpc_pool logs as trace + let mut filter = EnvFilter::new(base_filter); + + // Add a directive that treats all levels from our crate as trace + filter = filter.add_directive("monero_rpc_pool=trace".parse().unwrap()); + + filter +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let args = Args::parse(); + + // Create a filter that treats all logs from our crate as traces + let base_filter = if args.verbose { + // In verbose mode, show logs from other crates at WARN level + "warn" + } else { + // In normal mode, show logs from other crates at ERROR level + "error" + }; + + let filter = create_level_override_filter(base_filter); + + tracing_subscriber::fmt() + .with_env_filter(filter) + .with_target(false) + .with_file(true) + .with_line_number(true) + .init(); + + // Store node count for later logging before potentially moving args.nodes + let manual_node_count = args.nodes.as_ref().map(|nodes| nodes.len()); + + // Determine nodes to use and set up discovery + let _nodes = if let Some(manual_nodes) = args.nodes { + info!( + "Using manually specified nodes for network: {}", + network_to_string(&args.network) + ); + + // Insert manual nodes into database with network information + let db = Database::new().await?; + let discovery = NodeDiscovery::new(db.clone())?; + let mut parsed_nodes = Vec::new(); + + for node_url in &manual_nodes { + // Parse the URL to 
extract components + if let Ok(url) = url::Url::parse(node_url) { + let scheme = url.scheme().to_string(); + let _protocol = if scheme == "https" { "ssl" } else { "tcp" }; + let host = url.host_str().unwrap_or("").to_string(); + let port = url + .port() + .unwrap_or(if scheme == "https" { 443 } else { 80 }) + as i64; + + let full_url = format!("{}://{}:{}", scheme, host, port); + + // Insert into database + if let Err(e) = db + .upsert_node(&scheme, &host, port, &network_to_string(&args.network)) + .await + { + warn!("Failed to insert manual node {}: {}", node_url, e); + } else { + parsed_nodes.push(full_url); + } + } else { + warn!("Failed to parse manual node URL: {}", node_url); + } + } + + // Use manual nodes for discovery + discovery + .discover_and_insert_nodes(args.network, manual_nodes) + .await?; + parsed_nodes + } else { + info!( + "Setting up automatic node discovery for {} network", + network_to_string(&args.network) + ); + let db = Database::new().await?; + let discovery = NodeDiscovery::new(db.clone())?; + + // Start discovery process + discovery.discover_nodes_from_sources(args.network).await?; + Vec::new() // Return empty vec for consistency + }; + + let config = Config::new_with_port( + args.host, + args.port, + std::env::temp_dir().join("monero-rpc-pool"), + ); + + let node_count_msg = if args.verbose { + match manual_node_count { + Some(count) => format!("{} manual nodes configured", count), + None => "using automatic discovery".to_string(), + } + } else { + "configured".to_string() + }; + + info!( + "Starting Monero RPC Pool\nConfiguration:\n Host: {}\n Port: {}\n Network: {}\n Nodes: {}", + config.host, config.port, network_to_string(&args.network), node_count_msg + ); + + if let Err(e) = run_server(config, args.network).await { + eprintln!("Server error: {}", e); + std::process::exit(1); + } + + Ok(()) +} diff --git a/monero-rpc-pool/src/pool.rs b/monero-rpc-pool/src/pool.rs new file mode 100644 index 00000000..9190b969 --- /dev/null +++ b/monero-rpc-pool/src/pool.rs @@ -0,0 +1,270 @@ +use anyhow::{Context, Result}; +use rand::prelude::*; +use tokio::sync::broadcast; +use tracing::debug; +use typeshare::typeshare; + +use crate::database::Database; + +#[derive(Debug, Clone, serde::Serialize)] +#[typeshare] +pub struct PoolStatus { + pub total_node_count: u32, + pub healthy_node_count: u32, + #[typeshare(serialized_as = "number")] + pub successful_health_checks: u64, + #[typeshare(serialized_as = "number")] + pub unsuccessful_health_checks: u64, + pub top_reliable_nodes: Vec, +} + +#[derive(Debug, Clone, serde::Serialize)] +#[typeshare] +pub struct ReliableNodeInfo { + pub url: String, + pub success_rate: f64, + pub avg_latency_ms: Option, +} + +pub struct NodePool { + db: Database, + network: String, + status_sender: broadcast::Sender, +} + +impl NodePool { + pub fn new(db: Database, network: String) -> (Self, broadcast::Receiver) { + let (status_sender, status_receiver) = broadcast::channel(100); + let pool = Self { + db, + network, + status_sender, + }; + (pool, status_receiver) + } + + /// Get next node using Power of Two Choices algorithm + /// Only considers identified nodes (nodes with network set) + pub async fn get_next_node(&self) -> Result> { + let candidate_nodes = self.db.get_identified_nodes(&self.network).await?; + + if candidate_nodes.is_empty() { + debug!("No identified nodes available for network {}", self.network); + return Ok(None); + } + + if candidate_nodes.len() == 1 { + return Ok(Some(candidate_nodes[0].full_url.clone())); + } + + // Power of Two 
Choices: pick 2 random nodes, select the better one + let mut rng = thread_rng(); + let node1 = candidate_nodes.choose(&mut rng).unwrap(); + let node2 = candidate_nodes.choose(&mut rng).unwrap(); + + let selected = + if self.calculate_goodness_score(node1) >= self.calculate_goodness_score(node2) { + node1 + } else { + node2 + }; + + debug!( + "Selected node using P2C for network {}: {}", + self.network, selected.full_url + ); + Ok(Some(selected.full_url.clone())) + } + + /// Calculate goodness score based on usage-based recency + /// Score is a function of success rate and latency from last N health checks + fn calculate_goodness_score(&self, node: &crate::database::MoneroNode) -> f64 { + let total_checks = node.success_count + node.failure_count; + if total_checks == 0 { + return 0.0; + } + + let success_rate = node.success_count as f64 / total_checks as f64; + + // Weight by recency (more recent interactions = higher weight) + let recency_weight = (total_checks as f64).min(200.0) / 200.0; + let mut score = success_rate * recency_weight; + + // Factor in latency - lower latency = higher score + if let Some(avg_latency) = node.avg_latency_ms { + let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0); + score = score * 0.8 + latency_factor * 0.2; // 80% success rate, 20% latency + } + + score + } + + pub async fn record_success(&self, url: &str, latency_ms: f64) -> Result<()> { + self.db + .record_health_check(url, true, Some(latency_ms)) + .await?; + Ok(()) + } + + pub async fn record_failure(&self, url: &str) -> Result<()> { + self.db.record_health_check(url, false, None).await?; + Ok(()) + } + + pub async fn publish_status_update(&self) -> Result<()> { + let status = self.get_current_status().await?; + let _ = self.status_sender.send(status); // Ignore if no receivers + Ok(()) + } + + pub async fn get_current_status(&self) -> Result { + let (total, reachable, _reliable) = self.db.get_node_stats(&self.network).await?; + let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?; + let (successful_checks, unsuccessful_checks) = + self.db.get_health_check_stats(&self.network).await?; + + let top_reliable_nodes = reliable_nodes + .into_iter() + .take(5) + .map(|node| ReliableNodeInfo { + url: node.full_url.clone(), + success_rate: node.success_rate(), + avg_latency_ms: node.avg_latency_ms, + }) + .collect(); + + Ok(PoolStatus { + total_node_count: total as u32, + healthy_node_count: reachable as u32, + successful_health_checks: successful_checks, + unsuccessful_health_checks: unsuccessful_checks, + top_reliable_nodes, + }) + } + + /// Get top reliable nodes with fill-up logic to ensure pool size + /// First tries to get top nodes based on recent success, then fills up with random nodes + pub async fn get_top_reliable_nodes( + &self, + limit: usize, + ) -> Result> { + debug!( + "Getting top reliable nodes for network {} (target: {})", + self.network, limit + ); + + // Step 1: Try primary fetch - get top nodes based on recent success (last 200 health checks) + let mut top_nodes = self + .db + .get_top_nodes_by_recent_success(&self.network, 200, limit as i64) + .await + .context("Failed to get top nodes by recent success")?; + + debug!( + "Primary fetch returned {} nodes for network {} (target: {})", + top_nodes.len(), + self.network, + limit + ); + + // Step 2: If primary fetch didn't return enough nodes, fall back to any identified nodes with successful health checks + if top_nodes.len() < limit { + debug!("Primary fetch returned insufficient nodes, falling back to any 
identified nodes with successful health checks"); + top_nodes = self + .db + .get_identified_nodes_with_success(&self.network) + .await?; + + debug!( + "Fallback fetch returned {} nodes with successful health checks for network {}", + top_nodes.len(), + self.network + ); + } + + // Step 3: Check if we still don't have enough nodes + if top_nodes.len() < limit { + let needed = limit - top_nodes.len(); + debug!( + "Pool needs {} more nodes to reach target of {} for network {}", + needed, limit, self.network + ); + + // Step 4: Collect exclusion IDs from nodes already selected + let exclude_ids: Vec = top_nodes.iter().filter_map(|node| node.id).collect(); + + // Step 5: Secondary fetch - get random nodes to fill up + let random_fillers = self + .db + .get_random_nodes(&self.network, needed as i64, &exclude_ids) + .await?; + + debug!( + "Secondary fetch returned {} random nodes for network {}", + random_fillers.len(), + self.network + ); + + // Step 6: Combine lists + top_nodes.extend(random_fillers); + } + + debug!( + "Final pool size: {} nodes for network {} (target: {})", + top_nodes.len(), + self.network, + limit + ); + + Ok(top_nodes) + } + + pub async fn get_pool_stats(&self) -> Result { + let (total, reachable, reliable) = self.db.get_node_stats(&self.network).await?; + let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?; + + let avg_reliable_latency = if reliable_nodes.is_empty() { + None + } else { + let total_latency: f64 = reliable_nodes + .iter() + .filter_map(|node| node.avg_latency_ms) + .sum(); + let count = reliable_nodes + .iter() + .filter(|node| node.avg_latency_ms.is_some()) + .count(); + + if count > 0 { + Some(total_latency / count as f64) + } else { + None + } + }; + + Ok(PoolStats { + total_nodes: total, + reachable_nodes: reachable, + reliable_nodes: reliable, + avg_reliable_latency_ms: avg_reliable_latency, + }) + } +} + +#[derive(Debug)] +pub struct PoolStats { + pub total_nodes: i64, + pub reachable_nodes: i64, + pub reliable_nodes: i64, + pub avg_reliable_latency_ms: Option, // TOOD: Why is this an Option, we hate Options +} + +impl PoolStats { + pub fn health_percentage(&self) -> f64 { + if self.total_nodes == 0 { + 0.0 + } else { + (self.reachable_nodes as f64 / self.total_nodes as f64) * 100.0 + } + } +} diff --git a/monero-rpc-pool/src/simple_handlers.rs b/monero-rpc-pool/src/simple_handlers.rs new file mode 100644 index 00000000..59486e30 --- /dev/null +++ b/monero-rpc-pool/src/simple_handlers.rs @@ -0,0 +1,579 @@ +use axum::{ + body::Body, + extract::State, + http::{HeaderMap, Method, StatusCode}, + response::Response, +}; +use serde_json::json; +use std::{error::Error, time::Instant}; +use tracing::{debug, error, info_span, Instrument}; +use uuid::Uuid; + +use crate::AppState; + +#[derive(Debug, Clone)] +enum HandlerError { + NoNodes, + PoolError(String), + RequestError(String), + AllRequestsFailed(Vec<(String, String)>), // Vec of (node_url, error_message) +} + +impl std::fmt::Display for HandlerError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + HandlerError::NoNodes => write!(f, "No nodes available"), + HandlerError::PoolError(msg) => write!(f, "Pool error: {}", msg), + HandlerError::RequestError(msg) => write!(f, "Request error: {}", msg), + HandlerError::AllRequestsFailed(errors) => { + write!(f, "All requests failed: [")?; + for (i, (node, error)) in errors.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}: {}", node, error)?; + } + write!(f, "]") + } + } + } +} + +fn 
is_jsonrpc_error(body: &[u8]) -> bool { + // Try to parse as JSON + if let Ok(json) = serde_json::from_slice::(body) { + // Check if there's an "error" field + return json.get("error").is_some(); + } + + // If we can't parse JSON, treat it as an error + true +} + +fn extract_jsonrpc_method(body: &[u8]) -> Option { + if let Ok(json) = serde_json::from_slice::(body) { + if let Some(method) = json.get("method").and_then(|m| m.as_str()) { + return Some(method.to_string()); + } + } + None +} + +async fn raw_http_request( + node_url: &str, + path: &str, + method: &str, + headers: &HeaderMap, + body: Option<&[u8]>, +) -> Result { + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .map_err(|e| HandlerError::RequestError(e.to_string()))?; + + let url = format!("{}{}", node_url, path); + + // Use generic request method to support any HTTP verb + let http_method = method + .parse::() + .map_err(|e| HandlerError::RequestError(format!("Invalid method '{}': {}", method, e)))?; + + let mut request_builder = client.request(http_method, &url); + + // Forward body if present + if let Some(body_bytes) = body { + request_builder = request_builder.body(body_bytes.to_vec()); + } + + // Forward essential headers + for (name, value) in headers.iter() { + let header_name = name.as_str(); + let header_name_lc = header_name.to_ascii_lowercase(); + + // Skip hop-by-hop headers and any body-related headers when we are **not** forwarding a body. + let is_hop_by_hop = matches!( + header_name_lc.as_str(), + "host" + | "connection" + | "transfer-encoding" + | "upgrade" + | "proxy-authenticate" + | "proxy-authorization" + | "te" + | "trailers" + ); + + // If we are not forwarding a body (e.g. GET request) then forwarding `content-length` or + // `content-type` with an absent body makes many Monero nodes hang waiting for bytes and + // eventually close the connection. This manifests as the time-outs we have observed. 
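Editor's aside, not part of the patch: the forwarding rule described in the comment above can be summed up as a single predicate. A sketch under assumed names (in `raw_http_request` the two checks are inlined rather than factored out):

```rust
/// Illustrative restatement of the header-forwarding rule used by the proxy:
/// drop hop-by-hop headers, and drop body-describing headers when no body is
/// being forwarded (otherwise upstream Monero nodes wait for bytes that never
/// arrive and eventually time out).
fn should_forward_header(name_lowercase: &str, has_body: bool) -> bool {
    let is_hop_by_hop = matches!(
        name_lowercase,
        "host"
            | "connection"
            | "transfer-encoding"
            | "upgrade"
            | "proxy-authenticate"
            | "proxy-authorization"
            | "te"
            | "trailers"
    );

    let is_body_header_without_body =
        !has_body && matches!(name_lowercase, "content-length" | "content-type");

    !is_hop_by_hop && !is_body_header_without_body
}
```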
+ let is_body_header_without_body = + body.is_none() && matches!(header_name_lc.as_str(), "content-length" | "content-type"); + + if !is_hop_by_hop && !is_body_header_without_body { + if let Ok(header_value) = std::str::from_utf8(value.as_bytes()) { + request_builder = request_builder.header(header_name, header_value); + } + } + } + + let response = request_builder + .send() + .await + .map_err(|e| HandlerError::RequestError(e.to_string()))?; + + // Convert to axum Response preserving everything + let status = response.status(); + let response_headers = response.headers().clone(); + + let body_bytes = response.bytes().await.map_err(|e| { + let mut error_msg = format!("Failed to read response body: {}", e); + if let Some(source) = e.source() { + error_msg.push_str(&format!(" (source: {})", source)); + } + + HandlerError::RequestError(error_msg) + })?; + + let mut axum_response = Response::new(Body::from(body_bytes)); + *axum_response.status_mut() = + StatusCode::from_u16(status.as_u16()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR); + + // Copy response headers exactly + for (name, value) in response_headers.iter() { + if let (Ok(header_name), Ok(header_value)) = ( + axum::http::HeaderName::try_from(name.as_str()), + axum::http::HeaderValue::try_from(value.as_bytes()), + ) { + axum_response + .headers_mut() + .insert(header_name, header_value); + } + } + + Ok(axum_response) +} + +async fn record_success(state: &AppState, node_url: &str, latency_ms: f64) { + let node_pool_guard = state.node_pool.read().await; + if let Err(e) = node_pool_guard.record_success(node_url, latency_ms).await { + error!("Failed to record success for {}: {}", node_url, e); + } +} + +async fn record_failure(state: &AppState, node_url: &str) { + let node_pool_guard = state.node_pool.read().await; + if let Err(e) = node_pool_guard.record_failure(node_url).await { + error!("Failed to record failure for {}: {}", node_url, e); + } +} + +async fn single_raw_request( + state: &AppState, + node_url: String, + path: &str, + method: &str, + headers: &HeaderMap, + body: Option<&[u8]>, +) -> Result<(Response, String, f64), HandlerError> { + let start_time = Instant::now(); + + match raw_http_request(&node_url, path, method, headers, body).await { + Ok(response) => { + let elapsed = start_time.elapsed(); + let latency_ms = elapsed.as_millis() as f64; + + // Check HTTP status code - only 200 is success! 
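// (Editor's note: `is_success()` below accepts any 2xx status, not literally only
// 200; for `/json_rpc` requests the body is additionally checked for an `error`
// member before the node is credited with a success.)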
+ if response.status().is_success() { + // For JSON-RPC endpoints, also check for JSON-RPC errors + if path == "/json_rpc" { + let (parts, body_stream) = response.into_parts(); + let body_bytes = axum::body::to_bytes(body_stream, usize::MAX) + .await + .map_err(|e| HandlerError::RequestError(e.to_string()))?; + + if is_jsonrpc_error(&body_bytes) { + record_failure(state, &node_url).await; + return Err(HandlerError::RequestError("JSON-RPC error".to_string())); + } + + // Reconstruct response with the body we consumed + let response = Response::from_parts(parts, Body::from(body_bytes)); + record_success(state, &node_url, latency_ms).await; + Ok((response, node_url, latency_ms)) + } else { + // For non-JSON-RPC endpoints, HTTP success is enough + record_success(state, &node_url, latency_ms).await; + Ok((response, node_url, latency_ms)) + } + } else { + // Non-200 status codes are failures + record_failure(state, &node_url).await; + Err(HandlerError::RequestError(format!( + "HTTP {}", + response.status() + ))) + } + } + Err(e) => { + record_failure(state, &node_url).await; + Err(e) + } + } +} + +async fn race_requests( + state: &AppState, + path: &str, + method: &str, + headers: &HeaderMap, + body: Option<&[u8]>, +) -> Result { + // Extract JSON-RPC method for better logging + let jsonrpc_method = if path == "/json_rpc" { + if let Some(body_data) = body { + extract_jsonrpc_method(body_data) + } else { + None + } + } else { + None + }; + const POOL_SIZE: usize = 20; + let mut tried_nodes = std::collections::HashSet::new(); + let mut pool_index = 0; + let mut collected_errors: Vec<(String, String)> = Vec::new(); + + // Get the exclusive pool of 20 nodes once at the beginning + let available_pool = { + let node_pool_guard = state.node_pool.read().await; + let reliable_nodes = node_pool_guard + .get_top_reliable_nodes(POOL_SIZE) + .await + .map_err(|e| HandlerError::PoolError(e.to_string()))?; + + let pool: Vec = reliable_nodes + .into_iter() + .map(|node| node.full_url) + .collect(); + + pool + }; + + if available_pool.is_empty() { + return Err(HandlerError::NoNodes); + } + + // Power of Two Choices within the exclusive pool + while pool_index < available_pool.len() && tried_nodes.len() < POOL_SIZE { + let mut node1_option = None; + let mut node2_option = None; + + // Select first untried node from pool + for (i, node) in available_pool.iter().enumerate().skip(pool_index) { + if !tried_nodes.contains(node) { + node1_option = Some(node.clone()); + pool_index = i + 1; + break; + } + } + + // Select second untried node from pool (different from first) + for node in available_pool.iter().skip(pool_index) { + if !tried_nodes.contains(node) && Some(node) != node1_option.as_ref() { + node2_option = Some(node.clone()); + break; + } + } + + // If we can't get any new nodes from the pool, we've exhausted our options + if node1_option.is_none() && node2_option.is_none() { + break; + } + + // Store node URLs for error tracking before consuming them + let current_nodes: Vec = [&node1_option, &node2_option] + .iter() + .filter_map(|opt| opt.as_ref()) + .cloned() + .collect(); + + let mut requests = Vec::new(); + + if let Some(node1) = node1_option { + tried_nodes.insert(node1.clone()); + requests.push(single_raw_request( + state, + node1.clone(), + path, + method, + headers, + body, + )); + } + + if let Some(node2) = node2_option { + tried_nodes.insert(node2.clone()); + requests.push(single_raw_request( + state, + node2.clone(), + path, + method, + headers, + body, + )); + } + + if requests.is_empty() { + 
break; + } + + match &jsonrpc_method { + Some(rpc_method) => debug!( + "Racing {} requests to {} (JSON-RPC: {}): {} nodes (tried {} so far)", + method, + path, + rpc_method, + requests.len(), + tried_nodes.len() + ), + None => debug!( + "Racing {} requests to {}: {} nodes (tried {} so far)", + method, + path, + requests.len(), + tried_nodes.len() + ), + } + + // Handle the requests based on how many we have + let result = match requests.len() { + 1 => { + // Only one request + requests.into_iter().next().unwrap().await + } + 2 => { + // Two requests - race them + let mut iter = requests.into_iter(); + let req1 = iter.next().unwrap(); + let req2 = iter.next().unwrap(); + + tokio::select! { + result1 = req1 => result1, + result2 = req2 => result2, + } + } + _ => unreachable!("We only add 1 or 2 requests"), + }; + + match result { + Ok((response, winning_node, latency_ms)) => { + match &jsonrpc_method { + Some(rpc_method) => { + debug!( + "{} response from {} ({}ms) - SUCCESS after trying {} nodes! JSON-RPC: {}", + method, winning_node, latency_ms, tried_nodes.len(), rpc_method + ) + } + None => debug!( + "{} response from {} ({}ms) - SUCCESS after trying {} nodes!", + method, + winning_node, + latency_ms, + tried_nodes.len() + ), + } + record_success(state, &winning_node, latency_ms).await; + return Ok(response); + } + Err(e) => { + // Since we don't know which specific node failed in the race, + // record the error for all nodes in this batch + for node_url in ¤t_nodes { + collected_errors.push((node_url.clone(), e.to_string())); + } + debug!( + "Request failed: {} - retrying with different nodes from pool...", + e + ); + continue; + } + } + } + + // Log detailed error information + let detailed_errors: Vec = collected_errors + .iter() + .map(|(node, error)| format!("{}: {}", node, error)) + .collect(); + + match &jsonrpc_method { + Some(rpc_method) => error!( + "All {} requests failed after trying {} nodes (JSON-RPC: {}). Detailed errors:\n{}", + method, + tried_nodes.len(), + rpc_method, + detailed_errors.join("\n") + ), + None => error!( + "All {} requests failed after trying {} nodes. Detailed errors:\n{}", + method, + tried_nodes.len(), + detailed_errors.join("\n") + ), + } + + Err(HandlerError::AllRequestsFailed(collected_errors)) +} + +/// Forward a request to the node pool, returning either a successful response or a simple +/// `500` with text "All nodes failed". Keeps the error handling logic in one place so the +/// public handlers stay readable. 
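Editor's aside, not part of the patch: `race_requests` above races a proxied request against up to two candidate nodes at a time, while `NodePool::get_next_node` (pool.rs, earlier in this diff) compares two random candidates by a single "goodness" score. That score, as computed by `calculate_goodness_score`, restated as a standalone sketch with an illustrative signature:

```rust
/// Success rate weighted by how much evidence we have (capped at 200 checks),
/// blended with latency when an average is known: 80% reliability, 20% speed.
/// Example: 150 successes / 10 failures at 300 ms average latency gives
/// 0.9375 * 0.8 = 0.75, then 0.75 * 0.8 + 0.85 * 0.2 = 0.77.
fn goodness_score(success_count: i64, failure_count: i64, avg_latency_ms: Option<f64>) -> f64 {
    let total_checks = success_count + failure_count;
    if total_checks == 0 {
        return 0.0;
    }

    let success_rate = success_count as f64 / total_checks as f64;
    let recency_weight = (total_checks as f64).min(200.0) / 200.0;
    let mut score = success_rate * recency_weight;

    if let Some(latency) = avg_latency_ms {
        let latency_factor = 1.0 - (latency.min(2000.0) / 2000.0);
        score = score * 0.8 + latency_factor * 0.2;
    }

    score
}
```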
+async fn proxy_request( + state: &AppState, + path: &str, + method: &str, + headers: &HeaderMap, + body: Option<&[u8]>, +) -> Response { + match race_requests(state, path, method, headers, body).await { + Ok(res) => res, + Err(handler_error) => { + let error_response = match &handler_error { + HandlerError::AllRequestsFailed(node_errors) => { + json!({ + "error": "All nodes failed", + "details": { + "type": "AllRequestsFailed", + "message": "All proxy requests to available nodes failed", + "node_errors": node_errors.iter().map(|(node, error)| { + json!({ + "node": node, + "error": error + }) + }).collect::>(), + "total_nodes_tried": node_errors.len() + } + }) + } + HandlerError::NoNodes => { + json!({ + "error": "No nodes available", + "details": { + "type": "NoNodes", + "message": "No healthy nodes available in the pool" + } + }) + } + HandlerError::PoolError(msg) => { + json!({ + "error": "Pool error", + "details": { + "type": "PoolError", + "message": msg + } + }) + } + HandlerError::RequestError(msg) => { + json!({ + "error": "Request error", + "details": { + "type": "RequestError", + "message": msg + } + }) + } + }; + + Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header("content-type", "application/json") + .body(Body::from(error_response.to_string())) + .unwrap_or_else(|_| Response::new(Body::empty())) + } + } +} + +#[axum::debug_handler] +pub async fn simple_proxy_handler( + State(state): State, + method: Method, + uri: axum::http::Uri, + headers: HeaderMap, + body: axum::body::Bytes, +) -> Response { + let body_size = body.len(); + let request_id = Uuid::new_v4(); + let path = uri.path().to_string(); + let method_str = method.to_string(); + let path_clone = path.clone(); + + // Extract JSON-RPC method for tracing span + let body_option = (!body.is_empty()).then_some(&body[..]); + let jsonrpc_method = if path == "/json_rpc" { + if let Some(body_data) = body_option { + extract_jsonrpc_method(body_data) + } else { + None + } + } else { + None + }; + let jsonrpc_method_for_span = jsonrpc_method.as_deref().unwrap_or("N/A").to_string(); + + async move { + match &jsonrpc_method { + Some(rpc_method) => debug!( + "Proxying {} {} ({} bytes) - JSON-RPC method: {}", + method, path, body_size, rpc_method + ), + None => debug!("Proxying {} {} ({} bytes)", method, path, body_size), + } + + proxy_request(&state, &path, method.as_str(), &headers, body_option).await + } + .instrument(info_span!("proxy_request", + request_id = %request_id, + method = %method_str, + path = %path_clone, + body_size = body_size, + jsonrpc_method = %jsonrpc_method_for_span + )) + .await +} + +#[axum::debug_handler] +pub async fn simple_stats_handler(State(state): State) -> Response { + async move { + let node_pool_guard = state.node_pool.read().await; + + match node_pool_guard.get_current_status().await { + Ok(status) => { + let stats_json = serde_json::json!({ + "status": "healthy", + "total_node_count": status.total_node_count, + "healthy_node_count": status.healthy_node_count, + "successful_health_checks": status.successful_health_checks, + "unsuccessful_health_checks": status.unsuccessful_health_checks, + "top_reliable_nodes": status.top_reliable_nodes + }); + + Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/json") + .body(Body::from(stats_json.to_string())) + .unwrap_or_else(|_| Response::new(Body::empty())) + } + Err(e) => { + error!("Failed to get pool status: {}", e); + let error_json = r#"{"status":"error","message":"Failed to get pool status"}"#; + 
Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header("content-type", "application/json") + .body(Body::from(error_json)) + .unwrap_or_else(|_| Response::new(Body::empty())) + } + } + } + .instrument(info_span!("stats_request")) + .await +} diff --git a/monero-sys/build.rs b/monero-sys/build.rs index ffe607d9..10f697bf 100644 --- a/monero-sys/build.rs +++ b/monero-sys/build.rs @@ -2,6 +2,7 @@ use cmake::Config; fn main() { let is_github_actions: bool = std::env::var("GITHUB_ACTIONS").is_ok(); + let is_docker_build: bool = std::env::var("DOCKER_BUILD").is_ok(); // Only rerun this when the bridge.rs or static_bridge.h file changes. println!("cargo:rerun-if-changed=src/bridge.rs"); @@ -37,9 +38,10 @@ fn main() { .define("GTEST_HAS_ABSL", "OFF") // Use lightweight crypto library .define("MONERO_WALLET_CRYPTO_LIBRARY", "cn") - .build_arg(match is_github_actions { - true => "-j1", - false => "-j", + .build_arg(match (is_github_actions, is_docker_build) { + (true, _) => "-j1", + (_, true) => "-j1", + (_, _) => "-j", }) .build(); diff --git a/monero-sys/src/lib.rs b/monero-sys/src/lib.rs index 296144cd..517aa05a 100644 --- a/monero-sys/src/lib.rs +++ b/monero-sys/src/lib.rs @@ -1054,7 +1054,7 @@ impl FfiWallet { monero::Address::from_str(&address.to_string()).expect("wallet's own address to be valid") } - fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> { + pub fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> { tracing::debug!(%address, "Setting daemon address"); let_cxx_string!(address = address); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index d4f3af77..58368ac7 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] # also update this in the readme, changelog, and github actions -channel = "1.82" +channel = "1.85" components = ["clippy"] targets = ["armv7-unknown-linux-gnueabihf"] diff --git a/src-gui/package.json b/src-gui/package.json index 30ec2cca..72e2424c 100644 --- a/src-gui/package.json +++ b/src-gui/package.json @@ -4,9 +4,9 @@ "version": "0.7.0", "type": "module", "scripts": { - "check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts", - "gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts", - "gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts", + "check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts", + "gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt ./src/models/tauriModel.ts", + "gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt ./src/models/tauriModel.ts", "test": "vitest", "test:ui": "vitest --ui", "dev": "vite", diff --git a/src-gui/src/renderer/background.ts b/src-gui/src/renderer/background.ts index 39a188f4..7f633a76 100644 --- a/src-gui/src/renderer/background.ts +++ 
b/src-gui/src/renderer/background.ts @@ -8,6 +8,7 @@ import { approvalEventReceived, backgroundProgressEventReceived, } from "store/features/rpcSlice"; +import { poolStatusReceived } from "store/features/poolSlice"; import { swapProgressEventReceived } from "store/features/swapSlice"; import logger from "utils/logger"; import { @@ -127,6 +128,10 @@ export async function setupBackgroundTasks(): Promise { store.dispatch(backgroundProgressEventReceived(eventData)); break; + case "PoolStatusUpdate": + store.dispatch(poolStatusReceived(eventData)); + break; + default: exhaustiveGuard(channelName); } diff --git a/src-gui/src/renderer/components/alert/UnfinishedSwapsAlert.tsx b/src-gui/src/renderer/components/alert/UnfinishedSwapsAlert.tsx index caa019f8..1605afb3 100644 --- a/src-gui/src/renderer/components/alert/UnfinishedSwapsAlert.tsx +++ b/src-gui/src/renderer/components/alert/UnfinishedSwapsAlert.tsx @@ -24,8 +24,8 @@ export default function UnfinishedSwapsAlert() { > You have{" "} {resumableSwapsCount > 1 - ? `${resumableSwapsCount} unfinished swaps` - : "one unfinished swap"} + ? `${resumableSwapsCount} pending swaps` + : "one pending swap"} ); } diff --git a/src-gui/src/renderer/components/modal/swap/SwapStateStepper.tsx b/src-gui/src/renderer/components/modal/swap/SwapStateStepper.tsx index 35c7f48c..957798ba 100644 --- a/src-gui/src/renderer/components/modal/swap/SwapStateStepper.tsx +++ b/src-gui/src/renderer/components/modal/swap/SwapStateStepper.tsx @@ -63,7 +63,10 @@ function getActiveStep(state: SwapState | null): PathStep | null { // Bitcoin has been locked, waiting for the counterparty to lock their XMR case "BtcLockTxInMempool": // We only display the first step as completed if the Bitcoin lock has been confirmed - if (latestState.content.btc_lock_confirmations > 0) { + if ( + latestState.content.btc_lock_confirmations !== undefined && + latestState.content.btc_lock_confirmations > 0 + ) { return [PathType.HAPPY_PATH, 1, isReleased]; } return [PathType.HAPPY_PATH, 0, isReleased]; diff --git a/src-gui/src/renderer/components/modal/swap/pages/in_progress/BitcoinLockTxInMempoolPage.tsx b/src-gui/src/renderer/components/modal/swap/pages/in_progress/BitcoinLockTxInMempoolPage.tsx index 92de445d..0007cc39 100644 --- a/src-gui/src/renderer/components/modal/swap/pages/in_progress/BitcoinLockTxInMempoolPage.tsx +++ b/src-gui/src/renderer/components/modal/swap/pages/in_progress/BitcoinLockTxInMempoolPage.tsx @@ -1,4 +1,5 @@ import { TauriSwapProgressEventContent } from "models/tauriModelExt"; +import { formatConfirmations } from "utils/formatUtils"; import BitcoinTransactionInfoBox from "../../BitcoinTransactionInfoBox"; import SwapStatusAlert from "renderer/components/alert/SwapStatusAlert/SwapStatusAlert"; import { useActiveSwapInfo } from "store/hooks"; @@ -15,10 +16,11 @@ export default function BitcoinLockTxInMempoolPage({ return ( - {btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && ( + {(btc_lock_confirmations === undefined || + btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD) && ( Your Bitcoin has been locked.{" "} - {btc_lock_confirmations > 0 + {btc_lock_confirmations !== undefined && btc_lock_confirmations > 0 ? "We are waiting for the other party to lock their Monero." : "We are waiting for the blockchain to confirm the transaction. 
Once confirmed, the other party will lock their Monero."} @@ -30,9 +32,10 @@ export default function BitcoinLockTxInMempoolPage({ gap: "1rem", }} > - {btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && ( - - )} + {btc_lock_confirmations !== undefined && + btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && ( + + )} - Confirmations: {btc_lock_confirmations} + Confirmations: {formatConfirmations(btc_lock_confirmations)} } /> diff --git a/src-gui/src/renderer/components/modal/swap/pages/in_progress/XmrLockInMempoolPage.tsx b/src-gui/src/renderer/components/modal/swap/pages/in_progress/XmrLockInMempoolPage.tsx index 627a61b6..de8f695e 100644 --- a/src-gui/src/renderer/components/modal/swap/pages/in_progress/XmrLockInMempoolPage.tsx +++ b/src-gui/src/renderer/components/modal/swap/pages/in_progress/XmrLockInMempoolPage.tsx @@ -1,12 +1,13 @@ import { Box, DialogContentText } from "@mui/material"; import { TauriSwapProgressEventContent } from "models/tauriModelExt"; +import { formatConfirmations } from "utils/formatUtils"; import MoneroTransactionInfoBox from "../../MoneroTransactionInfoBox"; export default function XmrLockTxInMempoolPage({ xmr_lock_tx_confirmations, xmr_lock_txid, }: TauriSwapProgressEventContent<"XmrLockTxInMempool">) { - const additionalContent = `Confirmations: ${xmr_lock_tx_confirmations}/10`; + const additionalContent = `Confirmations: ${formatConfirmations(xmr_lock_tx_confirmations, 10)}`; return ( diff --git a/src-gui/src/renderer/components/pages/help/MoneroPoolHealthBox.tsx b/src-gui/src/renderer/components/pages/help/MoneroPoolHealthBox.tsx new file mode 100644 index 00000000..b2f692b4 --- /dev/null +++ b/src-gui/src/renderer/components/pages/help/MoneroPoolHealthBox.tsx @@ -0,0 +1,192 @@ +import { + Box, + Typography, + Table, + TableBody, + TableCell, + TableContainer, + TableHead, + TableRow, + Chip, + LinearProgress, + useTheme, +} from "@mui/material"; +import InfoBox from "renderer/components/modal/swap/InfoBox"; +import { ReliableNodeInfo } from "models/tauriModel"; +import NetworkWifiIcon from "@mui/icons-material/NetworkWifi"; +import { useAppSelector } from "store/hooks"; + +export default function MoneroPoolHealthBox() { + const { poolStatus, isLoading } = useAppSelector((state) => ({ + poolStatus: state.pool.status, + isLoading: state.pool.isLoading, + })); + const theme = useTheme(); + + const formatLatency = (latencyMs?: number) => { + if (latencyMs === undefined || latencyMs === null) return "N/A"; + return `${Math.round(latencyMs)}ms`; + }; + + const formatSuccessRate = (rate: number) => { + return `${(rate * 100).toFixed(1)}%`; + }; + + const getHealthColor = (healthyCount: number, reliableCount: number) => { + if (reliableCount === 0) return theme.palette.error.main; + if (reliableCount < 3) return theme.palette.warning.main; + return theme.palette.success.main; + }; + + const renderHealthSummary = () => { + if (!poolStatus) return null; + + const totalChecks = + poolStatus.successful_health_checks + + poolStatus.unsuccessful_health_checks; + const overallSuccessRate = + totalChecks > 0 + ? (poolStatus.successful_health_checks / totalChecks) * 100 + : 0; + + return ( + + + 0 ? "success" : "error"} + variant="outlined" + size="small" + /> + 80 + ? "success" + : overallSuccessRate > 60 + ? 
"warning" + : "error" + } + variant="outlined" + size="small" + /> + + ); + }; + + const renderTopNodes = () => { + if (!poolStatus || poolStatus.top_reliable_nodes.length === 0) { + return ( + <> + + + 🚧 + + + Bootstrapping remote Monero node registry... But you can already + start swapping! + + + + ); + } + + return ( + + + + + Node URL + Success Rate + Avg Latency + + + + {poolStatus.top_reliable_nodes.map( + (node: ReliableNodeInfo, index: number) => ( + + + + {node.url} + + + + + {formatSuccessRate(node.success_rate)} + + + + + {formatLatency(node.avg_latency_ms)} + + + + ), + )} + +
+
+ ); + }; + + // Show bootstrapping message when no data is available + if (!poolStatus && !isLoading) { + return ( + + + Monero Pool Health +
+ } + mainContent={ + + + 🚧 + + + Bootstrapping pool health monitoring. You can already start using + the app! + + + } + additionalContent={null} + icon={null} + loading={false} + /> + ); + } + + return ( + + + Monero Pool Health +
+ } + mainContent={ + + Real-time health monitoring of the Monero node pool. Shows node + availability, success rates, and performance metrics. + + } + additionalContent={ + + {poolStatus && renderHealthSummary()} + {renderTopNodes()} + + } + icon={null} + loading={isLoading} + /> + ); +} diff --git a/src-gui/src/renderer/components/pages/help/SettingsBox.tsx b/src-gui/src/renderer/components/pages/help/SettingsBox.tsx index 2415c608..daaa4863 100644 --- a/src-gui/src/renderer/components/pages/help/SettingsBox.tsx +++ b/src-gui/src/renderer/components/pages/help/SettingsBox.tsx @@ -20,6 +20,11 @@ import { useTheme, Switch, SelectChangeEvent, + TextField, + ToggleButton, + ToggleButtonGroup, + Chip, + LinearProgress, } from "@mui/material"; import { addNode, @@ -35,11 +40,13 @@ import { setFiatCurrency, setTheme, setTorEnabled, + setUseMoneroRpcPool, } from "store/features/settingsSlice"; import { useAppDispatch, useNodes, useSettings } from "store/hooks"; import ValidatedTextField from "renderer/components/other/ValidatedTextField"; +import PromiseInvokeButton from "renderer/components/PromiseInvokeButton"; import HelpIcon from "@mui/icons-material/HelpOutline"; -import { ReactNode, useState } from "react"; +import { ReactNode, useState, useEffect } from "react"; import { Theme } from "renderer/components/theme"; import { Add, @@ -47,12 +54,18 @@ import { Delete, Edit, HourglassEmpty, + Refresh, } from "@mui/icons-material"; + import { getNetwork } from "store/config"; import { currencySymbol } from "utils/formatUtils"; import InfoBox from "renderer/components/modal/swap/InfoBox"; import { isValidMultiAddressWithPeerId } from "utils/parseUtils"; +import { useAppSelector } from "store/hooks"; +import { getNodeStatus } from "renderer/rpc"; +import { setStatus } from "store/features/nodesSlice"; + const PLACEHOLDER_ELECTRUM_RPC_URL = "ssl://blockstream.info:700"; const PLACEHOLDER_MONERO_NODE_URL = "http://xmr-node.cakewallet.com:18081"; @@ -83,6 +96,7 @@ export default function SettingsBox() { + @@ -268,15 +282,21 @@ function ElectrumRpcUrlSetting() { function SettingLabel({ label, tooltip, + disabled = false, }: { label: ReactNode; tooltip: string | null; + disabled?: boolean; }) { + const opacity = disabled ? 0.5 : 1; + return ( - + {label} - + @@ -285,38 +305,173 @@ function SettingLabel({ } /** - * A setting that allows you to select the Monero Node URL to use. + * A setting that allows you to toggle between using the Monero RPC Pool and custom nodes. + */ +function MoneroRpcPoolSetting() { + const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool); + const dispatch = useAppDispatch(); + + const handleChange = ( + event: React.MouseEvent, + newValue: string, + ) => { + if (newValue !== null) { + dispatch(setUseMoneroRpcPool(newValue === "pool")); + } + }; + + return ( + + + + + + + Pool (Recommended) + Manual + + + + ); +} + +/** + * A setting that allows you to configure a single Monero Node URL. + * Gets disabled when RPC pool is enabled. 
*/ function MoneroNodeUrlSetting() { const network = getNetwork(); - const [tableVisible, setTableVisible] = useState(false); + const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool); + const moneroNodeUrl = useSettings( + (s) => s.nodes[network][Blockchain.Monero][0] || "", + ); + const nodeStatuses = useNodes((s) => s.nodes); + const dispatch = useAppDispatch(); + const [isRefreshing, setIsRefreshing] = useState(false); - const isValid = (url: string) => isValidUrl(url, ["http"]); + const currentNodes = useSettings((s) => s.nodes[network][Blockchain.Monero]); + + const handleNodeUrlChange = (newUrl: string) => { + // Remove existing nodes and add the new one + currentNodes.forEach((node) => { + dispatch(removeNode({ network, type: Blockchain.Monero, node })); + }); + + if (newUrl.trim()) { + dispatch( + addNode({ network, type: Blockchain.Monero, node: newUrl.trim() }), + ); + } + }; + + const handleRefreshStatus = async () => { + // Don't refresh if pool is enabled or no node URL is configured + if (!moneroNodeUrl || useMoneroRpcPool) return; + + setIsRefreshing(true); + try { + const status = await getNodeStatus( + moneroNodeUrl, + Blockchain.Monero, + network, + ); + + // Update the status in the store + dispatch( + setStatus({ + node: moneroNodeUrl, + status, + blockchain: Blockchain.Monero, + }), + ); + } catch (error) { + console.error("Failed to refresh node status:", error); + } finally { + setIsRefreshing(false); + } + }; + + const isValid = (url: string) => url === "" || isValidUrl(url, ["http"]); + const nodeStatus = moneroNodeUrl + ? nodeStatuses[Blockchain.Monero][moneroNodeUrl] + : null; return ( - setTableVisible(!tableVisible)} size="large"> - - - {tableVisible ? ( - setTableVisible(false)} - network={network} - blockchain={Blockchain.Monero} - isValid={isValid} + + - ) : ( - <> - )} + <> + + + + + + + + {isRefreshing ? : } + + + + ); @@ -380,7 +535,7 @@ function NodeTableModal({ When the daemon is started, it will attempt to connect to the first available {blockchain} node in this list. If you leave this field empty or all nodes are unavailable, it will choose from a list of - known nodes at random. Requires a restart to take effect. + known nodes at random. - - - ); - case false: - return ( - - - - ); - default: - return ( - - - - ); - } -} - /** * A table that displays the available nodes for a given network and blockchain. * It allows you to add, remove, and move nodes up the list. 
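Editor's note: the pool/manual toggle above decides what the GUI hands to the backend as `monero_node_config` (see rpc.ts further down in this diff). The Rust type behind that field is not part of this excerpt; as an assumption, an adjacently tagged serde enum along these lines would produce exactly the `{ type: "Pool" }` / `{ type: "SingleNode", content: { url } }` objects the frontend builds:

```rust
use serde::{Deserialize, Serialize};

/// Hypothetical sketch only; the real definition lives in the swap crate and is
/// not shown in this diff.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "content")]
pub enum MoneroNodeConfig {
    /// Use the bundled monero-rpc-pool: serializes as `{"type":"Pool"}`.
    Pool,
    /// Connect to a single user-supplied daemon:
    /// serializes as `{"type":"SingleNode","content":{"url":"..."}}`.
    SingleNode { url: String },
}
```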
@@ -515,7 +638,9 @@ function NodeTable({ {/* Node status icon */} - + {/* Remove and move buttons */} @@ -582,7 +707,7 @@ export function TorSettings() { diff --git a/src-gui/src/renderer/components/pages/help/SettingsPage.tsx b/src-gui/src/renderer/components/pages/help/SettingsPage.tsx index eca0cd36..448062e5 100644 --- a/src-gui/src/renderer/components/pages/help/SettingsPage.tsx +++ b/src-gui/src/renderer/components/pages/help/SettingsPage.tsx @@ -5,6 +5,7 @@ import DaemonControlBox from "./DaemonControlBox"; import SettingsBox from "./SettingsBox"; import ExportDataBox from "./ExportDataBox"; import DiscoveryBox from "./DiscoveryBox"; +import MoneroPoolHealthBox from "./MoneroPoolHealthBox"; import { useLocation } from "react-router-dom"; import { useEffect } from "react"; @@ -29,6 +30,7 @@ export default function SettingsPage() { > + diff --git a/src-gui/src/renderer/rpc.ts b/src-gui/src/renderer/rpc.ts index 95b026ba..56e2f476 100644 --- a/src-gui/src/renderer/rpc.ts +++ b/src-gui/src/renderer/rpc.ts @@ -223,36 +223,29 @@ export async function initializeContext() { const bitcoinNodes = store.getState().settings.nodes[network][Blockchain.Bitcoin]; - // For Monero nodes, check availability and use the first working one - const moneroNodes = - store.getState().settings.nodes[network][Blockchain.Monero]; - let moneroNode = null; + // For Monero nodes, determine whether to use pool or custom node + const useMoneroRpcPool = store.getState().settings.useMoneroRpcPool; - if (moneroNodes.length > 0) { - try { - moneroNode = await Promise.any( - moneroNodes.map(async (node) => { - const isAvailable = await getNodeStatus( - node, - Blockchain.Monero, - network, - ); - if (isAvailable) { - return node; - } - throw new Error(`Monero node ${node} is not available`); - }), - ); - } catch { - // If no Monero node is available, use null - moneroNode = null; - } - } + const moneroNodeUrl = + store.getState().settings.nodes[network][Blockchain.Monero][0] ?? null; + + // Check the state of the Monero node + const isMoneroNodeOnline = await getMoneroNodeStatus(moneroNodeUrl, network); + + const moneroNodeConfig = + useMoneroRpcPool || moneroNodeUrl == null || !isMoneroNodeOnline + ? 
{ type: "Pool" as const } + : { + type: "SingleNode" as const, + content: { + url: moneroNodeUrl, + }, + }; // Initialize Tauri settings const tauriSettings: TauriSettings = { electrum_rpc_urls: bitcoinNodes, - monero_node_url: moneroNode, + monero_node_config: moneroNodeConfig, use_tor: useTor, }; @@ -325,13 +318,15 @@ export async function updateAllNodeStatuses() { const network = getNetwork(); const settings = store.getState().settings; - // Only check Monero nodes, skip Bitcoin nodes since we pass all electrum servers - // to the backend without checking them (ElectrumBalancer handles failover) - await Promise.all( - settings.nodes[network][Blockchain.Monero].map((node) => - updateNodeStatus(node, Blockchain.Monero, network), - ), - ); + // Only check Monero nodes if we're using custom nodes (not RPC pool) + // Skip Bitcoin nodes since we pass all electrum servers to the backend without checking them (ElectrumBalancer handles failover) + if (!settings.useMoneroRpcPool) { + await Promise.all( + settings.nodes[network][Blockchain.Monero].map((node) => + updateNodeStatus(node, Blockchain.Monero, network), + ), + ); + } } export async function getMoneroAddresses(): Promise { @@ -361,3 +356,9 @@ export async function saveLogFiles( ): Promise { await invokeUnsafe("save_txt_files", { zipFileName, content }); } + +export async function saveFilesInDialog(files: Record) { + await invokeUnsafe("save_txt_files", { + files, + }); +} diff --git a/src-gui/src/store/combinedReducer.ts b/src-gui/src/store/combinedReducer.ts index 99a9cd85..a81b278b 100644 --- a/src-gui/src/store/combinedReducer.ts +++ b/src-gui/src/store/combinedReducer.ts @@ -7,6 +7,7 @@ import torSlice from "./features/torSlice"; import settingsSlice from "./features/settingsSlice"; import nodesSlice from "./features/nodesSlice"; import conversationsSlice from "./features/conversationsSlice"; +import poolSlice from "./features/poolSlice"; export const reducers = { swap: swapReducer, @@ -18,4 +19,5 @@ export const reducers = { settings: settingsSlice, nodes: nodesSlice, conversations: conversationsSlice, + pool: poolSlice, }; diff --git a/src-gui/src/store/features/poolSlice.ts b/src-gui/src/store/features/poolSlice.ts new file mode 100644 index 00000000..4ea9e70c --- /dev/null +++ b/src-gui/src/store/features/poolSlice.ts @@ -0,0 +1,31 @@ +import { createSlice, PayloadAction } from "@reduxjs/toolkit"; +import { PoolStatus } from "models/tauriModel"; + +interface PoolSlice { + status: PoolStatus | null; + isLoading: boolean; +} + +const initialState: PoolSlice = { + status: null, + isLoading: true, +}; + +export const poolSlice = createSlice({ + name: "pool", + initialState, + reducers: { + poolStatusReceived(slice, action: PayloadAction) { + slice.status = action.payload; + slice.isLoading = false; + }, + poolStatusReset(slice) { + slice.status = null; + slice.isLoading = true; + }, + }, +}); + +export const { poolStatusReceived, poolStatusReset } = poolSlice.actions; + +export default poolSlice.reducer; diff --git a/src-gui/src/store/features/settingsSlice.ts b/src-gui/src/store/features/settingsSlice.ts index 27d7ea9a..2a378384 100644 --- a/src-gui/src/store/features/settingsSlice.ts +++ b/src-gui/src/store/features/settingsSlice.ts @@ -17,6 +17,8 @@ export interface SettingsState { fiatCurrency: FiatCurrency; /// Whether to enable Tor for p2p connections enableTor: boolean; + /// Whether to use the Monero RPC pool for load balancing (true) or custom nodes (false) + useMoneroRpcPool: boolean; userHasSeenIntroduction: boolean; /// 
List of rendezvous points rendezvousPoints: string[]; @@ -119,6 +121,7 @@ const initialState: SettingsState = { fetchFiatPrices: false, fiatCurrency: FiatCurrency.Usd, enableTor: true, + useMoneroRpcPool: true, // Default to using RPC pool userHasSeenIntroduction: false, rendezvousPoints: DEFAULT_RENDEZVOUS_POINTS, }; @@ -206,6 +209,9 @@ const alertsSlice = createSlice({ setTorEnabled(slice, action: PayloadAction) { slice.enableTor = action.payload; }, + setUseMoneroRpcPool(slice, action: PayloadAction) { + slice.useMoneroRpcPool = action.payload; + }, }, }); @@ -218,6 +224,7 @@ export const { setFetchFiatPrices, setFiatCurrency, setTorEnabled, + setUseMoneroRpcPool, setUserHasSeenIntroduction, addRendezvousPoint, removeRendezvousPoint, diff --git a/src-gui/src/utils/formatUtils.ts b/src-gui/src/utils/formatUtils.ts index bd654f6a..1c84adc9 100644 --- a/src-gui/src/utils/formatUtils.ts +++ b/src-gui/src/utils/formatUtils.ts @@ -83,3 +83,24 @@ export function currencySymbol(currency: FiatCurrency): string | null { return null; } } + +/** + * Formats confirmation count, displaying "?" when the transaction state is unknown + * @param confirmations - The number of confirmations, or undefined if unknown + * @param maxConfirmations - Optional maximum confirmations to show as "X/Y" format + * @returns Formatted string showing confirmations or "?" if unknown + */ +export function formatConfirmations( + confirmations: number | undefined | null, + maxConfirmations?: number, +): string { + if (confirmations === undefined || confirmations === null) { + return maxConfirmations !== undefined ? `?/${maxConfirmations}` : "?"; + } + + if (maxConfirmations !== undefined) { + return `${confirmations}/${maxConfirmations}`; + } + + return confirmations.toString(); +} diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 4482ed69..99db2e95 100644 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "unstoppableswap-gui-rs" -version = "2.2.0-beta" +version = "2.3.0-beta.1" authors = [ "binarybaron", "einliterflasche", "unstoppableswap" ] edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -16,6 +16,7 @@ tauri-build = { version = "^2.0.0", features = [ "config-json5" ] } [dependencies] anyhow = "1" +monero-rpc-pool = { path = "../monero-rpc-pool" } rustls = { version = "0.23.26", default-features = false, features = ["ring"] } serde = { version = "1", features = [ "derive" ] } serde_json = "1" diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 9ad88627..f54ee36e 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -17,7 +17,7 @@ use swap::cli::{ tauri_bindings::{TauriContextStatusEvent, TauriEmitter, TauriHandle, TauriSettings}, Context, ContextBuilder, }, - command::{Bitcoin, Monero}, + command::Bitcoin, }; use tauri::{async_runtime::RwLock, Manager, RunEvent}; use tauri_plugin_dialog::DialogExt; @@ -141,7 +141,8 @@ fn setup(app: &mut tauri::App) -> Result<(), Box> { // We need to set a value for the Tauri state right at the start // If we don't do this, Tauri commands will panic at runtime if no value is present - app_handle.manage::>(RwLock::new(State::new())); + let state = RwLock::new(State::new()); + app_handle.manage::>(state); Ok(()) } @@ -192,7 +193,7 @@ pub fn run() { get_data_dir, resolve_approval_request, redact, - save_txt_files + save_txt_files, ]) .setup(setup) .build(tauri::generate_context!()) @@ -377,9 +378,7 @@ async fn initialize_context( bitcoin_electrum_rpc_urls: 
settings.electrum_rpc_urls.clone(), bitcoin_target_block: None, }) - .with_monero(Monero { - monero_node_address: settings.monero_node_url.clone(), - }) + .with_monero(settings.monero_node_config) .with_json(false) .with_debug(true) .with_tor(settings.use_tor) diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 829e0b4d..f8e568a9 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -1,6 +1,6 @@ { "productName": "UnstoppableSwap", - "version": "2.2.0-beta", + "version": "2.3.0-beta.1", "identifier": "net.unstoppableswap.gui", "build": { "devUrl": "http://localhost:1420", diff --git a/swap/Cargo.toml b/swap/Cargo.toml index f6c53591..bca68aa0 100644 --- a/swap/Cargo.toml +++ b/swap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "swap" -version = "2.2.0-beta" +version = "2.3.0-beta.1" authors = ["The COMIT guys "] edition = "2021" description = "XMR/BTC trustless atomic swaps." @@ -13,7 +13,7 @@ tauri = ["dep:tauri"] [dependencies] anyhow = "1" -arti-client = { version = "0.25.0", features = ["static-sqlite", "tokio", "rustls"], default-features = false } +arti-client = { version = "0.25.0", features = ["static-sqlite", "tokio", "rustls", "onion-service-service"], default-features = false } async-compression = { version = "0.3", features = ["bzip2", "tokio"] } async-trait = "0.1" asynchronous-codec = "0.7.0" @@ -40,11 +40,13 @@ ed25519-dalek = "1" futures = { version = "0.3", default-features = false, features = ["std"] } hex = "0.4" libp2p = { version = "0.53.2", features = ["tcp", "yamux", "dns", "noise", "request-response", "ping", "rendezvous", "identify", "macros", "cbor", "json", "tokio", "serde", "rsa"] } -libp2p-community-tor = { git = "https://github.com/umgefahren/libp2p-tor", branch = "main", features = ["listen-onion-service"] } +libp2p-community-tor = { git = "https://github.com/umgefahren/libp2p-tor", rev = "e6b913e0f1ac1fc90b3ee4dd31b5511140c4a9af", features = ["listen-onion-service"] } moka = { version = "0.12", features = ["sync", "future"] } monero = { version = "0.12", features = ["serde_support"] } monero-rpc = { path = "../monero-rpc" } +monero-rpc-pool = { path = "../monero-rpc-pool" } monero-sys = { path = "../monero-sys" } +electrum-pool = { path = "../electrum-pool" } once_cell = "1.19" pem = "3.0" proptest = "1" diff --git a/swap/src/asb/config.rs b/swap/src/asb/config.rs index f33bc174..577718c2 100644 --- a/swap/src/asb/config.rs +++ b/swap/src/asb/config.rs @@ -242,6 +242,12 @@ pub struct Monero { pub finality_confirmations: Option, #[serde(with = "crate::monero::network")] pub network: monero::Network, + #[serde(default = "default_monero_node_pool")] + pub monero_node_pool: bool, +} + +fn default_monero_node_pool() -> bool { + false } #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] @@ -461,6 +467,7 @@ pub fn query_user_for_initial_config(testnet: bool) -> Result { daemon_url: monero_daemon_url, finality_confirmations: None, network: monero_network, + monero_node_pool: false, }, tor: TorConf { register_hidden_service, @@ -511,6 +518,7 @@ mod tests { daemon_url: defaults.monero_daemon_address, finality_confirmations: None, network: monero::Network::Stagenet, + monero_node_pool: false, }, tor: Default::default(), maker: Maker { @@ -556,6 +564,7 @@ mod tests { daemon_url: defaults.monero_daemon_address, finality_confirmations: None, network: monero::Network::Mainnet, + monero_node_pool: false, }, tor: Default::default(), maker: Maker { @@ -611,6 +620,7 @@ mod tests { daemon_url: defaults.monero_daemon_address, 
finality_confirmations: None, network: monero::Network::Mainnet, + monero_node_pool: false, }, tor: Default::default(), maker: Maker { diff --git a/swap/src/bin/asb.rs b/swap/src/bin/asb.rs index 097bb495..dc13478f 100644 --- a/swap/src/bin/asb.rs +++ b/swap/src/bin/asb.rs @@ -44,6 +44,28 @@ use uuid::Uuid; const DEFAULT_WALLET_NAME: &str = "asb-wallet"; +trait IntoDaemon { + fn into_daemon(self) -> Result; +} + +impl IntoDaemon for url::Url { + fn into_daemon(self) -> Result { + let address = self.to_string(); + let ssl = self.scheme() == "https"; + + Ok(Daemon { address, ssl }) + } +} + +impl IntoDaemon for monero_rpc_pool::ServerInfo { + fn into_daemon(self) -> Result { + let address = format!("http://{}:{}", self.host, self.port); + let ssl = false; // Pool server always uses HTTP locally + + Ok(Daemon { address, ssl }) + } +} + #[tokio::main] pub async fn main() -> Result<()> { rustls::crypto::ring::default_provider() @@ -457,9 +479,39 @@ async fn init_monero_wallet( ) -> Result> { tracing::debug!("Initializing Monero wallets"); - let daemon = Daemon { - address: config.monero.daemon_url.to_string(), - ssl: config.monero.daemon_url.as_str().contains("https"), + let daemon = if config.monero.monero_node_pool { + // Start the monero-rpc-pool and use it + tracing::info!("Starting Monero RPC Pool for ASB"); + + let (server_info, _status_receiver, _pool_handle) = + monero_rpc_pool::start_server_with_random_port( + monero_rpc_pool::config::Config::new_random_port( + "127.0.0.1".to_string(), + config.data.dir.join("monero-rpc-pool"), + ), + env_config.monero_network, + ) + .await + .context("Failed to start Monero RPC Pool for ASB")?; + + let pool_url = format!("http://{}:{}", server_info.host, server_info.port); + tracing::info!("Monero RPC Pool started for ASB on {}", pool_url); + + server_info + .into_daemon() + .context("Failed to convert ServerInfo to Daemon")? + } else { + tracing::info!( + "Using direct Monero daemon connection: {}", + config.monero.daemon_url + ); + + config + .monero + .daemon_url + .clone() + .into_daemon() + .context("Failed to convert daemon URL to Daemon")? }; let manager = monero::Wallets::new( diff --git a/swap/src/bitcoin.rs b/swap/src/bitcoin.rs index 38cb3f1b..2bcfe4b7 100644 --- a/swap/src/bitcoin.rs +++ b/swap/src/bitcoin.rs @@ -1,4 +1,3 @@ -pub mod electrum_balancer; pub mod wallet; mod cancel; @@ -458,7 +457,7 @@ impl From for i64 { pub fn parse_rpc_error_code(error: &anyhow::Error) -> anyhow::Result { // First try to extract an Electrum error from a MultiError if present - if let Some(multi_error) = error.downcast_ref::() + if let Some(multi_error) = error.downcast_ref::() { // Try to find the first Electrum error in the MultiError for single_error in multi_error.iter() { diff --git a/swap/src/bitcoin/wallet.rs b/swap/src/bitcoin/wallet.rs index f471b913..fa6c9fd6 100644 --- a/swap/src/bitcoin/wallet.rs +++ b/swap/src/bitcoin/wallet.rs @@ -41,8 +41,9 @@ use tracing::{debug_span, Instrument}; use super::bitcoin_address::revalidate_network; use super::BlockHeight; -use crate::bitcoin::electrum_balancer::ElectrumBalancer; +use electrum_pool::ElectrumBalancer; use derive_builder::Builder; +use moka; /// We allow transaction fees of up to 20% of the transferred amount to ensure /// that lock transactions can always be published, even when fees are high. @@ -66,8 +67,10 @@ pub struct Wallet { persister: Arc>, /// The electrum client. electrum_client: Arc>, - /// The mempool client. 
- mempool_client: Arc>, + /// The cached fee estimator for the electrum client. + cached_electrum_fee_estimator: Arc>, + /// The cached fee estimator for the mempool client. + cached_mempool_fee_estimator: Arc>>, /// The network this wallet is on. network: Network, /// The number of confirmations (blocks) we require for a transaction @@ -83,6 +86,7 @@ pub struct Wallet { } /// This is our wrapper around a bdk electrum client. +#[derive(Clone)] pub struct Client { /// The underlying electrum balancer for load balancing across multiple servers. inner: Arc, @@ -130,7 +134,7 @@ impl WalletBuilder { /// Asynchronously builds the `Wallet` using the configured parameters. /// This method contains the core logic for wallet initialization, including /// database setup, key derivation, and potential migration from older wallet formats. - pub async fn build(self) -> Result> { + pub async fn build(self) -> Result> { let config = self .validate_config() .map_err(|e| anyhow!("Builder validation failed: {e}"))?; @@ -293,6 +297,83 @@ pub trait EstimateFeeRate { fn min_relay_fee(&self) -> impl std::future::Future> + Send; } +/// A caching wrapper around EstimateFeeRate implementations. +/// +/// Uses Moka cache with TTL (Time To Live) expiration for both fee rate estimates +/// and minimum relay fees to reduce the frequency of network calls to Electrum and mempool.space APIs. +#[derive(Clone)] +pub struct CachedFeeEstimator { + inner: T, + fee_cache: Arc>, + min_relay_cache: Arc>, +} + +impl CachedFeeEstimator { + /// Cache duration for fee estimates (2 minutes) + const CACHE_DURATION: Duration = Duration::from_secs(120); + /// Maximum number of cached fee rate entries (different target blocks) + const MAX_CACHE_SIZE: u64 = 10; + + /// Create a new caching wrapper around an EstimateFeeRate implementation. + pub fn new(inner: T) -> Self { + Self { + inner, + fee_cache: Arc::new( + moka::future::Cache::builder() + .max_capacity(Self::MAX_CACHE_SIZE) + .time_to_live(Self::CACHE_DURATION) + .build(), + ), + min_relay_cache: Arc::new( + moka::future::Cache::builder() + .max_capacity(1) // Only one min relay fee value + .time_to_live(Self::CACHE_DURATION) + .build(), + ), + } + } +} + +impl EstimateFeeRate for CachedFeeEstimator { + async fn estimate_feerate(&self, target_block: u32) -> Result { + // Check cache first + if let Some(cached_rate) = self.fee_cache.get(&target_block).await { + return Ok(cached_rate); + } + + // If not in cache, fetch from underlying estimator + let fee_rate = self.inner.estimate_feerate(target_block).await?; + + // Store in cache + self.fee_cache.insert(target_block, fee_rate).await; + + Ok(fee_rate) + } + + async fn min_relay_fee(&self) -> Result { + // Check cache first + if let Some(cached_rate) = self.min_relay_cache.get(&()).await { + return Ok(cached_rate); + } + + // If not in cache, fetch from underlying estimator + let min_relay_fee = self.inner.min_relay_fee().await?; + + // Store in cache + self.min_relay_cache.insert((), min_relay_fee).await; + + Ok(min_relay_fee) + } +} + +impl std::ops::Deref for CachedFeeEstimator { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + impl Wallet { /// If this many consequent addresses are unused, we stop the full scan. 
/// On old wallets we used to generate a ton of unused addresses @@ -362,7 +443,7 @@ impl Wallet { sync_interval: Duration, env_config: crate::env::Config, tauri_handle: Option, - ) -> Result> { + ) -> Result> { // Construct the private key, directory and wallet file for the new (>= 1.0.0) bdk wallet let xprivkey = seed.derive_extended_private_key(env_config.bitcoin_network)?; let wallet_dir = data_dir @@ -425,7 +506,7 @@ impl Wallet { target_block: u32, sync_interval: Duration, tauri_handle: Option, - ) -> Result> { + ) -> Result> { Self::create_new( seed.derive_extended_private_key(network)?, network, @@ -458,7 +539,7 @@ impl Wallet { old_wallet: Option, tauri_handle: Option, use_mempool_space_fee_estimation: bool, - ) -> Result> + ) -> Result> where Persister: WalletPersister + Sized, ::Error: std::error::Error + Send + Sync + 'static, @@ -550,10 +631,16 @@ impl Wallet { None }; + // Create cached fee estimators + let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone())); + let cached_mempool_fee_estimator = + Arc::new(mempool_client.clone().map(CachedFeeEstimator::new)); + Ok(Wallet { wallet: wallet.into_arc_mutex_async(), electrum_client: client.into_arc_mutex_async(), - mempool_client: Arc::new(mempool_client), + cached_electrum_fee_estimator, + cached_mempool_fee_estimator, persister: persister.into_arc_mutex_async(), tauri_handle, network, @@ -573,7 +660,7 @@ impl Wallet { target_block: u32, tauri_handle: Option, use_mempool_space_fee_estimation: bool, - ) -> Result> + ) -> Result> where Persister: WalletPersister + Sized, ::Error: std::error::Error + Send + Sync + 'static, @@ -596,19 +683,23 @@ impl Wallet { .context("Failed to open database")? .context("No wallet found in database")?; - // Create the mempool client - let mempool_client = if use_mempool_space_fee_estimation { + // Create the mempool client with caching + let cached_mempool_fee_estimator = if use_mempool_space_fee_estimation { mempool_client::MempoolClient::new(network).inspect_err(|e| { tracing::warn!("Failed to create mempool client: {:?}. We will only use the Electrum server for fee estimation.", e); - }).ok() + }).ok().map(CachedFeeEstimator::new) } else { None }; + // Wrap the electrum client with caching + let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone())); + let wallet = Wallet { wallet: wallet.into_arc_mutex_async(), electrum_client: client.into_arc_mutex_async(), - mempool_client: Arc::new(mempool_client), + cached_electrum_fee_estimator, + cached_mempool_fee_estimator: Arc::new(cached_mempool_fee_estimator), persister: persister.into_arc_mutex_async(), tauri_handle, network, @@ -663,7 +754,7 @@ impl Wallet { kind, txid, total_count ); - let multi_error = crate::bitcoin::electrum_balancer::MultiError::new(errors, context); + let multi_error = electrum_pool::MultiError::new(errors, context); return Err(anyhow::Error::from(multi_error)); } @@ -1095,10 +1186,11 @@ where /// If either of the clients fail but the other is successful, we use the successful one. 
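Editor's note: the `CachedFeeEstimator` introduced above wraps any `EstimateFeeRate` implementation in two small Moka caches (one keyed by target block, one for the minimum relay fee), so repeated fee queries within the TTL never reach Electrum or mempool.space. A minimal, self-contained sketch of that TTL-cache pattern, assuming `moka = "0.12"` with the `future` feature (as in swap/Cargo.toml) plus `tokio`; the plain `u64` value is a stand-in for the real `FeeRate`:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Same knobs as CachedFeeEstimator: at most 10 target-block keys,
    // entries expire 120 seconds after insertion.
    let cache: moka::future::Cache<u32, u64> = moka::future::Cache::builder()
        .max_capacity(10)
        .time_to_live(Duration::from_secs(120))
        .build();

    // First lookup misses, so we "fetch" and insert.
    if cache.get(&6).await.is_none() {
        let fetched_fee_rate = 42; // stand-in for a network call
        cache.insert(6, fetched_fee_rate).await;
    }

    // Within the TTL this hits the cache and skips the network entirely.
    assert_eq!(cache.get(&6).await, Some(42));
}
```

Because the caches live behind `Arc`s inside `CachedFeeEstimator`, cloning the wrapper shares the same entries, which is what the `clone_shares_cache` test further down exercises.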
/// If both clients fail, we return an error async fn combined_fee_rate(&self) -> Result { - let electrum_client = self.electrum_client.lock().await; - let electrum_future = electrum_client.estimate_feerate(self.target_block); + let electrum_future = self + .cached_electrum_fee_estimator + .estimate_feerate(self.target_block); let mempool_future = async { - match self.mempool_client.as_ref() { + match self.cached_mempool_fee_estimator.as_ref() { Some(mempool_client) => mempool_client .estimate_feerate(self.target_block) .await @@ -1174,10 +1266,9 @@ where /// /// Only fails if both sources fail. Always chooses the higher value. async fn combined_min_relay_fee(&self) -> Result { - let electrum_client = self.electrum_client.lock().await; - let electrum_future = electrum_client.min_relay_fee(); + let electrum_future = self.cached_electrum_fee_estimator.min_relay_fee(); let mempool_future = async { - match self.mempool_client.as_ref() { + match self.cached_mempool_fee_estimator.as_ref() { Some(mempool_client) => mempool_client.min_relay_fee().await.map(Some), None => Ok(None), } @@ -2455,6 +2546,7 @@ mod mempool_client { /// A client for the mempool.space API. /// /// This client is used to estimate the fee rate for a transaction. + #[derive(Clone)] pub struct MempoolClient { client: reqwest::Client, base_url: String, @@ -2751,6 +2843,7 @@ impl IntoArcMutex for T { } #[cfg(test)] +#[derive(Clone)] pub struct StaticFeeRate { fee_rate: FeeRate, min_relay_fee: bitcoin::Amount, @@ -2856,10 +2949,13 @@ impl TestWalletBuilder { bitcoin::Amount::from_sat(self.min_relay_sats_per_vb), ); + let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone())); + let wallet = Wallet { wallet: bdk_core_wallet.into_arc_mutex_async(), electrum_client: client.into_arc_mutex_async(), - mempool_client: Arc::new(None), // We don't use mempool client in tests + cached_electrum_fee_estimator, + cached_mempool_fee_estimator: Arc::new(None), // We don't use mempool client in tests persister: persister.into_arc_mutex_async(), tauri_handle: None, network: Network::Regtest, @@ -3309,6 +3405,264 @@ TRACE swap::bitcoin::wallet: Bitcoin transaction status changed txid=00000000000 }); } } + + mod cached_fee_estimator_tests { + use super::*; + use std::sync::atomic::{AtomicU32, Ordering}; + use std::sync::Arc; + use tokio::time::{sleep, Duration}; + + /// Mock fee estimator that tracks how many times methods are called + #[derive(Clone)] + struct MockFeeEstimator { + estimate_calls: Arc, + min_relay_calls: Arc, + fee_rate: FeeRate, + min_relay_fee: FeeRate, + delay: Duration, + } + + impl MockFeeEstimator { + fn new(fee_rate: FeeRate, min_relay_fee: FeeRate) -> Self { + Self { + estimate_calls: Arc::new(AtomicU32::new(0)), + min_relay_calls: Arc::new(AtomicU32::new(0)), + fee_rate, + min_relay_fee, + delay: Duration::from_millis(0), + } + } + + fn with_delay(mut self, delay: Duration) -> Self { + self.delay = delay; + self + } + + fn estimate_call_count(&self) -> u32 { + self.estimate_calls.load(Ordering::SeqCst) + } + + fn min_relay_call_count(&self) -> u32 { + self.min_relay_calls.load(Ordering::SeqCst) + } + } + + impl EstimateFeeRate for MockFeeEstimator { + async fn estimate_feerate(&self, _target_block: u32) -> Result { + self.estimate_calls.fetch_add(1, Ordering::SeqCst); + if !self.delay.is_zero() { + sleep(self.delay).await; + } + Ok(self.fee_rate) + } + + async fn min_relay_fee(&self) -> Result { + self.min_relay_calls.fetch_add(1, Ordering::SeqCst); + if !self.delay.is_zero() { + 
sleep(self.delay).await; + } + Ok(self.min_relay_fee) + } + } + + #[tokio::test] + async fn caches_fee_rate_estimates() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(50).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached = CachedFeeEstimator::new(mock.clone()); + + // First call should hit the underlying estimator + let fee1 = cached.estimate_feerate(6).await.unwrap(); + assert_eq!(fee1, FeeRate::from_sat_per_vb(50).unwrap()); + assert_eq!(mock.estimate_call_count(), 1); + + // Second call with same target should use cache + let fee2 = cached.estimate_feerate(6).await.unwrap(); + assert_eq!(fee2, FeeRate::from_sat_per_vb(50).unwrap()); + assert_eq!(mock.estimate_call_count(), 1); // Still 1, not 2 + + // Different target should hit the underlying estimator again + let fee3 = cached.estimate_feerate(12).await.unwrap(); + assert_eq!(fee3, FeeRate::from_sat_per_vb(50).unwrap()); + assert_eq!(mock.estimate_call_count(), 2); + } + + #[tokio::test] + async fn caches_min_relay_fee() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(50).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached = CachedFeeEstimator::new(mock.clone()); + + // First call should hit the underlying estimator + let fee1 = cached.min_relay_fee().await.unwrap(); + assert_eq!(fee1, FeeRate::from_sat_per_vb(1).unwrap()); + assert_eq!(mock.min_relay_call_count(), 1); + + // Second call should use cache + let fee2 = cached.min_relay_fee().await.unwrap(); + assert_eq!(fee2, FeeRate::from_sat_per_vb(1).unwrap()); + assert_eq!(mock.min_relay_call_count(), 1); // Still 1, not 2 + } + + #[tokio::test] + async fn concurrent_requests_dont_duplicate_calls() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(25).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ) + .with_delay(Duration::from_millis(50)); // Add delay to simulate network call + + let cached = CachedFeeEstimator::new(mock.clone()); + + // First, make one call to populate the cache + let _initial = cached.estimate_feerate(6).await.unwrap(); + assert_eq!(mock.estimate_call_count(), 1); + + // Now make multiple concurrent requests for the same target + // These should all hit the cache + let handles: Vec<_> = (0..5) + .map(|_| { + let cached = cached.clone(); + tokio::spawn(async move { cached.estimate_feerate(6).await }) + }) + .collect(); + + // Wait for all requests to complete + let results: Vec<_> = futures::future::join_all(handles).await; + + // All should succeed with the same value + for result in results { + let fee = result.unwrap().unwrap(); + assert_eq!(fee, FeeRate::from_sat_per_vb(25).unwrap()); + } + + // The underlying estimator should still only have been called once + // since all subsequent requests should hit the cache + assert_eq!( + mock.estimate_call_count(), + 1, + "Expected exactly 1 call, got {}", + mock.estimate_call_count() + ); + } + + #[tokio::test] + async fn different_target_blocks_cached_separately() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(30).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached = CachedFeeEstimator::new(mock.clone()); + + // Request different target blocks + let _fee1 = cached.estimate_feerate(1).await.unwrap(); + let _fee2 = cached.estimate_feerate(6).await.unwrap(); + let _fee3 = cached.estimate_feerate(12).await.unwrap(); + + assert_eq!(mock.estimate_call_count(), 3); + + // Request same targets again - should use cache + let _fee1_cached = cached.estimate_feerate(1).await.unwrap(); + let _fee2_cached = 
cached.estimate_feerate(6).await.unwrap(); + let _fee3_cached = cached.estimate_feerate(12).await.unwrap(); + + assert_eq!(mock.estimate_call_count(), 3); // Still 3, no additional calls + } + + #[tokio::test] + async fn cache_respects_ttl() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(40).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached = CachedFeeEstimator::new(mock.clone()); + + // First call + let _fee1 = cached.estimate_feerate(6).await.unwrap(); + assert_eq!(mock.estimate_call_count(), 1); + + // Wait for cache to expire (2 minutes + small buffer) + // Note: In a real test environment, you might want to use a shorter TTL + // or mock the time. For now, we'll just verify the cache works within TTL. + + // Immediate second call should use cache + let _fee2 = cached.estimate_feerate(6).await.unwrap(); + assert_eq!(mock.estimate_call_count(), 1); + } + + #[tokio::test] + async fn error_propagation() { + #[derive(Clone)] + struct FailingEstimator; + + impl EstimateFeeRate for FailingEstimator { + async fn estimate_feerate(&self, _target_block: u32) -> Result { + Err(anyhow::anyhow!("Network error")) + } + + async fn min_relay_fee(&self) -> Result { + Err(anyhow::anyhow!("Network error")) + } + } + + let cached = CachedFeeEstimator::new(FailingEstimator); + + // Errors should be propagated, not cached + let result1 = cached.estimate_feerate(6).await; + assert!(result1.is_err()); + assert!(result1.unwrap_err().to_string().contains("Network error")); + + let result2 = cached.min_relay_fee().await; + assert!(result2.is_err()); + assert!(result2.unwrap_err().to_string().contains("Network error")); + } + + #[tokio::test] + async fn cache_capacity_limits() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(35).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached = CachedFeeEstimator::new(mock.clone()); + + // Fill cache beyond capacity (MAX_CACHE_SIZE = 10) + for target in 1..=15 { + let _fee = cached.estimate_feerate(target).await.unwrap(); + } + + assert_eq!(mock.estimate_call_count(), 15); + + // Request some of the earlier targets - some might have been evicted + // Due to LRU eviction, the earliest entries might be gone + let _fee = cached.estimate_feerate(1).await.unwrap(); + + // The exact behavior depends on Moka's eviction policy, + // but we should see that the cache is working within its limits + assert!(mock.estimate_call_count() >= 15); + } + + #[tokio::test] + async fn clone_shares_cache() { + let mock = MockFeeEstimator::new( + FeeRate::from_sat_per_vb(45).unwrap(), + FeeRate::from_sat_per_vb(1).unwrap(), + ); + let cached1 = CachedFeeEstimator::new(mock.clone()); + let cached2 = cached1.clone(); + + // First estimator makes a call + let _fee1 = cached1.estimate_feerate(6).await.unwrap(); + assert_eq!(mock.estimate_call_count(), 1); + + // Second estimator should use the shared cache + let _fee2 = cached2.estimate_feerate(6).await.unwrap(); + assert_eq!(mock.estimate_call_count(), 1); // Still 1, cache was shared + } + } } #[derive(Clone)] diff --git a/swap/src/cli/api.rs b/swap/src/cli/api.rs index 47521569..849f99ee 100644 --- a/swap/src/cli/api.rs +++ b/swap/src/cli/api.rs @@ -20,7 +20,9 @@ use std::fmt; use std::future::Future; use std::path::{Path, PathBuf}; use std::sync::{Arc, Once}; -use tauri_bindings::{TauriBackgroundProgress, TauriContextStatusEvent, TauriEmitter, TauriHandle}; +use tauri_bindings::{ + MoneroNodeConfig, TauriBackgroundProgress, TauriContextStatusEvent, TauriEmitter, TauriHandle, +}; use 
tokio::sync::{broadcast, broadcast::Sender, Mutex as TokioMutex, RwLock}; use tokio::task::JoinHandle; use tor_rtcompat::tokio::TokioRustlsRuntime; @@ -188,12 +190,13 @@ pub struct Context { bitcoin_wallet: Option>, monero_manager: Option>, tor_client: Option>>, + monero_rpc_pool_handle: Option>, } /// A conveniant builder struct for [`Context`]. #[must_use = "ContextBuilder must be built to be useful"] pub struct ContextBuilder { - monero: Option, + monero_config: Option, bitcoin: Option, data: Option, is_testnet: bool, @@ -216,7 +219,7 @@ impl ContextBuilder { /// Basic builder with default options for mainnet pub fn mainnet() -> Self { ContextBuilder { - monero: None, + monero_config: None, bitcoin: None, data: None, is_testnet: false, @@ -235,8 +238,8 @@ impl ContextBuilder { } /// Configures the Context to initialize a Monero wallet with the given configuration. - pub fn with_monero(mut self, monero: impl Into>) -> Self { - self.monero = monero.into(); + pub fn with_monero(mut self, monero_config: impl Into>) -> Self { + self.monero_config = monero_config.into(); self } @@ -247,8 +250,8 @@ impl ContextBuilder { } /// Attach a handle to Tauri to the Context for emitting events etc. - pub fn with_tauri(mut self, tauri: impl Into>) -> Self { - self.tauri_handle = tauri.into(); + pub fn with_tauri(mut self, tauri_handle: impl Into>) -> Self { + self.tauri_handle = tauri_handle.into(); self } @@ -364,17 +367,61 @@ impl ContextBuilder { }; let initialize_monero_wallet = async { - match self.monero { - Some(monero) => { + match self.monero_config { + Some(monero_config) => { let monero_progress_handle = tauri_handle .new_background_process_with_initial_progress( TauriBackgroundProgress::OpeningMoneroWallet, (), ); + // Handle the different monero configurations + let (monero_node_address, rpc_pool_handle) = match monero_config { + MoneroNodeConfig::Pool => { + // Start RPC pool and use it + match monero_rpc_pool::start_server_with_random_port( + monero_rpc_pool::config::Config::new_random_port( + "127.0.0.1".to_string(), + data_dir.join("monero-rpc-pool"), + ), + match self.is_testnet { + true => crate::monero::Network::Stagenet, + false => crate::monero::Network::Mainnet, + }, + ) + .await + { + Ok((server_info, mut status_receiver, pool_handle)) => { + let rpc_url = + format!("http://{}:{}", server_info.host, server_info.port); + tracing::info!("Monero RPC Pool started on {}", rpc_url); + + // Start listening for pool status updates and forward them to frontend + if let Some(ref handle) = self.tauri_handle { + let pool_tauri_handle = handle.clone(); + tokio::spawn(async move { + while let Ok(status) = status_receiver.recv().await { + pool_tauri_handle.emit_pool_status_update(status); + } + }); + } + + (Some(rpc_url), Some(Arc::new(pool_handle))) + } + Err(e) => { + tracing::error!("Failed to start Monero RPC Pool: {}", e); + (None, None) + } + } + } + MoneroNodeConfig::SingleNode { url } => { + (if url.is_empty() { None } else { Some(url) }, None) + } + }; + let wallets = init_monero_wallet( data_dir.as_path(), - monero.monero_node_address.map(|url| url.to_string()), + monero_node_address, env_config, tauri_handle.clone(), ) @@ -382,9 +429,9 @@ impl ContextBuilder { monero_progress_handle.finish(); - Ok(Some(wallets)) + Ok((Some(wallets), rpc_pool_handle)) } - None => Ok(None), + None => Ok((None, None)), } }; @@ -405,7 +452,7 @@ impl ContextBuilder { Ok(maybe_tor_client) }; - let (bitcoin_wallet, monero_manager, tor) = tokio::try_join!( + let (bitcoin_wallet, (monero_manager, 
monero_rpc_pool_handle), tor) = tokio::try_join!( initialize_bitcoin_wallet, initialize_monero_wallet, initialize_tor_client, @@ -443,6 +490,7 @@ impl ContextBuilder { tasks, tauri_handle: self.tauri_handle, tor_client: tor, + monero_rpc_pool_handle, }; Ok(context) @@ -476,6 +524,7 @@ impl Context { tasks: PendingTaskList::default().into(), tauri_handle: None, tor_client: None, + monero_rpc_pool_handle: None, } } @@ -507,7 +556,7 @@ async fn init_bitcoin_wallet( env_config: EnvConfig, bitcoin_target_block: u16, tauri_handle_option: Option, -) -> Result { +) -> Result> { let mut builder = bitcoin::wallet::WalletBuilder::default() .seed(seed.clone()) .network(env_config.bitcoin_network) @@ -637,6 +686,23 @@ impl Config { } } +impl From for MoneroNodeConfig { + fn from(monero: Monero) -> Self { + match monero.monero_node_address { + Some(url) => MoneroNodeConfig::SingleNode { + url: url.to_string(), + }, + None => MoneroNodeConfig::Pool, + } + } +} + +impl From for Option { + fn from(monero: Monero) -> Self { + Some(MoneroNodeConfig::from(monero)) + } +} + #[cfg(test)] pub mod api_test { use super::*; diff --git a/swap/src/cli/api/request.rs b/swap/src/cli/api/request.rs index 83368d67..fce041e5 100644 --- a/swap/src/cli/api/request.rs +++ b/swap/src/cli/api/request.rs @@ -1195,10 +1195,27 @@ pub async fn monero_recovery( #[tracing::instrument(fields(method = "get_current_swap"), skip(context))] pub async fn get_current_swap(context: Arc) -> Result { Ok(json!({ - "swap_id": context.swap_lock.get_current_swap_id().await + "swap_id": context.swap_lock.get_current_swap_id().await, })) } +pub async fn resolve_approval_request( + resolve_approval: ResolveApprovalArgs, + ctx: Arc, +) -> Result { + let request_id = Uuid::parse_str(&resolve_approval.request_id).context("Invalid request ID")?; + + if let Some(handle) = ctx.tauri_handle.clone() { + handle + .resolve_approval(request_id, resolve_approval.accept) + .await?; + } else { + bail!("Cannot resolve approval without a Tauri handle"); + } + + Ok(ResolveApprovalResponse { success: true }) +} + fn qr_code(value: &impl ToString) -> Result { let code = QrCode::new(value.to_string())?; let qr_code = code @@ -1353,6 +1370,9 @@ struct UnknownMoneroNetwork(String); impl CheckMoneroNodeArgs { pub async fn request(self) -> Result { + let url = self.url.clone(); + let network_str = self.network.clone(); + let network = match self.network.to_lowercase().as_str() { // When the GUI says testnet, it means monero stagenet "mainnet" => Network::Mainnet, @@ -1373,11 +1393,20 @@ impl CheckMoneroNodeArgs { return Ok(CheckMoneroNodeResponse { available: false }); }; - let Ok(available) = monero_daemon.is_available(&CLIENT).await else { - return Ok(CheckMoneroNodeResponse { available: false }); - }; + match monero_daemon.is_available(&CLIENT).await { + Ok(available) => Ok(CheckMoneroNodeResponse { available }), + Err(e) => { + tracing::error!( + url = %url, + network = %network_str, + error = ?e, + error_chain = %format!("{:#}", e), + "Failed to check monero node availability" + ); - Ok(CheckMoneroNodeResponse { available }) + Ok(CheckMoneroNodeResponse { available: false }) + } + } } } @@ -1410,14 +1439,14 @@ impl CheckElectrumNodeArgs { } #[typeshare] -#[derive(Deserialize, Serialize)] +#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct ResolveApprovalArgs { pub request_id: String, pub accept: bool, } #[typeshare] -#[derive(Deserialize, Serialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct ResolveApprovalResponse { pub success: bool, } @@ 
-1426,14 +1455,6 @@ impl Request for ResolveApprovalArgs { type Response = ResolveApprovalResponse; async fn request(self, ctx: Arc) -> Result { - let request_id = Uuid::parse_str(&self.request_id).context("Invalid request ID")?; - - if let Some(handle) = ctx.tauri_handle.clone() { - handle.resolve_approval(request_id, self.accept).await?; - } else { - bail!("Cannot resolve approval without a Tauri handle"); - } - - Ok(ResolveApprovalResponse { success: true }) + resolve_approval_request(self, ctx).await } } diff --git a/swap/src/cli/api/tauri_bindings.rs b/swap/src/cli/api/tauri_bindings.rs index 8dd19c8c..5ac39497 100644 --- a/swap/src/cli/api/tauri_bindings.rs +++ b/swap/src/cli/api/tauri_bindings.rs @@ -3,6 +3,7 @@ use crate::bitcoin; use crate::{bitcoin::ExpiredTimelocks, monero, network::quote::BidQuote}; use anyhow::{anyhow, Context, Result}; use bitcoin::Txid; +use monero_rpc_pool::pool::PoolStatus; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::future::Future; @@ -12,7 +13,6 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use strum::Display; use tokio::sync::{oneshot, Mutex as TokioMutex}; use typeshare::typeshare; -use url::Url; use uuid::Uuid; #[typeshare] @@ -27,6 +27,7 @@ pub enum TauriEvent { TimelockChange(TauriTimelockChangeEvent), Approval(ApprovalRequest), BackgroundProgress(TauriBackgroundProgressWrapper), + PoolStatusUpdate(PoolStatus), } const TAURI_UNIFIED_EVENT_NAME: &str = "tauri-unified-event"; @@ -297,6 +298,10 @@ pub trait TauriEmitter { )); } + fn emit_pool_status_update(&self, status: PoolStatus) { + self.emit_unified_event(TauriEvent::PoolStatusUpdate(status)); + } + /// Create a new background progress handle for tracking a specific type of progress fn new_background_process( &self, @@ -609,14 +614,14 @@ pub enum TauriSwapProgressEvent { BtcLockTxInMempool { #[typeshare(serialized_as = "string")] btc_lock_txid: bitcoin::Txid, - #[typeshare(serialized_as = "number")] - btc_lock_confirmations: u64, + #[typeshare(serialized_as = "Option")] + btc_lock_confirmations: Option, }, XmrLockTxInMempool { #[typeshare(serialized_as = "string")] xmr_lock_txid: monero::TxHash, - #[typeshare(serialized_as = "number")] - xmr_lock_tx_confirmations: u64, + #[typeshare(serialized_as = "Option")] + xmr_lock_tx_confirmations: Option, }, XmrLocked, EncryptedSignatureSent, @@ -697,13 +702,20 @@ pub enum BackgroundRefundState { Completed, } +#[typeshare] +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(tag = "type", content = "content")] +pub enum MoneroNodeConfig { + Pool, + SingleNode { url: String }, +} + /// This struct contains the settings for the Context #[typeshare] #[derive(Debug, Serialize, Deserialize, Clone)] pub struct TauriSettings { - /// The URL of the Monero node e.g `http://xmr.node:18081` - #[typeshare(serialized_as = "Option")] - pub monero_node_url: Option, + /// Configuration for Monero node connection + pub monero_node_config: MoneroNodeConfig, /// The URLs of the Electrum RPC servers e.g `["ssl://bitcoin.com:50001", "ssl://backup.com:50001"]` pub electrum_rpc_urls: Vec, /// Whether to initialize and use a tor client. 
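Editor's note: the new `MoneroNodeConfig` enum is what `ContextBuilder` matches on to either start the local monero-rpc-pool or connect to a single daemon URL, and the `#[serde(tag = "type", content = "content")]` attribute makes it adjacently tagged. A small standalone sketch (assuming `serde` and `serde_json` are available) of roughly what crosses the Tauri boundary for each variant:

```rust
use serde::{Deserialize, Serialize};

// Copy of the enum from tauri_bindings.rs, reproduced here so the
// example compiles on its own.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type", content = "content")]
pub enum MoneroNodeConfig {
    Pool,
    SingleNode { url: String },
}

fn main() -> serde_json::Result<()> {
    // Unit variant: only the tag is emitted.
    let pool = serde_json::to_string(&MoneroNodeConfig::Pool)?;
    assert_eq!(pool, r#"{"type":"Pool"}"#);

    // Struct variant: the fields go under the "content" key.
    let single = serde_json::to_string(&MoneroNodeConfig::SingleNode {
        url: "http://xmr.node:18081".to_string(),
    })?;
    assert_eq!(
        single,
        r#"{"type":"SingleNode","content":{"url":"http://xmr.node:18081"}}"#
    );
    Ok(())
}
```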
diff --git a/swap/src/common/tracing_util.rs b/swap/src/common/tracing_util.rs index a9389b65..81ece2a1 100644 --- a/swap/src/common/tracing_util.rs +++ b/swap/src/common/tracing_util.rs @@ -67,7 +67,16 @@ pub fn init( "libp2p_dcutr", "monero_cpp", ]; - let OUR_CRATES: Vec<&str> = vec!["swap", "asb", "monero_sys", "unstoppableswap-gui-rs"]; + let OUR_CRATES: Vec<&str> = vec![ + "swap", + "asb", + "monero_sys", + "unstoppableswap-gui-rs", + ]; + + let INFO_LEVEL_CRATES: Vec<&str> = vec![ + "monero_rpc_pool", + ]; // General log file for non-verbose logs let file_appender: RollingFileAppender = tracing_appender::rolling::never(&dir, "swap-all.log"); @@ -89,8 +98,10 @@ pub fn init( .with_ansi(false) .with_timer(UtcTime::rfc_3339()) .with_target(false) + .with_file(true) + .with_line_number(true) .json() - .with_filter(env_filter(level_filter, OUR_CRATES.clone())?); + .with_filter(env_filter_with_info_crates(level_filter, OUR_CRATES.clone(), INFO_LEVEL_CRATES.clone())?); // Layer for writing to the verbose log file // Crates: All crates with different levels (libp2p at INFO+, others at TRACE) @@ -100,12 +111,15 @@ pub fn init( .with_ansi(false) .with_timer(UtcTime::rfc_3339()) .with_target(false) + .with_file(true) + .with_line_number(true) .json() - .with_filter(env_filter_with_libp2p_info( + .with_filter(env_filter_with_all_crates( LevelFilter::TRACE, OUR_CRATES.clone(), LIBP2P_CRATES.clone(), TOR_CRATES.clone(), + INFO_LEVEL_CRATES.clone(), )?); // Layer for writing to the terminal @@ -116,7 +130,9 @@ pub fn init( .with_writer(std::io::stderr) .with_ansi(is_terminal) .with_timer(UtcTime::rfc_3339()) - .with_target(true); + .with_target(true) + .with_file(true) + .with_line_number(true); // Layer for writing to the Tauri guest. This will be displayed in the GUI. // Crates: All crates with libp2p at INFO+ level @@ -126,24 +142,28 @@ pub fn init( .with_ansi(false) .with_timer(UtcTime::rfc_3339()) .with_target(true) + .with_file(true) + .with_line_number(true) .json() - .with_filter(env_filter_with_libp2p_info( + .with_filter(env_filter_with_all_crates( level_filter, OUR_CRATES.clone(), LIBP2P_CRATES.clone(), TOR_CRATES.clone(), + INFO_LEVEL_CRATES.clone(), )?); // If trace_stdout is true, we log all messages to the terminal // Otherwise, we only log the bare minimum let terminal_layer_env_filter = match trace_stdout { - true => env_filter_with_libp2p_info( + true => env_filter_with_all_crates( LevelFilter::TRACE, OUR_CRATES.clone(), LIBP2P_CRATES.clone(), TOR_CRATES.clone(), + INFO_LEVEL_CRATES.clone(), )?, - false => env_filter(level_filter, OUR_CRATES.clone())?, + false => env_filter_with_info_crates(level_filter, OUR_CRATES.clone(), INFO_LEVEL_CRATES.clone())?, }; let final_terminal_layer = match format { @@ -185,6 +205,29 @@ fn env_filter(level_filter: LevelFilter, crates: Vec<&str>) -> Result Ok(filter) } +/// This function controls which crate's logs actually get logged and from which level, with info-level crates at INFO level or higher. 
+fn env_filter_with_info_crates( + level_filter: LevelFilter, + our_crates: Vec<&str>, + info_level_crates: Vec<&str>, +) -> Result { + let mut filter = EnvFilter::from_default_env(); + + // Add directives for each crate in the provided list + for crate_name in our_crates { + filter = filter.add_directive(Directive::from_str(&format!( + "{}={}", + crate_name, &level_filter + ))?); + } + + for crate_name in info_level_crates { + filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?); + } + + Ok(filter) +} + /// This function controls which crate's logs actually get logged and from which level, with libp2p crates at INFO level or higher. fn env_filter_with_libp2p_info( level_filter: LevelFilter, @@ -216,6 +259,42 @@ fn env_filter_with_libp2p_info( Ok(filter) } +/// This function controls which crate's logs actually get logged and from which level, including all crate categories. +fn env_filter_with_all_crates( + level_filter: LevelFilter, + our_crates: Vec<&str>, + libp2p_crates: Vec<&str>, + tor_crates: Vec<&str>, + info_level_crates: Vec<&str>, +) -> Result { + let mut filter = EnvFilter::from_default_env(); + + // Add directives for each crate in the provided list + for crate_name in our_crates { + filter = filter.add_directive(Directive::from_str(&format!( + "{}={}", + crate_name, &level_filter + ))?); + } + + for crate_name in libp2p_crates { + filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?); + } + + for crate_name in tor_crates { + filter = filter.add_directive(Directive::from_str(&format!( + "{}={}", + crate_name, &level_filter + ))?); + } + + for crate_name in info_level_crates { + filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?); + } + + Ok(filter) +} + /// A writer that forwards tracing log messages to the tauri guest. #[derive(Clone)] pub struct TauriWriter { diff --git a/swap/src/protocol/bob/swap.rs b/swap/src/protocol/bob/swap.rs index 1f4d3169..93196616 100644 --- a/swap/src/protocol/bob/swap.rs +++ b/swap/src/protocol/bob/swap.rs @@ -146,23 +146,9 @@ async fn next_state( BobState::SwapSetupCompleted(state2) } BobState::SwapSetupCompleted(state2) => { - // Record the current monero wallet block height so we don't have to scan from - // block 0 once we create the redeem wallet. - // This has to be done **before** the Bitcoin is locked in order to ensure that - // if Bob goes offline the recorded wallet-height is correct. - // If we only record this later, it can happen that Bob publishes the Bitcoin - // transaction, goes offline, while offline Alice publishes Monero. - // If the Monero transaction gets confirmed before Bob comes online again then - // Bob would record a wallet-height that is past the lock transaction height, - // which can lead to the wallet not detect the transaction. - let monero_wallet_restore_blockheight = monero_wallet - .blockchain_height() - .await - .context("Failed to fetch current Monero blockheight")?; - + // Alice and Bob have exchanged all necessary signatures let xmr_receive_amount = state2.xmr; - // Alice and Bob have exchanged info // Sign the Bitcoin lock transaction let (state3, tx_lock) = state2.lock_btc().await?; let signed_tx = bitcoin_wallet @@ -184,8 +170,9 @@ async fn next_state( swap_id, }); - // We request approval before publishing the Bitcoin lock transaction, as the exchange rate determined at this step might be different from the - // we previously displayed to the user. 
+ // We request approval before publishing the Bitcoin lock transaction, + // as the exchange rate determined at this step might be different + // from the one we previously displayed to the user. let approval_result = event_emitter .request_approval(request, PRE_BTC_LOCK_APPROVAL_TIMEOUT_SECS) .await; @@ -194,6 +181,20 @@ async fn next_state( Ok(true) => { tracing::debug!("User approved swap offer"); + // Record the current monero wallet block height so we don't have to scan from + // block 0 once we create the redeem wallet. + // This has to be done **before** the Bitcoin is locked in order to ensure that + // if Bob goes offline the recorded wallet-height is correct. + // If we only record this later, it can happen that Bob publishes the Bitcoin + // transaction, goes offline, while offline Alice publishes Monero. + // If the Monero transaction gets confirmed before Bob comes online again then + // Bob would record a wallet-height that is past the lock transaction height, + // which can lead to the wallet not detect the transaction. + let monero_wallet_restore_blockheight = monero_wallet + .blockchain_height() + .await + .context("Failed to fetch current Monero blockheight")?; + // Publish the signed Bitcoin lock transaction let (..) = bitcoin_wallet.broadcast(signed_tx, "lock").await?; @@ -224,7 +225,7 @@ async fn next_state( swap_id, TauriSwapProgressEvent::BtcLockTxInMempool { btc_lock_txid: state3.tx_lock_id(), - btc_lock_confirmations: 0, + btc_lock_confirmations: None, }, ); @@ -289,7 +290,7 @@ async fn next_state( swap_id, TauriSwapProgressEvent::BtcLockTxInMempool { btc_lock_txid: state3.tx_lock_id(), - btc_lock_confirmations: u64::from(confirmed.confirmations()), + btc_lock_confirmations: Some(u64::from(confirmed.confirmations())), }, ); } @@ -334,7 +335,7 @@ async fn next_state( swap_id, TauriSwapProgressEvent::XmrLockTxInMempool { xmr_lock_txid: lock_transfer_proof.tx_hash(), - xmr_lock_tx_confirmations: 0, + xmr_lock_tx_confirmations: None, }, ); @@ -369,7 +370,7 @@ async fn next_state( swap_id, TauriSwapProgressEvent::XmrLockTxInMempool { xmr_lock_txid: lock_transfer_proof.clone().tx_hash(), - xmr_lock_tx_confirmations: confirmations, + xmr_lock_tx_confirmations: Some(confirmations), }, ); }), diff --git a/swap/src/tracing_ext.rs b/swap/src/tracing_ext.rs index 6fd7eaba..2ea20651 100644 --- a/swap/src/tracing_ext.rs +++ b/swap/src/tracing_ext.rs @@ -18,6 +18,8 @@ pub fn capture_logs(min_level: LevelFilter) -> MakeCapturingWriter { tracing_subscriber::fmt() .with_ansi(false) .without_time() + .with_file(true) + .with_line_number(true) .with_writer(make_writer.clone()) .with_env_filter(format!("{}", min_level)) .finish(),
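Editor's note: switching `btc_lock_confirmations` and `xmr_lock_tx_confirmations` to `Option<u64>` pairs with the `formatConfirmations` helper added to src-gui/src/utils/formatUtils.ts, so an unknown confirmation count is shown as "?" instead of a misleading 0. A hypothetical Rust rendering of that same display rule, for illustration only (the real formatting lives in the TypeScript GUI; the function name here is made up):

```rust
/// Sketch mirroring formatConfirmations from formatUtils.ts:
/// `None` means the count is unknown and is rendered as "?".
fn format_confirmations(confirmations: Option<u64>, max_confirmations: Option<u64>) -> String {
    match (confirmations, max_confirmations) {
        (Some(c), Some(max)) => format!("{c}/{max}"),
        (Some(c), None) => c.to_string(),
        (None, Some(max)) => format!("?/{max}"),
        (None, None) => "?".to_string(),
    }
}

fn main() {
    assert_eq!(format_confirmations(None, Some(10)), "?/10");
    assert_eq!(format_confirmations(Some(3), Some(10)), "3/10");
    assert_eq!(format_confirmations(Some(3), None), "3");
    assert_eq!(format_confirmations(None, None), "?");
}
```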