diff --git a/Cargo.lock b/Cargo.lock index f406beab..e390ff7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "aead" version = "0.5.2" @@ -46,7 +52,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -63,18 +69,18 @@ dependencies = [ [[package]] name = "allo-isolate" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b6d794345b06592d0ebeed8e477e41b71e5a0a49df4fc0e4184d5938b99509" +checksum = "1f67642eb6773fb42a95dd3b348c305ee18dee6642274c6b412d67e985e3befc" dependencies = [ "atomic", ] [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -132,7 +138,7 @@ name = "ansi-parser" version = "0.9.1" source = "git+https://gitlab.com/davidbittner/ansi-parser.git#a431fb31f8b7f5680525987c1d67d4863ac02660" dependencies = [ - "heapless", + "heapless 0.8.0", "nom", ] @@ -147,9 +153,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = 
"8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -162,49 +168,50 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "once_cell", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arboard" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fb4009533e8ff8f1450a5bcbc30f4242a1d34442221f72314bea1f5dc9c7f89" +checksum = "df099ccb16cd014ff054ac1bf392c67feeef57164b05c42f037cd40f5d4357f4" dependencies = [ "clipboard-win", "log", @@ -228,16 +235,22 @@ dependencies = [ ] [[package]] -name = "arrayref" -version = "0.3.8" +name = "arraydeque" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-attributes" @@ -274,14 +287,14 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", - "futures-lite 2.3.0", + "fastrand 2.3.0", + "futures-lite 2.6.0", "slab", ] @@ -293,10 +306,10 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.4", + "async-io 2.4.0", "async-lock 3.4.0", "blocking", - "futures-lite 2.3.0", + "futures-lite 2.6.0", "once_cell", ] @@ -314,7 +327,7 @@ dependencies = [ "log", "parking", "polling 2.8.0", - "rustix 0.37.27", + "rustix 0.37.28", "slab", "socket2 0.4.10", "waker-fn", 
@@ -322,18 +335,18 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock 3.4.0", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite 2.6.0", "parking", - "polling 3.7.3", - "rustix 0.38.34", + "polling 3.7.4", + "rustix 0.38.44", "slab", "tracing", "windows-sys 0.59.0", @@ -354,26 +367,28 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-process" -version = "1.8.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", + "async-channel 2.3.1", + "async-io 2.4.0", + "async-lock 3.4.0", "async-signal", + "async-task", "blocking", "cfg-if 1.0.0", - "event-listener 3.1.0", - "futures-lite 1.13.0", - "rustix 0.38.34", - "windows-sys 0.48.0", + "event-listener 5.4.0", + "futures-lite 2.6.0", + "rustix 0.38.44", + "tracing", ] [[package]] @@ -382,13 +397,13 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io 2.3.4", + "async-io 2.4.0", "async-lock 3.4.0", "atomic-waker", "cfg-if 1.0.0", "futures-core", "futures-io", - "rustix 0.38.34", + "rustix 0.38.44", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -396,22 
+411,22 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io 2.4.0", + "async-lock 3.4.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", - "gloo-timers", + "futures-lite 2.6.0", + "gloo-timers 0.3.0", "kv-log-macro", "log", "memchr", @@ -424,9 +439,9 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3b454643291f9a4a3bbdb35fa62efa4ba7be5ea13fe243e3be4352182ff4b8" +checksum = "fa5ee46ec0c518414838d2fdc7dd18f6ba7d934b6e728005c958621da450682d" dependencies = [ "async-std", "async-trait", @@ -434,14 +449,14 @@ dependencies = [ "futures-util", "hickory-resolver", "pin-utils", - "socket2 0.5.7", + "socket2 0.5.8", ] [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -450,13 +465,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -480,13 
+495,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -516,6 +531,22 @@ dependencies = [ "tungstenite 0.23.0", ] +[[package]] +name = "async-tungstenite" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c348fb0b6d132c596eca3dcd941df48fb597aafcb07a738ec41c004b087dc99" +dependencies = [ + "atomic-waker", + "futures-core", + "futures-io", + "futures-task", + "futures-util", + "log", + "pin-project-lite", + "tungstenite 0.24.0", +] + [[package]] name = "async_executors" version = "0.7.0" @@ -528,7 +559,7 @@ dependencies = [ "futures-task", "futures-timer", "futures-util", - "pin-project", + "pin-project 1.1.9", "rustc_version", "tokio", "wasm-bindgen-futures", @@ -551,6 +582,15 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -581,9 +621,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" @@ -594,7 +634,7 @@ dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", - 
"bytes", + "bytes 1.10.0", "futures-util", "http 0.2.12", "http-body", @@ -620,7 +660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", - "bytes", + "bytes 1.10.0", "futures-util", "http 0.2.12", "http-body", @@ -640,7 +680,7 @@ dependencies = [ "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] @@ -688,9 +728,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" + +[[package]] +name = "bitmaps" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d084b0137aaa901caf9f1e8b21daa6aa24d41cd806e111335541eff9683bd6" [[package]] name = "blake2" @@ -703,9 +749,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.3" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ec96fe9a81b5e365f9db71fe00edc4fe4ca2cc7dcb7861f0603012a7caa210" +checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e" dependencies = [ "arrayref", "arrayvec", @@ -722,7 +768,7 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -777,7 +823,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite 2.6.0", "piper", ] @@ -792,9 +838,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byteorder" @@ -804,17 +850,23 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" + +[[package]] +name = "bytes" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "capnp" -version = "0.19.6" +version = "0.19.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de71387912cac7dd3cb7c219e09628411620a18061bba58c71453c26ae7bf66a" +checksum = "4e985a566bdaae9a428a957d12b10c318d41b2afddb54cfbb764878059df636e" dependencies = [ - "embedded-io", + "embedded-io 0.6.1", ] [[package]] @@ -828,9 +880,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.11" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb8dd288a69fc53a1996d7ecfbf4a20d59065bff137ce7e56bbd620de191189" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" dependencies = [ "shlex", ] @@ -894,9 +946,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -948,15 +1000,15 @@ dependencies = [ "bitflags 1.3.2", "strsim 0.8.0", "textwrap", - "unicode-width", + "unicode-width 0.1.14", "vec_map", ] 
[[package]] name = "clap" -version = "4.5.15" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -964,9 +1016,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -977,21 +1029,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clipboard-win" @@ -1004,13 +1056,19 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] +[[package]] +name = "cobs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" + [[package]] name = "color-eyre" version = "0.6.3" @@ -1026,9 +1084,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -1036,7 +1094,7 @@ version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ - "bytes", + "bytes 1.10.0", "memchr", ] @@ -1066,28 +1124,39 @@ dependencies = [ [[package]] name = "config" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" dependencies = [ - "lazy_static", "nom", "pathdiff", "serde", - "yaml-rust", + "yaml-rust2 0.8.1", +] + +[[package]] +name = "config" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e26695492a475c4a091cfda61446d5ba01aac2e1dfbcd27a12fdd11aa2e32596" +dependencies = [ + "pathdiff", + "serde", + "winnow 0.7.1", + "yaml-rust2 0.9.0", ] [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width", - "windows-sys 0.52.0", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", ] [[package]] @@ -1146,9 +1215,9 @@ checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "core-foundation" @@ -1168,9 +1237,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1185,19 +1254,25 @@ dependencies = [ ] [[package]] -name = "crossbeam-channel" -version = "0.5.13" +name = "critical-section" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" @@ -1205,7 +1280,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.6.0", + 
"bitflags 2.8.0", "crossterm_winapi", "libc", "mio 0.8.11", @@ -1221,12 +1296,12 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "crossterm_winapi", "futures-core", - "mio 1.0.2", + "mio 1.0.3", "parking_lot 0.12.3", - "rustix 0.38.34", + "rustix 0.38.44", "signal-hook", "signal-hook-mio", "winapi", @@ -1254,9 +1329,9 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ "generic-array", "subtle", @@ -1264,12 +1339,12 @@ dependencies = [ [[package]] name = "ctor" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" +checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1299,7 +1374,7 @@ dependencies = [ "signal-hook", "tokio", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -1320,7 +1395,7 @@ dependencies = [ "log", "smallvec", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -1344,9 +1419,9 @@ dependencies = [ "serde_yaml", "time", "tokio", - "toml 0.8.19", + "toml 0.8.20", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", "xi-unicode", ] @@ -1382,7 +1457,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1438,7 +1513,8 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "syn 
2.0.74", + "strsim 0.11.1", + "syn 2.0.98", ] [[package]] @@ -1460,7 +1536,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1478,9 +1554,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "der" @@ -1554,10 +1630,21 @@ dependencies = [ ] [[package]] -name = "dyn-clone" -version = "1.0.17" +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "dyn-clone" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feeef44e73baff3a26d371801df019877a9866a8c493d315ab00177843314f35" [[package]] name = "ed25519" @@ -1591,6 +1678,12 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + [[package]] name = "embedded-io" version = "0.6.1" @@ -1599,29 +1692,29 @@ checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1641,7 +1734,7 @@ checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1684,7 +1777,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -1718,19 +1811,19 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "error-code" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" +checksum = "a5d9305ccc6942a704f4335694ecd3de2ea531b114ac2d51f5f843750787a92f" [[package]] name = "event-listener" @@ 
-1740,20 +1833,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "3.1.0" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -1764,11 +1846,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -1805,9 +1887,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ffi-support" @@ -1827,9 +1909,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1837,20 +1919,14 @@ dependencies = 
[ "windows-sys 0.59.0", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "flate2" -version = "1.0.31" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.3", ] [[package]] @@ -1867,15 +1943,15 @@ dependencies = [ "log", "regex", "rustversion", - "thiserror", + "thiserror 1.0.69", "time", ] [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", @@ -1925,15 +2001,15 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" dependencies = [ - "rustix 0.38.34", + "rustix 0.38.44", "windows-sys 0.48.0", ] [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1946,9 +2022,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = 
"2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1956,15 +2032,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1973,9 +2049,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1994,11 +2070,11 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.3.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.3.0", "futures-core", "futures-io", "parking", @@ -2007,26 +2083,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.74", + "syn 2.0.98", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2034,15 +2110,15 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2056,6 +2132,18 @@ dependencies = [ "slab", ] +[[package]] +name = "futures_codec" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" +dependencies = [ + "bytes 0.5.6", + "futures", + "memchr", + "pin-project 0.4.30", +] + [[package]] name = "gen_ops" version = "0.4.0" @@ -2091,10 +2179,22 @@ dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" 
+dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "gimli" version = "0.28.1" @@ -2103,9 +2203,9 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-timers" @@ -2119,6 +2219,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "gloo-utils" version = "0.1.7" @@ -2132,6 +2244,19 @@ dependencies = [ "web-sys", ] +[[package]] +name = "gloo-utils" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "grpcio" version = "0.12.1" @@ -2169,13 +2294,13 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes", + "bytes 1.10.0", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -2188,6 +2313,15 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +[[package]] +name = "hash32" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hash32" version = "0.3.1" @@ -2222,6 +2356,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + [[package]] name = "hashlink" version = "0.8.4" @@ -2231,6 +2371,15 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hdrhistogram" version = "7.5.4" @@ -2246,19 +2395,27 @@ dependencies = [ [[package]] name = "heapless" -version = "0.8.0" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" dependencies = [ - "hash32", + "atomic-polyfill", + "hash32 0.2.1", + "rustc_version", + "serde", + "spin", "stable_deref_trait", ] [[package]] -name = "heck" -version = "0.4.1" +name = "heapless" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" +dependencies = [ + "hash32 0.3.1", + "stable_deref_trait", +] [[package]] name = "heck" @@ -2295,9 +2452,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -2306,11 +2463,11 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -2319,9 +2476,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -2333,7 +2490,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -2360,11 +2517,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2378,24 +2535,35 @@ dependencies = [ "winapi", ] +[[package]] +name = "hostname" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "windows 0.52.0", +] + [[package]] name = "http" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes", + "bytes 1.10.0", "fnv", "itoa", ] [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ - "bytes", + "bytes 1.10.0", "fnv", "itoa", ] @@ -2406,16 +2574,16 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes", + "bytes 1.10.0", "http 0.2.12", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.9.4" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -2431,11 +2599,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ - "bytes", + "bytes 1.10.0", "futures-channel", "futures-core", "futures-util", @@ -2446,7 +2614,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -2471,7 +2639,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes", + "bytes 1.10.0", "hyper", "native-tls", "tokio", @@ -2480,9 +2648,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2501,6 +2669,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] 
+name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2509,22 +2795,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", ] [[package]] -name = "idna" -version = "0.5.0" +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = 
"daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -2537,6 +2824,29 @@ dependencies = [ "libc", ] +[[package]] +name = "imbl" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc3be8d8cd36f33a46b1849f31f837c44d9fa87223baee3b4bd96b8f11df81eb" +dependencies = [ + "bitmaps", + "imbl-sized-chunks", + "rand_core", + "rand_xoshiro", + "serde", + "version_check", +] + +[[package]] +name = "imbl-sized-chunks" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f4241005618a62f8d57b2febd02510fb96e0137304728543dfc5fd6f052c22d" +dependencies = [ + "bitmaps", +] + [[package]] name = "indent" version = "0.1.1" @@ -2561,12 +2871,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -2604,7 +2914,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2 0.5.8", "widestring", "windows-sys 0.48.0", "winreg", @@ -2612,9 +2922,12 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +dependencies = [ + "serde", +] [[package]] name = "ipnetwork" @@ -2651,9 +2964,9 @@ dependencies = [ [[package]] name = "itoa" 
-version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jni" @@ -2666,7 +2979,7 @@ dependencies = [ "combine", "jni-sys", "log", - "thiserror", + "thiserror 1.0.69", "walkdir", "windows-sys 0.45.0", ] @@ -2679,10 +2992,11 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -2792,9 +3106,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libc-print" @@ -2807,9 +3121,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if 1.0.0", "windows-targets 0.52.6", @@ -2821,9 +3135,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.8", ] [[package]] @@ 
-2839,9 +3153,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.19" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "libc", @@ -2863,9 +3177,15 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -2879,9 +3199,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" dependencies = [ "value-bag", ] @@ -2972,9 +3292,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicov" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" dependencies = [ "cc", "walkdir", @@ -2995,6 +3315,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "0.8.11" @@ -3003,43 +3332,36 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.48.0", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] -[[package]] -name = "multimap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" - [[package]] name = "nanorand" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ "libc", "log", @@ -3073,7 +3395,7 @@ dependencies = [ "ndk-sys 0.4.1+23.1.7779620", "num_enum", "raw-window-handle", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3164,22 +3486,21 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" dependencies = [ - "bytes", + "bytes 1.10.0", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror", - "tokio", + "thiserror 2.0.11", ] [[package]] @@ -3189,7 +3510,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "async-io 1.13.0", - "bytes", + "bytes 1.10.0", "futures", "libc", "log", @@ -3226,7 +3547,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if 1.0.0", "libc", ] @@ -3237,7 +3558,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if 1.0.0", "cfg_aliases", "libc", @@ -3403,7 +3724,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "block2", "libc", "objc2", @@ -3419,7 +3740,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "block2", "objc2", "objc2-foundation", @@ -3439,9 +3760,9 @@ dependencies = [ [[package]] name = "objc2-encode" -version = "4.0.3" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7891e71393cd1f227313c9379a26a584ff3d7e6e7159e988851f0934c993f0f8" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" 
[[package]] name = "objc2-foundation" @@ -3449,7 +3770,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "block2", "libc", "objc2", @@ -3461,7 +3782,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "block2", "objc2", "objc2-foundation", @@ -3473,7 +3794,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "block2", "objc2", "objc2-foundation", @@ -3491,9 +3812,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "opaque-debug" @@ -3503,11 +3824,11 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3524,20 +3845,20 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -3566,7 +3887,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3585,7 +3906,7 @@ dependencies = [ "opentelemetry_sdk 0.20.0", "prost 0.11.9", "protobuf", - "thiserror", + "thiserror 1.0.69", "tokio", "tonic 0.9.2", ] @@ -3603,7 +3924,7 @@ dependencies = [ "opentelemetry-proto 0.6.0", "opentelemetry_sdk 0.23.0", "prost 0.12.6", - "thiserror", + "thiserror 1.0.69", "tokio", "tonic 0.11.0", ] @@ -3662,7 +3983,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.69", "urlencoding", ] @@ -3685,7 +4006,7 @@ dependencies = [ "rand", "regex", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", ] @@ -3705,10 +4026,10 @@ dependencies = [ "lazy_static", "once_cell", "opentelemetry 0.23.0", - "ordered-float 4.2.2", + "ordered-float 4.6.0", "percent-encoding", "rand", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", ] @@ -3730,9 +4051,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.2.2" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" dependencies = [ "num-traits", ] @@ -3785,9 +4106,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -3832,7 +4153,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -3856,9 +4177,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" [[package]] name = "peeking_take_while" @@ -3872,16 +4193,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "petgraph" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset", - "indexmap 2.4.0", -] - [[package]] name = "pharos" version = "0.5.3" @@ -3894,29 +4205,49 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" dependencies = [ - "pin-project-internal", + "pin-project-internal 0.4.30", +] + +[[package]] +name = "pin-project" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +dependencies = [ + "pin-project-internal 1.1.9", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 1.0.109", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -3931,7 +4262,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.3.0", "futures-io", ] @@ -3947,9 +4278,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" @@ -3969,15 +4300,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = 
"a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if 1.0.0", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix 0.38.44", "tracing", "windows-sys 0.59.0", ] @@ -3995,19 +4326,32 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "portable-atomic-util" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdd8420072e66d54a407b3316991fe946ce3ab1083a7f575b2463866624704d" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" dependencies = [ "portable-atomic", ] +[[package]] +name = "postcard" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "170a2601f67cc9dba8edd8c4870b15f71a6a2dc196daec8c83f72b59dff628a8" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "heapless 0.7.17", + "serde", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -4023,16 +4367,6 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "prettyplease" -version = "0.2.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" -dependencies = [ - "proc-macro2", - "syn 2.0.74", -] - [[package]] name = "proc-macro-crate" version = "0.1.5" @@ -4053,10 +4387,32 @@ dependencies = [ ] [[package]] -name = "proc-macro2" -version = "1.0.86" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = 
"96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -4067,7 +4423,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "bytes", + "bytes 1.10.0", "prost-derive 0.11.9", ] @@ -4077,31 +4433,10 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ - "bytes", + "bytes 1.10.0", "prost-derive 0.12.6", ] -[[package]] -name = "prost-build" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" -dependencies = [ - "bytes", - "heck 0.5.0", - "itertools 0.12.1", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.12.6", - "prost-types", - "regex", - "syn 2.0.74", - "tempfile", -] - [[package]] name = "prost-derive" version = "0.11.9" @@ -4125,7 +4460,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -4143,15 +4478,6 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" -[[package]] -name = "protobuf-src" -version = 
"2.1.0+27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7edafa3bcc668fa93efafcbdf58d7821bbda0f4b458ac7fae3d57ec0fec8167" -dependencies = [ - "cmake", -] - [[package]] name = "quick-error" version = "1.2.3" @@ -4160,9 +4486,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.36" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -4194,7 +4520,16 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core", ] [[package]] @@ -4226,34 +4561,34 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.10.6" 
+version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -4267,13 +4602,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -4284,9 +4619,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -4295,7 +4630,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64", - "bytes", + "bytes 1.10.0", "encoding_rs", "futures-core", "futures-util", @@ -4334,7 +4669,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ - "hostname", + "hostname 0.3.1", "quick-error", ] @@ -4346,7 +4681,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ 
-4379,7 +4714,7 @@ dependencies = [ "netlink-proto", "netlink-sys", "nix 0.26.4", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -4399,10 +4734,10 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "fallible-iterator", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.8.4", "libsqlite3-sys", "smallvec", ] @@ -4421,18 +4756,18 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.37.27" +version = "0.37.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" dependencies = [ "bitflags 1.3.2", "errno", @@ -4444,15 +4779,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno", "libc", - "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", ] [[package]] @@ -4488,31 +4823,30 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rustyline-async" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9396d834c31f9fddd716e7c279e7cb70207092a1e59767918610f5c560c6eb" +checksum = "6fa3f78c2ea57b827be4c11adbfed26e5fe1b49fb6fb7826e2a9eebbc2e8db10" dependencies = [ "crossterm 0.28.1", - "futures-channel", "futures-util", - "pin-project", + "pin-project 1.1.9", "thingbuf", - "thiserror", + "thiserror 2.0.11", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "same-file" @@ -4535,18 +4869,18 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.14" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79da19444d9da7a9a82b80ecf059eceba6d3129d84a8610fd25ff2364f255466" +checksum = "ea091f6cac2595aa38993f04f4ee692ed43757035c36e67c180b6828356385b1" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -4572,7 +4906,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals 0.29.1", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -4599,9 +4933,9 @@ dependencies = [ [[package]] name = "sdd" -version = "3.0.2" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0495e4577c672de8254beb68d01a9b62d0e8a13c099edecdbedccce3223cd29f" +checksum = "b07779b9b918cc05650cb30f404d4d7835d26df37c235eded8a6832e2fb82cca" [[package]] name = "secret-service" @@ -4629,7 +4963,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation", "core-foundation-sys", "libc", @@ -4638,9 +4972,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -4648,9 +4982,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "send_wrapper" @@ -4669,9 +5003,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.207" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -4728,13 +5062,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.207" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.74", + "syn 2.0.98", ] [[package]] @@ -4745,7 +5079,7 @@ checksum = "e578a843d40b4189a4d66bba51d7684f57da5bd7c304c64e14bd63efbef49509" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -4756,14 +5090,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -4779,14 +5113,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -4809,7 +5143,20 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.7.1", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serde_yaml_ng" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4db627b98b36d4203a7b458cf3573730f2bb591b28871d916dfa9efabfd41f" +dependencies = [ + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4832,16 +5179,16 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "futures", "log", "once_cell", "parking_lot 0.12.3", "scc", - "serial_test_derive 3.1.1", + "serial_test_derive 3.2.0", ] [[package]] @@ -4852,18 +5199,18 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -4952,7 +5299,7 @@ checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", "mio 0.8.11", - "mio 1.0.2", + "mio 1.0.3", "signal-hook", ] @@ -5006,7 +5353,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec62a949bda7f15800481a711909f946e1204f2460f89210eaf7f57730f88f86" dependencies = [ - "thiserror", + "thiserror 1.0.69", "unicode_categories", ] @@ -5022,9 +5369,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5093,9 +5440,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" -version = "2.6.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" 
+checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" @@ -5110,9 +5457,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.74" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -5125,6 +5472,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "sysinfo" version = "0.30.13" @@ -5162,14 +5520,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.1.0", + "fastrand 2.3.0", + "getrandom 0.3.1", "once_cell", - "rustix 0.38.34", + "rustix 0.38.44", "windows-sys 0.59.0", ] @@ -5184,12 +5543,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" dependencies = [ - "rustix 0.38.34", - "windows-sys 0.48.0", + "rustix 0.38.44", + "windows-sys 0.59.0", ] [[package]] @@ -5198,7 +5557,7 @@ version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -5208,27 +5567,47 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "662b54ef6f7b4e71f683dadc787bbb2d8e8ef2f91b682ebed3164a5a7abca905" dependencies = [ "parking_lot 0.12.3", - "pin-project", + "pin-project 1.1.9", ] [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] @@ -5253,9 +5632,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = 
"35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -5276,19 +5655,29 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", ] [[package]] -name = "tinyvec" -version = "1.8.0" +name = "tinystr" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -5301,18 +5690,18 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", - "bytes", + "bytes 1.10.0", "libc", - "mio 1.0.2", + "mio 1.0.3", "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2 0.5.8", "tokio-macros", "tracing", "windows-sys 0.52.0", @@ -5330,13 +5719,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -5351,9 +5740,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -5362,11 +5751,11 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ - "bytes", + "bytes 1.10.0", "futures-core", "futures-io", "futures-sink", @@ -5385,14 +5774,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.23", ] [[package]] @@ -5410,22 +5799,22 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.7.1", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" 
+checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow 0.7.1", ] [[package]] @@ -5437,7 +5826,7 @@ dependencies = [ "async-trait", "axum", "base64", - "bytes", + "bytes 1.10.0", "futures-core", "futures-util", "h2", @@ -5446,7 +5835,7 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project", + "pin-project 1.1.9", "prost 0.11.9", "tokio", "tokio-stream", @@ -5466,14 +5855,14 @@ dependencies = [ "async-trait", "axum", "base64", - "bytes", + "bytes 1.10.0", "h2", "http 0.2.12", "http-body", "hyper", "hyper-timeout", "percent-encoding", - "pin-project", + "pin-project 1.1.9", "prost 0.12.6", "tokio", "tokio-stream", @@ -5492,7 +5881,7 @@ dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", - "pin-project", + "pin-project 1.1.9", "pin-project-lite", "rand", "slab", @@ -5517,9 +5906,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -5534,27 +5923,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -5562,9 +5951,9 @@ dependencies = [ [[package]] name = "tracing-error" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" dependencies = [ "tracing", "tracing-subscriber", @@ -5583,9 +5972,9 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" dependencies = [ "libc", "tracing-core", @@ -5666,16 +6055,14 @@ dependencies = [ [[package]] name = "tracing-perfetto" -version = "0.1.1" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd21777b526dfcb57f11f65aa8a2024d83e1db52841993229b6e282e511978b7" +checksum = "cf599f51530a7211f5aa92c84abdfc1137362d471f0473a4b1be8d625167fb0d" dependencies = [ "anyhow", - "bytes", + "bytes 1.10.0", "chrono", "prost 0.12.6", - "prost-build", - "protobuf-src", "rand", "thread-id", "tracing", @@ -5684,9 +6071,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" 
dependencies = [ "matchers", "nu-ansi-term", @@ -5714,9 +6101,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" dependencies = [ "serde", "stable_deref_trait", @@ -5734,7 +6121,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b26cf145f2f3b9ff84e182c448eaf05468e247f148cf3d2a7d67d78ff023a0" dependencies = [ - "gloo-utils", + "gloo-utils 0.1.7", "serde", "serde-wasm-bindgen 0.5.0", "serde_json", @@ -5751,7 +6138,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals 0.28.0", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -5761,14 +6148,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", - "bytes", + "bytes 1.10.0", "data-encoding", "http 0.2.12", "httparse", "log", "rand", "sha1", - "thiserror", + "thiserror 1.0.69", "url", "utf-8", ] @@ -5780,14 +6167,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", - "bytes", + "bytes 1.10.0", "data-encoding", - "http 1.1.0", + "http 1.2.0", "httparse", "log", "rand", "sha1", - "thiserror", + "thiserror 1.0.69", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes 1.10.0", + "data-encoding", + "http 1.2.0", + "httparse", + "log", + "rand", + "sha1", + "thiserror 1.0.69", "utf-8", ] @@ -5797,38 +6202,29 @@ version = "1.17.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" [[package]] name = "unicode_categories" @@ -5860,12 +6256,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna", "percent-encoding", ] @@ -5881,6 +6277,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -5888,16 +6296,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] -name = "valuable" -version = "0.1.0" +name = "validator" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "d0b4a29d8709210980a09379f27ee31549b73292c87ab9899beee1c0d3be6303" +dependencies = [ + "idna", + "once_cell", + "regex", + "serde", + "serde_derive", + "serde_json", + "url", + "validator_derive", +] + +[[package]] +name = "validator_derive" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bac855a2ce6f843beb229757e6e570a42e837bcb15e5f449dd48d5747d41bf77" +dependencies = [ + "darling 0.20.10", + "once_cell", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -5929,8 +6367,8 @@ dependencies = [ "async-tungstenite 0.23.0", "cfg-if 1.0.0", "chrono", - "clap 4.5.15", - "config 0.14.0", + "clap 4.5.28", + "config 0.15.7", "console", "crossbeam-channel", "cursive", @@ -5953,10 +6391,10 @@ dependencies = [ "serde_derive", "serial_test 2.0.0", "stop-token", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-util", - "unicode-width", + "unicode-width 0.1.14", "veilid-bugsalot", "veilid-tools", ] @@ -5966,7 +6404,6 @@ name = "veilid-core" version = "0.4.1" dependencies = [ "argon2", - "async-io 1.13.0", "async-std", "async-std-resolver", "async-tls", @@ -5991,7 +6428,7 @@ dependencies = [ "filetime", "flume", "futures-util", - "getrandom", + "getrandom 0.2.15", "glob", "hex", "hickory-resolver", @@ -6033,11 +6470,10 @@ dependencies = [ "sha2 0.10.8", "shell-words", "simplelog", - "socket2 0.5.7", "static_assertions", "stop-token", "sysinfo", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -6080,7 +6516,7 @@ dependencies = [ "data-encoding", "ffi-support", "futures-util", - "hostname", + "hostname 0.3.1", "jni", "lazy_static", "libc-print", @@ -6135,16 +6571,16 @@ dependencies = [ "backtrace", "cfg-if 1.0.0", "chrono", - "clap 4.5.15", + "clap 4.5.28", "color-eyre", - "config 0.14.0", + "config 0.14.1", "console-subscriber", "ctrlc", "daemonize", "directories", "flume", "futures-util", - "hostname", + "hostname 0.4.0", "json", "lazy_static", "nix 0.29.0", @@ -6156,8 +6592,8 @@ dependencies = [ "rpassword", "serde", "serde_derive", - "serde_yaml", - "serial_test 3.1.1", + "serde_yaml_ng", + "serial_test 3.2.0", 
"signal-hook", "signal-hook-async-std", "stop-token", @@ -6185,19 +6621,27 @@ name = "veilid-tools" version = "0.4.1" dependencies = [ "android_logger 0.13.3", + "async-io 1.13.0", "async-lock 3.4.0", "async-std", + "async-tungstenite 0.28.2", "async_executors", "backtrace", "cfg-if 1.0.0", "chrono", + "clap 4.5.28", "console_error_panic_hook", + "ctrlc", "eyre", "flume", "fn_name", "futures-util", - "getrandom", + "futures_codec", + "getrandom 0.2.15", "ifstructs", + "imbl", + "indent", + "ipnet", "jni", "jni-sys", "js-sys", @@ -6213,16 +6657,22 @@ dependencies = [ "oslog", "paranoid-android", "parking_lot 0.12.3", + "postcard", "rand", + "rand_chacha", "rand_core", "range-set-blaze", "rtnetlink", "send_wrapper 0.6.0", + "serde", + "serde_yaml_ng", "serial_test 2.0.0", "simplelog", + "socket2 0.5.8", "static_assertions", "stop-token", - "thiserror", + "thiserror 1.0.69", + "time", "tokio", "tokio-stream", "tokio-util", @@ -6230,12 +6680,16 @@ dependencies = [ "tracing-oslog", "tracing-subscriber", "tracing-wasm", + "validator", + "veilid-bugsalot", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", "wasm-logger", "wee_alloc", "winapi", + "ws_stream_tungstenite", + "ws_stream_wasm", ] [[package]] @@ -6246,7 +6700,7 @@ dependencies = [ "console_error_panic_hook", "data-encoding", "futures-util", - "gloo-utils", + "gloo-utils 0.2.0", "js-sys", "lazy_static", "parking_lot 0.12.3", @@ -6304,13 +6758,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "wasm-bindgen" -version = "0.2.93" +name = "wasi" +version = "0.13.3+wasi-0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = 
"0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if 1.0.0", "once_cell", + "rustversion", "serde", "serde_json", "wasm-bindgen-macro", @@ -6318,36 +6782,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if 1.0.0", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6355,33 +6819,34 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = 
"wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-bindgen-test" -version = "0.3.43" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" dependencies = [ - "console_error_panic_hook", "js-sys", "minicov", - "scoped-tls", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test-macro", @@ -6389,13 +6854,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.43" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", ] [[package]] @@ -6417,9 +6882,9 @@ checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -6474,11 +6939,11 @@ dependencies = [ [[package]] name = "wg" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dadf90865f15d5c2d87f126a56ce3715b3a233641acdd10f59200aa7f4c81fb9" +checksum = "7aafc5e81e847f05d6770e074faf7b1cd4a5dec9a0e88eac5d55e20fdfebee9a" dependencies = [ - 
"event-listener 5.3.1", + "event-listener 5.4.0", "futures-core", "parking_lot 0.12.3", "pin-project-lite", @@ -6494,7 +6959,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix 0.38.44", ] [[package]] @@ -6588,7 +7053,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24d6bcc7f734a4091ecf8d7a64c5f7d7066f45585c1861eba06449909609c8a" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "widestring", "windows-sys 0.52.0", ] @@ -6818,9 +7283,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.18" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -6835,6 +7300,46 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "ws_stream_tungstenite" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed39ff9f8b2eda91bf6390f9f49eee93d655489e15708e3bb638c1c4f07cecb4" +dependencies = [ + "async-tungstenite 0.28.2", + "async_io_stream", + "bitflags 2.8.0", + "futures-core", + "futures-io", + "futures-sink", + "futures-util", + "pharos", + "rustc_version", + "tracing", + 
"tungstenite 0.24.0", +] + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -6848,7 +7353,7 @@ dependencies = [ "pharos", "rustc_version", "send_wrapper 0.6.0", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -6861,7 +7366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" dependencies = [ "gethostname", - "rustix 0.38.34", + "rustix 0.38.44", "x11rb-protocol", ] @@ -6891,9 +7396,9 @@ checksum = "a67300977d3dc3f8034dae89778f502b6ba20b269527b3223ba59c0cf393bb8a" [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name = "xmltree" @@ -6913,6 +7418,52 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yaml-rust2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink 0.8.4", +] + +[[package]] +name = "yaml-rust2" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink 0.9.1", +] + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", +] + [[package]] name = "zbus" version = "1.9.3" @@ -6966,7 +7517,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "synstructure", ] [[package]] @@ -6986,7 +7558,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.98", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b9b4a26c..2a37196a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,13 @@ members = [ ] resolver = "2" +[workspace.package] +repository = "https://gitlab.com/veilid/veilid" +authors = ["Veilid Team "] +license = "MPL-2.0" +edition = "2021" +rust-version = "1.81.0" + [patch.crates-io] cursive = { git = 
"https://gitlab.com/veilid/cursive.git" } cursive_core = { git = "https://gitlab.com/veilid/cursive.git" } @@ -26,3 +33,31 @@ lto = true [profile.dev.package.backtrace] opt-level = 3 + +[profile.dev.package.argon2] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.ed25519-dalek] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.x25519-dalek] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.curve25519-dalek] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.chacha20poly1305] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.blake3] +opt-level = 3 +debug-assertions = false + +[profile.dev.package.chacha20] +opt-level = 3 +debug-assertions = false diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 43e871c5..7adf77ad 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -36,7 +36,7 @@ command line without it. If you do so, you may skip to [Run Veilid setup script](#Run Veilid setup script). - build-tools;34.0.0 -- ndk;26.3.11579264 +- ndk;27.0.12077973 - cmake;3.22.1 - platform-tools - platforms;android-34 @@ -58,7 +58,7 @@ the command line to install the requisite package versions: sdkmanager --install "platform-tools" sdkmanager --install "platforms;android-34" sdkmanager --install "build-tools;34.0.0" -sdkmanager --install "ndk;26.3.11579264" +sdkmanager --install "ndk;27.0.12077973" sdkmanager --install "cmake;3.22.1" ``` @@ -110,7 +110,7 @@ You will need to use Android Studio [here](https://developer.android.com/studio) to maintain your Android dependencies. 
Use the SDK Manager in the IDE to install the following packages (use package details view to select version): - Android SDK Build Tools (34.0.0) -- NDK (Side-by-side) (26.3.11579264) +- NDK (Side-by-side) (27.0.12077973) - Cmake (3.22.1) - Android SDK 34 - Android SDK Command Line Tools (latest) (7.0/latest) diff --git a/Earthfile b/Earthfile index 31ca7651..138e1626 100644 --- a/Earthfile +++ b/Earthfile @@ -18,7 +18,7 @@ FROM ubuntu:18.04 ENV ZIG_VERSION=0.13.0 ENV CMAKE_VERSION_MINOR=3.30 ENV CMAKE_VERSION_PATCH=3.30.1 -ENV WASM_BINDGEN_CLI_VERSION=0.2.93 +ENV WASM_BINDGEN_CLI_VERSION=0.2.100 ENV RUST_VERSION=1.81.0 ENV RUSTUP_HOME=/usr/local/rustup ENV RUSTUP_DIST_SERVER=https://static.rust-lang.org @@ -82,7 +82,7 @@ deps-android: RUN mkdir /Android; mkdir /Android/Sdk RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip RUN cd /Android; unzip /Android/cmdline-tools.zip - RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;34.0.0 ndk\;26.3.11579264 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest + RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;34.0.0 ndk\;27.0.12077973 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest RUN rm -rf /Android/cmdline-tools RUN apt-get clean @@ -170,7 +170,7 @@ build-linux-arm64: build-android: FROM +code-android WORKDIR /veilid/veilid-core - ENV PATH=$PATH:/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/ + ENV PATH=$PATH:/Android/Sdk/ndk/27.0.12077973/toolchains/llvm/prebuilt/linux-x86_64/bin/ RUN cargo build --target aarch64-linux-android --release RUN cargo build --target armv7-linux-androideabi --release RUN cargo build --target i686-linux-android --release diff --git a/dev-setup/install_linux_prerequisites.sh b/dev-setup/install_linux_prerequisites.sh index 994a9fde..4ce6077e 100755 --- 
a/dev-setup/install_linux_prerequisites.sh +++ b/dev-setup/install_linux_prerequisites.sh @@ -43,14 +43,14 @@ while true; do curl -o $HOME/Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip cd $HOME/Android unzip $HOME/Android/cmdline-tools.zip - $HOME/Android/cmdline-tools/bin/sdkmanager --sdk_root=$HOME/Android/Sdk build-tools\;34.0.0 ndk\;26.3.11579264 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest emulator + $HOME/Android/cmdline-tools/bin/sdkmanager --sdk_root=$HOME/Android/Sdk build-tools\;34.0.0 ndk\;27.0.12077973 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest emulator cd $HOME rm -rf $HOME/Android/cmdline-tools $HOME/Android/cmdline-tools.zip # Add environment variables cat >>$HOME/.profile <"] -edition = "2021" -license = "MPL-2.0" resolver = "2" -rust-version = "1.81.0" +repository.workspace = true +authors.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [[bin]] name = "veilid-cli" @@ -17,6 +17,8 @@ path = "src/main.rs" [features] default = ["rt-tokio"] +default-async-std = ["rt-async-std"] + rt-async-std = [ "async-std", "veilid-tools/rt-async-std", diff --git a/veilid-cli/src/client_api_connection.rs b/veilid-cli/src/client_api_connection.rs index 18a3f277..9124764a 100644 --- a/veilid-cli/src/client_api_connection.rs +++ b/veilid-cli/src/client_api_connection.rs @@ -223,13 +223,12 @@ impl ClientApiConnection { trace!("ClientApiConnection::handle_tcp_connection"); // Connect the TCP socket - let stream = TcpStream::connect(connect_addr) + let stream = connect_async_tcp_stream(None, connect_addr, 10_000) .await + .map_err(map_to_string)? 
+ .into_timeout_error() .map_err(map_to_string)?; - // If it succeed, disable nagle algorithm - stream.set_nodelay(true).map_err(map_to_string)?; - // State we connected let comproc = self.inner.lock().comproc.clone(); comproc.set_connection_state(ConnectionState::ConnectedTCP( @@ -239,16 +238,8 @@ impl ClientApiConnection { // Split into reader and writer halves // with line buffering on the reader - cfg_if! { - if #[cfg(feature="rt-async-std")] { - use futures::AsyncReadExt; - let (reader, writer) = stream.split(); - let reader = BufReader::new(reader); - } else { - let (reader, writer) = stream.into_split(); - let reader = BufReader::new(reader); - } - } + let (reader, writer) = split_async_tcp_stream(stream); + let reader = BufReader::new(reader); self.clone().run_json_api_processor(reader, writer).await } diff --git a/veilid-cli/src/command_processor.rs b/veilid-cli/src/command_processor.rs index 98a6bf4b..b802dfd6 100644 --- a/veilid-cli/src/command_processor.rs +++ b/veilid-cli/src/command_processor.rs @@ -57,6 +57,7 @@ struct CommandProcessorInner { #[derive(Clone)] pub struct CommandProcessor { inner: Arc>, + settings: Arc, } impl CommandProcessor { @@ -75,6 +76,7 @@ impl CommandProcessor { last_call_id: None, enable_app_messages: false, })), + settings: Arc::new(settings.clone()), } } pub fn set_client_api_connection(&self, capi: ClientApiConnection) { @@ -186,6 +188,54 @@ Core Debug Commands: Ok(()) } + pub fn cmd_connect(&self, rest: Option, callback: UICallback) -> Result<(), String> { + trace!("CommandProcessor::cmd_connect"); + let capi = self.capi(); + let ui = self.ui_sender(); + + let this = self.clone(); + spawn_detached_local("cmd connect", async move { + capi.disconnect().await; + + if let Some(rest) = rest { + if let Ok(subnode_index) = u16::from_str(&rest) { + let ipc_path = this + .settings + .resolve_ipc_path(this.settings.ipc_path.clone(), subnode_index); + this.set_ipc_path(ipc_path); + this.set_network_address(None); + } else if let 
Some(ipc_path) = + this.settings.resolve_ipc_path(Some(rest.clone().into()), 0) + { + this.set_ipc_path(Some(ipc_path)); + this.set_network_address(None); + } else if let Ok(Some(network_address)) = + this.settings.resolve_network_address(Some(rest.clone())) + { + if let Some(addr) = network_address.first() { + this.set_network_address(Some(*addr)); + this.set_ipc_path(None); + } else { + ui.add_node_event( + Level::Error, + &format!("Invalid network address: {}", rest), + ); + } + } else { + ui.add_node_event( + Level::Error, + &format!("Invalid connection string: {}", rest), + ); + } + } + + this.start_connection(); + ui.send_callback(callback); + }); + + Ok(()) + } + pub fn cmd_debug(&self, command_line: String, callback: UICallback) -> Result<(), String> { trace!("CommandProcessor::cmd_debug"); let capi = self.capi(); @@ -331,6 +381,7 @@ Core Debug Commands: "exit" => self.cmd_exit(callback), "quit" => self.cmd_exit(callback), "disconnect" => self.cmd_disconnect(callback), + "connect" => self.cmd_connect(rest, callback), "shutdown" => self.cmd_shutdown(callback), "change_log_level" => self.cmd_change_log_level(rest, callback), "change_log_ignore" => self.cmd_change_log_ignore(rest, callback), diff --git a/veilid-cli/src/interactive_ui.rs b/veilid-cli/src/interactive_ui.rs index 34a1e623..427853fc 100644 --- a/veilid-cli/src/interactive_ui.rs +++ b/veilid-cli/src/interactive_ui.rs @@ -28,10 +28,11 @@ pub struct InteractiveUIInner { #[derive(Clone)] pub struct InteractiveUI { inner: Arc>, + _settings: Arc, } impl InteractiveUI { - pub fn new(_settings: &Settings) -> (Self, InteractiveUISender) { + pub fn new(settings: &Settings) -> (Self, InteractiveUISender) { let (cssender, csreceiver) = flume::unbounded::(); let term = Term::stdout(); @@ -45,9 +46,10 @@ impl InteractiveUI { error: None, done: Some(StopSource::new()), connection_state_receiver: csreceiver, - log_enabled: false, + log_enabled: true, enable_color, })), + _settings: Arc::new(settings.clone()), }; 
let ui_sender = InteractiveUISender { @@ -169,7 +171,6 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } - self.inner.lock().log_enabled = true; } } else if line == "log warn" { let opt_cmdproc = self.inner.lock().cmdproc.clone(); @@ -181,7 +182,6 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } - self.inner.lock().log_enabled = true; } } else if line == "log info" { let opt_cmdproc = self.inner.lock().cmdproc.clone(); @@ -193,7 +193,6 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } - self.inner.lock().log_enabled = true; } } else if line == "log debug" || line == "log" { let opt_cmdproc = self.inner.lock().cmdproc.clone(); @@ -205,6 +204,8 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } + } + if line == "log" { self.inner.lock().log_enabled = true; } } else if line == "log trace" { @@ -217,7 +218,6 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } - self.inner.lock().log_enabled = true; } } else if line == "log off" { let opt_cmdproc = self.inner.lock().cmdproc.clone(); @@ -229,9 +229,27 @@ impl InteractiveUI { eprintln!("Error: {:?}", e); self.inner.lock().done.take(); } - self.inner.lock().log_enabled = false; } + } else if line == "log hide" || line == "log disable" { + self.inner.lock().log_enabled = false; + } else if line == "log show" || line == "log enable" { + self.inner.lock().log_enabled = true; } else if !line.is_empty() { + if line == "help" { + let _ = writeln!( + stdout, + r#" +Interactive Mode Commands: + help - Display this help + clear - Clear the screen + log [level] - Set the client api log level for the node to one of: error,warn,info,debug,trace,off + hide|disable - Turn off viewing the log without changing the log level for the node + show|enable - Turn on viewing the log without changing the log level for the node + - With no option, 'log' turns on viewing 
the log and sets the level to 'debug' +"# + ); + } + let cmdproc = self.inner.lock().cmdproc.clone(); if let Some(cmdproc) = &cmdproc { if let Err(e) = cmdproc.run_command( diff --git a/veilid-cli/src/main.rs b/veilid-cli/src/main.rs index 5ab103fd..fe540eb4 100644 --- a/veilid-cli/src/main.rs +++ b/veilid-cli/src/main.rs @@ -3,7 +3,7 @@ #![deny(unused_must_use)] #![recursion_limit = "256"] -use crate::{settings::NamedSocketAddrs, tools::*, ui::*}; +use crate::{tools::*, ui::*}; use clap::{Parser, ValueEnum}; use flexi_logger::*; @@ -37,7 +37,7 @@ struct CmdlineArgs { ipc_path: Option, /// Subnode index to use when connecting #[arg(short('n'), long, default_value = "0")] - subnode_index: usize, + subnode_index: u16, /// Address to connect to #[arg(long, short = 'a')] address: Option, @@ -47,9 +47,9 @@ struct CmdlineArgs { /// Specify a configuration file to use #[arg(short = 'c', long, value_name = "FILE")] config_file: Option, - /// log level - #[arg(value_enum)] - log_level: Option, + /// Log level for the CLI itself (not for the Veilid node) + #[arg(long, value_enum)] + cli_log_level: Option, /// interactive #[arg(long, short = 'i', group = "execution_mode")] interactive: bool, @@ -93,11 +93,11 @@ fn main() -> Result<(), String> { .map_err(|e| format!("configuration is invalid: {}", e))?; // Set config from command line - if let Some(LogLevel::Debug) = args.log_level { + if let Some(LogLevel::Debug) = args.cli_log_level { settings.logging.level = settings::LogLevel::Debug; settings.logging.terminal.enabled = true; } - if let Some(LogLevel::Trace) = args.log_level { + if let Some(LogLevel::Trace) = args.cli_log_level { settings.logging.level = settings::LogLevel::Trace; settings.logging.terminal.enabled = true; } @@ -248,59 +248,14 @@ fn main() -> Result<(), String> { // Determine IPC path to try let mut client_api_ipc_path = None; if enable_ipc { - cfg_if::cfg_if! 
{ - if #[cfg(windows)] { - if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) { - if is_ipc_socket_path(&ipc_path) { - // try direct path - enable_network = false; - client_api_ipc_path = Some(ipc_path); - } else { - // try subnode index inside path - let ipc_path = ipc_path.join(args.subnode_index.to_string()); - if is_ipc_socket_path(&ipc_path) { - // subnode indexed path exists - enable_network = false; - client_api_ipc_path = Some(ipc_path); - } - } - } - } else { - if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) { - if is_ipc_socket_path(&ipc_path) { - // try direct path - enable_network = false; - client_api_ipc_path = Some(ipc_path); - } else if ipc_path.exists() && ipc_path.is_dir() { - // try subnode index inside path - let ipc_path = ipc_path.join(args.subnode_index.to_string()); - if is_ipc_socket_path(&ipc_path) { - // subnode indexed path exists - enable_network = false; - client_api_ipc_path = Some(ipc_path); - } - } - } - } + client_api_ipc_path = settings.resolve_ipc_path(args.ipc_path, args.subnode_index); + if client_api_ipc_path.is_some() { + enable_network = false; } } let mut client_api_network_addresses = None; if enable_network { - let args_address = if let Some(args_address) = args.address { - match NamedSocketAddrs::try_from(args_address) { - Ok(v) => Some(v), - Err(e) => { - return Err(format!("Invalid server address: {}", e)); - } - } - } else { - None - }; - if let Some(address_arg) = args_address.or(settings.address.clone()) { - client_api_network_addresses = Some(address_arg.addrs); - } else if let Some(address) = settings.address.clone() { - client_api_network_addresses = Some(address.addrs.clone()); - } + client_api_network_addresses = settings.resolve_network_address(args.address)?; } // Create command processor diff --git a/veilid-cli/src/settings.rs b/veilid-cli/src/settings.rs index e83cd915..ddde6bad 100644 --- a/veilid-cli/src/settings.rs +++ b/veilid-cli/src/settings.rs @@ -1,5 +1,6 @@ use 
directories::*; +use crate::tools::*; use serde_derive::*; use std::ffi::OsStr; use std::net::{SocketAddr, ToSocketAddrs}; @@ -118,7 +119,7 @@ pub fn convert_loglevel(log_level: LogLevel) -> log::LevelFilter { } } -#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct NamedSocketAddrs { pub _name: String, pub addrs: Vec, @@ -148,26 +149,26 @@ impl<'de> serde::Deserialize<'de> for NamedSocketAddrs { } } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Terminal { pub enabled: bool, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct File { pub enabled: bool, pub directory: String, pub append: bool, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Logging { pub terminal: Terminal, pub file: File, pub level: LogLevel, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Colors { pub background: String, pub shadow: String, @@ -182,7 +183,7 @@ pub struct Colors { pub highlight_text: String, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct LogColors { pub trace: String, pub debug: String, @@ -191,7 +192,7 @@ pub struct LogColors { pub error: String, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Theme { pub shadow: bool, pub borders: String, @@ -199,24 +200,24 @@ pub struct Theme { pub log_colors: LogColors, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct NodeLog { pub scrollback: usize, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct CommandLine { pub history_size: usize, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Interface { pub theme: Theme, pub node_log: NodeLog, pub command_line: CommandLine, } -#[derive(Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize)] pub struct Settings { pub enable_ipc: bool, pub ipc_path: Option, @@ -229,6 +230,90 @@ pub struct Settings { 
} impl Settings { + ////////////////////////////////////////////////////////////////////////////////// + + pub fn new(config_file: Option<&OsStr>) -> Result { + // Load the default config + let mut cfg = load_default_config()?; + + // Merge in the config file if we have one + if let Some(config_file) = config_file { + let config_file_path = Path::new(config_file); + // If the user specifies a config file on the command line then it must exist + cfg = load_config(cfg, config_file_path)?; + } + + // Generate config + cfg.try_deserialize() + } + + pub fn resolve_ipc_path( + &self, + ipc_path: Option, + subnode_index: u16, + ) -> Option { + let mut client_api_ipc_path = None; + // Determine IPC path to try + cfg_if::cfg_if! { + if #[cfg(windows)] { + if let Some(ipc_path) = ipc_path.or(self.ipc_path.clone()) { + if is_ipc_socket_path(&ipc_path) { + // try direct path + enable_network = false; + client_api_ipc_path = Some(ipc_path); + } else { + // try subnode index inside path + let ipc_path = ipc_path.join(subnode_index.to_string()); + if is_ipc_socket_path(&ipc_path) { + // subnode indexed path exists + client_api_ipc_path = Some(ipc_path); + } + } + } + } else { + if let Some(ipc_path) = ipc_path.or(self.ipc_path.clone()) { + if is_ipc_socket_path(&ipc_path) { + // try direct path + client_api_ipc_path = Some(ipc_path); + } else if ipc_path.exists() && ipc_path.is_dir() { + // try subnode index inside path + let ipc_path = ipc_path.join(subnode_index.to_string()); + if is_ipc_socket_path(&ipc_path) { + // subnode indexed path exists + client_api_ipc_path = Some(ipc_path); + } + } + } + } + } + client_api_ipc_path + } + + pub fn resolve_network_address( + &self, + address: Option, + ) -> Result>, String> { + let mut client_api_network_addresses = None; + + let args_address = if let Some(args_address) = address { + match NamedSocketAddrs::try_from(args_address) { + Ok(v) => Some(v), + Err(e) => { + return Err(format!("Invalid server address: {}", e)); + } + } + } else 
{ + None + }; + if let Some(address_arg) = args_address.or(self.address.clone()) { + client_api_network_addresses = Some(address_arg.addrs); + } else if let Some(address) = self.address.clone() { + client_api_network_addresses = Some(address.addrs.clone()); + } + Ok(client_api_network_addresses) + } + + //////////////////////////////////////////////////////////////////////////// #[cfg_attr(windows, expect(dead_code))] fn get_server_default_directory(subpath: &str) -> PathBuf { #[cfg(unix)] @@ -284,21 +369,6 @@ impl Settings { default_log_directory } - - pub fn new(config_file: Option<&OsStr>) -> Result { - // Load the default config - let mut cfg = load_default_config()?; - - // Merge in the config file if we have one - if let Some(config_file) = config_file { - let config_file_path = Path::new(config_file); - // If the user specifies a config file on the command line then it must exist - cfg = load_config(cfg, config_file_path)?; - } - - // Generate config - cfg.try_deserialize() - } } #[test] diff --git a/veilid-cli/src/tools.rs b/veilid-cli/src/tools.rs index ffe92f2d..580a1d1d 100644 --- a/veilid-cli/src/tools.rs +++ b/veilid-cli/src/tools.rs @@ -8,12 +8,10 @@ use core::str::FromStr; cfg_if! 
{ if #[cfg(feature="rt-async-std")] { - pub use async_std::net::TcpStream; pub fn block_on, T>(f: F) -> T { async_std::task::block_on(f) } } else if #[cfg(feature="rt-tokio")] { - pub use tokio::net::TcpStream; pub fn block_on, T>(f: F) -> T { let rt = tokio::runtime::Runtime::new().unwrap(); let local = tokio::task::LocalSet::new(); diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index 5d17bff1..8b1037c7 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -4,13 +4,13 @@ name = "veilid-core" version = "0.4.1" # --- description = "Core library used to create a Veilid node and operate it as part of an application" -repository = "https://gitlab.com/veilid/veilid" -authors = ["Veilid Team "] -edition = "2021" build = "build.rs" -license = "MPL-2.0" resolver = "2" -rust-version = "1.81.0" +repository.workspace = true +authors.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [lib] crate-type = ["cdylib", "staticlib", "rlib"] @@ -56,6 +56,8 @@ veilid_core_ios_tests = ["dep:tracing-oslog"] debug-locks = ["veilid-tools/debug-locks"] unstable-blockstore = [] unstable-tunnels = [] +virtual-network = ["veilid-tools/virtual-network"] +virtual-network-server = ["veilid-tools/virtual-network-server"] # GeoIP geolocation = ["maxminddb", "reqwest"] @@ -133,8 +135,8 @@ hickory-resolver = { version = "0.24.1", optional = true } # Serialization capnp = { version = "0.19.6", default-features = false, features = ["alloc"] } -serde = { version = "1.0.204", features = ["derive", "rc"] } -serde_json = { version = "1.0.120" } +serde = { version = "1.0.214", features = ["derive", "rc"] } +serde_json = { version = "1.0.132" } serde-big-array = "0.5.1" json = "0.12.4" data-encoding = { version = "2.6.0" } @@ -148,7 +150,7 @@ sanitize-filename = "0.5.0" # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(not(all(target_arch 
= "wasm32", target_os = "unknown")))'.dependencies] # Tools config = { version = "0.13.4", default-features = false, features = ["yaml"] } @@ -164,7 +166,6 @@ sysinfo = { version = "^0.30.13", default-features = false } tokio = { version = "1.38.1", features = ["full"], optional = true } tokio-util = { version = "0.7.11", features = ["compat"], optional = true } tokio-stream = { version = "0.1.15", features = ["net"], optional = true } -async-io = { version = "1.13.0" } futures-util = { version = "0.3.30", default-features = false, features = [ "async-await", "sink", @@ -184,10 +185,9 @@ webpki = "0.22.4" webpki-roots = "0.25.4" rustls = "0.21.12" rustls-pemfile = "1.0.4" -socket2 = { version = "0.5.7", features = ["all"] } # Dependencies for WASM builds only -[target.'cfg(target_arch = "wasm32")'.dependencies] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] veilid-tools = { version = "0.4.1", path = "../veilid-tools", default-features = false, features = [ "rt-wasm-bindgen", @@ -222,7 +222,7 @@ tracing-wasm = "0.2.1" keyvaluedb-web = "0.1.2" ### Configuration for WASM32 'web-sys' crate -[target.'cfg(target_arch = "wasm32")'.dependencies.web-sys] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies.web-sys] version = "0.3.69" features = [ 'Document', @@ -260,12 +260,12 @@ tracing-oslog = { version = "0.1.2", optional = true } ### DEV DEPENDENCIES -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] +[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies] simplelog = { version = "0.12.2", features = ["test"] } serial_test = "2.0.0" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } -[target.'cfg(target_arch = "wasm32")'.dev-dependencies] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies] serial_test = { version = "2.0.0", default-features = false, features = [ "async", ] } diff --git a/veilid-core/build.rs 
b/veilid-core/build.rs index c55a1aba..a46836f7 100644 --- a/veilid-core/build.rs +++ b/veilid-core/build.rs @@ -178,7 +178,7 @@ fn fix_android_emulator() { .or(env::var("ANDROID_SDK_ROOT")) .expect("ANDROID_HOME or ANDROID_SDK_ROOT not set"); let lib_path = glob(&format!( - "{android_home}/ndk/26.3.11579264/**/lib{missing_library}.a" + "{android_home}/ndk/27.0.12077973/**/lib{missing_library}.a" )) .expect("failed to glob") .next() diff --git a/veilid-core/src/attachment_manager.rs b/veilid-core/src/attachment_manager.rs index c90c5e3e..54cc2777 100644 --- a/veilid-core/src/attachment_manager.rs +++ b/veilid-core/src/attachment_manager.rs @@ -1,54 +1,43 @@ -use crate::*; -use crypto::Crypto; -use network_manager::*; -use routing_table::*; -use storage_manager::*; +use crate::{network_manager::StartupDisposition, *}; +use routing_table::RoutingTableHealth; +#[derive(Debug, Clone)] +pub struct AttachmentManagerStartupContext { + pub startup_lock: Arc, +} +impl AttachmentManagerStartupContext { + pub fn new() -> Self { + Self { + startup_lock: Arc::new(StartupLock::new()), + } + } +} +impl Default for AttachmentManagerStartupContext { + fn default() -> Self { + Self::new() + } +} + +#[derive(Debug)] struct AttachmentManagerInner { last_attachment_state: AttachmentState, last_routing_table_health: Option, maintain_peers: bool, started_ts: Timestamp, attach_ts: Option, - update_callback: Option, attachment_maintainer_jh: Option>, } -struct AttachmentManagerUnlockedInner { - _event_bus: EventBus, - config: VeilidConfig, - network_manager: NetworkManager, +#[derive(Debug)] +pub struct AttachmentManager { + registry: VeilidComponentRegistry, + inner: Mutex, + startup_context: AttachmentManagerStartupContext, } -#[derive(Clone)] -pub struct AttachmentManager { - inner: Arc>, - unlocked_inner: Arc, -} +impl_veilid_component!(AttachmentManager); impl AttachmentManager { - fn new_unlocked_inner( - event_bus: EventBus, - config: VeilidConfig, - storage_manager: StorageManager, 
- table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - crypto: Crypto, - ) -> AttachmentManagerUnlockedInner { - AttachmentManagerUnlockedInner { - _event_bus: event_bus.clone(), - config: config.clone(), - network_manager: NetworkManager::new( - event_bus, - config, - storage_manager, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, - crypto, - ), - } - } fn new_inner() -> AttachmentManagerInner { AttachmentManagerInner { last_attachment_state: AttachmentState::Detached, @@ -56,52 +45,35 @@ impl AttachmentManager { maintain_peers: false, started_ts: Timestamp::now(), attach_ts: None, - update_callback: None, attachment_maintainer_jh: None, } } pub fn new( - event_bus: EventBus, - config: VeilidConfig, - storage_manager: StorageManager, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - crypto: Crypto, + registry: VeilidComponentRegistry, + startup_context: AttachmentManagerStartupContext, ) -> Self { Self { - inner: Arc::new(Mutex::new(Self::new_inner())), - unlocked_inner: Arc::new(Self::new_unlocked_inner( - event_bus, - config, - storage_manager, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, - crypto, - )), + registry, + inner: Mutex::new(Self::new_inner()), + startup_context, } } - pub fn config(&self) -> VeilidConfig { - self.unlocked_inner.config.clone() + pub fn is_attached(&self) -> bool { + let s = self.inner.lock().last_attachment_state; + !matches!(s, AttachmentState::Detached | AttachmentState::Detaching) } - pub fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() + #[allow(dead_code)] + pub fn is_detached(&self) -> bool { + let s = self.inner.lock().last_attachment_state; + matches!(s, AttachmentState::Detached) } - // pub fn is_attached(&self) -> bool { - // let s = self.inner.lock().last_attachment_state; - // !matches!(s, AttachmentState::Detached | AttachmentState::Detaching) 
- // } - // pub fn is_detached(&self) -> bool { - // let s = self.inner.lock().last_attachment_state; - // matches!(s, AttachmentState::Detached) - // } - - // pub fn get_attach_timestamp(&self) -> Option { - // self.inner.lock().attach_ts - // } + #[allow(dead_code)] + pub fn get_attach_timestamp(&self) -> Option { + self.inner.lock().attach_ts + } fn translate_routing_table_health( health: &RoutingTableHealth, @@ -155,11 +127,6 @@ impl AttachmentManager { inner.last_attachment_state = AttachmentManager::translate_routing_table_health(&health, routing_table_config); - // If we don't have an update callback yet for some reason, just return now - let Some(update_callback) = inner.update_callback.clone() else { - return; - }; - // Send update if one of: // * the attachment state has changed // * routing domain readiness has changed @@ -172,7 +139,7 @@ impl AttachmentManager { }) .unwrap_or(true); if send_update { - Some((update_callback, Self::get_veilid_state_inner(&inner))) + Some(Self::get_veilid_state_inner(&inner)) } else { None } @@ -180,15 +147,14 @@ impl AttachmentManager { // Send the update outside of the lock if let Some(update) = opt_update { - (update.0)(VeilidUpdate::Attachment(update.1)); + (self.update_callback())(VeilidUpdate::Attachment(update)); } } fn update_attaching_detaching_state(&self, state: AttachmentState) { let uptime; let attached_uptime; - - let update_callback = { + { let mut inner = self.inner.lock(); // Clear routing table health so when we start measuring it we start from scratch @@ -211,29 +177,98 @@ impl AttachmentManager { let now = Timestamp::now(); uptime = now - inner.started_ts; attached_uptime = inner.attach_ts.map(|ts| now - ts); - - // Get callback - inner.update_callback.clone() }; // Send update - if let Some(update_callback) = update_callback { - update_callback(VeilidUpdate::Attachment(Box::new(VeilidStateAttachment { - state, - public_internet_ready: false, - local_network_ready: false, - uptime, - attached_uptime, - 
}))) + (self.update_callback())(VeilidUpdate::Attachment(Box::new(VeilidStateAttachment { + state, + public_internet_ready: false, + local_network_ready: false, + uptime, + attached_uptime, + }))) + } + + async fn startup(&self) -> EyreResult { + let guard = self.startup_context.startup_lock.startup()?; + + let rpc_processor = self.rpc_processor(); + let network_manager = self.network_manager(); + + // Startup network manager + network_manager.startup().await?; + + // Startup rpc processor + if let Err(e) = rpc_processor.startup().await { + network_manager.shutdown().await; + return Err(e); } + + // Startup routing table + let routing_table = self.routing_table(); + if let Err(e) = routing_table.startup().await { + rpc_processor.shutdown().await; + network_manager.shutdown().await; + return Err(e); + } + + // Startup successful + guard.success(); + + // Inform api clients that things have changed + log_net!(debug "sending network state update to api clients"); + network_manager.send_network_update(); + + Ok(StartupDisposition::Success) + } + + async fn shutdown(&self) { + let guard = self + .startup_context + .startup_lock + .shutdown() + .await + .expect("should be started up"); + + let routing_table = self.routing_table(); + let rpc_processor = self.rpc_processor(); + let network_manager = self.network_manager(); + + // Shutdown RoutingTable + routing_table.shutdown().await; + + // Shutdown NetworkManager + network_manager.shutdown().await; + + // Shutdown RPCProcessor + rpc_processor.shutdown().await; + + // Shutdown successful + guard.success(); + + // send update + log_net!(debug "sending network state update to api clients"); + network_manager.send_network_update(); + } + + async fn tick(&self) -> EyreResult<()> { + // Run the network manager tick + let network_manager = self.network_manager(); + network_manager.tick().await?; + + // Run the routing table tick + let routing_table = self.routing_table(); + routing_table.tick().await?; + + Ok(()) } 
#[instrument(parent = None, level = "debug", skip_all)] - async fn attachment_maintainer(self) { + async fn attachment_maintainer(&self) { log_net!(debug "attachment starting"); self.update_attaching_detaching_state(AttachmentState::Attaching); - let netman = self.network_manager(); + let network_manager = self.network_manager(); let mut restart; let mut restart_delay; @@ -241,9 +276,9 @@ impl AttachmentManager { restart = false; restart_delay = 1; - match netman.startup().await { + match self.startup().await { Err(err) => { - error!("network startup failed: {}", err); + error!("attachment startup failed: {}", err); restart = true; } Ok(StartupDisposition::BindRetry) => { @@ -257,15 +292,15 @@ impl AttachmentManager { while self.inner.lock().maintain_peers { // tick network manager let next_tick_ts = get_timestamp() + 1_000_000u64; - if let Err(err) = netman.tick().await { - error!("Error in network manager: {}", err); + if let Err(err) = self.tick().await { + error!("Error in attachment tick: {}", err); self.inner.lock().maintain_peers = false; restart = true; break; } // see if we need to restart the network - if netman.network_needs_restart() { + if network_manager.network_needs_restart() { info!("Restarting network"); restart = true; break; @@ -288,8 +323,8 @@ impl AttachmentManager { log_net!(debug "attachment stopping"); } - log_net!(debug "stopping network"); - netman.shutdown().await; + log_net!(debug "shutting down attachment"); + self.shutdown().await; } } @@ -313,25 +348,24 @@ impl AttachmentManager { } #[instrument(level = "debug", skip_all, err)] - pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> { - { - let mut inner = self.inner.lock(); - inner.update_callback = Some(update_callback.clone()); - } - - self.network_manager().init(update_callback).await?; + pub async fn init_async(&self) -> EyreResult<()> { + Ok(()) + } + #[instrument(level = "debug", skip_all, err)] + pub async fn post_init_async(&self) -> EyreResult<()> { 
Ok(()) } #[instrument(level = "debug", skip_all)] - pub async fn terminate(&self) { + pub async fn pre_terminate_async(&self) { // Ensure we detached self.detach().await; - self.network_manager().terminate().await; - self.inner.lock().update_callback = None; } + #[instrument(level = "debug", skip_all)] + pub async fn terminate_async(&self) {} + #[instrument(level = "trace", skip_all)] pub async fn attach(&self) -> bool { // Create long-running connection maintenance routine @@ -340,10 +374,11 @@ impl AttachmentManager { return false; } inner.maintain_peers = true; - inner.attachment_maintainer_jh = Some(spawn( - "attachment maintainer", - self.clone().attachment_maintainer(), - )); + let registry = self.registry(); + inner.attachment_maintainer_jh = Some(spawn("attachment maintainer", async move { + let this = registry.attachment_manager(); + this.attachment_maintainer().await; + })); true } diff --git a/veilid-core/src/component.rs b/veilid-core/src/component.rs new file mode 100644 index 00000000..5bf8c15d --- /dev/null +++ b/veilid-core/src/component.rs @@ -0,0 +1,336 @@ +use std::marker::PhantomData; + +use super::*; + +pub trait AsAnyArcSendSync { + fn as_any_arc_send_sync(self: Arc) -> Arc; +} + +impl AsAnyArcSendSync for T { + fn as_any_arc_send_sync(self: Arc) -> Arc { + self + } +} + +pub trait VeilidComponent: + AsAnyArcSendSync + VeilidComponentRegistryAccessor + core::fmt::Debug +{ + fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>; + fn post_init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>; + fn pre_terminate(&self) -> SendPinBoxFutureLifetime<'_, ()>; + fn terminate(&self) -> SendPinBoxFutureLifetime<'_, ()>; +} + +pub trait VeilidComponentRegistryAccessor { + fn registry(&self) -> VeilidComponentRegistry; + + fn config(&self) -> VeilidConfig { + self.registry().config.clone() + } + fn update_callback(&self) -> UpdateCallback { + self.registry().config.update_callback() + } + fn event_bus(&self) -> EventBus { + 
self.registry().event_bus.clone() + } +} + +pub struct VeilidComponentGuard<'a, T: VeilidComponent + Send + Sync + 'static> { + component: Arc, + _phantom: core::marker::PhantomData<&'a T>, +} + +impl<'a, T> core::ops::Deref for VeilidComponentGuard<'a, T> +where + T: VeilidComponent + Send + Sync + 'static, +{ + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.component + } +} + +#[derive(Debug)] +struct VeilidComponentRegistryInner { + type_map: HashMap>, + init_order: Vec, + mock: bool, +} + +#[derive(Clone, Debug)] +pub struct VeilidComponentRegistry { + inner: Arc>, + config: VeilidConfig, + event_bus: EventBus, + init_lock: Arc>, +} + +impl VeilidComponentRegistry { + pub fn new(config: VeilidConfig) -> Self { + Self { + inner: Arc::new(Mutex::new(VeilidComponentRegistryInner { + type_map: HashMap::new(), + init_order: Vec::new(), + mock: false, + })), + config, + event_bus: EventBus::new(), + init_lock: Arc::new(AsyncMutex::new(false)), + } + } + + pub fn enable_mock(&self) { + let mut inner = self.inner.lock(); + inner.mock = true; + } + + pub fn register< + T: VeilidComponent + Send + Sync + 'static, + F: FnOnce(VeilidComponentRegistry) -> T, + >( + &self, + component_constructor: F, + ) { + let component = Arc::new(component_constructor(self.clone())); + let component_type_id = core::any::TypeId::of::(); + + let mut inner = self.inner.lock(); + assert!( + inner + .type_map + .insert(component_type_id, component) + .is_none(), + "should not register same component twice" + ); + inner.init_order.push(component_type_id); + } + + pub fn register_with_context< + C, + T: VeilidComponent + Send + Sync + 'static, + F: FnOnce(VeilidComponentRegistry, C) -> T, + >( + &self, + component_constructor: F, + context: C, + ) { + let component = Arc::new(component_constructor(self.clone(), context)); + let component_type_id = core::any::TypeId::of::(); + + let mut inner = self.inner.lock(); + assert!( + inner + .type_map + .insert(component_type_id, 
component) + .is_none(), + "should not register same component twice" + ); + inner.init_order.push(component_type_id); + } + + pub async fn init(&self) -> EyreResult<()> { + let Some(mut _init_guard) = asyncmutex_try_lock!(self.init_lock) else { + bail!("init should only happen one at a time"); + }; + if *_init_guard { + bail!("already initialized"); + } + + // Event bus starts up early + self.event_bus.startup().await?; + + // Process components in initialization order + let init_order = self.get_init_order(); + let mut initialized = vec![]; + for component in init_order { + if let Err(e) = component.init().await { + self.terminate_inner(initialized).await; + self.event_bus.shutdown().await; + return Err(e); + } + initialized.push(component); + } + + *_init_guard = true; + Ok(()) + } + + pub async fn post_init(&self) -> EyreResult<()> { + let Some(mut _init_guard) = asyncmutex_try_lock!(self.init_lock) else { + bail!("init should only happen one at a time"); + }; + if !*_init_guard { + bail!("not initialized"); + } + + let init_order = self.get_init_order(); + let mut post_initialized = vec![]; + for component in init_order { + if let Err(e) = component.post_init().await { + self.pre_terminate_inner(post_initialized).await; + return Err(e); + } + post_initialized.push(component) + } + Ok(()) + } + + pub async fn pre_terminate(&self) { + let Some(mut _init_guard) = asyncmutex_try_lock!(self.init_lock) else { + panic!("terminate should only happen one at a time"); + }; + if !*_init_guard { + panic!("not initialized"); + } + + let init_order = self.get_init_order(); + self.pre_terminate_inner(init_order).await; + } + + pub async fn terminate(&self) { + let Some(mut _init_guard) = asyncmutex_try_lock!(self.init_lock) else { + panic!("terminate should only happen one at a time"); + }; + if !*_init_guard { + panic!("not initialized"); + } + + // Terminate components in reverse initialization order + let init_order = self.get_init_order(); + 
self.terminate_inner(init_order).await; + + // Event bus shuts down last + self.event_bus.shutdown().await; + + *_init_guard = false; + } + + async fn pre_terminate_inner( + &self, + pre_initialized: Vec>, + ) { + for component in pre_initialized.iter().rev() { + component.pre_terminate().await; + } + } + async fn terminate_inner(&self, initialized: Vec>) { + for component in initialized.iter().rev() { + component.terminate().await; + } + } + + fn get_init_order(&self) -> Vec> { + let inner = self.inner.lock(); + inner + .init_order + .iter() + .map(|id| inner.type_map.get(id).unwrap().clone()) + .collect::>() + } + + ////////////////////////////////////////////////////////////// + + pub fn lookup<'a, T: VeilidComponent + Send + Sync + 'static>( + &self, + ) -> Option> { + let inner = self.inner.lock(); + let component_type_id = core::any::TypeId::of::(); + let component_dyn = inner.type_map.get(&component_type_id)?.clone(); + let component = component_dyn + .as_any_arc_send_sync() + .downcast::() + .unwrap(); + Some(VeilidComponentGuard { + component, + _phantom: PhantomData {}, + }) + } +} + +impl VeilidComponentRegistryAccessor for VeilidComponentRegistry { + fn registry(&self) -> VeilidComponentRegistry { + self.clone() + } +} + +//////////////////////////////////////////////////////////////////// + +macro_rules! impl_veilid_component_registry_accessor { + ($struct_name:ident) => { + impl VeilidComponentRegistryAccessor for $struct_name { + fn registry(&self) -> VeilidComponentRegistry { + self.registry.clone() + } + } + }; +} + +pub(crate) use impl_veilid_component_registry_accessor; + +///////////////////////////////////////////////////////////////////// + +macro_rules! 
impl_veilid_component { + ($component_name:ident) => { + impl_veilid_component_registry_accessor!($component_name); + + impl VeilidComponent for $component_name { + fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>> { + Box::pin(async { self.init_async().await }) + } + + fn post_init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>> { + Box::pin(async { self.post_init_async().await }) + } + + fn pre_terminate(&self) -> SendPinBoxFutureLifetime<'_, ()> { + Box::pin(async { self.pre_terminate_async().await }) + } + + fn terminate(&self) -> SendPinBoxFutureLifetime<'_, ()> { + Box::pin(async { self.terminate_async().await }) + } + } + }; +} + +pub(crate) use impl_veilid_component; + +///////////////////////////////////////////////////////////////////// + +// Utility macro for setting up a background TickTask +// Should be called during new/construction of a component with background tasks +// and before any post-init 'tick' operations are started +macro_rules! impl_setup_task { + ($this:expr, $this_type:ty, $task_name:ident, $task_routine:ident ) => {{ + let registry = $this.registry(); + $this.$task_name.set_routine(move |s, l, t| { + let registry = registry.clone(); + Box::pin(async move { + let this = registry.lookup::<$this_type>().unwrap(); + this.$task_routine(s, Timestamp::new(l), Timestamp::new(t)) + .await + }) + }); + }}; +} + +pub(crate) use impl_setup_task; + +// Utility macro for setting up an event bus handler +// Should be called after init, during post-init or later +// Subscription should be unsubscribed before termination +macro_rules! 
impl_subscribe_event_bus { + ($this:expr, $this_type:ty, $event_handler:ident ) => {{ + let registry = $this.registry(); + $this.event_bus().subscribe(move |evt| { + let registry = registry.clone(); + Box::pin(async move { + let this = registry.lookup::<$this_type>().unwrap(); + this.$event_handler(evt); + }) + }) + }}; +} + +pub(crate) use impl_subscribe_event_bus; diff --git a/veilid-core/src/core_context.rs b/veilid-core/src/core_context.rs index 6eab73d5..0179dfc6 100644 --- a/veilid-core/src/core_context.rs +++ b/veilid-core/src/core_context.rs @@ -1,242 +1,26 @@ -use crate::attachment_manager::*; +use crate::attachment_manager::{AttachmentManager, AttachmentManagerStartupContext}; use crate::crypto::Crypto; use crate::logging::*; -use crate::storage_manager::*; +use crate::network_manager::{NetworkManager, NetworkManagerStartupContext}; +use crate::routing_table::RoutingTable; +use crate::rpc_processor::{RPCProcessor, RPCProcessorStartupContext}; +use crate::storage_manager::StorageManager; use crate::veilid_api::*; use crate::veilid_config::*; use crate::*; pub type UpdateCallback = Arc; -/// Internal services startup mechanism. -/// Ensures that everything is started up, and shut down in the right order -/// and provides an atomic state for if the system is properly operational. 
-struct StartupShutdownContext { - pub config: VeilidConfig, - pub update_callback: UpdateCallback, - - pub event_bus: Option, - pub protected_store: Option, - pub table_store: Option, - #[cfg(feature = "unstable-blockstore")] - pub block_store: Option, - pub crypto: Option, - pub attachment_manager: Option, - pub storage_manager: Option, -} - -impl StartupShutdownContext { - pub fn new_empty(config: VeilidConfig, update_callback: UpdateCallback) -> Self { - Self { - config, - update_callback, - event_bus: None, - protected_store: None, - table_store: None, - #[cfg(feature = "unstable-blockstore")] - block_store: None, - crypto: None, - attachment_manager: None, - storage_manager: None, - } - } - - #[allow(clippy::too_many_arguments)] - pub fn new_full( - config: VeilidConfig, - update_callback: UpdateCallback, - event_bus: EventBus, - protected_store: ProtectedStore, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - crypto: Crypto, - attachment_manager: AttachmentManager, - storage_manager: StorageManager, - ) -> Self { - Self { - config, - update_callback, - event_bus: Some(event_bus), - protected_store: Some(protected_store), - table_store: Some(table_store), - #[cfg(feature = "unstable-blockstore")] - block_store: Some(block_store), - crypto: Some(crypto), - attachment_manager: Some(attachment_manager), - storage_manager: Some(storage_manager), - } - } - - #[instrument(level = "trace", target = "core_context", err, skip_all)] - pub async fn startup(&mut self) -> EyreResult<()> { - info!("Veilid API starting up"); - - info!("init api tracing"); - let (program_name, namespace) = { - let config = self.config.get(); - (config.program_name.clone(), config.namespace.clone()) - }; - - ApiTracingLayer::add_callback(program_name, namespace, self.update_callback.clone()) - .await?; - - // Add the event bus - let event_bus = EventBus::new(); - if let Err(e) = event_bus.startup().await { - error!("failed to start up event bus: 
{}", e); - self.shutdown().await; - return Err(e.into()); - } - self.event_bus = Some(event_bus.clone()); - - // Set up protected store - let protected_store = ProtectedStore::new(event_bus.clone(), self.config.clone()); - if let Err(e) = protected_store.init().await { - error!("failed to init protected store: {}", e); - self.shutdown().await; - return Err(e); - } - self.protected_store = Some(protected_store.clone()); - - // Set up tablestore and crypto system - let table_store = TableStore::new( - event_bus.clone(), - self.config.clone(), - protected_store.clone(), - ); - let crypto = Crypto::new(event_bus.clone(), self.config.clone(), table_store.clone()); - table_store.set_crypto(crypto.clone()); - - // Initialize table store first, so crypto code can load caches - // Tablestore can use crypto during init, just not any cached operations or things - // that require flushing back to the tablestore - if let Err(e) = table_store.init().await { - error!("failed to init table store: {}", e); - self.shutdown().await; - return Err(e); - } - self.table_store = Some(table_store.clone()); - - // Set up crypto - if let Err(e) = crypto.init().await { - error!("failed to init crypto: {}", e); - self.shutdown().await; - return Err(e); - } - self.crypto = Some(crypto.clone()); - - // Set up block store - #[cfg(feature = "unstable-blockstore")] - { - let block_store = BlockStore::new(event_bus.clone(), self.config.clone()); - if let Err(e) = block_store.init().await { - error!("failed to init block store: {}", e); - self.shutdown().await; - return Err(e); - } - self.block_store = Some(block_store.clone()); - } - - // Set up storage manager - let update_callback = self.update_callback.clone(); - - let storage_manager = StorageManager::new( - event_bus.clone(), - self.config.clone(), - self.crypto.clone().unwrap(), - self.table_store.clone().unwrap(), - #[cfg(feature = "unstable-blockstore")] - self.block_store.clone().unwrap(), - ); - if let Err(e) = 
storage_manager.init(update_callback).await { - error!("failed to init storage manager: {}", e); - self.shutdown().await; - return Err(e); - } - self.storage_manager = Some(storage_manager.clone()); - - // Set up attachment manager - let update_callback = self.update_callback.clone(); - let attachment_manager = AttachmentManager::new( - event_bus.clone(), - self.config.clone(), - storage_manager, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, - crypto, - ); - if let Err(e) = attachment_manager.init(update_callback).await { - error!("failed to init attachment manager: {}", e); - self.shutdown().await; - return Err(e); - } - self.attachment_manager = Some(attachment_manager); - - info!("Veilid API startup complete"); - Ok(()) - } - - #[instrument(level = "trace", target = "core_context", skip_all)] - pub async fn shutdown(&mut self) { - info!("Veilid API shutting down"); - - if let Some(attachment_manager) = &mut self.attachment_manager { - attachment_manager.terminate().await; - } - if let Some(storage_manager) = &mut self.storage_manager { - storage_manager.terminate().await; - } - #[cfg(feature = "unstable-blockstore")] - if let Some(block_store) = &mut self.block_store { - block_store.terminate().await; - } - if let Some(crypto) = &mut self.crypto { - crypto.terminate().await; - } - if let Some(table_store) = &mut self.table_store { - table_store.terminate().await; - } - if let Some(protected_store) = &mut self.protected_store { - protected_store.terminate().await; - } - if let Some(event_bus) = &mut self.event_bus { - event_bus.shutdown().await; - } - - info!("Veilid API shutdown complete"); - - // api logger terminate is idempotent - let (program_name, namespace) = { - let config = self.config.get(); - (config.program_name.clone(), config.namespace.clone()) - }; - - if let Err(e) = ApiTracingLayer::remove_callback(program_name, namespace).await { - error!("Error removing callback from ApiTracingLayer: {}", e); - } - - // send final 
shutdown update - (self.update_callback)(VeilidUpdate::Shutdown); - } -} +type InitKey = (String, String); ///////////////////////////////////////////////////////////////////////////// -pub struct VeilidCoreContext { - pub config: VeilidConfig, - pub update_callback: UpdateCallback, - // Event bus - pub event_bus: EventBus, - // Services - pub storage_manager: StorageManager, - pub protected_store: ProtectedStore, - pub table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] - pub block_store: BlockStore, - pub crypto: Crypto, - pub attachment_manager: AttachmentManager, +#[derive(Clone, Debug)] +pub(crate) struct VeilidCoreContext { + registry: VeilidComponentRegistry, } +impl_veilid_component_registry_accessor!(VeilidCoreContext); + impl VeilidCoreContext { #[instrument(level = "trace", target = "core_context", err, skip_all)] async fn new_with_config_callback( @@ -244,10 +28,9 @@ impl VeilidCoreContext { config_callback: ConfigCallback, ) -> VeilidAPIResult { // Set up config from callback - let mut config = VeilidConfig::new(); - config.setup(config_callback, update_callback.clone())?; + let config = VeilidConfig::new_from_callback(config_callback, update_callback)?; - Self::new_common(update_callback, config).await + Self::new_common(config).await } #[instrument(level = "trace", target = "core_context", err, skip_all)] @@ -256,16 +39,12 @@ impl VeilidCoreContext { config_inner: VeilidConfigInner, ) -> VeilidAPIResult { // Set up config from json - let mut config = VeilidConfig::new(); - config.setup_from_config(config_inner, update_callback.clone())?; - Self::new_common(update_callback, config).await + let config = VeilidConfig::new_from_config(config_inner, update_callback); + Self::new_common(config).await } #[instrument(level = "trace", target = "core_context", err, skip_all)] - async fn new_common( - update_callback: UpdateCallback, - config: VeilidConfig, - ) -> VeilidAPIResult { + async fn new_common(config: VeilidConfig) -> VeilidAPIResult { 
cfg_if! { if #[cfg(target_os = "android")] { if !crate::intf::android::is_android_ready() { @@ -274,45 +53,134 @@ impl VeilidCoreContext { } } - let mut sc = StartupShutdownContext::new_empty(config.clone(), update_callback); - sc.startup().await.map_err(VeilidAPIError::generic)?; + info!("Veilid API starting up"); - Ok(VeilidCoreContext { - config: sc.config, - update_callback: sc.update_callback, - event_bus: sc.event_bus.unwrap(), - storage_manager: sc.storage_manager.unwrap(), - protected_store: sc.protected_store.unwrap(), - table_store: sc.table_store.unwrap(), - #[cfg(feature = "unstable-blockstore")] - block_store: sc.block_store.unwrap(), - crypto: sc.crypto.unwrap(), - attachment_manager: sc.attachment_manager.unwrap(), - }) + let (program_name, namespace, update_callback) = { + let cfginner = config.get(); + ( + cfginner.program_name.clone(), + cfginner.namespace.clone(), + config.update_callback(), + ) + }; + + ApiTracingLayer::add_callback(program_name, namespace, update_callback.clone()).await?; + + // Create component registry + let registry = VeilidComponentRegistry::new(config); + + // Register all components + registry.register(ProtectedStore::new); + registry.register(Crypto::new); + registry.register(TableStore::new); + #[cfg(feature = "unstable-blockstore")] + registry.register(BlockStore::new); + registry.register(StorageManager::new); + registry.register(RoutingTable::new); + registry + .register_with_context(NetworkManager::new, NetworkManagerStartupContext::default()); + registry.register_with_context(RPCProcessor::new, RPCProcessorStartupContext::default()); + registry.register_with_context( + AttachmentManager::new, + AttachmentManagerStartupContext::default(), + ); + + // Run initialization + // This should make the majority of subsystems functional + registry.init().await.map_err(VeilidAPIError::internal)?; + + // Run post-initialization + // This should resolve any inter-subsystem dependencies + // required for background processes 
that utilize multiple subsystems + // Background processes also often require registry lookup of the + // current subsystem, which is not available until after init succeeds + if let Err(e) = registry.post_init().await { + registry.terminate().await; + return Err(VeilidAPIError::internal(e)); + } + + info!("Veilid API startup complete"); + + Ok(Self { registry }) } #[instrument(level = "trace", target = "core_context", skip_all)] async fn shutdown(self) { - let mut sc = StartupShutdownContext::new_full( - self.config.clone(), - self.update_callback.clone(), - self.event_bus, - self.protected_store, - self.table_store, - #[cfg(feature = "unstable-blockstore")] - self.block_store, - self.crypto, - self.attachment_manager, - self.storage_manager, - ); - sc.shutdown().await; + info!("Veilid API shutdown complete"); + + let (program_name, namespace, update_callback) = { + let config = self.registry.config(); + let cfginner = config.get(); + ( + cfginner.program_name.clone(), + cfginner.namespace.clone(), + config.update_callback(), + ) + }; + + // Run pre-termination + // This should shut down background processes that may require the existence of + // other subsystems that may not exist during final termination + self.registry.pre_terminate().await; + + // Run termination + // This should finish any shutdown operations for the subsystems + self.registry.terminate().await; + + if let Err(e) = ApiTracingLayer::remove_callback(program_name, namespace).await { + error!("Error removing callback from ApiTracingLayer: {}", e); + } + + // send final shutdown update + update_callback(VeilidUpdate::Shutdown); + } +} + +///////////////////////////////////////////////////////////////////////////// + +pub trait RegisteredComponents { + fn protected_store<'a>(&self) -> VeilidComponentGuard<'a, ProtectedStore>; + fn crypto<'a>(&self) -> VeilidComponentGuard<'a, Crypto>; + fn table_store<'a>(&self) -> VeilidComponentGuard<'a, TableStore>; + fn storage_manager<'a>(&self) -> 
VeilidComponentGuard<'a, StorageManager>; + fn routing_table<'a>(&self) -> VeilidComponentGuard<'a, RoutingTable>; + fn network_manager<'a>(&self) -> VeilidComponentGuard<'a, NetworkManager>; + fn rpc_processor<'a>(&self) -> VeilidComponentGuard<'a, RPCProcessor>; + fn attachment_manager<'a>(&self) -> VeilidComponentGuard<'a, AttachmentManager>; +} + +impl RegisteredComponents for T { + fn protected_store<'a>(&self) -> VeilidComponentGuard<'a, ProtectedStore> { + self.registry().lookup::().unwrap() + } + fn crypto<'a>(&self) -> VeilidComponentGuard<'a, Crypto> { + self.registry().lookup::().unwrap() + } + fn table_store<'a>(&self) -> VeilidComponentGuard<'a, TableStore> { + self.registry().lookup::().unwrap() + } + fn storage_manager<'a>(&self) -> VeilidComponentGuard<'a, StorageManager> { + self.registry().lookup::().unwrap() + } + fn routing_table<'a>(&self) -> VeilidComponentGuard<'a, RoutingTable> { + self.registry().lookup::().unwrap() + } + fn network_manager<'a>(&self) -> VeilidComponentGuard<'a, NetworkManager> { + self.registry().lookup::().unwrap() + } + fn rpc_processor<'a>(&self) -> VeilidComponentGuard<'a, RPCProcessor> { + self.registry().lookup::().unwrap() + } + fn attachment_manager<'a>(&self) -> VeilidComponentGuard<'a, AttachmentManager> { + self.registry().lookup::().unwrap() } } ///////////////////////////////////////////////////////////////////////////// lazy_static::lazy_static! { - static ref INITIALIZED: AsyncMutex> = AsyncMutex::new(HashSet::new()); + static ref INITIALIZED: Mutex> = Mutex::new(HashSet::new()); + static ref STARTUP_TABLE: AsyncTagLockTable = AsyncTagLockTable::new(); } /// Initialize a Veilid node. 
@@ -345,9 +213,11 @@ pub async fn api_startup( })?; let init_key = (program_name, namespace); + // Only allow one startup/shutdown per program_name+namespace combination simultaneously + let _tag_guard = STARTUP_TABLE.lock_tag(init_key.clone()).await; + // See if we have an API started up already - let mut initialized_lock = INITIALIZED.lock().await; - if initialized_lock.contains(&init_key) { + if INITIALIZED.lock().contains(&init_key) { apibail_already_initialized!(); } @@ -358,7 +228,8 @@ pub async fn api_startup( // Return an API object around our context let veilid_api = VeilidAPI::new(context); - initialized_lock.insert(init_key); + // Add to the initialized set + INITIALIZED.lock().insert(init_key); Ok(veilid_api) } @@ -403,12 +274,13 @@ pub async fn api_startup_config( // Get the program_name and namespace we're starting up in let program_name = config.program_name.clone(); let namespace = config.namespace.clone(); - let init_key = (program_name, namespace); + // Only allow one startup/shutdown per program_name+namespace combination simultaneously + let _tag_guard = STARTUP_TABLE.lock_tag(init_key.clone()).await; + // See if we have an API started up already - let mut initialized_lock = INITIALIZED.lock().await; - if initialized_lock.contains(&init_key) { + if INITIALIZED.lock().contains(&init_key) { apibail_already_initialized!(); } @@ -418,20 +290,32 @@ pub async fn api_startup_config( // Return an API object around our context let veilid_api = VeilidAPI::new(context); - initialized_lock.insert(init_key); + // Add to the initialized set + INITIALIZED.lock().insert(init_key); Ok(veilid_api) } #[instrument(level = "trace", target = "core_context", skip_all)] -pub async fn api_shutdown(context: VeilidCoreContext) { - let mut initialized_lock = INITIALIZED.lock().await; - +pub(crate) async fn api_shutdown(context: VeilidCoreContext) { let init_key = { - let config = context.config.get(); - (config.program_name.clone(), config.namespace.clone()) + let registry 
= context.registry(); + let config = registry.config(); + let cfginner = config.get(); + (cfginner.program_name.clone(), cfginner.namespace.clone()) }; + // Only allow one startup/shutdown per program_name+namespace combination simultaneously + let _tag_guard = STARTUP_TABLE.lock_tag(init_key.clone()).await; + + // See if we have an API started up already + if !INITIALIZED.lock().contains(&init_key) { + return; + } + + // Shutdown the context context.shutdown().await; - initialized_lock.remove(&init_key); + + // Remove from the initialized set + INITIALIZED.lock().remove(&init_key); } diff --git a/veilid-core/src/crypto/crypto_system.rs b/veilid-core/src/crypto/crypto_system.rs index 61e140b6..671ac28b 100644 --- a/veilid-core/src/crypto/crypto_system.rs +++ b/veilid-core/src/crypto/crypto_system.rs @@ -1,11 +1,11 @@ use super::*; -const VEILID_DOMAIN_API: &[u8] = b"VEILID_API"; +pub(crate) const VEILID_DOMAIN_API: &[u8] = b"VEILID_API"; pub trait CryptoSystem { // Accessors fn kind(&self) -> CryptoKind; - fn crypto(&self) -> Crypto; + fn crypto(&self) -> VeilidComponentGuard<'_, Crypto>; // Cached Operations fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult; diff --git a/veilid-core/src/crypto/envelope.rs b/veilid-core/src/crypto/envelope.rs index 913e54ad..b05f4a6a 100644 --- a/veilid-core/src/crypto/envelope.rs +++ b/veilid-core/src/crypto/envelope.rs @@ -67,7 +67,7 @@ impl Envelope { #[instrument(level = "trace", target = "envelope", skip_all)] pub fn from_signed_data( - crypto: Crypto, + crypto: &Crypto, data: &[u8], network_key: &Option, ) -> VeilidAPIResult { @@ -193,7 +193,7 @@ impl Envelope { #[instrument(level = "trace", target = "envelope", skip_all)] pub fn decrypt_body( &self, - crypto: Crypto, + crypto: &Crypto, data: &[u8], node_id_secret: &SecretKey, network_key: &Option, @@ -226,7 +226,7 @@ impl Envelope { #[instrument(level = "trace", target = "envelope", skip_all, err)] pub fn to_encrypted_data( &self, - crypto: Crypto, 
+ crypto: &Crypto, body: &[u8], node_id_secret: &SecretKey, network_key: &Option, diff --git a/veilid-core/src/crypto/guard.rs b/veilid-core/src/crypto/guard.rs new file mode 100644 index 00000000..abd9e9da --- /dev/null +++ b/veilid-core/src/crypto/guard.rs @@ -0,0 +1,276 @@ +use super::*; + +/// Guard to access a particular cryptosystem +pub struct CryptoSystemGuard<'a> { + crypto_system: Arc, + _phantom: core::marker::PhantomData<&'a (dyn CryptoSystem + Send + Sync)>, +} + +impl<'a> CryptoSystemGuard<'a> { + pub(super) fn new(crypto_system: Arc) -> Self { + Self { + crypto_system, + _phantom: PhantomData, + } + } + pub fn as_async(self) -> AsyncCryptoSystemGuard<'a> { + AsyncCryptoSystemGuard { guard: self } + } +} + +impl<'a> core::ops::Deref for CryptoSystemGuard<'a> { + type Target = dyn CryptoSystem + Send + Sync; + + fn deref(&self) -> &Self::Target { + self.crypto_system.as_ref() + } +} + +/// Async cryptosystem guard to help break up heavy blocking operations +pub struct AsyncCryptoSystemGuard<'a> { + guard: CryptoSystemGuard<'a>, +} + +async fn yielding R>(x: T) -> R { + let out = x(); + sleep(0).await; + out +} + +impl<'a> AsyncCryptoSystemGuard<'a> { + // Accessors + pub fn kind(&self) -> CryptoKind { + self.guard.kind() + } + pub fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> { + self.guard.crypto() + } + + // Cached Operations + pub async fn cached_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> VeilidAPIResult { + yielding(|| self.guard.cached_dh(key, secret)).await + } + + // Generation + pub async fn random_bytes(&self, len: u32) -> Vec { + yielding(|| self.guard.random_bytes(len)).await + } + pub fn default_salt_length(&self) -> u32 { + self.guard.default_salt_length() + } + pub async fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult { + yielding(|| self.guard.hash_password(password, salt)).await + } + pub async fn verify_password( + &self, + password: &[u8], + password_hash: &str, + ) -> VeilidAPIResult 
{ + yielding(|| self.guard.verify_password(password, password_hash)).await + } + pub async fn derive_shared_secret( + &self, + password: &[u8], + salt: &[u8], + ) -> VeilidAPIResult { + yielding(|| self.guard.derive_shared_secret(password, salt)).await + } + pub async fn random_nonce(&self) -> Nonce { + yielding(|| self.guard.random_nonce()).await + } + pub async fn random_shared_secret(&self) -> SharedSecret { + yielding(|| self.guard.random_shared_secret()).await + } + pub async fn compute_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> VeilidAPIResult { + yielding(|| self.guard.compute_dh(key, secret)).await + } + pub async fn generate_shared_secret( + &self, + key: &PublicKey, + secret: &SecretKey, + domain: &[u8], + ) -> VeilidAPIResult { + let dh = self.compute_dh(key, secret).await?; + Ok(self + .generate_hash(&[&dh.bytes, domain, VEILID_DOMAIN_API].concat()) + .await) + } + + pub async fn generate_keypair(&self) -> KeyPair { + yielding(|| self.guard.generate_keypair()).await + } + + pub async fn generate_hash(&self, data: &[u8]) -> HashDigest { + yielding(|| self.guard.generate_hash(data)).await + } + + pub async fn generate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + ) -> VeilidAPIResult { + yielding(|| self.guard.generate_hash_reader(reader)).await + } + + // Validation + pub async fn validate_keypair(&self, key: &PublicKey, secret: &SecretKey) -> bool { + yielding(|| self.guard.validate_keypair(key, secret)).await + } + + pub async fn validate_hash(&self, data: &[u8], hash: &HashDigest) -> bool { + yielding(|| self.guard.validate_hash(data, hash)).await + } + + pub async fn validate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + hash: &HashDigest, + ) -> VeilidAPIResult { + yielding(|| self.guard.validate_hash_reader(reader, hash)).await + } + + // Distance Metric + pub async fn distance(&self, key1: &CryptoKey, key2: &CryptoKey) -> CryptoKeyDistance { + yielding(|| self.guard.distance(key1, key2)).await + } + + // 
Authentication + pub async fn sign( + &self, + key: &PublicKey, + secret: &SecretKey, + data: &[u8], + ) -> VeilidAPIResult { + yielding(|| self.guard.sign(key, secret, data)).await + } + pub async fn verify( + &self, + key: &PublicKey, + data: &[u8], + signature: &Signature, + ) -> VeilidAPIResult { + yielding(|| self.guard.verify(key, data, signature)).await + } + + // AEAD Encrypt/Decrypt + pub fn aead_overhead(&self) -> usize { + self.guard.aead_overhead() + } + + pub async fn decrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> VeilidAPIResult<()> { + yielding(|| { + self.guard + .decrypt_in_place_aead(body, nonce, shared_secret, associated_data) + }) + .await + } + + pub async fn decrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> VeilidAPIResult> { + yielding(|| { + self.guard + .decrypt_aead(body, nonce, shared_secret, associated_data) + }) + .await + } + + pub async fn encrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> VeilidAPIResult<()> { + yielding(|| { + self.guard + .encrypt_in_place_aead(body, nonce, shared_secret, associated_data) + }) + .await + } + + pub async fn encrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> VeilidAPIResult> { + yielding(|| { + self.guard + .encrypt_aead(body, nonce, shared_secret, associated_data) + }) + .await + } + + // NoAuth Encrypt/Decrypt + pub async fn crypt_in_place_no_auth( + &self, + body: &mut [u8], + nonce: &[u8; NONCE_LENGTH], + shared_secret: &SharedSecret, + ) { + yielding(|| { + self.guard + .crypt_in_place_no_auth(body, nonce, shared_secret) + }) + .await + } + + pub async fn crypt_b2b_no_auth( + &self, + in_buf: &[u8], + out_buf: &mut [u8], + nonce: &[u8; NONCE_LENGTH], + 
shared_secret: &SharedSecret, + ) { + yielding(|| { + self.guard + .crypt_b2b_no_auth(in_buf, out_buf, nonce, shared_secret) + }) + .await + } + + pub async fn crypt_no_auth_aligned_8( + &self, + body: &[u8], + nonce: &[u8; NONCE_LENGTH], + shared_secret: &SharedSecret, + ) -> Vec { + yielding(|| { + self.guard + .crypt_no_auth_aligned_8(body, nonce, shared_secret) + }) + .await + } + + pub async fn crypt_no_auth_unaligned( + &self, + body: &[u8], + nonce: &[u8; NONCE_LENGTH], + shared_secret: &SharedSecret, + ) -> Vec { + yielding(|| { + self.guard + .crypt_no_auth_unaligned(body, nonce, shared_secret) + }) + .await + } +} diff --git a/veilid-core/src/crypto/mod.rs b/veilid-core/src/crypto/mod.rs index a0338450..6d4b98be 100644 --- a/veilid-core/src/crypto/mod.rs +++ b/veilid-core/src/crypto/mod.rs @@ -1,6 +1,7 @@ mod blake3digest512; mod dh_cache; mod envelope; +mod guard; mod receipt; mod types; @@ -16,6 +17,7 @@ pub use blake3digest512::*; pub use crypto_system::*; pub use envelope::*; +pub use guard::*; pub use receipt::*; pub use types::*; @@ -29,9 +31,7 @@ use core::convert::TryInto; use dh_cache::*; use hashlink::linked_hash_map::Entry; use hashlink::LruCache; - -/// Handle to a particular cryptosystem -pub type CryptoSystemVersion = Arc; +use std::marker::PhantomData; cfg_if! 
{ if #[cfg(all(feature = "enable-crypto-none", feature = "enable-crypto-vld0"))] { @@ -72,23 +72,40 @@ pub fn best_envelope_version() -> EnvelopeVersion { struct CryptoInner { dh_cache: DHCache, flush_future: Option>, - #[cfg(feature = "enable-crypto-vld0")] - crypto_vld0: Option>, - #[cfg(feature = "enable-crypto-none")] - crypto_none: Option>, } -struct CryptoUnlockedInner { - _event_bus: EventBus, - config: VeilidConfig, - table_store: TableStore, +impl fmt::Debug for CryptoInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CryptoInner") + //.field("dh_cache", &self.dh_cache) + // .field("flush_future", &self.flush_future) + // .field("crypto_vld0", &self.crypto_vld0) + // .field("crypto_none", &self.crypto_none) + .finish() + } } /// Crypto factory implementation -#[derive(Clone)] pub struct Crypto { - unlocked_inner: Arc, - inner: Arc>, + registry: VeilidComponentRegistry, + inner: Mutex, + #[cfg(feature = "enable-crypto-vld0")] + crypto_vld0: Arc, + #[cfg(feature = "enable-crypto-none")] + crypto_none: Arc, +} + +impl_veilid_component!(Crypto); + +impl fmt::Debug for Crypto { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Crypto") + //.field("registry", &self.registry) + .field("inner", &self.inner) + // .field("crypto_vld0", &self.crypto_vld0) + // .field("crypto_none", &self.crypto_none) + .finish() + } } impl Crypto { @@ -96,63 +113,43 @@ impl Crypto { CryptoInner { dh_cache: DHCache::new(DH_CACHE_SIZE), flush_future: None, + } + } + + pub fn new(registry: VeilidComponentRegistry) -> Self { + Self { + registry: registry.clone(), + inner: Mutex::new(Self::new_inner()), #[cfg(feature = "enable-crypto-vld0")] - crypto_vld0: None, + crypto_vld0: Arc::new(vld0::CryptoSystemVLD0::new(registry.clone())), #[cfg(feature = "enable-crypto-none")] - crypto_none: None, + crypto_none: Arc::new(none::CryptoSystemNONE::new(registry.clone())), } } - pub fn new(event_bus: EventBus, config: VeilidConfig, 
table_store: TableStore) -> Self { - let out = Self { - unlocked_inner: Arc::new(CryptoUnlockedInner { - _event_bus: event_bus, - config, - table_store, - }), - inner: Arc::new(Mutex::new(Self::new_inner())), - }; - - #[cfg(feature = "enable-crypto-vld0")] - { - out.inner.lock().crypto_vld0 = Some(Arc::new(vld0::CryptoSystemVLD0::new(out.clone()))); - } - - #[cfg(feature = "enable-crypto-none")] - { - out.inner.lock().crypto_none = Some(Arc::new(none::CryptoSystemNONE::new(out.clone()))); - } - - out - } - - pub fn config(&self) -> VeilidConfig { - self.unlocked_inner.config.clone() - } - #[instrument(level = "trace", target = "crypto", skip_all, err)] - pub async fn init(&self) -> EyreResult<()> { - let table_store = self.unlocked_inner.table_store.clone(); + async fn init_async(&self) -> EyreResult<()> { + // Nothing to initialize at this time + Ok(()) + } + + // Setup called by table store after it get initialized + #[instrument(level = "trace", target = "crypto", skip_all, err)] + pub(crate) async fn table_store_setup(&self, table_store: &TableStore) -> EyreResult<()> { // Init node id from config - if let Err(e) = self - .unlocked_inner - .config - .init_node_ids(self.clone(), table_store.clone()) - .await - { + if let Err(e) = self.setup_node_ids(table_store).await { return Err(e).wrap_err("init node id failed"); } // make local copy of node id for easy access let mut cache_validity_key: Vec = Vec::new(); - { - let c = self.unlocked_inner.config.get(); + self.config().with(|c| { for ck in VALID_CRYPTO_KINDS { if let Some(nid) = c.network.routing_table.node_id.get(ck) { cache_validity_key.append(&mut nid.value.bytes.to_vec()); } } - }; + }); // load caches if they are valid for this node id let mut db = table_store @@ -175,13 +172,17 @@ impl Crypto { db.store(0, b"cache_validity_key", &cache_validity_key) .await?; } + Ok(()) + } + #[instrument(level = "trace", target = "crypto", skip_all, err)] + async fn post_init_async(&self) -> EyreResult<()> { // Schedule 
flushing - let this = self.clone(); + let registry = self.registry(); let flush_future = interval("crypto flush", 60000, move || { - let this = this.clone(); + let crypto = registry.crypto(); async move { - if let Err(e) = this.flush().await { + if let Err(e) = crypto.flush().await { warn!("flush failed: {}", e); } } @@ -197,16 +198,12 @@ impl Crypto { cache_to_bytes(&inner.dh_cache) }; - let db = self - .unlocked_inner - .table_store - .open("crypto_caches", 1) - .await?; + let db = self.table_store().open("crypto_caches", 1).await?; db.store(0, b"dh_cache", &cache_bytes).await?; Ok(()) } - pub async fn terminate(&self) { + async fn pre_terminate_async(&self) { let flush_future = self.inner.lock().flush_future.take(); if let Some(f) = flush_future { f.await; @@ -222,23 +219,36 @@ impl Crypto { }; } + async fn terminate_async(&self) { + // Nothing to terminate at this time + } + /// Factory method to get a specific crypto version - pub fn get(&self, kind: CryptoKind) -> Option { - let inner = self.inner.lock(); + pub fn get(&self, kind: CryptoKind) -> Option> { match kind { #[cfg(feature = "enable-crypto-vld0")] - CRYPTO_KIND_VLD0 => Some(inner.crypto_vld0.clone().unwrap()), + CRYPTO_KIND_VLD0 => Some(CryptoSystemGuard::new(self.crypto_vld0.clone())), #[cfg(feature = "enable-crypto-none")] - CRYPTO_KIND_NONE => Some(inner.crypto_none.clone().unwrap()), + CRYPTO_KIND_NONE => Some(CryptoSystemGuard::new(self.crypto_none.clone())), _ => None, } } + /// Factory method to get a specific crypto version for async use + pub fn get_async(&self, kind: CryptoKind) -> Option> { + self.get(kind).map(|x| x.as_async()) + } + // Factory method to get the best crypto version - pub fn best(&self) -> CryptoSystemVersion { + pub fn best(&self) -> CryptoSystemGuard<'_> { self.get(best_crypto_kind()).unwrap() } + // Factory method to get the best crypto version for async use + pub fn best_async(&self) -> AsyncCryptoSystemGuard<'_> { + self.get_async(best_crypto_kind()).unwrap() + } + 
/// Signature set verification /// Returns Some() the set of signature cryptokinds that validate and are supported /// Returns None if any cryptokinds are supported and do not validate @@ -331,4 +341,120 @@ impl Crypto { } Ok(()) } + + #[cfg(not(test))] + async fn setup_node_id( + &self, + vcrypto: AsyncCryptoSystemGuard<'_>, + table_store: &TableStore, + ) -> VeilidAPIResult<(TypedKey, TypedSecret)> { + let config = self.config(); + let ck = vcrypto.kind(); + let (mut node_id, mut node_id_secret) = config.with(|c| { + ( + c.network.routing_table.node_id.get(ck), + c.network.routing_table.node_id_secret.get(ck), + ) + }); + + // See if node id was previously stored in the table store + let config_table = table_store.open("__veilid_config", 1).await?; + + let table_key_node_id = format!("node_id_{}", ck); + let table_key_node_id_secret = format!("node_id_secret_{}", ck); + + if node_id.is_none() { + log_crypto!(debug "pulling {} from storage", table_key_node_id); + if let Ok(Some(stored_node_id)) = config_table + .load_json::(0, table_key_node_id.as_bytes()) + .await + { + log_crypto!(debug "{} found in storage", table_key_node_id); + node_id = Some(stored_node_id); + } else { + log_crypto!(debug "{} not found in storage", table_key_node_id); + } + } + + // See if node id secret was previously stored in the protected store + if node_id_secret.is_none() { + log_crypto!(debug "pulling {} from storage", table_key_node_id_secret); + if let Ok(Some(stored_node_id_secret)) = config_table + .load_json::(0, table_key_node_id_secret.as_bytes()) + .await + { + log_crypto!(debug "{} found in storage", table_key_node_id_secret); + node_id_secret = Some(stored_node_id_secret); + } else { + log_crypto!(debug "{} not found in storage", table_key_node_id_secret); + } + } + + // If we have a node id from storage, check it + let (node_id, node_id_secret) = + if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) { + // Validate node id + if !vcrypto + 
.validate_keypair(&node_id.value, &node_id_secret.value) + .await + { + apibail_generic!(format!( + "node_id_secret_{} and node_id_key_{} don't match", + ck, ck + )); + } + (node_id, node_id_secret) + } else { + // If we still don't have a valid node id, generate one + log_crypto!(debug "generating new node_id_{}", ck); + let kp = vcrypto.generate_keypair().await; + (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) + }; + info!("Node Id: {}", node_id); + + // Save the node id / secret in storage + config_table + .store_json(0, table_key_node_id.as_bytes(), &node_id) + .await?; + config_table + .store_json(0, table_key_node_id_secret.as_bytes(), &node_id_secret) + .await?; + + Ok((node_id, node_id_secret)) + } + + /// Get the node id from config if one is specified. + /// Must be done -after- protected store is initialized, during table store init + #[cfg_attr(test, allow(unused_variables))] + async fn setup_node_ids(&self, table_store: &TableStore) -> VeilidAPIResult<()> { + let mut out_node_id = TypedKeyGroup::new(); + let mut out_node_id_secret = TypedSecretGroup::new(); + + for ck in VALID_CRYPTO_KINDS { + let vcrypto = self + .get_async(ck) + .expect("Valid crypto kind is not actually valid."); + + #[cfg(test)] + let (node_id, node_id_secret) = { + let kp = vcrypto.generate_keypair().await; + (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) + }; + #[cfg(not(test))] + let (node_id, node_id_secret) = self.setup_node_id(vcrypto, table_store).await?; + + // Save for config + out_node_id.add(node_id); + out_node_id_secret.add(node_id_secret); + } + + // Commit back to config + self.config().try_with_mut(|c| { + c.network.routing_table.node_id = out_node_id; + c.network.routing_table.node_id_secret = out_node_id_secret; + Ok(()) + })?; + + Ok(()) + } } diff --git a/veilid-core/src/crypto/none/mod.rs b/veilid-core/src/crypto/none/mod.rs index 8f4e1642..abe17927 100644 --- a/veilid-core/src/crypto/none/mod.rs +++ 
b/veilid-core/src/crypto/none/mod.rs @@ -49,14 +49,13 @@ fn is_bytes_eq_32(a: &[u8], v: u8) -> bool { } /// None CryptoSystem -#[derive(Clone)] pub struct CryptoSystemNONE { - crypto: Crypto, + registry: VeilidComponentRegistry, } impl CryptoSystemNONE { - pub fn new(crypto: Crypto) -> Self { - Self { crypto } + pub fn new(registry: VeilidComponentRegistry) -> Self { + Self { registry } } } @@ -66,13 +65,13 @@ impl CryptoSystem for CryptoSystemNONE { CRYPTO_KIND_NONE } - fn crypto(&self) -> Crypto { - self.crypto.clone() + fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> { + self.registry().lookup::().unwrap() } // Cached Operations fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { - self.crypto + self.crypto() .cached_dh_internal::(self, key, secret) } diff --git a/veilid-core/src/crypto/receipt.rs b/veilid-core/src/crypto/receipt.rs index 65462c45..5231154f 100644 --- a/veilid-core/src/crypto/receipt.rs +++ b/veilid-core/src/crypto/receipt.rs @@ -68,7 +68,7 @@ impl Receipt { } #[instrument(level = "trace", target = "receipt", skip_all, err)] - pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> VeilidAPIResult { + pub fn from_signed_data(crypto: &Crypto, data: &[u8]) -> VeilidAPIResult { // Ensure we are at least the length of the envelope if data.len() < MIN_RECEIPT_SIZE { apibail_parse_error!("receipt too small", data.len()); @@ -157,7 +157,7 @@ impl Receipt { } #[instrument(level = "trace", target = "receipt", skip_all, err)] - pub fn to_signed_data(&self, crypto: Crypto, secret: &SecretKey) -> VeilidAPIResult> { + pub fn to_signed_data(&self, crypto: &Crypto, secret: &SecretKey) -> VeilidAPIResult> { // Ensure extra data isn't too long let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE; if receipt_size > MAX_RECEIPT_SIZE { diff --git a/veilid-core/src/crypto/tests/test_crypto.rs b/veilid-core/src/crypto/tests/test_crypto.rs index ac1b41e7..bb9d7539 100644 --- a/veilid-core/src/crypto/tests/test_crypto.rs +++ 
b/veilid-core/src/crypto/tests/test_crypto.rs @@ -2,20 +2,20 @@ use super::*; static LOREM_IPSUM:&[u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. "; -pub async fn test_aead(vcrypto: CryptoSystemVersion) { +pub async fn test_aead(vcrypto: &AsyncCryptoSystemGuard<'_>) { trace!("test_aead"); - let n1 = vcrypto.random_nonce(); + let n1 = vcrypto.random_nonce().await; let n2 = loop { - let n = vcrypto.random_nonce(); + let n = vcrypto.random_nonce().await; if n != n1 { break n; } }; - let ss1 = vcrypto.random_shared_secret(); + let ss1 = vcrypto.random_shared_secret().await; let ss2 = loop { - let ss = vcrypto.random_shared_secret(); + let ss = vcrypto.random_shared_secret().await; if ss != ss1 { break ss; } @@ -27,6 +27,7 @@ pub async fn test_aead(vcrypto: CryptoSystemVersion) { assert!( vcrypto .encrypt_in_place_aead(&mut body, &n1, &ss1, None) + .await .is_ok(), "encrypt should succeed" ); @@ -41,6 +42,7 @@ pub async fn test_aead(vcrypto: CryptoSystemVersion) { assert!( vcrypto .decrypt_in_place_aead(&mut body, &n1, &ss1, None) + .await .is_ok(), "decrypt should succeed" ); @@ -49,6 +51,7 @@ pub async fn test_aead(vcrypto: CryptoSystemVersion) { assert!( vcrypto .decrypt_in_place_aead(&mut body3, &n2, &ss1, None) + .await .is_err(), "decrypt with wrong nonce should fail" ); @@ -57,6 +60,7 @@ pub async fn test_aead(vcrypto: CryptoSystemVersion) { assert!( vcrypto .decrypt_in_place_aead(&mut body4, &n1, &ss2, None) + .await .is_err(), "decrypt with wrong secret should fail" ); @@ -65,37 +69,47 @@ pub async fn test_aead(vcrypto: CryptoSystemVersion) { assert!( vcrypto 
.decrypt_in_place_aead(&mut body5, &n1, &ss2, Some(b"foobar")) + .await .is_err(), "decrypt with wrong associated data should fail" ); assert_ne!(body5, body, "failure changes data"); assert!( - vcrypto.decrypt_aead(LOREM_IPSUM, &n1, &ss1, None).is_err(), + vcrypto + .decrypt_aead(LOREM_IPSUM, &n1, &ss1, None) + .await + .is_err(), "should fail authentication" ); - let body5 = vcrypto.encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); - let body6 = vcrypto.decrypt_aead(&body5, &n1, &ss1, None).unwrap(); - let body7 = vcrypto.encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); + let body5 = vcrypto + .encrypt_aead(LOREM_IPSUM, &n1, &ss1, None) + .await + .unwrap(); + let body6 = vcrypto.decrypt_aead(&body5, &n1, &ss1, None).await.unwrap(); + let body7 = vcrypto + .encrypt_aead(LOREM_IPSUM, &n1, &ss1, None) + .await + .unwrap(); assert_eq!(body6, LOREM_IPSUM); assert_eq!(body5, body7); } -pub async fn test_no_auth(vcrypto: CryptoSystemVersion) { +pub async fn test_no_auth(vcrypto: &AsyncCryptoSystemGuard<'_>) { trace!("test_no_auth"); - let n1 = vcrypto.random_nonce(); + let n1 = vcrypto.random_nonce().await; let n2 = loop { - let n = vcrypto.random_nonce(); + let n = vcrypto.random_nonce().await; if n != n1 { break n; } }; - let ss1 = vcrypto.random_shared_secret(); + let ss1 = vcrypto.random_shared_secret().await; let ss2 = loop { - let ss = vcrypto.random_shared_secret(); + let ss = vcrypto.random_shared_secret().await; if ss != ss1 { break ss; } @@ -104,7 +118,7 @@ pub async fn test_no_auth(vcrypto: CryptoSystemVersion) { let mut body = LOREM_IPSUM.to_vec(); let body2 = body.clone(); let size_before_encrypt = body.len(); - vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1).await; let size_after_encrypt = body.len(); assert_eq!( @@ -114,49 +128,69 @@ pub async fn test_no_auth(vcrypto: CryptoSystemVersion) { let mut body3 = body.clone(); let mut body4 = body.clone(); - vcrypto.crypt_in_place_no_auth(&mut 
body, &n1, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1).await; assert_eq!(body, body2, "result after decrypt should be the same"); - vcrypto.crypt_in_place_no_auth(&mut body3, &n2, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body3, &n2, &ss1).await; assert_ne!(body3, body, "decrypt should not be equal with wrong nonce"); - vcrypto.crypt_in_place_no_auth(&mut body4, &n1, &ss2); + vcrypto.crypt_in_place_no_auth(&mut body4, &n1, &ss2).await; assert_ne!(body4, body, "decrypt should not be equal with wrong secret"); - let body5 = vcrypto.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1); - let body6 = vcrypto.crypt_no_auth_unaligned(&body5, &n1, &ss1); - let body7 = vcrypto.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1); + let body5 = vcrypto + .crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1) + .await; + let body6 = vcrypto.crypt_no_auth_unaligned(&body5, &n1, &ss1).await; + let body7 = vcrypto + .crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1) + .await; assert_eq!(body6, LOREM_IPSUM); assert_eq!(body5, body7); - let body5 = vcrypto.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1); - let body6 = vcrypto.crypt_no_auth_aligned_8(&body5, &n1, &ss1); - let body7 = vcrypto.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1); + let body5 = vcrypto + .crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1) + .await; + let body6 = vcrypto.crypt_no_auth_aligned_8(&body5, &n1, &ss1).await; + let body7 = vcrypto + .crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1) + .await; assert_eq!(body6, LOREM_IPSUM); assert_eq!(body5, body7); } -pub async fn test_dh(vcrypto: CryptoSystemVersion) { +pub async fn test_dh(vcrypto: &AsyncCryptoSystemGuard<'_>) { trace!("test_dh"); - let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); - assert!(vcrypto.validate_keypair(&dht_key, &dht_key_secret)); - let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); - assert!(vcrypto.validate_keypair(&dht_key2, &dht_key_secret2)); + let (dht_key, dht_key_secret) = 
vcrypto.generate_keypair().await.into_split(); + assert!(vcrypto.validate_keypair(&dht_key, &dht_key_secret).await); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); + assert!(vcrypto.validate_keypair(&dht_key2, &dht_key_secret2).await); - let r1 = vcrypto.compute_dh(&dht_key, &dht_key_secret2).unwrap(); - let r2 = vcrypto.compute_dh(&dht_key2, &dht_key_secret).unwrap(); - let r3 = vcrypto.compute_dh(&dht_key, &dht_key_secret2).unwrap(); - let r4 = vcrypto.compute_dh(&dht_key2, &dht_key_secret).unwrap(); + let r1 = vcrypto + .compute_dh(&dht_key, &dht_key_secret2) + .await + .unwrap(); + let r2 = vcrypto + .compute_dh(&dht_key2, &dht_key_secret) + .await + .unwrap(); + let r3 = vcrypto + .compute_dh(&dht_key, &dht_key_secret2) + .await + .unwrap(); + let r4 = vcrypto + .compute_dh(&dht_key2, &dht_key_secret) + .await + .unwrap(); assert_eq!(r1, r2); assert_eq!(r3, r4); assert_eq!(r2, r3); trace!("dh: {:?}", r1); // test cache - let r5 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); - let r6 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); - let r7 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); - let r8 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); + let r5 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).await.unwrap(); + let r6 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).await.unwrap(); + let r7 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).await.unwrap(); + let r8 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).await.unwrap(); assert_eq!(r1, r5); assert_eq!(r2, r6); assert_eq!(r3, r7); @@ -164,63 +198,67 @@ pub async fn test_dh(vcrypto: CryptoSystemVersion) { trace!("cached_dh: {:?}", r5); } -pub async fn test_generation(vcrypto: CryptoSystemVersion) { - let b1 = vcrypto.random_bytes(32); - let b2 = vcrypto.random_bytes(32); +pub async fn test_generation(vcrypto: &AsyncCryptoSystemGuard<'_>) { + let b1 = vcrypto.random_bytes(32).await; + let b2 = vcrypto.random_bytes(32).await; 
assert_ne!(b1, b2); assert_eq!(b1.len(), 32); assert_eq!(b2.len(), 32); - let b3 = vcrypto.random_bytes(0); - let b4 = vcrypto.random_bytes(0); + let b3 = vcrypto.random_bytes(0).await; + let b4 = vcrypto.random_bytes(0).await; assert_eq!(b3, b4); assert_eq!(b3.len(), 0); assert_ne!(vcrypto.default_salt_length(), 0); - let pstr1 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap(); - let pstr2 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap(); + let pstr1 = vcrypto.hash_password(b"abc123", b"qwerasdf").await.unwrap(); + let pstr2 = vcrypto.hash_password(b"abc123", b"qwerasdf").await.unwrap(); assert_eq!(pstr1, pstr2); - let pstr3 = vcrypto.hash_password(b"abc123", b"qwerasdg").unwrap(); + let pstr3 = vcrypto.hash_password(b"abc123", b"qwerasdg").await.unwrap(); assert_ne!(pstr1, pstr3); - let pstr4 = vcrypto.hash_password(b"abc124", b"qwerasdf").unwrap(); + let pstr4 = vcrypto.hash_password(b"abc124", b"qwerasdf").await.unwrap(); assert_ne!(pstr1, pstr4); - let pstr5 = vcrypto.hash_password(b"abc124", b"qwerasdg").unwrap(); + let pstr5 = vcrypto.hash_password(b"abc124", b"qwerasdg").await.unwrap(); assert_ne!(pstr3, pstr5); vcrypto .hash_password(b"abc123", b"qwe") + .await .expect_err("should reject short salt"); vcrypto .hash_password( b"abc123", b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz", ) + .await .expect_err("should reject long salt"); - assert!(vcrypto.verify_password(b"abc123", &pstr1).unwrap()); - assert!(vcrypto.verify_password(b"abc123", &pstr2).unwrap()); - assert!(vcrypto.verify_password(b"abc123", &pstr3).unwrap()); - assert!(!vcrypto.verify_password(b"abc123", &pstr4).unwrap()); - assert!(!vcrypto.verify_password(b"abc123", &pstr5).unwrap()); + assert!(vcrypto.verify_password(b"abc123", &pstr1).await.unwrap()); + assert!(vcrypto.verify_password(b"abc123", &pstr2).await.unwrap()); + assert!(vcrypto.verify_password(b"abc123", &pstr3).await.unwrap()); + assert!(!vcrypto.verify_password(b"abc123", 
&pstr4).await.unwrap()); + assert!(!vcrypto.verify_password(b"abc123", &pstr5).await.unwrap()); - let ss1 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf"); - let ss2 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf"); + let ss1 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf").await; + let ss2 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf").await; assert_eq!(ss1, ss2); - let ss3 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdg"); + let ss3 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdg").await; assert_ne!(ss1, ss3); - let ss4 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdf"); + let ss4 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdf").await; assert_ne!(ss1, ss4); - let ss5 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdg"); + let ss5 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdg").await; assert_ne!(ss3, ss5); vcrypto .derive_shared_secret(b"abc123", b"qwe") + .await .expect_err("should reject short salt"); vcrypto .derive_shared_secret( b"abc123", b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz", ) + .await .expect_err("should reject long salt"); } @@ -230,11 +268,11 @@ pub async fn test_all() { // Test versions for v in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(v).unwrap(); - test_aead(vcrypto.clone()).await; - test_no_auth(vcrypto.clone()).await; - test_dh(vcrypto.clone()).await; - test_generation(vcrypto).await; + let vcrypto = crypto.get_async(v).unwrap(); + test_aead(&vcrypto).await; + test_no_auth(&vcrypto).await; + test_dh(&vcrypto).await; + test_generation(&vcrypto).await; } crypto_tests_shutdown(api.clone()).await; diff --git a/veilid-core/src/crypto/tests/test_envelope_receipt.rs b/veilid-core/src/crypto/tests/test_envelope_receipt.rs index 1b8a41f6..fda3a1a3 100644 --- a/veilid-core/src/crypto/tests/test_envelope_receipt.rs +++ b/veilid-core/src/crypto/tests/test_envelope_receipt.rs @@ -2,9 +2,10 @@ use super::*; pub async fn test_envelope_round_trip( envelope_version: 
EnvelopeVersion, - vcrypto: CryptoSystemVersion, + vcrypto: &AsyncCryptoSystemGuard<'_>, network_key: Option, ) { + let crypto = vcrypto.crypto(); if network_key.is_some() { info!( "--- test envelope round trip {} w/network key ---", @@ -16,9 +17,9 @@ pub async fn test_envelope_round_trip( // Create envelope let ts = Timestamp::from(0x12345678ABCDEF69u64); - let nonce = vcrypto.random_nonce(); - let (sender_id, sender_secret) = vcrypto.generate_keypair().into_split(); - let (recipient_id, recipient_secret) = vcrypto.generate_keypair().into_split(); + let nonce = vcrypto.random_nonce().await; + let (sender_id, sender_secret) = vcrypto.generate_keypair().await.into_split(); + let (recipient_id, recipient_secret) = vcrypto.generate_keypair().await.into_split(); let envelope = Envelope::new( envelope_version, vcrypto.kind(), @@ -33,15 +34,15 @@ pub async fn test_envelope_round_trip( // Serialize to bytes let enc_data = envelope - .to_encrypted_data(vcrypto.crypto(), body, &sender_secret, &network_key) + .to_encrypted_data(&crypto, body, &sender_secret, &network_key) .expect("failed to encrypt data"); // Deserialize from bytes - let envelope2 = Envelope::from_signed_data(vcrypto.crypto(), &enc_data, &network_key) + let envelope2 = Envelope::from_signed_data(&crypto, &enc_data, &network_key) .expect("failed to deserialize envelope from data"); let body2 = envelope2 - .decrypt_body(vcrypto.crypto(), &enc_data, &recipient_secret, &network_key) + .decrypt_body(&crypto, &enc_data, &recipient_secret, &network_key) .expect("failed to decrypt envelope body"); // Compare envelope and body @@ -53,43 +54,44 @@ pub async fn test_envelope_round_trip( let mut mod_enc_data = enc_data.clone(); mod_enc_data[enc_data_len - 1] ^= 0x80u8; assert!( - Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data, &network_key).is_err(), + Envelope::from_signed_data(&crypto, &mod_enc_data, &network_key).is_err(), "should have failed to decode envelope with modified signature" ); let mut 
mod_enc_data2 = enc_data.clone(); mod_enc_data2[enc_data_len - 65] ^= 0x80u8; assert!( - Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data2, &network_key).is_err(), + Envelope::from_signed_data(&crypto, &mod_enc_data2, &network_key).is_err(), "should have failed to decode envelope with modified data" ); } pub async fn test_receipt_round_trip( envelope_version: EnvelopeVersion, - vcrypto: CryptoSystemVersion, + vcrypto: &AsyncCryptoSystemGuard<'_>, ) { + let crypto = vcrypto.crypto(); info!("--- test receipt round trip ---"); // Create arbitrary body let body = b"This is an arbitrary body"; // Create receipt - let nonce = vcrypto.random_nonce(); - let (sender_id, sender_secret) = vcrypto.generate_keypair().into_split(); + let nonce = vcrypto.random_nonce().await; + let (sender_id, sender_secret) = vcrypto.generate_keypair().await.into_split(); let receipt = Receipt::try_new(envelope_version, vcrypto.kind(), nonce, sender_id, body) .expect("should not fail"); // Serialize to bytes let mut enc_data = receipt - .to_signed_data(vcrypto.crypto(), &sender_secret) + .to_signed_data(&crypto, &sender_secret) .expect("failed to make signed data"); // Deserialize from bytes - let receipt2 = Receipt::from_signed_data(vcrypto.crypto(), &enc_data) + let receipt2 = Receipt::from_signed_data(&crypto, &enc_data) .expect("failed to deserialize envelope from data"); // Should not validate even when a single bit is changed enc_data[5] = 0x01; - Receipt::from_signed_data(vcrypto.crypto(), &enc_data) + Receipt::from_signed_data(&crypto, &enc_data) .expect_err("should have failed to decrypt using wrong secret"); // Compare receipts @@ -103,12 +105,12 @@ pub async fn test_all() { // Test versions for ev in VALID_ENVELOPE_VERSIONS { for v in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(v).unwrap(); + let vcrypto = crypto.get_async(v).unwrap(); - test_envelope_round_trip(ev, vcrypto.clone(), None).await; - test_envelope_round_trip(ev, vcrypto.clone(), 
Some(vcrypto.random_shared_secret())) + test_envelope_round_trip(ev, &vcrypto, None).await; + test_envelope_round_trip(ev, &vcrypto, Some(vcrypto.random_shared_secret().await)) .await; - test_receipt_round_trip(ev, vcrypto).await; + test_receipt_round_trip(ev, &vcrypto).await; } } diff --git a/veilid-core/src/crypto/tests/test_types.rs b/veilid-core/src/crypto/tests/test_types.rs index 2636d916..2d21c892 100644 --- a/veilid-core/src/crypto/tests/test_types.rs +++ b/veilid-core/src/crypto/tests/test_types.rs @@ -6,10 +6,10 @@ static CHEEZBURGER: &str = "I can has cheezburger"; static EMPTY_KEY: [u8; PUBLIC_KEY_LENGTH] = [0u8; PUBLIC_KEY_LENGTH]; static EMPTY_KEY_SECRET: [u8; SECRET_KEY_LENGTH] = [0u8; SECRET_KEY_LENGTH]; -pub async fn test_generate_secret(vcrypto: CryptoSystemVersion) { +pub async fn test_generate_secret(vcrypto: &AsyncCryptoSystemGuard<'_>) { // Verify keys generate - let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); - let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); // Verify byte patterns are different between public and secret assert_ne!(dht_key.bytes, dht_key_secret.bytes); @@ -20,21 +20,24 @@ pub async fn test_generate_secret(vcrypto: CryptoSystemVersion) { assert_ne!(dht_key_secret, dht_key_secret2); } -pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) { +pub async fn test_sign_and_verify(vcrypto: &AsyncCryptoSystemGuard<'_>) { // Make two keys - let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); - let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); // Sign the same message twice let dht_sig = vcrypto 
.sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .await .unwrap(); trace!("dht_sig: {:?}", dht_sig); let dht_sig_b = vcrypto .sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .await .unwrap(); // Sign a second message let dht_sig_c = vcrypto .sign(&dht_key, &dht_key_secret, CHEEZBURGER.as_bytes()) + .await .unwrap(); trace!("dht_sig_c: {:?}", dht_sig_c); // Verify they are the same signature @@ -42,6 +45,7 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) { // Sign the same message with a different key let dht_sig2 = vcrypto .sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .await .unwrap(); // Verify a different key gives a different signature assert_ne!(dht_sig2, dht_sig_b); @@ -49,73 +53,93 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) { // Try using the wrong secret to sign let a1 = vcrypto .sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .await .unwrap(); let a2 = vcrypto .sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .await .unwrap(); let _b1 = vcrypto .sign(&dht_key, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .await .unwrap_err(); let _b2 = vcrypto .sign(&dht_key2, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .await .unwrap_err(); assert_ne!(a1, a2); assert_eq!( - vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1), + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1).await, Ok(true) ); assert_eq!( - vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2), + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2).await, Ok(true) ); assert_eq!( - vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a2), + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a2).await, Ok(false) ); assert_eq!( - vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a1), + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a1).await, Ok(false) ); // Try verifications that should work assert_eq!( - vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig), + vcrypto + .verify(&dht_key, 
LOREM_IPSUM.as_bytes(), &dht_sig) + .await, Ok(true) ); assert_eq!( - vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig_b), + vcrypto + .verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig_b) + .await, Ok(true) ); assert_eq!( - vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig2), + vcrypto + .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig2) + .await, Ok(true) ); assert_eq!( - vcrypto.verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig_c), + vcrypto + .verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig_c) + .await, Ok(true) ); // Try verifications that shouldn't work assert_eq!( - vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig), + vcrypto + .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig) + .await, Ok(false) ); assert_eq!( - vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2), + vcrypto + .verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2) + .await, Ok(false) ); assert_eq!( - vcrypto.verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c), + vcrypto + .verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c) + .await, Ok(false) ); assert_eq!( - vcrypto.verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig), + vcrypto + .verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig) + .await, Ok(false) ); } -pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) { +pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) { // Test default key let (dht_key, dht_key_secret) = (PublicKey::default(), SecretKey::default()); assert_eq!(dht_key.bytes, EMPTY_KEY); @@ -131,10 +155,10 @@ pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) { assert_eq!(dht_key_secret_string, dht_key_string); // Make different keys - let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); trace!("dht_key2: {:?}", dht_key2); trace!("dht_key_secret2: {:?}", dht_key_secret2); - let (dht_key3, _dht_key_secret3) = 
vcrypto.generate_keypair().into_split(); + let (dht_key3, _dht_key_secret3) = vcrypto.generate_keypair().await.into_split(); trace!("dht_key3: {:?}", dht_key3); trace!("_dht_key_secret3: {:?}", _dht_key_secret3); @@ -185,7 +209,7 @@ pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) { .is_err()); } -pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) { +pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) { let dht_key = PublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); let dht_key_secret = SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); @@ -194,7 +218,7 @@ pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) { assert_eq!(dht_key, dht_key_b); assert_eq!(dht_key_secret, dht_key_secret_b); - let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let e1 = dht_key.encode(); trace!("e1: {:?}", e1); @@ -229,7 +253,7 @@ pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) { assert!(f2.is_err()); } -pub async fn test_typed_convert(vcrypto: CryptoSystemVersion) { +pub async fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) { let tks1 = format!( "{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ", vcrypto.kind() @@ -261,15 +285,15 @@ pub async fn test_typed_convert(vcrypto: CryptoSystemVersion) { assert!(tks6x.ends_with(&tks6)); } -async fn test_hash(vcrypto: CryptoSystemVersion) { +async fn test_hash(vcrypto: &AsyncCryptoSystemGuard<'_>) { let mut s = BTreeSet::::new(); - let k1 = vcrypto.generate_hash("abc".as_bytes()); - let k2 = vcrypto.generate_hash("abcd".as_bytes()); - let k3 = vcrypto.generate_hash("".as_bytes()); - let k4 = vcrypto.generate_hash(" ".as_bytes()); - let k5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); - let k6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); + let k1 = vcrypto.generate_hash("abc".as_bytes()).await; + let k2 
= vcrypto.generate_hash("abcd".as_bytes()).await; + let k3 = vcrypto.generate_hash("".as_bytes()).await; + let k4 = vcrypto.generate_hash(" ".as_bytes()).await; + let k5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()).await; + let k6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()).await; s.insert(k1); s.insert(k2); @@ -279,12 +303,12 @@ async fn test_hash(vcrypto: CryptoSystemVersion) { s.insert(k6); assert_eq!(s.len(), 6); - let v1 = vcrypto.generate_hash("abc".as_bytes()); - let v2 = vcrypto.generate_hash("abcd".as_bytes()); - let v3 = vcrypto.generate_hash("".as_bytes()); - let v4 = vcrypto.generate_hash(" ".as_bytes()); - let v5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); - let v6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); + let v1 = vcrypto.generate_hash("abc".as_bytes()).await; + let v2 = vcrypto.generate_hash("abcd".as_bytes()).await; + let v3 = vcrypto.generate_hash("".as_bytes()).await; + let v4 = vcrypto.generate_hash(" ".as_bytes()).await; + let v5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()).await; + let v6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()).await; assert_eq!(k1, v1); assert_eq!(k2, v2); @@ -293,24 +317,24 @@ async fn test_hash(vcrypto: CryptoSystemVersion) { assert_eq!(k5, v5); assert_eq!(k6, v6); - vcrypto.validate_hash("abc".as_bytes(), &v1); - vcrypto.validate_hash("abcd".as_bytes(), &v2); - vcrypto.validate_hash("".as_bytes(), &v3); - vcrypto.validate_hash(" ".as_bytes(), &v4); - vcrypto.validate_hash(LOREM_IPSUM.as_bytes(), &v5); - vcrypto.validate_hash(CHEEZBURGER.as_bytes(), &v6); + vcrypto.validate_hash("abc".as_bytes(), &v1).await; + vcrypto.validate_hash("abcd".as_bytes(), &v2).await; + vcrypto.validate_hash("".as_bytes(), &v3).await; + vcrypto.validate_hash(" ".as_bytes(), &v4).await; + vcrypto.validate_hash(LOREM_IPSUM.as_bytes(), &v5).await; + vcrypto.validate_hash(CHEEZBURGER.as_bytes(), &v6).await; } -async fn test_operations(vcrypto: CryptoSystemVersion) { - let k1 = 
vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); - let k2 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); - let k3 = vcrypto.generate_hash("abc".as_bytes()); +async fn test_operations(vcrypto: &AsyncCryptoSystemGuard<'_>) { + let k1 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()).await; + let k2 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()).await; + let k3 = vcrypto.generate_hash("abc".as_bytes()).await; // Get distance - let d1 = vcrypto.distance(&k1, &k2); - let d2 = vcrypto.distance(&k2, &k1); - let d3 = vcrypto.distance(&k1, &k3); - let d4 = vcrypto.distance(&k2, &k3); + let d1 = vcrypto.distance(&k1, &k2).await; + let d2 = vcrypto.distance(&k2, &k1).await; + let d3 = vcrypto.distance(&k1, &k3).await; + let d4 = vcrypto.distance(&k2, &k3).await; trace!("d1={:?}", d1); trace!("d2={:?}", d2); @@ -393,15 +417,15 @@ pub async fn test_all() { // Test versions for v in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(v).unwrap(); + let vcrypto = crypto.get_async(v).unwrap(); - test_generate_secret(vcrypto.clone()).await; - test_sign_and_verify(vcrypto.clone()).await; - test_key_conversions(vcrypto.clone()).await; - test_encode_decode(vcrypto.clone()).await; - test_typed_convert(vcrypto.clone()).await; - test_hash(vcrypto.clone()).await; - test_operations(vcrypto).await; + test_generate_secret(&vcrypto).await; + test_sign_and_verify(&vcrypto).await; + test_key_conversions(&vcrypto).await; + test_encode_decode(&vcrypto).await; + test_typed_convert(&vcrypto).await; + test_hash(&vcrypto).await; + test_operations(&vcrypto).await; } crypto_tests_shutdown(api.clone()).await; diff --git a/veilid-core/src/crypto/types/byte_array_types.rs b/veilid-core/src/crypto/types/byte_array_types.rs index b2096a82..a176d5f9 100644 --- a/veilid-core/src/crypto/types/byte_array_types.rs +++ b/veilid-core/src/crypto/types/byte_array_types.rs @@ -78,7 +78,11 @@ where macro_rules! 
byte_array_type { ($name:ident, $size:expr, $encoded_size:expr) => { #[derive(Clone, Copy, Hash, PartialOrd, Ord, PartialEq, Eq)] - #[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(into_wasm_abi) + )] pub struct $name { pub bytes: [u8; $size], } @@ -280,17 +284,17 @@ macro_rules! byte_array_type { byte_array_type!(CryptoKey, CRYPTO_KEY_LENGTH, CRYPTO_KEY_LENGTH_ENCODED); -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type PublicKey = CryptoKey; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type SecretKey = CryptoKey; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type HashDigest = CryptoKey; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type SharedSecret = CryptoKey; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type RouteId = CryptoKey; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type CryptoKeyDistance = CryptoKey; byte_array_type!(Signature, SIGNATURE_LENGTH, SIGNATURE_LENGTH_ENCODED); diff --git a/veilid-core/src/crypto/types/keypair.rs b/veilid-core/src/crypto/types/keypair.rs index 4fafd4ea..8bdb6c9d 100644 --- a/veilid-core/src/crypto/types/keypair.rs +++ b/veilid-core/src/crypto/types/keypair.rs @@ -2,7 +2,7 @@ use super::*; #[derive(Clone, Copy, Default, PartialOrd, Ord, PartialEq, Eq, Hash)] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi) )] diff --git a/veilid-core/src/crypto/types/mod.rs 
b/veilid-core/src/crypto/types/mod.rs index e3f24a81..536df17b 100644 --- a/veilid-core/src/crypto/types/mod.rs +++ b/veilid-core/src/crypto/types/mod.rs @@ -6,7 +6,7 @@ use core::fmt; use core::hash::Hash; /// Cryptography version fourcc code -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type CryptoKind = FourCC; /// Sort best crypto kinds first @@ -52,24 +52,24 @@ pub use crypto_typed::*; pub use crypto_typed_group::*; pub use keypair::*; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedKey = CryptoTyped; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSecret = CryptoTyped; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedKeyPair = CryptoTyped; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSignature = CryptoTyped; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSharedSecret = CryptoTyped; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedKeyGroup = CryptoTypedGroup; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSecretGroup = CryptoTypedGroup; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedKeyPairGroup = CryptoTypedGroup; -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSignatureGroup = CryptoTypedGroup; -#[cfg_attr(target_arch 
= "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type TypedSharedSecretGroup = CryptoTypedGroup; diff --git a/veilid-core/src/crypto/vld0/mod.rs b/veilid-core/src/crypto/vld0/mod.rs index 62cf90c0..da94e0bd 100644 --- a/veilid-core/src/crypto/vld0/mod.rs +++ b/veilid-core/src/crypto/vld0/mod.rs @@ -47,14 +47,13 @@ pub fn vld0_generate_keypair() -> KeyPair { } /// V0 CryptoSystem -#[derive(Clone)] pub struct CryptoSystemVLD0 { - crypto: Crypto, + registry: VeilidComponentRegistry, } impl CryptoSystemVLD0 { - pub fn new(crypto: Crypto) -> Self { - Self { crypto } + pub fn new(registry: VeilidComponentRegistry) -> Self { + Self { registry } } } @@ -64,14 +63,14 @@ impl CryptoSystem for CryptoSystemVLD0 { CRYPTO_KIND_VLD0 } - fn crypto(&self) -> Crypto { - self.crypto.clone() + fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> { + self.registry.lookup::().unwrap() } // Cached Operations #[instrument(level = "trace", skip_all)] fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { - self.crypto + self.crypto() .cached_dh_internal::(self, key, secret) } diff --git a/veilid-core/src/intf/mod.rs b/veilid-core/src/intf/mod.rs index af238bee..226fad1d 100644 --- a/veilid-core/src/intf/mod.rs +++ b/veilid-core/src/intf/mod.rs @@ -1,12 +1,12 @@ use super::*; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] mod wasm; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] pub use wasm::*; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod native; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] pub use native::*; pub static KNOWN_PROTECTED_STORE_KEYS: [&str; 2] = ["device_encryption_key", "_test_key"]; diff --git a/veilid-core/src/intf/native/block_store.rs b/veilid-core/src/intf/native/block_store.rs index 
a280b65f..d313dde7 100644 --- a/veilid-core/src/intf/native/block_store.rs +++ b/veilid-core/src/intf/native/block_store.rs @@ -4,31 +4,37 @@ struct BlockStoreInner { // } -#[derive(Clone)] -pub struct BlockStore { - event_bus: EventBus, - config: VeilidConfig, - inner: Arc>, +impl fmt::Debug for BlockStoreInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BlockStoreInner").finish() + } } +#[derive(Debug)] +pub struct BlockStore { + registry: VeilidComponentRegistry, + inner: Mutex, +} + +impl_veilid_component!(BlockStore); + impl BlockStore { fn new_inner() -> BlockStoreInner { BlockStoreInner {} } - pub fn new(event_bus: EventBus, config: VeilidConfig) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { Self { - event_bus, - config, - inner: Arc::new(Mutex::new(Self::new_inner())), + registry, + inner: Mutex::new(Self::new_inner()), } } - pub async fn init(&self) -> EyreResult<()> { + async fn init_async(&self) -> EyreResult<()> { // Ensure permissions are correct // ensure_file_private_owner(&dbpath)?; Ok(()) } - pub async fn terminate(&self) {} + async fn terminate_async(&self) {} } diff --git a/veilid-core/src/intf/native/protected_store.rs b/veilid-core/src/intf/native/protected_store.rs index 042adebb..74e9b921 100644 --- a/veilid-core/src/intf/native/protected_store.rs +++ b/veilid-core/src/intf/native/protected_store.rs @@ -6,14 +6,20 @@ use std::path::Path; pub struct ProtectedStoreInner { keyring_manager: Option, } - -#[derive(Clone)] -pub struct ProtectedStore { - _event_bus: EventBus, - config: VeilidConfig, - inner: Arc>, +impl fmt::Debug for ProtectedStoreInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ProtectedStoreInner").finish() + } } +#[derive(Debug)] +pub struct ProtectedStore { + registry: VeilidComponentRegistry, + inner: Mutex, +} + +impl_veilid_component!(ProtectedStore); + impl ProtectedStore { fn new_inner() -> ProtectedStoreInner { 
ProtectedStoreInner { @@ -21,11 +27,10 @@ impl ProtectedStore { } } - pub fn new(event_bus: EventBus, config: VeilidConfig) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { Self { - _event_bus: event_bus, - config, - inner: Arc::new(Mutex::new(Self::new_inner())), + registry, + inner: Mutex::new(Self::new_inner()), } } @@ -42,9 +47,10 @@ impl ProtectedStore { } #[instrument(level = "debug", skip(self), err)] - pub async fn init(&self) -> EyreResult<()> { + async fn init_async(&self) -> EyreResult<()> { let delete = { - let c = self.config.get(); + let config = self.config(); + let c = config.get(); let mut inner = self.inner.lock(); if !c.protected_store.always_use_insecure_storage { // Attempt to open the secure keyring @@ -101,13 +107,22 @@ impl ProtectedStore { Ok(()) } + #[instrument(level = "debug", skip(self), err)] + async fn post_init_async(&self) -> EyreResult<()> { + Ok(()) + } + #[instrument(level = "debug", skip(self))] - pub async fn terminate(&self) { + async fn pre_terminate_async(&self) {} + + #[instrument(level = "debug", skip(self))] + async fn terminate_async(&self) { *self.inner.lock() = Self::new_inner(); } fn service_name(&self) -> String { - let c = self.config.get(); + let config = self.config(); + let c = config.get(); if c.namespace.is_empty() { "veilid_protected_store".to_owned() } else { diff --git a/veilid-core/src/intf/wasm/block_store.rs b/veilid-core/src/intf/wasm/block_store.rs index 1beda174..58c471f6 100644 --- a/veilid-core/src/intf/wasm/block_store.rs +++ b/veilid-core/src/intf/wasm/block_store.rs @@ -4,28 +4,34 @@ struct BlockStoreInner { // } -#[derive(Clone)] -pub struct BlockStore { - event_bus: EventBus, - config: VeilidConfig, - inner: Arc>, +impl fmt::Debug for BlockStoreInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BlockStoreInner").finish() + } } +#[derive(Debug)] +pub struct BlockStore { + registry: VeilidComponentRegistry, + inner: Mutex, +} + 
+impl_veilid_component!(BlockStore); + impl BlockStore { fn new_inner() -> BlockStoreInner { BlockStoreInner {} } - pub fn new(event_bus: EventBus, config: VeilidConfig) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { Self { - event_bus, - config, - inner: Arc::new(Mutex::new(Self::new_inner())), + registry, + inner: Mutex::new(Self::new_inner()), } } - pub async fn init(&self) -> EyreResult<()> { + async fn init_async(&self) -> EyreResult<()> { Ok(()) } - pub async fn terminate(&self) {} + async fn terminate_async(&self) {} } diff --git a/veilid-core/src/intf/wasm/protected_store.rs b/veilid-core/src/intf/wasm/protected_store.rs index 5e44b9fc..d42ada49 100644 --- a/veilid-core/src/intf/wasm/protected_store.rs +++ b/veilid-core/src/intf/wasm/protected_store.rs @@ -3,18 +3,16 @@ use data_encoding::BASE64URL_NOPAD; use web_sys::*; -#[derive(Clone)] +#[derive(Debug)] pub struct ProtectedStore { - _event_bus: EventBus, - config: VeilidConfig, + registry: VeilidComponentRegistry, } +impl_veilid_component!(ProtectedStore); + impl ProtectedStore { - pub fn new(event_bus: EventBus, config: VeilidConfig) -> Self { - Self { - _event_bus: event_bus, - config, - } + pub fn new(registry: VeilidComponentRegistry) -> Self { + Self { registry } } #[instrument(level = "trace", skip(self), err)] @@ -30,15 +28,24 @@ impl ProtectedStore { } #[instrument(level = "debug", skip(self), err)] - pub async fn init(&self) -> EyreResult<()> { + pub(crate) async fn init_async(&self) -> EyreResult<()> { + Ok(()) + } + + #[instrument(level = "debug", skip(self), err)] + pub(crate) async fn post_init_async(&self) -> EyreResult<()> { Ok(()) } #[instrument(level = "debug", skip(self))] - pub async fn terminate(&self) {} + pub(crate) async fn pre_terminate_async(&self) {} + + #[instrument(level = "debug", skip(self))] + pub(crate) async fn terminate_async(&self) {} fn browser_key_name(&self, key: &str) -> String { - let c = self.config.get(); + let config = self.config(); + let c 
= config.get(); if c.namespace.is_empty() { format!("__veilid_protected_store_{}", key) } else { diff --git a/veilid-core/src/lib.rs b/veilid-core/src/lib.rs index ed2211e4..46cd9d71 100644 --- a/veilid-core/src/lib.rs +++ b/veilid-core/src/lib.rs @@ -28,7 +28,7 @@ #![recursion_limit = "256"] cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { #[cfg(any(feature = "rt-async-std", feature = "rt-tokio"))] compile_error!("features \"rt-async-std\" and \"rt-tokio\" can not be specified for WASM"); } else { @@ -45,6 +45,7 @@ cfg_if::cfg_if! { extern crate alloc; mod attachment_manager; +mod component; mod core_context; mod crypto; mod intf; @@ -58,6 +59,8 @@ mod veilid_api; mod veilid_config; mod wasm_helpers; +pub(crate) use self::component::*; +pub(crate) use self::core_context::RegisteredComponents; pub use self::core_context::{api_startup, api_startup_config, api_startup_json, UpdateCallback}; pub use self::logging::{ ApiTracingLayer, VeilidLayerFilter, DEFAULT_LOG_FACILITIES_ENABLED_LIST, diff --git a/veilid-core/src/logging/api_tracing_layer.rs b/veilid-core/src/logging/api_tracing_layer.rs index af7be16a..a460a477 100644 --- a/veilid-core/src/logging/api_tracing_layer.rs +++ b/veilid-core/src/logging/api_tracing_layer.rs @@ -2,7 +2,6 @@ use crate::core_context::*; use crate::veilid_api::*; use crate::*; use core::fmt::Write; -use once_cell::sync::OnceCell; use tracing_subscriber::*; struct ApiTracingLayerInner { @@ -21,11 +20,10 @@ struct ApiTracingLayerInner { /// with many copies of Veilid running. 
#[derive(Clone)] -pub struct ApiTracingLayer { - inner: Arc>>, -} +pub struct ApiTracingLayer {} -static API_LOGGER: OnceCell = OnceCell::new(); +static API_LOGGER_INNER: Mutex> = Mutex::new(None); +static API_LOGGER_ENABLED: AtomicBool = AtomicBool::new(false); impl ApiTracingLayer { /// Initialize an ApiTracingLayer singleton @@ -33,11 +31,7 @@ impl ApiTracingLayer { /// This must be inserted into your tracing subscriber before you /// call api_startup() or api_startup_json() if you are going to use api tracing. pub fn init() -> ApiTracingLayer { - API_LOGGER - .get_or_init(|| ApiTracingLayer { - inner: Arc::new(Mutex::new(None)), - }) - .clone() + ApiTracingLayer {} } fn new_inner() -> ApiTracingLayerInner { @@ -52,12 +46,7 @@ impl ApiTracingLayer { namespace: String, update_callback: UpdateCallback, ) -> VeilidAPIResult<()> { - let Some(api_logger) = API_LOGGER.get() else { - // Did not init, so skip this - return Ok(()); - }; - - let mut inner = api_logger.inner.lock(); + let mut inner = API_LOGGER_INNER.lock(); if inner.is_none() { *inner = Some(Self::new_inner()); } @@ -70,6 +59,9 @@ impl ApiTracingLayer { .unwrap() .update_callbacks .insert(key, update_callback); + + API_LOGGER_ENABLED.store(true, Ordering::Release); + return Ok(()); } @@ -79,28 +71,29 @@ impl ApiTracingLayer { namespace: String, ) -> VeilidAPIResult<()> { let key = (program_name, namespace); - if let Some(api_logger) = API_LOGGER.get() { - let mut inner = api_logger.inner.lock(); - if inner.is_none() { - apibail_not_initialized!(); - } - if inner - .as_mut() - .unwrap() - .update_callbacks - .remove(&key) - .is_none() - { - apibail_not_initialized!(); - } - if inner.as_mut().unwrap().update_callbacks.is_empty() { - *inner = None; - } + + let mut inner = API_LOGGER_INNER.lock(); + if inner.is_none() { + apibail_not_initialized!(); } + if inner + .as_mut() + .unwrap() + .update_callbacks + .remove(&key) + .is_none() + { + apibail_not_initialized!(); + } + if 
inner.as_mut().unwrap().update_callbacks.is_empty() { + *inner = None; + API_LOGGER_ENABLED.store(false, Ordering::Release); + } + Ok(()) } - fn emit_log(&self, inner: &mut ApiTracingLayerInner, meta: &Metadata<'_>, message: String) { + fn emit_log(&self, meta: &'static Metadata<'static>, message: String) { let level = *meta.level(); let target = meta.target(); let log_level = VeilidLogLevel::from_tracing_level(level); @@ -148,8 +141,10 @@ impl ApiTracingLayer { backtrace, })); - for cb in inner.update_callbacks.values() { - (cb)(log_update.clone()); + if let Some(inner) = &mut *API_LOGGER_INNER.lock() { + for cb in inner.update_callbacks.values() { + (cb)(log_update.clone()); + } } } } @@ -159,17 +154,23 @@ pub struct SpanDuration { end: Timestamp, } -fn simplify_file(file: &str) -> String { - let path = std::path::Path::new(file); - let path_component_count = path.iter().count(); - if path.ends_with("mod.rs") && path_component_count >= 2 { - let outpath: std::path::PathBuf = path.iter().skip(path_component_count - 2).collect(); - outpath.to_string_lossy().to_string() - } else if let Some(filename) = path.file_name() { - filename.to_string_lossy().to_string() - } else { - file.to_string() - } +fn simplify_file(file: &'static str) -> &'static str { + file.static_transform(|file| { + let out = { + let path = std::path::Path::new(file); + let path_component_count = path.iter().count(); + if path.ends_with("mod.rs") && path_component_count >= 2 { + let outpath: std::path::PathBuf = + path.iter().skip(path_component_count - 2).collect(); + outpath.to_string_lossy().to_string() + } else if let Some(filename) = path.file_name() { + filename.to_string_lossy().to_string() + } else { + file.to_string() + } + }; + out.to_static_str() + }) } impl registry::LookupSpan<'a>> Layer for ApiTracingLayer { @@ -179,47 +180,51 @@ impl registry::LookupSpan<'a>> Layer for ApiTracingLa id: &tracing::Id, ctx: layer::Context<'_, S>, ) { - if let Some(_inner) = &mut *self.inner.lock() { - 
let mut new_debug_record = StringRecorder::new(); - attrs.record(&mut new_debug_record); + if !API_LOGGER_ENABLED.load(Ordering::Acquire) { + // Optimization if api logger has no callbacks + return; + } - if let Some(span_ref) = ctx.span(id) { + let mut new_debug_record = StringRecorder::new(); + attrs.record(&mut new_debug_record); + + if let Some(span_ref) = ctx.span(id) { + span_ref + .extensions_mut() + .insert::(new_debug_record); + if crate::DURATION_LOG_FACILITIES.contains(&attrs.metadata().target()) { span_ref .extensions_mut() - .insert::(new_debug_record); - if crate::DURATION_LOG_FACILITIES.contains(&attrs.metadata().target()) { - span_ref - .extensions_mut() - .insert::(SpanDuration { - start: Timestamp::now(), - end: Timestamp::default(), - }); - } + .insert::(SpanDuration { + start: Timestamp::now(), + end: Timestamp::default(), + }); } } } fn on_close(&self, id: span::Id, ctx: layer::Context<'_, S>) { - if let Some(inner) = &mut *self.inner.lock() { - if let Some(span_ref) = ctx.span(&id) { - if let Some(span_duration) = span_ref.extensions_mut().get_mut::() { - span_duration.end = Timestamp::now(); - let duration = span_duration.end.saturating_sub(span_duration.start); - let meta = span_ref.metadata(); - self.emit_log( - inner, - meta, - format!( - " {}{}: duration={}", - span_ref - .parent() - .map(|p| format!("{}::", p.name())) - .unwrap_or_default(), - span_ref.name(), - format_opt_ts(Some(duration)) - ), - ); - } + if !API_LOGGER_ENABLED.load(Ordering::Acquire) { + // Optimization if api logger has no callbacks + return; + } + if let Some(span_ref) = ctx.span(&id) { + if let Some(span_duration) = span_ref.extensions_mut().get_mut::() { + span_duration.end = Timestamp::now(); + let duration = span_duration.end.saturating_sub(span_duration.start); + let meta = span_ref.metadata(); + self.emit_log( + meta, + format!( + " {}{}: duration={}", + span_ref + .parent() + .map(|p| format!("{}::", p.name())) + .unwrap_or_default(), + span_ref.name(), + 
format_opt_ts(Some(duration)) + ), + ); } } } @@ -230,22 +235,26 @@ impl registry::LookupSpan<'a>> Layer for ApiTracingLa values: &tracing::span::Record<'_>, ctx: layer::Context<'_, S>, ) { - if let Some(_inner) = &mut *self.inner.lock() { - if let Some(span_ref) = ctx.span(id) { - if let Some(debug_record) = span_ref.extensions_mut().get_mut::() { - values.record(debug_record); - } + if !API_LOGGER_ENABLED.load(Ordering::Acquire) { + // Optimization if api logger has no callbacks + return; + } + if let Some(span_ref) = ctx.span(id) { + if let Some(debug_record) = span_ref.extensions_mut().get_mut::() { + values.record(debug_record); } } } fn on_event(&self, event: &tracing::Event<'_>, _ctx: layer::Context<'_, S>) { - if let Some(inner) = &mut *self.inner.lock() { - let mut recorder = StringRecorder::new(); - event.record(&mut recorder); - let meta = event.metadata(); - self.emit_log(inner, meta, recorder.to_string()); + if !API_LOGGER_ENABLED.load(Ordering::Acquire) { + // Optimization if api logger has no callbacks + return; } + let mut recorder = StringRecorder::new(); + event.record(&mut recorder); + let meta = event.metadata(); + self.emit_log(meta, recorder.to_string()); } } diff --git a/veilid-core/src/logging/facilities.rs b/veilid-core/src/logging/facilities.rs index 5adc608f..399dd15e 100644 --- a/veilid-core/src/logging/facilities.rs +++ b/veilid-core/src/logging/facilities.rs @@ -151,42 +151,6 @@ macro_rules! log_client_api { } } -#[macro_export] -macro_rules! 
log_network_result { - (error $text:expr) => {error!( - target: "network_result", - "{}", - $text, - )}; - (error $fmt:literal, $($arg:expr),+) => { - error!(target: "network_result", $fmt, $($arg),+); - }; - (warn $text:expr) => {warn!( - target: "network_result", - "{}", - $text, - )}; - (warn $fmt:literal, $($arg:expr),+) => { - warn!(target:"network_result", $fmt, $($arg),+); - }; - (debug $text:expr) => {debug!( - target: "network_result", - "{}", - $text, - )}; - (debug $fmt:literal, $($arg:expr),+) => { - debug!(target:"network_result", $fmt, $($arg),+); - }; - ($text:expr) => {trace!( - target: "network_result", - "{}", - $text, - )}; - ($fmt:literal, $($arg:expr),+) => { - trace!(target:"network_result", $fmt, $($arg),+); - } -} - #[macro_export] macro_rules! log_rpc { (error $text:expr) => { error!( @@ -421,6 +385,14 @@ macro_rules! log_crypto { (warn $fmt:literal, $($arg:expr),+) => { warn!(target:"crypto", $fmt, $($arg),+); }; + (debug $text:expr) => { debug!( + target: "crypto", + "{}", + $text, + )}; + (debug $fmt:literal, $($arg:expr),+) => { + debug!(target:"crypto", $fmt, $($arg),+); + }; ($text:expr) => {trace!( target: "crypto", "{}", diff --git a/veilid-core/src/network_manager/address_check.rs b/veilid-core/src/network_manager/address_check.rs index 8a84d832..6b9981e3 100644 --- a/veilid-core/src/network_manager/address_check.rs +++ b/veilid-core/src/network_manager/address_check.rs @@ -23,6 +23,7 @@ pub const ADDRESS_CHECK_CACHE_SIZE: usize = 10; // TimestampDuration::new(3_600_000_000_u64); // 60 minutes /// Address checker config +#[derive(Debug)] pub struct AddressCheckConfig { pub detect_address_changes: bool, pub ip6_prefix_size: usize, @@ -44,6 +45,22 @@ pub struct AddressCheck { address_consistency_table: BTreeMap>, } +impl fmt::Debug for AddressCheck { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AddressCheck") + .field("config", &self.config) + //.field("net", &self.net) + 
.field("current_network_class", &self.current_network_class) + .field("current_addresses", &self.current_addresses) + .field( + "address_inconsistency_table", + &self.address_inconsistency_table, + ) + .field("address_consistency_table", &self.address_consistency_table) + .finish() + } +} + impl AddressCheck { pub fn new(config: AddressCheckConfig, net: Network) -> Self { Self { diff --git a/veilid-core/src/network_manager/address_filter.rs b/veilid-core/src/network_manager/address_filter.rs index 2f6919c6..b3655adb 100644 --- a/veilid-core/src/network_manager/address_filter.rs +++ b/veilid-core/src/network_manager/address_filter.rs @@ -32,63 +32,27 @@ struct AddressFilterInner { dial_info_failures: BTreeMap, } -struct AddressFilterUnlockedInner { +#[derive(Debug)] +pub(crate) struct AddressFilter { + registry: VeilidComponentRegistry, + inner: Mutex, max_connections_per_ip4: usize, max_connections_per_ip6_prefix: usize, max_connections_per_ip6_prefix_size: usize, max_connection_frequency_per_min: usize, punishment_duration_min: usize, dial_info_failure_duration_min: usize, - routing_table: RoutingTable, } -impl fmt::Debug for AddressFilterUnlockedInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AddressFilterUnlockedInner") - .field("max_connections_per_ip4", &self.max_connections_per_ip4) - .field( - "max_connections_per_ip6_prefix", - &self.max_connections_per_ip6_prefix, - ) - .field( - "max_connections_per_ip6_prefix_size", - &self.max_connections_per_ip6_prefix_size, - ) - .field( - "max_connection_frequency_per_min", - &self.max_connection_frequency_per_min, - ) - .field("punishment_duration_min", &self.punishment_duration_min) - .field( - "dial_info_failure_duration_min", - &self.dial_info_failure_duration_min, - ) - .finish() - } -} - -#[derive(Clone, Debug)] -pub(crate) struct AddressFilter { - unlocked_inner: Arc, - inner: Arc>, -} +impl_veilid_component_registry_accessor!(AddressFilter); impl AddressFilter { - pub fn 
new(config: VeilidConfig, routing_table: RoutingTable) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { + let config = registry.config(); let c = config.get(); Self { - unlocked_inner: Arc::new(AddressFilterUnlockedInner { - max_connections_per_ip4: c.network.max_connections_per_ip4 as usize, - max_connections_per_ip6_prefix: c.network.max_connections_per_ip6_prefix as usize, - max_connections_per_ip6_prefix_size: c.network.max_connections_per_ip6_prefix_size - as usize, - max_connection_frequency_per_min: c.network.max_connection_frequency_per_min - as usize, - punishment_duration_min: PUNISHMENT_DURATION_MIN, - dial_info_failure_duration_min: DIAL_INFO_FAILURE_DURATION_MIN, - routing_table, - }), - inner: Arc::new(Mutex::new(AddressFilterInner { + registry, + inner: Mutex::new(AddressFilterInner { conn_count_by_ip4: BTreeMap::new(), conn_count_by_ip6_prefix: BTreeMap::new(), conn_timestamps_by_ip4: BTreeMap::new(), @@ -97,7 +61,14 @@ impl AddressFilter { punishments_by_ip6_prefix: BTreeMap::new(), punishments_by_node_id: BTreeMap::new(), dial_info_failures: BTreeMap::new(), - })), + }), + max_connections_per_ip4: c.network.max_connections_per_ip4 as usize, + max_connections_per_ip6_prefix: c.network.max_connections_per_ip6_prefix as usize, + max_connections_per_ip6_prefix_size: c.network.max_connections_per_ip6_prefix_size + as usize, + max_connection_frequency_per_min: c.network.max_connection_frequency_per_min as usize, + punishment_duration_min: PUNISHMENT_DURATION_MIN, + dial_info_failure_duration_min: DIAL_INFO_FAILURE_DURATION_MIN, } } @@ -109,7 +80,7 @@ impl AddressFilter { inner.dial_info_failures.clear(); } - fn purge_old_timestamps(&self, inner: &mut AddressFilterInner, cur_ts: Timestamp) { + fn purge_old_timestamps_inner(&self, inner: &mut AddressFilterInner, cur_ts: Timestamp) { // v4 { let mut dead_keys = Vec::::new(); @@ -151,7 +122,7 @@ impl AddressFilter { for (key, value) in &mut inner.punishments_by_ip4 { // Drop punishments 
older than the punishment duration if cur_ts.as_u64().saturating_sub(value.timestamp.as_u64()) - > self.unlocked_inner.punishment_duration_min as u64 * 60_000_000u64 + > self.punishment_duration_min as u64 * 60_000_000u64 { dead_keys.push(*key); } @@ -167,7 +138,7 @@ impl AddressFilter { for (key, value) in &mut inner.punishments_by_ip6_prefix { // Drop punishments older than the punishment duration if cur_ts.as_u64().saturating_sub(value.timestamp.as_u64()) - > self.unlocked_inner.punishment_duration_min as u64 * 60_000_000u64 + > self.punishment_duration_min as u64 * 60_000_000u64 { dead_keys.push(*key); } @@ -183,7 +154,7 @@ impl AddressFilter { for (key, value) in &mut inner.punishments_by_node_id { // Drop punishments older than the punishment duration if cur_ts.as_u64().saturating_sub(value.timestamp.as_u64()) - > self.unlocked_inner.punishment_duration_min as u64 * 60_000_000u64 + > self.punishment_duration_min as u64 * 60_000_000u64 { dead_keys.push(*key); } @@ -192,7 +163,7 @@ impl AddressFilter { warn!("Forgiving: {}", key); inner.punishments_by_node_id.remove(&key); // make the entry alive again if it's still here - if let Ok(Some(nr)) = self.unlocked_inner.routing_table.lookup_node_ref(key) { + if let Ok(Some(nr)) = self.routing_table().lookup_node_ref(key) { nr.operate_mut(|_rti, e| e.set_punished(None)); } } @@ -203,7 +174,7 @@ impl AddressFilter { for (key, value) in &mut inner.dial_info_failures { // Drop failures older than the failure duration if cur_ts.as_u64().saturating_sub(value.as_u64()) - > self.unlocked_inner.dial_info_failure_duration_min as u64 * 60_000_000u64 + > self.dial_info_failure_duration_min as u64 * 60_000_000u64 { dead_keys.push(key.clone()); } @@ -241,10 +212,7 @@ impl AddressFilter { pub fn is_ip_addr_punished(&self, addr: IpAddr) -> bool { let inner = self.inner.lock(); - let ipblock = ip_to_ipblock( - self.unlocked_inner.max_connections_per_ip6_prefix_size, - addr, - ); + let ipblock = 
ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr); self.is_ip_addr_punished_inner(&inner, ipblock) } @@ -273,8 +241,9 @@ impl AddressFilter { let mut inner = self.inner.lock(); inner.punishments_by_ip4.clear(); inner.punishments_by_ip6_prefix.clear(); - self.unlocked_inner.routing_table.clear_punishments(); inner.punishments_by_node_id.clear(); + + self.routing_table().clear_punishments(); } pub fn punish_ip_addr(&self, addr: IpAddr, reason: PunishmentReason) { @@ -282,10 +251,7 @@ impl AddressFilter { let timestamp = Timestamp::now(); let punishment = Punishment { reason, timestamp }; - let ipblock = ip_to_ipblock( - self.unlocked_inner.max_connections_per_ip6_prefix_size, - addr, - ); + let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr); let mut inner = self.inner.lock(); match ipblock { @@ -315,7 +281,7 @@ impl AddressFilter { } pub fn punish_node_id(&self, node_id: TypedKey, reason: PunishmentReason) { - if let Ok(Some(nr)) = self.unlocked_inner.routing_table.lookup_node_ref(node_id) { + if let Ok(Some(nr)) = self.routing_table().lookup_node_ref(node_id) { // make the entry dead if it's punished nr.operate_mut(|_rti, e| e.set_punished(Some(reason))); } @@ -338,14 +304,14 @@ impl AddressFilter { #[instrument(parent = None, level = "trace", skip_all, err)] pub async fn address_filter_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, ) -> EyreResult<()> { // let mut inner = self.inner.lock(); - self.purge_old_timestamps(&mut inner, cur_ts); + self.purge_old_timestamps_inner(&mut inner, cur_ts); self.purge_old_punishments(&mut inner, cur_ts); Ok(()) @@ -354,23 +320,20 @@ impl AddressFilter { pub fn add_connection(&self, addr: IpAddr) -> Result<(), AddressFilterError> { let inner = &mut *self.inner.lock(); - let ipblock = ip_to_ipblock( - self.unlocked_inner.max_connections_per_ip6_prefix_size, - addr, - ); + let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr); 
if self.is_ip_addr_punished_inner(inner, ipblock) { return Err(AddressFilterError::Punished); } let ts = Timestamp::now(); - self.purge_old_timestamps(inner, ts); + self.purge_old_timestamps_inner(inner, ts); match ipblock { IpAddr::V4(v4) => { // See if we have too many connections from this ip block let cnt = inner.conn_count_by_ip4.entry(v4).or_default(); - assert!(*cnt <= self.unlocked_inner.max_connections_per_ip4); - if *cnt == self.unlocked_inner.max_connections_per_ip4 { + assert!(*cnt <= self.max_connections_per_ip4); + if *cnt == self.max_connections_per_ip4 { warn!("Address filter count exceeded: {:?}", v4); return Err(AddressFilterError::CountExceeded); } @@ -380,8 +343,8 @@ impl AddressFilter { // keep timestamps that are less than a minute away ts.saturating_sub(*v) < TimestampDuration::new(60_000_000u64) }); - assert!(tstamps.len() <= self.unlocked_inner.max_connection_frequency_per_min); - if tstamps.len() == self.unlocked_inner.max_connection_frequency_per_min { + assert!(tstamps.len() <= self.max_connection_frequency_per_min); + if tstamps.len() == self.max_connection_frequency_per_min { warn!("Address filter rate exceeded: {:?}", v4); return Err(AddressFilterError::RateExceeded); } @@ -393,15 +356,15 @@ impl AddressFilter { IpAddr::V6(v6) => { // See if we have too many connections from this ip block let cnt = inner.conn_count_by_ip6_prefix.entry(v6).or_default(); - assert!(*cnt <= self.unlocked_inner.max_connections_per_ip6_prefix); - if *cnt == self.unlocked_inner.max_connections_per_ip6_prefix { + assert!(*cnt <= self.max_connections_per_ip6_prefix); + if *cnt == self.max_connections_per_ip6_prefix { warn!("Address filter count exceeded: {:?}", v6); return Err(AddressFilterError::CountExceeded); } // See if this ip block has connected too frequently let tstamps = inner.conn_timestamps_by_ip6_prefix.entry(v6).or_default(); - assert!(tstamps.len() <= self.unlocked_inner.max_connection_frequency_per_min); - if tstamps.len() == 
self.unlocked_inner.max_connection_frequency_per_min { + assert!(tstamps.len() <= self.max_connection_frequency_per_min); + if tstamps.len() == self.max_connection_frequency_per_min { warn!("Address filter rate exceeded: {:?}", v6); return Err(AddressFilterError::RateExceeded); } @@ -414,16 +377,13 @@ impl AddressFilter { Ok(()) } - pub fn remove_connection(&mut self, addr: IpAddr) -> Result<(), AddressNotInTableError> { + pub fn remove_connection(&self, addr: IpAddr) -> Result<(), AddressNotInTableError> { let mut inner = self.inner.lock(); - let ipblock = ip_to_ipblock( - self.unlocked_inner.max_connections_per_ip6_prefix_size, - addr, - ); + let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr); let ts = Timestamp::now(); - self.purge_old_timestamps(&mut inner, ts); + self.purge_old_timestamps_inner(&mut inner, ts); match ipblock { IpAddr::V4(v4) => { diff --git a/veilid-core/src/network_manager/connection_manager.rs b/veilid-core/src/network_manager/connection_manager.rs index e385cc2f..924d3dae 100644 --- a/veilid-core/src/network_manager/connection_manager.rs +++ b/veilid-core/src/network_manager/connection_manager.rs @@ -57,17 +57,16 @@ struct ConnectionManagerInner { async_processor_jh: Option>, stop_source: Option, protected_addresses: HashMap, - reconnection_processor: DeferredStreamProcessor, } struct ConnectionManagerArc { - network_manager: NetworkManager, connection_initial_timeout_ms: u32, connection_inactivity_timeout_ms: u32, connection_table: ConnectionTable, address_lock_table: AsyncTagLockTable, startup_lock: StartupLock, inner: Mutex>, + reconnection_processor: DeferredStreamProcessor, } impl core::fmt::Debug for ConnectionManagerArc { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { @@ -79,15 +78,17 @@ impl core::fmt::Debug for ConnectionManagerArc { #[derive(Debug, Clone)] pub struct ConnectionManager { + registry: VeilidComponentRegistry, arc: Arc, } 
+impl_veilid_component_registry_accessor!(ConnectionManager); + impl ConnectionManager { fn new_inner( stop_source: StopSource, sender: flume::Sender, async_processor_jh: MustJoinHandle<()>, - reconnection_processor: DeferredStreamProcessor, ) -> ConnectionManagerInner { ConnectionManagerInner { next_id: 0.into(), @@ -95,11 +96,10 @@ impl ConnectionManager { sender, async_processor_jh: Some(async_processor_jh), protected_addresses: HashMap::new(), - reconnection_processor, } } - fn new_arc(network_manager: NetworkManager) -> ConnectionManagerArc { - let config = network_manager.config(); + fn new_arc(registry: VeilidComponentRegistry) -> ConnectionManagerArc { + let config = registry.config(); let (connection_initial_timeout_ms, connection_inactivity_timeout_ms) = { let c = config.get(); ( @@ -107,28 +107,24 @@ impl ConnectionManager { c.network.connection_inactivity_timeout_ms, ) }; - let address_filter = network_manager.address_filter(); ConnectionManagerArc { - network_manager, + reconnection_processor: DeferredStreamProcessor::new(), connection_initial_timeout_ms, connection_inactivity_timeout_ms, - connection_table: ConnectionTable::new(config, address_filter), + connection_table: ConnectionTable::new(registry), address_lock_table: AsyncTagLockTable::new(), startup_lock: StartupLock::new(), inner: Mutex::new(None), } } - pub fn new(network_manager: NetworkManager) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { Self { - arc: Arc::new(Self::new_arc(network_manager)), + arc: Arc::new(Self::new_arc(registry.clone())), + registry, } } - pub fn network_manager(&self) -> NetworkManager { - self.arc.network_manager.clone() - } - pub fn connection_inactivity_timeout_ms(&self) -> u32 { self.arc.connection_inactivity_timeout_ms } @@ -150,21 +146,17 @@ impl ConnectionManager { self.clone().async_processor(stop_source.token(), receiver), ); - // Spawn the reconnection processor - let mut reconnection_processor = DeferredStreamProcessor::new(); - 
reconnection_processor.init().await; - // Store in the inner object - let mut inner = self.arc.inner.lock(); - if inner.is_some() { - panic!("shouldn't start connection manager twice without shutting it down first"); + { + let mut inner = self.arc.inner.lock(); + if inner.is_some() { + panic!("shouldn't start connection manager twice without shutting it down first"); + } + *inner = Some(Self::new_inner(stop_source, sender, async_processor)); } - *inner = Some(Self::new_inner( - stop_source, - sender, - async_processor, - reconnection_processor, - )); + + // Spawn the reconnection processor + self.arc.reconnection_processor.init().await; guard.success(); @@ -178,6 +170,10 @@ impl ConnectionManager { return; }; + // Stop the reconnection processor + log_net!(debug "stopping reconnection processor task"); + self.arc.reconnection_processor.terminate().await; + // Remove the inner from the lock let mut inner = { let mut inner_lock = self.arc.inner.lock(); @@ -188,9 +184,6 @@ impl ConnectionManager { } } }; - // Stop the reconnection processor - log_net!(debug "stopping reconnection processor task"); - inner.reconnection_processor.terminate().await; // Stop all the connections and the async processor log_net!(debug "stopping async processor task"); drop(inner.stop_source.take()); @@ -452,13 +445,14 @@ impl ConnectionManager { // Attempt new connection let mut retry_count = NEW_CONNECTION_RETRY_COUNT; + let network_manager = self.network_manager(); let prot_conn = network_result_try!(loop { let result_net_res = ProtocolNetworkConnection::connect( preferred_local_address, &dial_info, self.arc.connection_initial_timeout_ms, - self.network_manager().address_filter(), + network_manager.address_filter(), ) .await; match result_net_res { @@ -574,7 +568,7 @@ impl ConnectionManager { // Called by low-level network when any connection-oriented protocol connection appears // either from incoming connections. 
- #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub(super) async fn on_accepted_protocol_network_connection( &self, protocol_connection: ProtocolNetworkConnection, @@ -660,7 +654,7 @@ impl ConnectionManager { // Reconnect the protected connection immediately if reconnect { if let Some(dial_info) = conn.dial_info() { - self.spawn_reconnector_inner(inner, dial_info); + self.spawn_reconnector(dial_info); } else { log_net!(debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr); } @@ -675,9 +669,9 @@ impl ConnectionManager { } } - fn spawn_reconnector_inner(&self, inner: &mut ConnectionManagerInner, dial_info: DialInfo) { + fn spawn_reconnector(&self, dial_info: DialInfo) { let this = self.clone(); - inner.reconnection_processor.add( + self.arc.reconnection_processor.add( Box::pin(futures_util::stream::once(async { dial_info })), move |dial_info| { let this = this.clone(); diff --git a/veilid-core/src/network_manager/connection_table.rs b/veilid-core/src/network_manager/connection_table.rs index 16381ca8..a838bb64 100644 --- a/veilid-core/src/network_manager/connection_table.rs +++ b/veilid-core/src/network_manager/connection_table.rs @@ -44,17 +44,20 @@ struct ConnectionTableInner { protocol_index_by_id: BTreeMap, id_by_flow: BTreeMap, ids_by_remote: BTreeMap>, - address_filter: AddressFilter, priority_flows: Vec>, } #[derive(Debug)] pub struct ConnectionTable { - inner: Arc>, + registry: VeilidComponentRegistry, + inner: Mutex, } +impl_veilid_component_registry_accessor!(ConnectionTable); + impl ConnectionTable { - pub fn new(config: VeilidConfig, address_filter: AddressFilter) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { + let config = registry.config(); let max_connections = { let c = config.get(); vec![ @@ -64,7 +67,8 @@ impl ConnectionTable { ] }; Self { - 
inner: Arc::new(Mutex::new(ConnectionTableInner { + registry, + inner: Mutex::new(ConnectionTableInner { conn_by_id: max_connections .iter() .map(|_| LruCache::new_unbounded()) @@ -72,13 +76,12 @@ impl ConnectionTable { protocol_index_by_id: BTreeMap::new(), id_by_flow: BTreeMap::new(), ids_by_remote: BTreeMap::new(), - address_filter, priority_flows: max_connections .iter() .map(|x| LruCache::new(x * PRIORITY_FLOW_PERCENTAGE / 100)) .collect(), max_connections, - })), + }), } } @@ -168,6 +171,7 @@ impl ConnectionTable { /// when it is getting full while adding a new connection. /// Factored out into its own function for clarity. fn lru_out_connection_inner( + &self, inner: &mut ConnectionTableInner, protocol_index: usize, ) -> Result, ()> { @@ -198,7 +202,7 @@ impl ConnectionTable { lruk }; - let dead_conn = Self::remove_connection_records(inner, dead_k); + let dead_conn = self.remove_connection_records_inner(inner, dead_k); Ok(Some(dead_conn)) } @@ -235,20 +239,21 @@ impl ConnectionTable { // Filter by ip for connection limits let ip_addr = flow.remote_address().ip_addr(); - match inner.address_filter.add_connection(ip_addr) { - Ok(()) => {} - Err(e) => { - // Return the connection in the error to be disposed of - return Err(ConnectionTableAddError::address_filter( - network_connection, - e, - )); - } - }; + if let Err(e) = self + .network_manager() + .address_filter() + .add_connection(ip_addr) + { + // Return the connection in the error to be disposed of + return Err(ConnectionTableAddError::address_filter( + network_connection, + e, + )); + } // if we have reached the maximum number of connections per protocol type // then drop the least recently used connection that is not protected or referenced - let out_conn = match Self::lru_out_connection_inner(&mut inner, protocol_index) { + let out_conn = match self.lru_out_connection_inner(&mut inner, protocol_index) { Ok(v) => v, Err(()) => { return Err(ConnectionTableAddError::table_full(network_connection)); @@ 
-437,7 +442,8 @@ impl ConnectionTable { } #[instrument(level = "trace", skip(inner), ret)] - fn remove_connection_records( + fn remove_connection_records_inner( + &self, inner: &mut ConnectionTableInner, id: NetworkConnectionId, ) -> NetworkConnection { @@ -462,8 +468,8 @@ impl ConnectionTable { } // address_filter let ip_addr = remote.socket_addr().ip(); - inner - .address_filter + self.network_manager() + .address_filter() .remove_connection(ip_addr) .expect("Inconsistency in connection table"); conn @@ -477,7 +483,7 @@ impl ConnectionTable { if !inner.conn_by_id[protocol_index].contains_key(&id) { return None; } - let conn = Self::remove_connection_records(&mut inner, id); + let conn = self.remove_connection_records_inner(&mut inner, id); Some(conn) } diff --git a/veilid-core/src/network_manager/direct_boot.rs b/veilid-core/src/network_manager/direct_boot.rs index 608af287..a095970f 100644 --- a/veilid-core/src/network_manager/direct_boot.rs +++ b/veilid-core/src/network_manager/direct_boot.rs @@ -35,7 +35,7 @@ impl NetworkManager { // Direct bootstrap request #[instrument(level = "trace", target = "net", err, skip(self))] pub async fn boot_request(&self, dial_info: DialInfo) -> EyreResult>> { - let timeout_ms = self.with_config(|c| c.network.rpc.timeout_ms); + let timeout_ms = self.config().with(|c| c.network.rpc.timeout_ms); // Send boot magic to requested peer address let data = BOOT_MAGIC.to_vec(); diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 86ef95da..d8ecf39f 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -1,8 +1,8 @@ -use crate::*; +use super::*; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod native; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] mod wasm; mod address_check; @@ -36,16 +36,15 @@ use connection_handle::*; use crypto::*; use 
futures_util::stream::FuturesUnordered; use hashlink::LruCache; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] use native::*; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] pub use native::{MAX_CAPABILITIES, PUBLIC_INTERNET_CAPABILITIES}; use routing_table::*; use rpc_processor::*; -use storage_manager::*; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] use wasm::*; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] pub use wasm::{/* LOCAL_NETWORK_CAPABILITIES, */ MAX_CAPABILITIES, PUBLIC_INTERNET_CAPABILITIES,}; //////////////////////////////////////////////////////////////////////////////////////// @@ -65,7 +64,6 @@ pub const HOLE_PUNCH_DELAY_MS: u32 = 100; struct NetworkComponents { net: Network, connection_manager: ConnectionManager, - rpc_processor: RPCProcessor, receipt_manager: ReceiptManager, } @@ -119,45 +117,74 @@ enum SendDataToExistingFlowResult { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum StartupDisposition { Success, - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] BindRetry, } +#[derive(Debug, Clone)] +pub struct NetworkManagerStartupContext { + pub startup_lock: Arc, +} +impl NetworkManagerStartupContext { + pub fn new() -> Self { + Self { + startup_lock: Arc::new(StartupLock::new()), + } + } +} +impl Default for NetworkManagerStartupContext { + fn default() -> Self { + Self::new() + } +} + // The mutable state of the network manager +#[derive(Debug)] struct NetworkManagerInner { stats: NetworkManagerStats, client_allowlist: LruCache, node_contact_method_cache: LruCache, address_check: Option, + peer_info_change_subscription: Option, + socket_address_change_subscription: Option, } -struct NetworkManagerUnlockedInner { - // Handles - event_bus: EventBus, - 
config: VeilidConfig, - storage_manager: StorageManager, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] - block_store: BlockStore, - crypto: Crypto, +pub(crate) struct NetworkManager { + registry: VeilidComponentRegistry, + inner: Mutex, + + // Address filter + address_filter: AddressFilter, + // Accessors - routing_table: RwLock>, - address_filter: RwLock>, components: RwLock>, - update_callback: RwLock>, + // Background processes rolling_transfers_task: TickTask, address_filter_task: TickTask, - // Network Key + + // Network key network_key: Option, - // Startup Lock - startup_lock: StartupLock, + + // Startup context + startup_context: NetworkManagerStartupContext, } -#[derive(Clone)] -pub(crate) struct NetworkManager { - inner: Arc>, - unlocked_inner: Arc, +impl_veilid_component!(NetworkManager); + +impl fmt::Debug for NetworkManager { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NetworkManager") + //.field("registry", &self.registry) + .field("inner", &self.inner) + .field("address_filter", &self.address_filter) + // .field("components", &self.components) + // .field("rolling_transfers_task", &self.rolling_transfers_task) + // .field("address_filter_task", &self.address_filter_task) + .field("network_key", &self.network_key) + .field("startup_context", &self.startup_context) + .finish() + } } impl NetworkManager { @@ -167,52 +194,20 @@ impl NetworkManager { client_allowlist: LruCache::new_unbounded(), node_contact_method_cache: LruCache::new(NODE_CONTACT_METHOD_CACHE_SIZE), address_check: None, - } - } - fn new_unlocked_inner( - event_bus: EventBus, - config: VeilidConfig, - storage_manager: StorageManager, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - crypto: Crypto, - network_key: Option, - ) -> NetworkManagerUnlockedInner { - NetworkManagerUnlockedInner { - event_bus, - config: config.clone(), - storage_manager, - table_store, - #[cfg(feature = 
"unstable-blockstore")] - block_store, - crypto, - address_filter: RwLock::new(None), - routing_table: RwLock::new(None), - components: RwLock::new(None), - update_callback: RwLock::new(None), - rolling_transfers_task: TickTask::new( - "rolling_transfers_task", - ROLLING_TRANSFERS_INTERVAL_SECS, - ), - address_filter_task: TickTask::new( - "address_filter_task", - ADDRESS_FILTER_TASK_INTERVAL_SECS, - ), - network_key, - startup_lock: StartupLock::new(), + peer_info_change_subscription: None, + socket_address_change_subscription: None, } } pub fn new( - event_bus: EventBus, - config: VeilidConfig, - storage_manager: StorageManager, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - crypto: Crypto, + registry: VeilidComponentRegistry, + startup_context: NetworkManagerStartupContext, ) -> Self { // Make the network key let network_key = { + let config = registry.config(); + let crypto = registry.crypto(); + let c = config.get(); let network_key_password = c.network.network_key_password.clone(); let network_key = if let Some(network_key_password) = network_key_password { @@ -238,110 +233,52 @@ impl NetworkManager { network_key }; + let inner = Self::new_inner(); + let address_filter = AddressFilter::new(registry.clone()); + let this = Self { - inner: Arc::new(Mutex::new(Self::new_inner())), - unlocked_inner: Arc::new(Self::new_unlocked_inner( - event_bus, - config, - storage_manager, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, - crypto, - network_key, - )), + registry, + inner: Mutex::new(inner), + address_filter, + components: RwLock::new(None), + rolling_transfers_task: TickTask::new( + "rolling_transfers_task", + ROLLING_TRANSFERS_INTERVAL_SECS, + ), + address_filter_task: TickTask::new( + "address_filter_task", + ADDRESS_FILTER_TASK_INTERVAL_SECS, + ), + network_key, + startup_context, }; this.setup_tasks(); this } - pub fn event_bus(&self) -> EventBus { - self.unlocked_inner.event_bus.clone() 
- } - pub fn config(&self) -> VeilidConfig { - self.unlocked_inner.config.clone() - } - pub fn with_config(&self, f: F) -> R - where - F: FnOnce(&VeilidConfigInner) -> R, - { - f(&self.unlocked_inner.config.get()) - } - pub fn storage_manager(&self) -> StorageManager { - self.unlocked_inner.storage_manager.clone() - } - pub fn table_store(&self) -> TableStore { - self.unlocked_inner.table_store.clone() - } - #[cfg(feature = "unstable-blockstore")] - pub fn block_store(&self) -> BlockStore { - self.unlocked_inner.block_store.clone() - } - pub fn crypto(&self) -> Crypto { - self.unlocked_inner.crypto.clone() - } - pub fn address_filter(&self) -> AddressFilter { - self.unlocked_inner - .address_filter - .read() - .as_ref() - .unwrap() - .clone() - } - pub fn routing_table(&self) -> RoutingTable { - self.unlocked_inner - .routing_table - .read() - .as_ref() - .unwrap() - .clone() + + pub fn address_filter(&self) -> &AddressFilter { + &self.address_filter } + fn net(&self) -> Network { - self.unlocked_inner - .components - .read() - .as_ref() - .unwrap() - .net - .clone() + self.components.read().as_ref().unwrap().net.clone() } fn opt_net(&self) -> Option { - self.unlocked_inner - .components - .read() - .as_ref() - .map(|x| x.net.clone()) + self.components.read().as_ref().map(|x| x.net.clone()) } fn receipt_manager(&self) -> ReceiptManager { - self.unlocked_inner - .components + self.components .read() .as_ref() .unwrap() .receipt_manager .clone() } - pub fn rpc_processor(&self) -> RPCProcessor { - self.unlocked_inner - .components - .read() - .as_ref() - .unwrap() - .rpc_processor - .clone() - } - pub fn opt_rpc_processor(&self) -> Option { - self.unlocked_inner - .components - .read() - .as_ref() - .map(|x| x.rpc_processor.clone()) - } pub fn connection_manager(&self) -> ConnectionManager { - self.unlocked_inner - .components + self.components .read() .as_ref() .unwrap() @@ -349,103 +286,48 @@ impl NetworkManager { .clone() } pub fn opt_connection_manager(&self) -> 
Option { - self.unlocked_inner - .components + self.components .read() .as_ref() .map(|x| x.connection_manager.clone()) } - pub fn update_callback(&self) -> UpdateCallback { - self.unlocked_inner - .update_callback - .read() - .as_ref() - .unwrap() - .clone() - } - #[instrument(level = "debug", skip_all, err)] - pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> { - let routing_table = RoutingTable::new(self.clone()); - routing_table.init().await?; - let address_filter = AddressFilter::new(self.config(), routing_table.clone()); - *self.unlocked_inner.routing_table.write() = Some(routing_table.clone()); - *self.unlocked_inner.address_filter.write() = Some(address_filter); - *self.unlocked_inner.update_callback.write() = Some(update_callback); - - // Register event handlers - let this = self.clone(); - self.event_bus().subscribe(move |evt| { - let this = this.clone(); - Box::pin(async move { - this.peer_info_change_event_handler(evt); - }) - }); - let this = self.clone(); - self.event_bus().subscribe(move |evt| { - let this = this.clone(); - Box::pin(async move { - this.socket_address_change_event_handler(evt); - }) - }); - + async fn init_async(&self) -> EyreResult<()> { Ok(()) } - #[instrument(level = "debug", skip_all)] - pub async fn terminate(&self) { - let routing_table = self.unlocked_inner.routing_table.write().take(); - if let Some(routing_table) = routing_table { - routing_table.terminate().await; - } - *self.unlocked_inner.update_callback.write() = None; + async fn post_init_async(&self) -> EyreResult<()> { + Ok(()) } + async fn pre_terminate_async(&self) {} + + #[instrument(level = "debug", skip_all)] + async fn terminate_async(&self) {} + #[instrument(level = "debug", skip_all, err)] pub async fn internal_startup(&self) -> EyreResult { - if self.unlocked_inner.components.read().is_some() { + if self.components.read().is_some() { log_net!(debug "NetworkManager::internal_startup already started"); return 
Ok(StartupDisposition::Success); } // Clean address filter for things that should not be persistent - self.address_filter().restart(); + self.address_filter.restart(); // Create network components - let connection_manager = ConnectionManager::new(self.clone()); - let net = Network::new( - self.clone(), - self.routing_table(), - connection_manager.clone(), - ); - let rpc_processor = RPCProcessor::new( - self.clone(), - self.unlocked_inner - .update_callback - .read() - .as_ref() - .unwrap() - .clone(), - ); + let connection_manager = ConnectionManager::new(self.registry()); + let net = Network::new(self.registry()); let receipt_manager = ReceiptManager::new(); - *self.unlocked_inner.components.write() = Some(NetworkComponents { + + *self.components.write() = Some(NetworkComponents { net: net.clone(), connection_manager: connection_manager.clone(), - rpc_processor: rpc_processor.clone(), receipt_manager: receipt_manager.clone(), }); - // Start network components - connection_manager.startup().await?; - match net.startup().await? 
{ - StartupDisposition::Success => {} - StartupDisposition::BindRetry => { - return Ok(StartupDisposition::BindRetry); - } - } - - let (detect_address_changes, ip6_prefix_size) = self.with_config(|c| { + let (detect_address_changes, ip6_prefix_size) = self.config().with(|c| { ( c.network.detect_address_changes, c.network.max_connections_per_ip6_prefix_size as usize, @@ -456,9 +338,30 @@ impl NetworkManager { ip6_prefix_size, }; let address_check = AddressCheck::new(address_check_config, net.clone()); - self.inner.lock().address_check = Some(address_check); - rpc_processor.startup().await?; + // Register event handlers + let peer_info_change_subscription = + impl_subscribe_event_bus!(self, Self, peer_info_change_event_handler); + + let socket_address_change_subscription = + impl_subscribe_event_bus!(self, Self, socket_address_change_event_handler); + + { + let mut inner = self.inner.lock(); + inner.address_check = Some(address_check); + inner.peer_info_change_subscription = Some(peer_info_change_subscription); + inner.socket_address_change_subscription = Some(socket_address_change_subscription); + } + + // Start network components + connection_manager.startup().await?; + match net.startup().await? 
{ + StartupDisposition::Success => {} + StartupDisposition::BindRetry => { + return Ok(StartupDisposition::BindRetry); + } + } + receipt_manager.startup().await?; log_net!("NetworkManager::internal_startup end"); @@ -468,15 +371,11 @@ impl NetworkManager { #[instrument(level = "debug", skip_all, err)] pub async fn startup(&self) -> EyreResult { - let guard = self.unlocked_inner.startup_lock.startup()?; + let guard = self.startup_context.startup_lock.startup()?; match self.internal_startup().await { Ok(StartupDisposition::Success) => { guard.success(); - - // Inform api clients that things have changed - self.send_network_update(); - Ok(StartupDisposition::Success) } Ok(StartupDisposition::BindRetry) => { @@ -492,25 +391,30 @@ impl NetworkManager { #[instrument(level = "debug", skip_all)] async fn shutdown_internal(&self) { - // Cancel all tasks - self.cancel_tasks().await; - - // Shutdown address check - self.inner.lock().address_check = Option::::None; + // Shutdown event bus subscriptions and address check + { + let mut inner = self.inner.lock(); + if let Some(sub) = inner.socket_address_change_subscription.take() { + self.event_bus().unsubscribe(sub); + } + if let Some(sub) = inner.peer_info_change_subscription.take() { + self.event_bus().unsubscribe(sub); + } + inner.address_check = None; + } // Shutdown network components if they started up log_net!(debug "shutting down network components"); { - let components = self.unlocked_inner.components.read().clone(); + let components = self.components.read().clone(); if let Some(components) = components { components.net.shutdown().await; - components.rpc_processor.shutdown().await; components.receipt_manager.shutdown().await; components.connection_manager.shutdown().await; } } - *self.unlocked_inner.components.write() = None; + *self.components.write() = None; // reset the state log_net!(debug "resetting network manager state"); @@ -521,21 +425,22 @@ impl NetworkManager { #[instrument(level = "debug", skip_all)] pub 
async fn shutdown(&self) { - log_net!(debug "starting network manager shutdown"); + // Cancel all tasks + log_net!(debug "stopping network manager tasks"); + self.cancel_tasks().await; - let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { - log_net!(debug "network manager is already shut down"); - return; - }; + // Proceed with shutdown + log_net!(debug "starting network manager shutdown"); + let guard = self + .startup_context + .startup_lock + .shutdown() + .await + .expect("should be started up"); self.shutdown_internal().await; guard.success(); - - // send update - log_net!(debug "sending network state update to api clients"); - self.send_network_update(); - log_net!(debug "finished network manager shutdown"); } @@ -568,7 +473,9 @@ impl NetworkManager { } pub fn purge_client_allowlist(&self) { - let timeout_ms = self.with_config(|c| c.network.client_allowlist_timeout_ms); + let timeout_ms = self + .config() + .with(|c| c.network.client_allowlist_timeout_ms); let mut inner = self.inner.lock(); let cutoff_timestamp = Timestamp::now() - TimestampDuration::new((timeout_ms as u64) * 1000u64); @@ -607,14 +514,15 @@ impl NetworkManager { extra_data: D, callback: impl ReceiptCallback, ) -> EyreResult> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { bail!("network is not started"); }; let receipt_manager = self.receipt_manager(); let routing_table = self.routing_table(); + let crypto = self.crypto(); // Generate receipt and serialized form to return - let vcrypto = self.crypto().best(); + let vcrypto = crypto.best(); let nonce = vcrypto.random_nonce(); let node_id = routing_table.node_id(vcrypto.kind()); @@ -628,7 +536,7 @@ impl NetworkManager { extra_data, )?; let out = receipt - .to_signed_data(self.crypto(), &node_id_secret) + .to_signed_data(&crypto, &node_id_secret) .wrap_err("failed to generate signed receipt")?; // Record the receipt for later @@ -645,15 
+553,16 @@ impl NetworkManager { expiration_us: TimestampDuration, extra_data: D, ) -> EyreResult<(Vec, EventualValueFuture)> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { bail!("network is not started"); }; let receipt_manager = self.receipt_manager(); let routing_table = self.routing_table(); + let crypto = self.crypto(); // Generate receipt and serialized form to return - let vcrypto = self.crypto().best(); + let vcrypto = crypto.best(); let nonce = vcrypto.random_nonce(); let node_id = routing_table.node_id(vcrypto.kind()); @@ -667,7 +576,7 @@ impl NetworkManager { extra_data, )?; let out = receipt - .to_signed_data(self.crypto(), &node_id_secret) + .to_signed_data(&crypto, &node_id_secret) .wrap_err("failed to generate signed receipt")?; // Record the receipt for later @@ -685,13 +594,14 @@ impl NetworkManager { &self, receipt_data: R, ) -> NetworkResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return NetworkResult::service_unavailable("network is not started"); }; let receipt_manager = self.receipt_manager(); + let crypto = self.crypto(); - let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -710,13 +620,14 @@ impl NetworkManager { receipt_data: R, inbound_noderef: FilteredNodeRef, ) -> NetworkResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return NetworkResult::service_unavailable("network is not started"); }; let receipt_manager = self.receipt_manager(); + let crypto = self.crypto(); - let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { + let receipt = 
match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -734,13 +645,14 @@ impl NetworkManager { &self, receipt_data: R, ) -> NetworkResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return NetworkResult::service_unavailable("network is not started"); }; let receipt_manager = self.receipt_manager(); + let crypto = self.crypto(); - let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -759,13 +671,14 @@ impl NetworkManager { receipt_data: R, private_route: PublicKey, ) -> NetworkResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return NetworkResult::service_unavailable("network is not started"); }; let receipt_manager = self.receipt_manager(); + let crypto = self.crypto(); - let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -784,7 +697,7 @@ impl NetworkManager { signal_flow: Flow, signal_info: SignalInfo, ) -> EyreResult> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return Ok(NetworkResult::service_unavailable("network is not started")); }; @@ -884,7 +797,8 @@ impl NetworkManager { ) -> EyreResult> { // DH to get encryption key let routing_table = self.routing_table(); - let Some(vcrypto) = self.crypto().get(dest_node_id.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(dest_node_id.kind) else { bail!("should 
not have a destination with incompatible crypto here"); }; @@ -905,12 +819,7 @@ impl NetworkManager { dest_node_id.value, ); envelope - .to_encrypted_data( - self.crypto(), - body.as_ref(), - &node_id_secret, - &self.unlocked_inner.network_key, - ) + .to_encrypted_data(&crypto, body.as_ref(), &node_id_secret, &self.network_key) .wrap_err("envelope failed to encode") } @@ -925,7 +834,7 @@ impl NetworkManager { destination_node_ref: Option, body: B, ) -> EyreResult> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return Ok(NetworkResult::no_connection_other("network is not started")); }; @@ -966,7 +875,7 @@ impl NetworkManager { dial_info: DialInfo, rcpt_data: Vec, ) -> EyreResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { log_net!(debug "not sending out-of-band receipt to {} because network is stopped", dial_info); return Ok(()); }; @@ -993,7 +902,7 @@ impl NetworkManager { // and passes it to the RPC handler #[instrument(level = "trace", target = "net", skip_all)] async fn on_recv_envelope(&self, data: &mut [u8], flow: Flow) -> EyreResult { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_context.startup_lock.enter() else { return Ok(false); }; @@ -1043,21 +952,20 @@ impl NetworkManager { } // Decode envelope header (may fail signature validation) - let envelope = - match Envelope::from_signed_data(self.crypto(), data, &self.unlocked_inner.network_key) - { - Ok(v) => v, - Err(e) => { - log_net!(debug "envelope failed to decode: {}", e); - // safe to punish here because relays also check here to ensure they arent forwarding things that don't decode - self.address_filter() - .punish_ip_addr(remote_addr, PunishmentReason::FailedToDecodeEnvelope); - return Ok(false); - } - }; + let crypto = self.crypto(); + let envelope = match 
Envelope::from_signed_data(&crypto, data, &self.network_key) { + Ok(v) => v, + Err(e) => { + log_net!(debug "envelope failed to decode: {}", e); + // safe to punish here because relays also check here to ensure they arent forwarding things that don't decode + self.address_filter() + .punish_ip_addr(remote_addr, PunishmentReason::FailedToDecodeEnvelope); + return Ok(false); + } + }; // Get timestamp range - let (tsbehind, tsahead) = self.with_config(|c| { + let (tsbehind, tsahead) = self.config().with(|c| { ( c.network .rpc @@ -1136,7 +1044,10 @@ impl NetworkManager { // which only performs a lightweight lookup before passing the packet back out // If our node has the relay capability disabled, we should not be asked to relay - if self.with_config(|c| c.capabilities.disable.contains(&CAP_RELAY)) { + if self + .config() + .with(|c| c.capabilities.disable.contains(&CAP_RELAY)) + { log_net!(debug "node has relay capability disabled, dropping relayed envelope from {} to {}", sender_id, recipient_id); return Ok(false); } @@ -1191,12 +1102,8 @@ impl NetworkManager { let node_id_secret = routing_table.node_id_secret_key(envelope.get_crypto_kind()); // Decrypt the envelope body - let body = match envelope.decrypt_body( - self.crypto(), - data, - &node_id_secret, - &self.unlocked_inner.network_key, - ) { + let crypto = self.crypto(); + let body = match envelope.decrypt_body(&crypto, data, &node_id_secret, &self.network_key) { Ok(v) => v, Err(e) => { log_net!(debug "failed to decrypt envelope body: {}", e); diff --git a/veilid-core/src/network_manager/native/discovery_context.rs b/veilid-core/src/network_manager/native/discovery_context.rs index bbb0dcf6..9d913581 100644 --- a/veilid-core/src/network_manager/native/discovery_context.rs +++ b/veilid-core/src/network_manager/native/discovery_context.rs @@ -2,6 +2,7 @@ /// Also performs UPNP/IGD mapping if enabled and possible use super::*; use futures_util::stream::FuturesUnordered; +use igd_manager::{IGDAddressType, 
IGDProtocolType}; const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3; const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500; @@ -42,9 +43,7 @@ struct DiscoveryContextInner { external_info: Vec, } -struct DiscoveryContextUnlockedInner { - routing_table: RoutingTable, - net: Network, +pub(super) struct DiscoveryContextUnlockedInner { config: DiscoveryContextConfig, // per-protocol @@ -53,25 +52,30 @@ struct DiscoveryContextUnlockedInner { #[derive(Clone)] pub(super) struct DiscoveryContext { + registry: VeilidComponentRegistry, unlocked_inner: Arc, inner: Arc>, } +impl_veilid_component_registry_accessor!(DiscoveryContext); + +impl core::ops::Deref for DiscoveryContext { + type Target = DiscoveryContextUnlockedInner; + + fn deref(&self) -> &Self::Target { + &self.unlocked_inner + } +} + impl DiscoveryContext { - pub fn new(routing_table: RoutingTable, net: Network, config: DiscoveryContextConfig) -> Self { - let intf_addrs = Self::get_local_addresses( - routing_table.clone(), - config.protocol_type, - config.address_type, - ); + pub fn new(registry: VeilidComponentRegistry, config: DiscoveryContextConfig) -> Self { + let routing_table = registry.routing_table(); + let intf_addrs = + Self::get_local_addresses(&routing_table, config.protocol_type, config.address_type); Self { - unlocked_inner: Arc::new(DiscoveryContextUnlockedInner { - routing_table, - net, - config, - intf_addrs, - }), + registry, + unlocked_inner: Arc::new(DiscoveryContextUnlockedInner { config, intf_addrs }), inner: Arc::new(Mutex::new(DiscoveryContextInner { external_info: Vec::new(), })), @@ -84,7 +88,7 @@ impl DiscoveryContext { // This pulls the already-detected local interface dial info from the routing table #[instrument(level = "trace", skip(routing_table), ret)] fn get_local_addresses( - routing_table: RoutingTable, + routing_table: &RoutingTable, protocol_type: ProtocolType, address_type: AddressType, ) -> Vec { @@ -108,7 +112,7 @@ impl DiscoveryContext { // This is done over the normal port using RPC 
#[instrument(level = "trace", skip(self), ret)] async fn request_public_address(&self, node_ref: FilteredNodeRef) -> Option { - let rpc = self.unlocked_inner.routing_table.rpc_processor(); + let rpc = self.rpc_processor(); let res = network_result_value_or_log!(match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await { Ok(v) => v, @@ -136,16 +140,14 @@ impl DiscoveryContext { // This is done over the normal port using RPC #[instrument(level = "trace", skip(self), ret)] async fn discover_external_addresses(&self) -> bool { - let node_count = { - let config = self.unlocked_inner.routing_table.network_manager().config(); - let c = config.get(); - c.network.dht.max_find_node_count as usize - }; + let node_count = self + .config() + .with(|c| c.network.dht.max_find_node_count as usize); let routing_domain = RoutingDomain::PublicInternet; - let protocol_type = self.unlocked_inner.config.protocol_type; - let address_type = self.unlocked_inner.config.address_type; - let port = self.unlocked_inner.config.port; + let protocol_type = self.config.protocol_type; + let address_type = self.config.address_type; + let port = self.config.port; // Build an filter that matches our protocol and address type // and excludes relayed nodes so we can get an accurate external address @@ -187,10 +189,11 @@ impl DiscoveryContext { ]); // Find public nodes matching this filter - let nodes = self - .unlocked_inner - .routing_table - .find_fast_non_local_nodes_filtered(routing_domain, node_count, filters); + let nodes = self.routing_table().find_fast_non_local_nodes_filtered( + routing_domain, + node_count, + filters, + ); if nodes.is_empty() { log_net!(debug "no external address detection peers of type {:?}:{:?}", @@ -212,8 +215,8 @@ impl DiscoveryContext { async move { if let Some(address) = this.request_public_address(node.clone()).await { let dial_info = this - .unlocked_inner - .net + .network_manager() + .net() .make_dial_info(address, protocol_type); return Some(ExternalInfo 
{ dial_info, @@ -297,10 +300,9 @@ impl DiscoveryContext { dial_info: DialInfo, redirect: bool, ) -> bool { - let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); - // ask the node to send us a dial info validation receipt - match rpc_processor + match self + .rpc_processor() .rpc_call_validate_dial_info(node_ref.clone(), dial_info, redirect) .await { @@ -314,14 +316,22 @@ impl DiscoveryContext { #[instrument(level = "trace", skip(self), ret)] async fn try_upnp_port_mapping(&self) -> Option { - let protocol_type = self.unlocked_inner.config.protocol_type; - let address_type = self.unlocked_inner.config.address_type; - let local_port = self.unlocked_inner.config.port; + let protocol_type = self.config.protocol_type; + let address_type = self.config.address_type; + let local_port = self.config.port; + + let igd_protocol_type = match protocol_type.low_level_protocol_type() { + LowLevelProtocolType::UDP => IGDProtocolType::UDP, + LowLevelProtocolType::TCP => IGDProtocolType::TCP, + }; + let igd_address_type = match address_type { + AddressType::IPV6 => IGDAddressType::IPV6, + AddressType::IPV4 => IGDAddressType::IPV4, + }; - let low_level_protocol_type = protocol_type.low_level_protocol_type(); let external_1 = self.inner.lock().external_info.first().unwrap().clone(); - let igd_manager = self.unlocked_inner.net.unlocked_inner.igd_manager.clone(); + let igd_manager = self.network_manager().net().igd_manager.clone(); let mut tries = 0; loop { tries += 1; @@ -329,15 +339,15 @@ impl DiscoveryContext { // Attempt a port mapping. 
If this doesn't succeed, it's not going to let mapped_external_address = igd_manager .map_any_port( - low_level_protocol_type, - address_type, + igd_protocol_type, + igd_address_type, local_port, Some(external_1.address.ip_addr()), ) .await?; // Make dial info from the port mapping - let external_mapped_dial_info = self.unlocked_inner.net.make_dial_info( + let external_mapped_dial_info = self.network_manager().net().make_dial_info( SocketAddress::from_socket_addr(mapped_external_address), protocol_type, ); @@ -361,10 +371,7 @@ impl DiscoveryContext { if validate_tries != PORT_MAP_VALIDATE_TRY_COUNT { log_net!(debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n", - local_port, match low_level_protocol_type { - LowLevelProtocolType::UDP => "udp", - LowLevelProtocolType::TCP => "tcp", - }); + local_port, igd_protocol_type); sleep(PORT_MAP_VALIDATE_DELAY_MS).await } else { break; @@ -374,18 +381,15 @@ impl DiscoveryContext { // Release the mapping if we're still unreachable let _ = igd_manager .unmap_port( - low_level_protocol_type, - address_type, + igd_protocol_type, + igd_address_type, external_1.address.port(), ) .await; if tries == PORT_MAP_TRY_COUNT { warn!("UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n", - local_port, match low_level_protocol_type { - LowLevelProtocolType::UDP => "udp", - LowLevelProtocolType::TCP => "tcp", - } + local_port, igd_protocol_type ); break; } @@ -413,7 +417,7 @@ impl DiscoveryContext { { // Add public dial info with Direct dialinfo class Some(DetectionResult { - config: this.unlocked_inner.config, + config: this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1.dial_info.clone(), class: DialInfoClass::Direct, @@ -423,7 +427,7 @@ impl DiscoveryContext { } else { // Add public dial info with Blocked dialinfo class Some(DetectionResult { - config: this.unlocked_inner.config, + config: 
this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1.dial_info.clone(), class: DialInfoClass::Blocked, @@ -445,7 +449,7 @@ impl DiscoveryContext { let inner = self.inner.lock(); inner.external_info.clone() }; - let local_port = self.unlocked_inner.config.port; + let local_port = self.config.port; // Get the external dial info histogram for our use here let mut external_info_addr_port_hist = HashMap::::new(); @@ -501,7 +505,7 @@ impl DiscoveryContext { let do_symmetric_nat_fut: SendPinBoxFuture> = Box::pin(async move { Some(DetectionResult { - config: this.unlocked_inner.config, + config: this.config, ddi: DetectedDialInfo::SymmetricNAT, external_address_types, }) @@ -535,7 +539,7 @@ impl DiscoveryContext { { // Add public dial info with Direct dialinfo class return Some(DetectionResult { - config: c_this.unlocked_inner.config, + config: c_this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1_dial_info_with_local_port, class: DialInfoClass::Direct, @@ -558,10 +562,7 @@ impl DiscoveryContext { /////////// let this = self.clone(); let do_nat_detect_fut: SendPinBoxFuture> = Box::pin(async move { - let mut retry_count = { - let c = this.unlocked_inner.net.config.get(); - c.network.restricted_nat_retries - }; + let mut retry_count = this.config().with(|c| c.network.restricted_nat_retries); // Loop for restricted NAT retries loop { @@ -585,7 +586,7 @@ impl DiscoveryContext { // Add public dial info with full cone NAT network class return Some(DetectionResult { - config: c_this.unlocked_inner.config, + config: c_this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: c_external_1.dial_info, class: DialInfoClass::FullConeNAT, @@ -620,7 +621,7 @@ impl DiscoveryContext { { // Got a reply from a non-default port, which means we're only address restricted return Some(DetectionResult { - config: c_this.unlocked_inner.config, + config: c_this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { 
dial_info: c_external_1.dial_info.clone(), class: DialInfoClass::AddressRestrictedNAT, @@ -632,7 +633,7 @@ impl DiscoveryContext { } // Didn't get a reply from a non-default port, which means we are also port restricted Some(DetectionResult { - config: c_this.unlocked_inner.config, + config: c_this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: c_external_1.dial_info.clone(), class: DialInfoClass::PortRestrictedNAT, @@ -678,10 +679,7 @@ impl DiscoveryContext { &self, unord: &mut FuturesUnordered>>, ) { - let enable_upnp = { - let c = self.unlocked_inner.net.config.get(); - c.network.upnp - }; + let enable_upnp = self.config().with(|c| c.network.upnp); // Do this right away because it's fast and every detection is going to need it // Get our external addresses from two fast nodes @@ -701,7 +699,7 @@ impl DiscoveryContext { if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await { // Got a port mapping, let's use it return Some(DetectionResult { - config: this.unlocked_inner.config, + config: this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_mapped_dial_info.clone(), class: DialInfoClass::Mapped, @@ -725,12 +723,7 @@ impl DiscoveryContext { .lock() .external_info .iter() - .find_map(|ei| { - self.unlocked_inner - .intf_addrs - .contains(&ei.address) - .then_some(true) - }) + .find_map(|ei| self.intf_addrs.contains(&ei.address).then_some(true)) .unwrap_or_default(); if local_address_in_external_info { diff --git a/veilid-core/src/network_manager/native/igd_manager.rs b/veilid-core/src/network_manager/native/igd_manager.rs index e40c47a4..3aa288b3 100644 --- a/veilid-core/src/network_manager/native/igd_manager.rs +++ b/veilid-core/src/network_manager/native/igd_manager.rs @@ -5,13 +5,12 @@ use std::net::UdpSocket; const UPNP_GATEWAY_DETECT_TIMEOUT_MS: u32 = 5_000; const UPNP_MAPPING_LIFETIME_MS: u32 = 120_000; const UPNP_MAPPING_ATTEMPTS: u32 = 3; -const UPNP_MAPPING_LIFETIME_US: TimestampDuration 
= - TimestampDuration::new(UPNP_MAPPING_LIFETIME_MS as u64 * 1000u64); +const UPNP_MAPPING_LIFETIME_US: u64 = UPNP_MAPPING_LIFETIME_MS as u64 * 1000u64; #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] struct PortMapKey { - llpt: LowLevelProtocolType, - at: AddressType, + protocol_type: IGDProtocolType, + address_type: IGDAddressType, local_port: u16, } @@ -19,36 +18,67 @@ struct PortMapKey { struct PortMapValue { ext_ip: IpAddr, mapped_port: u16, - timestamp: Timestamp, - renewal_lifetime: TimestampDuration, + timestamp: u64, + renewal_lifetime: u64, renewal_attempts: u32, } struct IGDManagerInner { - local_ip_addrs: BTreeMap, + local_ip_addrs: BTreeMap, gateways: BTreeMap>, port_maps: BTreeMap, } #[derive(Clone)] pub struct IGDManager { - config: VeilidConfig, + program_name: String, inner: Arc>, } -fn convert_llpt(llpt: LowLevelProtocolType) -> PortMappingProtocol { - match llpt { - LowLevelProtocolType::UDP => PortMappingProtocol::UDP, - LowLevelProtocolType::TCP => PortMappingProtocol::TCP, +fn convert_protocol_type(igdpt: IGDProtocolType) -> PortMappingProtocol { + match igdpt { + IGDProtocolType::UDP => PortMappingProtocol::UDP, + IGDProtocolType::TCP => PortMappingProtocol::TCP, + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum IGDAddressType { + IPV6, + IPV4, +} + +impl fmt::Display for IGDAddressType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IGDAddressType::IPV6 => write!(f, "IPV6"), + IGDAddressType::IPV4 => write!(f, "IPV4"), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum IGDProtocolType { + UDP, + TCP, +} + +impl fmt::Display for IGDProtocolType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IGDProtocolType::UDP => write!(f, "UDP"), + IGDProtocolType::TCP => write!(f, "TCP"), + } } } impl IGDManager { - // + ///////////////////////////////////////////////////////////////////// + // 
Public Interface - pub fn new(config: VeilidConfig) -> Self { + pub fn new(program_name: String) -> Self { Self { - config, + program_name, inner: Arc::new(Mutex::new(IGDManagerInner { local_ip_addrs: BTreeMap::new(), gateways: BTreeMap::new(), @@ -58,10 +88,306 @@ impl IGDManager { } #[instrument(level = "trace", target = "net", skip_all)] - fn get_routed_local_ip_address(address_type: AddressType) -> Option { + pub async fn unmap_port( + &self, + protocol_type: IGDProtocolType, + address_type: IGDAddressType, + mapped_port: u16, + ) -> Option<()> { + let this = self.clone(); + blocking_wrapper( + "igd unmap_port", + move || { + let mut inner = this.inner.lock(); + + // If we already have this port mapped, just return the existing portmap + let mut found = None; + for (pmk, pmv) in &inner.port_maps { + if pmk.protocol_type == protocol_type + && pmk.address_type == address_type + && pmv.mapped_port == mapped_port + { + found = Some(*pmk); + break; + } + } + let pmk = found?; + let _pmv = inner + .port_maps + .remove(&pmk) + .expect("key found but remove failed"); + + // Get local ip address + let local_ip = Self::find_local_ip(&mut inner, address_type)?; + + // Find gateway + let gw = Self::find_gateway(&mut inner, local_ip)?; + + // Unmap port + match gw.remove_port(convert_protocol_type(protocol_type), mapped_port) { + Ok(()) => (), + Err(e) => { + // Failed to map external port + log_net!(debug "upnp failed to remove external port: {}", e); + return None; + } + }; + Some(()) + }, + None, + ) + .await + } + + #[instrument(level = "trace", target = "net", skip_all)] + pub async fn map_any_port( + &self, + protocol_type: IGDProtocolType, + address_type: IGDAddressType, + local_port: u16, + expected_external_address: Option, + ) -> Option { + let this = self.clone(); + blocking_wrapper("igd map_any_port", move || { + let mut inner = this.inner.lock(); + + // If we already have this port mapped, just return the existing portmap + let pmkey = PortMapKey { + 
protocol_type, + address_type, + local_port, + }; + if let Some(pmval) = inner.port_maps.get(&pmkey) { + return Some(SocketAddr::new(pmval.ext_ip, pmval.mapped_port)); + } + + // Get local ip address + let local_ip = Self::find_local_ip(&mut inner, address_type)?; + + // Find gateway + let gw = Self::find_gateway(&mut inner, local_ip)?; + + // Get external address + let ext_ip = match gw.get_external_ip() { + Ok(ip) => ip, + Err(e) => { + log_net!(debug "couldn't get external ip from igd: {}", e); + return None; + } + }; + + // Ensure external IP matches address type + if ext_ip.is_ipv4() && address_type != IGDAddressType::IPV4 { + log_net!(debug "mismatched ip address type from igd, wanted v4, got v6"); + return None; + } else if ext_ip.is_ipv6() && address_type != IGDAddressType::IPV6 { + log_net!(debug "mismatched ip address type from igd, wanted v6, got v4"); + return None; + } + + if let Some(expected_external_address) = expected_external_address { + if ext_ip != expected_external_address { + log_net!(debug "gateway external address does not match calculated external address: expected={} vs gateway={}", expected_external_address, ext_ip); + return None; + } + } + + // Map any port + let desc = this.get_description(protocol_type, local_port); + let mapped_port = match gw.add_any_port(convert_protocol_type(protocol_type), SocketAddr::new(local_ip, local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) { + Ok(mapped_port) => mapped_port, + Err(e) => { + // Failed to map external port + log_net!(debug "upnp failed to map external port: {}", e); + return None; + } + }; + + // Add to mapping list to keep alive + let timestamp = get_timestamp(); + inner.port_maps.insert(PortMapKey { + protocol_type, + address_type, + local_port, + }, PortMapValue { + ext_ip, + mapped_port, + timestamp, + renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64, + renewal_attempts: 0, + }); + + // Succeeded, return the externally mapped port + 
Some(SocketAddr::new(ext_ip, mapped_port)) + }, None) + .await + } + + #[instrument( + level = "trace", + target = "net", + name = "IGDManager::tick", + skip_all, + err + )] + pub async fn tick(&self) -> EyreResult { + // Refresh mappings if we have them + // If an error is received, then return false to restart the local network + let mut full_renews: Vec<(PortMapKey, PortMapValue)> = Vec::new(); + let mut renews: Vec<(PortMapKey, PortMapValue)> = Vec::new(); + { + let inner = self.inner.lock(); + let now = get_timestamp(); + + for (k, v) in &inner.port_maps { + let mapping_lifetime = now.saturating_sub(v.timestamp); + if mapping_lifetime >= UPNP_MAPPING_LIFETIME_US + || v.renewal_attempts >= UPNP_MAPPING_ATTEMPTS + { + // Past expiration time or tried N times, do a full renew and fail out if we can't + full_renews.push((*k, *v)); + } else if mapping_lifetime >= v.renewal_lifetime { + // Attempt a normal renewal + renews.push((*k, *v)); + } + } + + // See if we need to do some blocking operations + if full_renews.is_empty() && renews.is_empty() { + // Just return now since there's nothing to renew + return Ok(true); + } + } + + let this = self.clone(); + blocking_wrapper( + "igd tick", + move || { + let mut inner = this.inner.lock(); + + // Process full renewals + for (k, v) in full_renews { + // Get local ip for address type + let local_ip = match Self::get_local_ip(&mut inner, k.address_type) { + Some(ip) => ip, + None => { + return Err(eyre!("local ip missing for address type")); + } + }; + + // Get gateway for interface + let gw = match Self::get_gateway(&mut inner, local_ip) { + Some(gw) => gw, + None => { + return Err(eyre!("gateway missing for interface")); + } + }; + + // Delete the mapping if it exists, ignore any errors here + let _ = gw.remove_port(convert_protocol_type(k.protocol_type), v.mapped_port); + inner.port_maps.remove(&k); + + let desc = this.get_description(k.protocol_type, k.local_port); + match gw.add_any_port( + 
convert_protocol_type(k.protocol_type), + SocketAddr::new(local_ip, k.local_port), + (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, + &desc, + ) { + Ok(mapped_port) => { + log_net!(debug "full-renewed mapped port {:?} -> {:?}", v, k); + inner.port_maps.insert( + k, + PortMapValue { + ext_ip: v.ext_ip, + mapped_port, + timestamp: get_timestamp(), + renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 + * 1000u64, + renewal_attempts: 0, + }, + ); + } + Err(e) => { + info!("failed to full-renew mapped port {:?} -> {:?}: {}", v, k, e); + + // Must restart network now :( + return Ok(false); + } + }; + } + // Process normal renewals + for (k, mut v) in renews { + // Get local ip for address type + let local_ip = match Self::get_local_ip(&mut inner, k.address_type) { + Some(ip) => ip, + None => { + return Err(eyre!("local ip missing for address type")); + } + }; + + // Get gateway for interface + let gw = match Self::get_gateway(&mut inner, local_ip) { + Some(gw) => gw, + None => { + return Err(eyre!("gateway missing for address type")); + } + }; + + let desc = this.get_description(k.protocol_type, k.local_port); + match gw.add_port( + convert_protocol_type(k.protocol_type), + v.mapped_port, + SocketAddr::new(local_ip, k.local_port), + (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, + &desc, + ) { + Ok(()) => { + log_net!("renewed mapped port {:?} -> {:?}", v, k); + + inner.port_maps.insert( + k, + PortMapValue { + ext_ip: v.ext_ip, + mapped_port: v.mapped_port, + timestamp: get_timestamp(), + renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 + * 1000u64, + renewal_attempts: 0, + }, + ); + } + Err(e) => { + log_net!(debug "failed to renew mapped port {:?} -> {:?}: {}", v, k, e); + + // Get closer to the maximum renewal timeline by a factor of two each time + v.renewal_lifetime = + (v.renewal_lifetime + UPNP_MAPPING_LIFETIME_US) / 2u64; + v.renewal_attempts += 1; + + // Store new value to try again + inner.port_maps.insert(k, v); + } + }; + } + + // Normal exit, no restart + 
Ok(true) + }, + Err(eyre!("failed to process blocking task")), + ) + .instrument(tracing::trace_span!("igd tick fut")) + .await + } + + ///////////////////////////////////////////////////////////////////// + // Private Implementation + + #[instrument(level = "trace", target = "net", skip_all)] + fn get_routed_local_ip_address(address_type: IGDAddressType) -> Option { let socket = match UdpSocket::bind(match address_type { - AddressType::IPV4 => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), - AddressType::IPV6 => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), + IGDAddressType::IPV4 => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + IGDAddressType::IPV6 => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), }) { Ok(s) => s, Err(e) => { @@ -75,8 +401,8 @@ impl IGDManager { // using google's dns, but it wont actually send any packets to it socket .connect(match address_type { - AddressType::IPV4 => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 80), - AddressType::IPV6 => SocketAddr::new( + IGDAddressType::IPV4 => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8)), 80), + IGDAddressType::IPV6 => SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888)), 80, ), @@ -91,7 +417,7 @@ impl IGDManager { } #[instrument(level = "trace", target = "net", skip_all)] - fn find_local_ip(inner: &mut IGDManagerInner, address_type: AddressType) -> Option { + fn find_local_ip(inner: &mut IGDManagerInner, address_type: IGDAddressType) -> Option { if let Some(ip) = inner.local_ip_addrs.get(&address_type) { return Some(*ip); } @@ -109,7 +435,7 @@ impl IGDManager { } #[instrument(level = "trace", target = "net", skip_all)] - fn get_local_ip(inner: &mut IGDManagerInner, address_type: AddressType) -> Option { + fn get_local_ip(inner: &mut IGDManagerInner, address_type: IGDAddressType) -> Option { if let Some(ip) = inner.local_ip_addrs.get(&address_type) { return Some(*ip); } @@ -164,304 +490,10 @@ impl IGDManager { None } - 
fn get_description(&self, llpt: LowLevelProtocolType, local_port: u16) -> String { + fn get_description(&self, protocol_type: IGDProtocolType, local_port: u16) -> String { format!( "{} map {} for port {}", - self.config.get().program_name, - convert_llpt(llpt), - local_port + self.program_name, protocol_type, local_port ) } - - #[instrument(level = "trace", target = "net", skip_all)] - pub async fn unmap_port( - &self, - llpt: LowLevelProtocolType, - at: AddressType, - mapped_port: u16, - ) -> Option<()> { - let this = self.clone(); - blocking_wrapper( - "igd unmap_port", - move || { - let mut inner = this.inner.lock(); - - // If we already have this port mapped, just return the existing portmap - let mut found = None; - for (pmk, pmv) in &inner.port_maps { - if pmk.llpt == llpt && pmk.at == at && pmv.mapped_port == mapped_port { - found = Some(*pmk); - break; - } - } - let pmk = found?; - let _pmv = inner - .port_maps - .remove(&pmk) - .expect("key found but remove failed"); - - // Get local ip address - let local_ip = Self::find_local_ip(&mut inner, at)?; - - // Find gateway - let gw = Self::find_gateway(&mut inner, local_ip)?; - - // Unmap port - match gw.remove_port(convert_llpt(llpt), mapped_port) { - Ok(()) => (), - Err(e) => { - // Failed to map external port - log_net!(debug "upnp failed to remove external port: {}", e); - return None; - } - }; - Some(()) - }, - None, - ) - .await - } - - #[instrument(level = "trace", target = "net", skip_all)] - pub async fn map_any_port( - &self, - llpt: LowLevelProtocolType, - at: AddressType, - local_port: u16, - expected_external_address: Option, - ) -> Option { - let this = self.clone(); - blocking_wrapper("igd map_any_port", move || { - let mut inner = this.inner.lock(); - - // If we already have this port mapped, just return the existing portmap - let pmkey = PortMapKey { - llpt, - at, - local_port, - }; - if let Some(pmval) = inner.port_maps.get(&pmkey) { - return Some(SocketAddr::new(pmval.ext_ip, 
pmval.mapped_port)); - } - - // Get local ip address - let local_ip = Self::find_local_ip(&mut inner, at)?; - - // Find gateway - let gw = Self::find_gateway(&mut inner, local_ip)?; - - // Get external address - let ext_ip = match gw.get_external_ip() { - Ok(ip) => ip, - Err(e) => { - log_net!(debug "couldn't get external ip from igd: {}", e); - return None; - } - }; - - // Ensure external IP matches address type - if ext_ip.is_ipv4() && at != AddressType::IPV4 { - log_net!(debug "mismatched ip address type from igd, wanted v4, got v6"); - return None; - } else if ext_ip.is_ipv6() && at != AddressType::IPV6 { - log_net!(debug "mismatched ip address type from igd, wanted v6, got v4"); - return None; - } - - if let Some(expected_external_address) = expected_external_address { - if ext_ip != expected_external_address { - log_net!(debug "gateway external address does not match calculated external address: expected={} vs gateway={}", expected_external_address, ext_ip); - return None; - } - } - - // Map any port - let desc = this.get_description(llpt, local_port); - let mapped_port = match gw.add_any_port(convert_llpt(llpt), SocketAddr::new(local_ip, local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) { - Ok(mapped_port) => mapped_port, - Err(e) => { - // Failed to map external port - log_net!(debug "upnp failed to map external port: {}", e); - return None; - } - }; - - // Add to mapping list to keep alive - let timestamp = Timestamp::now(); - inner.port_maps.insert(PortMapKey { - llpt, - at, - local_port, - }, PortMapValue { - ext_ip, - mapped_port, - timestamp, - renewal_lifetime: ((UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64).into(), - renewal_attempts: 0, - }); - - // Succeeded, return the externally mapped port - Some(SocketAddr::new(ext_ip, mapped_port)) - }, None) - .await - } - - #[instrument( - level = "trace", - target = "net", - name = "IGDManager::tick", - skip_all, - err - )] - pub async fn tick(&self) -> EyreResult { - // Refresh mappings if we 
have them - // If an error is received, then return false to restart the local network - let mut full_renews: Vec<(PortMapKey, PortMapValue)> = Vec::new(); - let mut renews: Vec<(PortMapKey, PortMapValue)> = Vec::new(); - { - let inner = self.inner.lock(); - let now = Timestamp::now(); - - for (k, v) in &inner.port_maps { - let mapping_lifetime = now.saturating_sub(v.timestamp); - if mapping_lifetime >= UPNP_MAPPING_LIFETIME_US - || v.renewal_attempts >= UPNP_MAPPING_ATTEMPTS - { - // Past expiration time or tried N times, do a full renew and fail out if we can't - full_renews.push((*k, *v)); - } else if mapping_lifetime >= v.renewal_lifetime { - // Attempt a normal renewal - renews.push((*k, *v)); - } - } - - // See if we need to do some blocking operations - if full_renews.is_empty() && renews.is_empty() { - // Just return now since there's nothing to renew - return Ok(true); - } - } - - let this = self.clone(); - blocking_wrapper( - "igd tick", - move || { - let mut inner = this.inner.lock(); - - // Process full renewals - for (k, v) in full_renews { - // Get local ip for address type - let local_ip = match Self::get_local_ip(&mut inner, k.at) { - Some(ip) => ip, - None => { - return Err(eyre!("local ip missing for address type")); - } - }; - - // Get gateway for interface - let gw = match Self::get_gateway(&mut inner, local_ip) { - Some(gw) => gw, - None => { - return Err(eyre!("gateway missing for interface")); - } - }; - - // Delete the mapping if it exists, ignore any errors here - let _ = gw.remove_port(convert_llpt(k.llpt), v.mapped_port); - inner.port_maps.remove(&k); - - let desc = this.get_description(k.llpt, k.local_port); - match gw.add_any_port( - convert_llpt(k.llpt), - SocketAddr::new(local_ip, k.local_port), - (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, - &desc, - ) { - Ok(mapped_port) => { - log_net!(debug "full-renewed mapped port {:?} -> {:?}", v, k); - inner.port_maps.insert( - k, - PortMapValue { - ext_ip: v.ext_ip, - mapped_port, - timestamp: 
Timestamp::now(), - renewal_lifetime: TimestampDuration::new( - (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64, - ), - renewal_attempts: 0, - }, - ); - } - Err(e) => { - info!("failed to full-renew mapped port {:?} -> {:?}: {}", v, k, e); - - // Must restart network now :( - return Ok(false); - } - }; - } - // Process normal renewals - for (k, mut v) in renews { - // Get local ip for address type - let local_ip = match Self::get_local_ip(&mut inner, k.at) { - Some(ip) => ip, - None => { - return Err(eyre!("local ip missing for address type")); - } - }; - - // Get gateway for interface - let gw = match Self::get_gateway(&mut inner, local_ip) { - Some(gw) => gw, - None => { - return Err(eyre!("gateway missing for address type")); - } - }; - - let desc = this.get_description(k.llpt, k.local_port); - match gw.add_port( - convert_llpt(k.llpt), - v.mapped_port, - SocketAddr::new(local_ip, k.local_port), - (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, - &desc, - ) { - Ok(()) => { - log_net!("renewed mapped port {:?} -> {:?}", v, k); - - inner.port_maps.insert( - k, - PortMapValue { - ext_ip: v.ext_ip, - mapped_port: v.mapped_port, - timestamp: Timestamp::now(), - renewal_lifetime: ((UPNP_MAPPING_LIFETIME_MS / 2) as u64 - * 1000u64) - .into(), - renewal_attempts: 0, - }, - ); - } - Err(e) => { - log_net!(debug "failed to renew mapped port {:?} -> {:?}: {}", v, k, e); - - // Get closer to the maximum renewal timeline by a factor of two each time - v.renewal_lifetime = - (v.renewal_lifetime + UPNP_MAPPING_LIFETIME_US) / 2u64; - v.renewal_attempts += 1; - - // Store new value to try again - inner.port_maps.insert(k, v); - } - }; - } - - // Normal exit, no restart - Ok(true) - }, - Err(eyre!("failed to process blocking task")), - ) - .instrument(tracing::trace_span!("igd tick fut")) - .await - } } diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index 5308cbb8..1d639890 100644 --- 
a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -113,16 +113,13 @@ struct NetworkInner { network_state: Option, } -struct NetworkUnlockedInner { +pub(super) struct NetworkUnlockedInner { // Startup lock startup_lock: StartupLock, - // Accessors - routing_table: RoutingTable, - network_manager: NetworkManager, - connection_manager: ConnectionManager, // Network interfaces: NetworkInterfaces, + // Background processes update_network_class_task: TickTask, network_interfaces_task: TickTask, @@ -135,11 +132,21 @@ struct NetworkUnlockedInner { #[derive(Clone)] pub(super) struct Network { - config: VeilidConfig, + registry: VeilidComponentRegistry, inner: Arc>, unlocked_inner: Arc, } +impl_veilid_component_registry_accessor!(Network); + +impl core::ops::Deref for Network { + type Target = NetworkUnlockedInner; + + fn deref(&self) -> &Self::Target { + &self.unlocked_inner + } +} + impl Network { fn new_inner() -> NetworkInner { NetworkInner { @@ -161,17 +168,11 @@ impl Network { } } - fn new_unlocked_inner( - network_manager: NetworkManager, - routing_table: RoutingTable, - connection_manager: ConnectionManager, - ) -> NetworkUnlockedInner { - let config = network_manager.config(); + fn new_unlocked_inner(registry: VeilidComponentRegistry) -> NetworkUnlockedInner { + let config = registry.config(); + let program_name = config.get().program_name.clone(); NetworkUnlockedInner { startup_lock: StartupLock::new(), - network_manager, - routing_table, - connection_manager, interfaces: NetworkInterfaces::new(), update_network_class_task: TickTask::new( "update_network_class_task", @@ -183,23 +184,15 @@ impl Network { ), upnp_task: TickTask::new("upnp_task", UPNP_TASK_TICK_PERIOD_SECS), network_task_lock: AsyncMutex::new(()), - igd_manager: igd_manager::IGDManager::new(config.clone()), + igd_manager: igd_manager::IGDManager::new(program_name), } } - pub fn new( - network_manager: NetworkManager, - routing_table: RoutingTable, - 
connection_manager: ConnectionManager, - ) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { let this = Self { - config: network_manager.config(), inner: Arc::new(Mutex::new(Self::new_inner())), - unlocked_inner: Arc::new(Self::new_unlocked_inner( - network_manager, - routing_table, - connection_manager, - )), + unlocked_inner: Arc::new(Self::new_unlocked_inner(registry.clone())), + registry, }; this.setup_tasks(); @@ -207,18 +200,6 @@ impl Network { this } - fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() - } - - fn routing_table(&self) -> RoutingTable { - self.unlocked_inner.routing_table.clone() - } - - fn connection_manager(&self) -> ConnectionManager { - self.unlocked_inner.connection_manager.clone() - } - fn load_certs(path: &Path) -> io::Result> { let cvec = certs(&mut BufReader::new(File::open(path)?)) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid TLS certificate"))?; @@ -248,7 +229,8 @@ impl Network { } fn load_server_config(&self) -> io::Result { - let c = self.config.get(); + let config = self.config(); + let c = config.get(); // log_net!( "loading certificate from {}", @@ -288,7 +270,10 @@ impl Network { if !from.ip().is_unspecified() { vec![from] } else { - let addrs = self.last_network_state().stable_interface_addresses; + let addrs = self + .last_network_state() + .unwrap() + .stable_interface_addresses; addrs .iter() .filter_map(|a| { @@ -346,16 +331,15 @@ impl Network { dial_info: DialInfo, data: Vec, ) -> EyreResult> { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; self.record_dial_info_failure( dial_info.clone(), async move { let data_len = data.len(); - let connect_timeout_ms = { - let c = self.config.get(); - c.network.connection_initial_timeout_ms - }; + let connect_timeout_ms = self + .config() + .with(|c| c.network.connection_initial_timeout_ms); if self .network_manager() @@ -368,10 +352,12 @@ impl Network 
{ match dial_info.protocol_type() { ProtocolType::UDP => { let peer_socket_addr = dial_info.to_socket_addr(); - let h = - RawUdpProtocolHandler::new_unspecified_bound_handler(&peer_socket_addr) - .await - .wrap_err("create socket failure")?; + let h = RawUdpProtocolHandler::new_unspecified_bound_handler( + self.registry(), + &peer_socket_addr, + ) + .await + .wrap_err("create socket failure")?; let _ = network_result_try!(h .send_message(data, peer_socket_addr) .await @@ -423,16 +409,15 @@ impl Network { data: Vec, timeout_ms: u32, ) -> EyreResult>> { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; self.record_dial_info_failure( dial_info.clone(), async move { let data_len = data.len(); - let connect_timeout_ms = { - let c = self.config.get(); - c.network.connection_initial_timeout_ms - }; + let connect_timeout_ms = self + .config() + .with(|c| c.network.connection_initial_timeout_ms); if self .network_manager() @@ -445,10 +430,12 @@ impl Network { match dial_info.protocol_type() { ProtocolType::UDP => { let peer_socket_addr = dial_info.to_socket_addr(); - let h = - RawUdpProtocolHandler::new_unspecified_bound_handler(&peer_socket_addr) - .await - .wrap_err("create socket failure")?; + let h = RawUdpProtocolHandler::new_unspecified_bound_handler( + self.registry(), + &peer_socket_addr, + ) + .await + .wrap_err("create socket failure")?; network_result_try!(h .send_message(data, peer_socket_addr) .await @@ -539,7 +526,7 @@ impl Network { flow: Flow, data: Vec, ) -> EyreResult { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; let data_len = data.len(); @@ -573,7 +560,11 @@ impl Network { // Handle connection-oriented protocols // Try to send to the exact existing connection if one exists - if let Some(conn) = self.connection_manager().get_connection(flow) { + if let Some(conn) = self + .network_manager() + .connection_manager() + .get_connection(flow) + { // 
connection exists, send over it match conn.send_async(data).await { ConnectionHandleSendResult::Sent => { @@ -606,7 +597,7 @@ impl Network { dial_info: DialInfo, data: Vec, ) -> EyreResult> { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; self.record_dial_info_failure( dial_info.clone(), @@ -635,7 +626,8 @@ impl Network { } else { // Handle connection-oriented protocols let conn = network_result_try!( - self.connection_manager() + self.network_manager() + .connection_manager() .get_or_create_connection(dial_info.clone()) .await? ); @@ -678,14 +670,9 @@ impl Network { } // Start editing routing table - let mut editor_public_internet = self - .unlocked_inner - .routing_table - .edit_public_internet_routing_domain(); - let mut editor_local_network = self - .unlocked_inner - .routing_table - .edit_local_network_routing_domain(); + let routing_table = self.routing_table(); + let mut editor_public_internet = routing_table.edit_public_internet_routing_domain(); + let mut editor_local_network = routing_table.edit_local_network_routing_domain(); // Setup network editor_local_network.set_local_networks(network_state.local_networks); @@ -763,8 +750,8 @@ impl Network { #[instrument(level = "debug", err, skip_all)] pub(super) async fn register_all_dial_info( &self, - editor_public_internet: &mut RoutingDomainEditorPublicInternet, - editor_local_network: &mut RoutingDomainEditorLocalNetwork, + editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>, + editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>, ) -> EyreResult<()> { let Some(protocol_config) = ({ let inner = self.inner.lock(); @@ -798,7 +785,7 @@ impl Network { #[instrument(level = "debug", err, skip_all)] pub async fn startup(&self) -> EyreResult { - let guard = self.unlocked_inner.startup_lock.startup()?; + let guard = self.startup_lock.startup()?; match self.startup_internal().await { Ok(StartupDisposition::Success) => { @@ -824,7 +811,7 @@ 
impl Network { } pub fn is_started(&self) -> bool { - self.unlocked_inner.startup_lock.is_started() + self.startup_lock.is_started() } #[instrument(level = "debug", skip_all)] @@ -836,12 +823,6 @@ impl Network { async fn shutdown_internal(&self) { let routing_table = self.routing_table(); - // Stop all tasks - log_net!(debug "stopping update network class task"); - if let Err(e) = self.unlocked_inner.update_network_class_task.stop().await { - error!("update_network_class_task not cancelled: {}", e); - } - let mut unord = FuturesUnordered::new(); { let mut inner = self.inner.lock(); @@ -876,7 +857,7 @@ impl Network { #[instrument(level = "debug", skip_all)] pub async fn shutdown(&self) { log_net!(debug "starting low level network shutdown"); - let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { + let Ok(guard) = self.startup_lock.shutdown().await else { log_net!(debug "low level network is already shut down"); return; }; @@ -892,7 +873,7 @@ impl Network { &self, punishment: Option>, ) { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return; }; @@ -902,7 +883,7 @@ impl Network { } pub fn needs_public_dial_info_check(&self) -> bool { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return false; }; diff --git a/veilid-core/src/network_manager/native/network_state.rs b/veilid-core/src/network_manager/native/network_state.rs index 7670c4ed..0b8a7ca0 100644 --- a/veilid-core/src/network_manager/native/network_state.rs +++ b/veilid-core/src/network_manager/native/network_state.rs @@ -28,7 +28,7 @@ pub(super) struct NetworkState { impl Network { fn make_stable_interface_addresses(&self) -> Vec { - let addrs = self.unlocked_inner.interfaces.stable_addresses(); + let addrs = self.interfaces.stable_addresses(); let 
mut addrs: Vec = addrs .into_iter() .filter(|addr| { @@ -41,8 +41,8 @@ impl Network { addrs } - pub(super) fn last_network_state(&self) -> NetworkState { - self.inner.lock().network_state.clone().unwrap() + pub(super) fn last_network_state(&self) -> Option { + self.inner.lock().network_state.clone() } pub(super) fn is_stable_interface_address(&self, addr: IpAddr) -> bool { @@ -57,8 +57,7 @@ impl Network { pub(super) async fn make_network_state(&self) -> EyreResult { // refresh network interfaces - self.unlocked_inner - .interfaces + self.interfaces .refresh() .await .wrap_err("failed to refresh network interfaces")?; @@ -66,22 +65,20 @@ impl Network { // build the set of networks we should consider for the 'LocalNetwork' routing domain let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new(); - self.unlocked_inner - .interfaces - .with_interfaces(|interfaces| { - for intf in interfaces.values() { - // Skip networks that we should never encounter - if intf.is_loopback() || !intf.is_running() { - continue; - } - // Add network to local networks table - for addr in &intf.addrs { - let netmask = addr.if_addr().netmask(); - let network_ip = ipaddr_apply_netmask(addr.if_addr().ip(), netmask); - local_networks.insert((network_ip, netmask)); - } + self.interfaces.with_interfaces(|interfaces| { + for intf in interfaces.values() { + // Skip networks that we should never encounter + if intf.is_loopback() || !intf.is_running() { + continue; } - }); + // Add network to local networks table + for addr in &intf.addrs { + let netmask = addr.if_addr().netmask(); + let network_ip = ipaddr_apply_netmask(addr.if_addr().ip(), netmask); + local_networks.insert((network_ip, netmask)); + } + } + }); let mut local_networks: Vec<(IpAddr, IpAddr)> = local_networks.into_iter().collect(); local_networks.sort(); @@ -107,7 +104,8 @@ impl Network { // Get protocol config let protocol_config = { - let c = self.config.get(); + let config = self.config(); + let c = config.get(); let mut 
inbound = ProtocolTypeSet::new(); if c.network.protocol.udp.enabled { diff --git a/veilid-core/src/network_manager/native/network_tcp.rs b/veilid-core/src/network_manager/native/network_tcp.rs index e3820146..c14e5e51 100644 --- a/veilid-core/src/network_manager/native/network_tcp.rs +++ b/veilid-core/src/network_manager/native/network_tcp.rs @@ -1,6 +1,5 @@ use super::*; use async_tls::TlsAcceptor; -use sockets::*; use stop_token::future::FutureExt; ///////////////////////////////////////////////////////////////// @@ -122,8 +121,11 @@ impl Network { } }; // Check to see if it is punished - let address_filter = self.network_manager().address_filter(); - if address_filter.is_ip_addr_punished(peer_addr.ip()) { + if self + .network_manager() + .address_filter() + .is_ip_addr_punished(peer_addr.ip()) + { return; } @@ -135,39 +137,12 @@ impl Network { } }; - #[cfg(all(feature = "rt-async-std", unix))] + if let Err(e) = set_tcp_stream_linger(&tcp_stream, Some(core::time::Duration::from_secs(0))) { - // async-std does not directly support linger on TcpStream yet - use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd}; - if let Err(e) = unsafe { - let s = socket2::Socket::from_raw_fd(tcp_stream.as_raw_fd()); - let res = s.set_linger(Some(core::time::Duration::from_secs(0))); - s.into_raw_fd(); - res - } { - log_net!(debug "Couldn't set TCP linger: {}", e); - return; - } - } - #[cfg(all(feature = "rt-async-std", windows))] - { - // async-std does not directly support linger on TcpStream yet - use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket}; - if let Err(e) = unsafe { - let s = socket2::Socket::from_raw_socket(tcp_stream.as_raw_socket()); - let res = s.set_linger(Some(core::time::Duration::from_secs(0))); - s.into_raw_socket(); - res - } { - log_net!(debug "Couldn't set TCP linger: {}", e); - return; - } - } - #[cfg(not(feature = "rt-async-std"))] - if let Err(e) = tcp_stream.set_linger(Some(core::time::Duration::from_secs(0))) { log_net!(debug "Couldn't set 
TCP linger: {}", e); return; } + if let Err(e) = tcp_stream.set_nodelay(true) { log_net!(debug "Couldn't set TCP nodelay: {}", e); return; @@ -249,49 +224,19 @@ impl Network { #[instrument(level = "trace", skip_all)] async fn spawn_socket_listener(&self, addr: SocketAddr) -> EyreResult { // Get config - let (connection_initial_timeout_ms, tls_connection_initial_timeout_ms) = { - let c = self.config.get(); - ( - c.network.connection_initial_timeout_ms, - c.network.tls.connection_initial_timeout_ms, - ) - }; - - // Create a socket and bind it - let Some(socket) = new_bound_default_tcp_socket(addr) - .wrap_err("failed to create default socket listener")? - else { - return Ok(false); - }; - - // Drop the socket - drop(socket); + let (connection_initial_timeout_ms, tls_connection_initial_timeout_ms) = + self.config().with(|c| { + ( + c.network.connection_initial_timeout_ms, + c.network.tls.connection_initial_timeout_ms, + ) + }); // Create a shared socket and bind it once we have determined the port is free - let Some(socket) = new_bound_shared_tcp_socket(addr) - .wrap_err("failed to create shared socket listener")? - else { + let Some(listener) = bind_async_tcp_listener(addr)? else { return Ok(false); }; - // Listen on the socket - if socket.listen(128).is_err() { - return Ok(false); - } - - // Make an async tcplistener from the socket2 socket - let std_listener: std::net::TcpListener = socket.into(); - cfg_if! 
{ - if #[cfg(feature="rt-async-std")] { - let listener = TcpListener::from(std_listener); - } else if #[cfg(feature="rt-tokio")] { - std_listener.set_nonblocking(true).expect("failed to set nonblocking"); - let listener = TcpListener::from_std(std_listener).wrap_err("failed to create tokio tcp listener")?; - } else { - compile_error!("needs executor implementation"); - } - } - log_net!(debug "spawn_socket_listener: binding successful to {}", addr); // Create protocol handler records @@ -304,22 +249,14 @@ impl Network { // Spawn the socket task let this = self.clone(); let stop_token = self.inner.lock().stop_source.as_ref().unwrap().token(); - let connection_manager = self.connection_manager(); + let connection_manager = self.network_manager().connection_manager(); //////////////////////////////////////////////////////////// let jh = spawn(&format!("TCP listener {}", addr), async move { // moves listener object in and get incoming iterator // when this task exists, the listener will close the socket - cfg_if! 
{ - if #[cfg(feature="rt-async-std")] { - let incoming_stream = listener.incoming(); - } else if #[cfg(feature="rt-tokio")] { - let incoming_stream = tokio_stream::wrappers::TcpListenerStream::new(listener); - } else { - compile_error!("needs executor implementation"); - } - } + let incoming_stream = async_tcp_listener_incoming(listener); let _ = incoming_stream .for_each_concurrent(None, |tcp_stream| { diff --git a/veilid-core/src/network_manager/native/network_udp.rs b/veilid-core/src/network_manager/native/network_udp.rs index 575f04e1..ad4c18fa 100644 --- a/veilid-core/src/network_manager/native/network_udp.rs +++ b/veilid-core/src/network_manager/native/network_udp.rs @@ -1,15 +1,13 @@ use super::*; -use sockets::*; use stop_token::future::FutureExt; impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn create_udp_listener_tasks(&self) -> EyreResult<()> { // Spawn socket tasks - let mut task_count = { - let c = self.config.get(); - c.network.protocol.udp.socket_pool_size - }; + let mut task_count = self + .config() + .with(|c| c.network.protocol.udp.socket_pool_size); if task_count == 0 { task_count = get_concurrency() / 2; if task_count == 0 { @@ -38,7 +36,6 @@ impl Network { // Spawn a local async task for each socket let mut protocol_handlers_unordered = FuturesUnordered::new(); - let network_manager = this.network_manager(); let stop_token = { let inner = this.inner.lock(); if inner.stop_source.is_none() { @@ -49,7 +46,7 @@ impl Network { }; for ph in protocol_handlers { - let network_manager = network_manager.clone(); + let network_manager = this.network_manager(); let stop_token = stop_token.clone(); let ph_future = async move { let mut data = vec![0u8; 65536]; @@ -114,28 +111,14 @@ impl Network { async fn create_udp_protocol_handler(&self, addr: SocketAddr) -> EyreResult { log_net!(debug "create_udp_protocol_handler on {:?}", &addr); - // Create a reusable socket - let Some(socket) = new_bound_default_udp_socket(addr)? 
else { + // Create a single-address-family UDP socket with default options bound to an address + let Some(udp_socket) = bind_async_udp_socket(addr)? else { return Ok(false); }; - - // Make an async UdpSocket from the socket2 socket - let std_udp_socket: std::net::UdpSocket = socket.into(); - cfg_if! { - if #[cfg(feature="rt-async-std")] { - let udp_socket = UdpSocket::from(std_udp_socket); - } else if #[cfg(feature="rt-tokio")] { - std_udp_socket.set_nonblocking(true).expect("failed to set nonblocking"); - let udp_socket = UdpSocket::from_std(std_udp_socket).wrap_err("failed to make inbound tokio udpsocket")?; - } else { - compile_error!("needs executor implementation"); - } - } let socket_arc = Arc::new(udp_socket); // Create protocol handler - let protocol_handler = - RawUdpProtocolHandler::new(socket_arc, Some(self.network_manager().address_filter())); + let protocol_handler = RawUdpProtocolHandler::new(self.registry(), socket_arc); // Record protocol handler let mut inner = self.inner.lock(); diff --git a/veilid-core/src/network_manager/native/protocol/mod.rs b/veilid-core/src/network_manager/native/protocol/mod.rs index eaef2aa1..60d93179 100644 --- a/veilid-core/src/network_manager/native/protocol/mod.rs +++ b/veilid-core/src/network_manager/native/protocol/mod.rs @@ -1,4 +1,3 @@ -pub mod sockets; pub mod tcp; pub mod udp; pub mod wrtc; @@ -22,7 +21,7 @@ impl ProtocolNetworkConnection { local_address: Option, dial_info: &DialInfo, timeout_ms: u32, - address_filter: AddressFilter, + address_filter: &AddressFilter, ) -> io::Result> { if address_filter.is_ip_addr_punished(dial_info.address().ip_addr()) { return Ok(NetworkResult::no_connection_other("punished")); diff --git a/veilid-core/src/network_manager/native/protocol/sockets.rs b/veilid-core/src/network_manager/native/protocol/sockets.rs deleted file mode 100644 index 755ec6d1..00000000 --- a/veilid-core/src/network_manager/native/protocol/sockets.rs +++ /dev/null @@ -1,191 +0,0 @@ -use crate::*; -use 
async_io::Async; -use std::io; - -cfg_if! { - if #[cfg(feature="rt-async-std")] { - pub use async_std::net::{TcpStream, TcpListener, UdpSocket}; - } else if #[cfg(feature="rt-tokio")] { - pub use tokio::net::{TcpStream, TcpListener, UdpSocket}; - pub use tokio_util::compat::*; - } else { - compile_error!("needs executor implementation"); - } -} - -use socket2::{Domain, Protocol, SockAddr, Socket, Type}; - -// cfg_if! { -// if #[cfg(windows)] { -// use winapi::shared::ws2def::{ SOL_SOCKET, SO_EXCLUSIVEADDRUSE}; -// use winapi::um::winsock2::{SOCKET_ERROR, setsockopt}; -// use winapi::ctypes::c_int; -// use std::os::windows::io::AsRawSocket; - -// fn set_exclusiveaddruse(socket: &Socket) -> io::Result<()> { -// unsafe { -// let optval:c_int = 1; -// if setsockopt(socket.as_raw_socket().try_into().unwrap(), SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (&optval as *const c_int).cast(), -// std::mem::size_of::() as c_int) == SOCKET_ERROR { -// return Err(io::Error::last_os_error()); -// } -// Ok(()) -// } -// } -// } -// } - -#[instrument(level = "trace", ret)] -pub fn new_shared_udp_socket(domain: Domain) -> io::Result { - let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?; - if domain == Domain::IPV6 { - socket.set_only_v6(true)?; - } - socket.set_reuse_address(true)?; - - cfg_if! 
{ - if #[cfg(unix)] { - socket.set_reuse_port(true)?; - } - } - Ok(socket) -} - -#[instrument(level = "trace", ret)] -pub fn new_default_udp_socket(domain: Domain) -> io::Result { - let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?; - if domain == Domain::IPV6 { - socket.set_only_v6(true)?; - } - - Ok(socket) -} - -#[instrument(level = "trace", ret)] -pub fn new_bound_default_udp_socket(local_address: SocketAddr) -> io::Result> { - let domain = Domain::for_address(local_address); - let socket = new_default_udp_socket(domain)?; - let socket2_addr = SockAddr::from(local_address); - - if socket.bind(&socket2_addr).is_err() { - return Ok(None); - } - - log_net!("created bound default udp socket on {:?}", &local_address); - - Ok(Some(socket)) -} - -#[instrument(level = "trace", ret)] -pub fn new_default_tcp_socket(domain: Domain) -> io::Result { - let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?; - if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) { - log_net!(error "Couldn't set TCP linger: {}", e); - } - if let Err(e) = socket.set_nodelay(true) { - log_net!(error "Couldn't set TCP nodelay: {}", e); - } - if domain == Domain::IPV6 { - socket.set_only_v6(true)?; - } - Ok(socket) -} - -#[instrument(level = "trace", ret)] -pub fn new_shared_tcp_socket(domain: Domain) -> io::Result { - let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?; - if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) { - log_net!(error "Couldn't set TCP linger: {}", e); - } - if let Err(e) = socket.set_nodelay(true) { - log_net!(error "Couldn't set TCP nodelay: {}", e); - } - if domain == Domain::IPV6 { - socket.set_only_v6(true)?; - } - socket.set_reuse_address(true)?; - cfg_if! 
{ - if #[cfg(unix)] { - socket.set_reuse_port(true)?; - } - } - - Ok(socket) -} -#[instrument(level = "trace", ret)] -pub fn new_bound_default_tcp_socket(local_address: SocketAddr) -> io::Result> { - let domain = Domain::for_address(local_address); - let socket = new_default_tcp_socket(domain)?; - let socket2_addr = SockAddr::from(local_address); - if socket.bind(&socket2_addr).is_err() { - return Ok(None); - } - - log_net!("created bound default tcp socket on {:?}", &local_address); - - Ok(Some(socket)) -} - -#[instrument(level = "trace", ret)] -pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result> { - let domain = Domain::for_address(local_address); - let socket = new_shared_tcp_socket(domain)?; - let socket2_addr = SockAddr::from(local_address); - if socket.bind(&socket2_addr).is_err() { - return Ok(None); - } - - log_net!("created bound shared tcp socket on {:?}", &local_address); - - Ok(Some(socket)) -} - -// Non-blocking connect is tricky when you want to start with a prepared socket -// Errors should not be logged as they are valid conditions for this function -#[instrument(level = "trace", ret)] -pub async fn nonblocking_connect( - socket: Socket, - addr: SocketAddr, - timeout_ms: u32, -) -> io::Result> { - // Set for non blocking connect - socket.set_nonblocking(true)?; - - // Make socket2 SockAddr - let socket2_addr = socket2::SockAddr::from(addr); - - // Connect to the remote address - match socket.connect(&socket2_addr) { - Ok(()) => Ok(()), - #[cfg(unix)] - Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => Ok(()), - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => Ok(()), - Err(e) => Err(e), - }?; - let async_stream = Async::new(std::net::TcpStream::from(socket))?; - - // The stream becomes writable when connected - timeout_or_try!( - timeout(timeout_ms, async_stream.writable().in_current_span()) - .await - .into_timeout_or() - .into_result()? 
- ); - - // Check low level error - let async_stream = match async_stream.get_ref().take_error()? { - None => Ok(async_stream), - Some(err) => Err(err), - }?; - - // Convert back to inner and then return async version - cfg_if! { - if #[cfg(feature="rt-async-std")] { - Ok(TimeoutOr::value(TcpStream::from(async_stream.into_inner()?))) - } else if #[cfg(feature="rt-tokio")] { - Ok(TimeoutOr::value(TcpStream::from_std(async_stream.into_inner()?)?)) - } else { - compile_error!("needs executor implementation"); - } - } -} diff --git a/veilid-core/src/network_manager/native/protocol/tcp.rs b/veilid-core/src/network_manager/native/protocol/tcp.rs index 4b741d85..4a032d49 100644 --- a/veilid-core/src/network_manager/native/protocol/tcp.rs +++ b/veilid-core/src/network_manager/native/protocol/tcp.rs @@ -1,6 +1,5 @@ use super::*; use futures_util::{AsyncReadExt, AsyncWriteExt}; -use sockets::*; pub struct RawTcpNetworkConnection { flow: Flow, @@ -157,32 +156,28 @@ impl RawTcpProtocolHandler { #[instrument(level = "trace", target = "protocol", err)] pub async fn connect( local_address: Option, - socket_addr: SocketAddr, + remote_address: SocketAddr, timeout_ms: u32, ) -> io::Result> { - // Make a shared socket - let socket = match local_address { - Some(a) => { - new_bound_shared_tcp_socket(a)?.ok_or(io::Error::from(io::ErrorKind::AddrInUse))? 
- } - None => new_default_tcp_socket(socket2::Domain::for_address(socket_addr))?, - }; - // Non-blocking connect to remote address - let ts = network_result_try!(nonblocking_connect(socket, socket_addr, timeout_ms) - .await - .folded()?); + let tcp_stream = network_result_try!(connect_async_tcp_stream( + local_address, + remote_address, + timeout_ms + ) + .await + .folded()?); // See what local address we ended up with and turn this into a stream - let actual_local_address = ts.local_addr()?; + let actual_local_address = tcp_stream.local_addr()?; #[cfg(feature = "rt-tokio")] - let ts = ts.compat(); - let ps = AsyncPeekStream::new(ts); + let tcp_stream = tcp_stream.compat(); + let ps = AsyncPeekStream::new(tcp_stream); // Wrap the stream in a network connection and return it let flow = Flow::new( PeerAddress::new( - SocketAddress::from_socket_addr(socket_addr), + SocketAddress::from_socket_addr(remote_address), ProtocolType::TCP, ), SocketAddress::from_socket_addr(actual_local_address), diff --git a/veilid-core/src/network_manager/native/protocol/udp.rs b/veilid-core/src/network_manager/native/protocol/udp.rs index 37d888ba..bc445273 100644 --- a/veilid-core/src/network_manager/native/protocol/udp.rs +++ b/veilid-core/src/network_manager/native/protocol/udp.rs @@ -1,19 +1,20 @@ use super::*; -use sockets::*; #[derive(Clone)] pub struct RawUdpProtocolHandler { + registry: VeilidComponentRegistry, socket: Arc, assembly_buffer: AssemblyBuffer, - address_filter: Option, } +impl_veilid_component_registry_accessor!(RawUdpProtocolHandler); + impl RawUdpProtocolHandler { - pub fn new(socket: Arc, address_filter: Option) -> Self { + pub fn new(registry: VeilidComponentRegistry, socket: Arc) -> Self { Self { + registry, socket, assembly_buffer: AssemblyBuffer::new(), - address_filter, } } @@ -24,10 +25,12 @@ impl RawUdpProtocolHandler { let (size, remote_addr) = network_result_value_or_log!(self.socket.recv_from(data).await.into_network_result()? 
=> continue); // Check to see if it is punished - if let Some(af) = self.address_filter.as_ref() { - if af.is_ip_addr_punished(remote_addr.ip()) { - continue; - } + if self + .network_manager() + .address_filter() + .is_ip_addr_punished(remote_addr.ip()) + { + continue; } // Insert into assembly buffer @@ -91,10 +94,12 @@ impl RawUdpProtocolHandler { } // Check to see if it is punished - if let Some(af) = self.address_filter.as_ref() { - if af.is_ip_addr_punished(remote_addr.ip()) { - return Ok(NetworkResult::no_connection_other("punished")); - } + if self + .network_manager() + .address_filter() + .is_ip_addr_punished(remote_addr.ip()) + { + return Ok(NetworkResult::no_connection_other("punished")); } // Fragment and send @@ -137,11 +142,13 @@ impl RawUdpProtocolHandler { #[instrument(level = "trace", target = "protocol", err)] pub async fn new_unspecified_bound_handler( + registry: VeilidComponentRegistry, socket_addr: &SocketAddr, ) -> io::Result { // get local wildcard address for bind let local_socket_addr = compatible_unspecified_socket_addr(socket_addr); - let socket = UdpSocket::bind(local_socket_addr).await?; - Ok(RawUdpProtocolHandler::new(Arc::new(socket), None)) + let socket = bind_async_udp_socket(local_socket_addr)? 
+ .ok_or(io::Error::from(io::ErrorKind::AddrInUse))?; + Ok(RawUdpProtocolHandler::new(registry, Arc::new(socket))) } } diff --git a/veilid-core/src/network_manager/native/protocol/ws.rs b/veilid-core/src/network_manager/native/protocol/ws.rs index f7aa768c..1809380c 100644 --- a/veilid-core/src/network_manager/native/protocol/ws.rs +++ b/veilid-core/src/network_manager/native/protocol/ws.rs @@ -10,7 +10,6 @@ use async_tungstenite::tungstenite::protocol::{frame::coding::CloseCode, CloseFr use async_tungstenite::tungstenite::Error; use async_tungstenite::{accept_hdr_async, client_async, WebSocketStream}; use futures_util::{AsyncRead, AsyncWrite, SinkExt}; -use sockets::*; // Maximum number of websocket request headers to permit const MAX_WS_HEADERS: usize = 24; @@ -316,21 +315,16 @@ impl WebsocketProtocolHandler { let domain = split_url.host.clone(); // Resolve remote address - let remote_socket_addr = dial_info.to_socket_addr(); - - // Make a shared socket - let socket = match local_address { - Some(a) => { - new_bound_shared_tcp_socket(a)?.ok_or(io::Error::from(io::ErrorKind::AddrInUse))? 
- } - None => new_default_tcp_socket(socket2::Domain::for_address(remote_socket_addr))?, - }; + let remote_address = dial_info.to_socket_addr(); // Non-blocking connect to remote address - let tcp_stream = - network_result_try!(nonblocking_connect(socket, remote_socket_addr, timeout_ms) - .await - .folded()?); + let tcp_stream = network_result_try!(connect_async_tcp_stream( + local_address, + remote_address, + timeout_ms + ) + .await + .folded()?); // See what local address we ended up with let actual_local_addr = tcp_stream.local_addr()?; diff --git a/veilid-core/src/network_manager/native/start_protocols.rs b/veilid-core/src/network_manager/native/start_protocols.rs index 140d5b15..9b12d3fa 100644 --- a/veilid-core/src/network_manager/native/start_protocols.rs +++ b/veilid-core/src/network_manager/native/start_protocols.rs @@ -140,14 +140,13 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn bind_udp_protocol_handlers(&self) -> EyreResult { log_net!("UDP: binding protocol handlers"); - let (listen_address, public_address, detect_address_changes) = { - let c = self.config.get(); + let (listen_address, public_address, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.udp.listen_address.clone(), c.network.protocol.udp.public_address.clone(), c.network.detect_address_changes, ) - }; + }); // Get the binding parameters from the user-specified listen address let bind_set = self @@ -187,18 +186,17 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn register_udp_dial_info( &self, - editor_public_internet: &mut RoutingDomainEditorPublicInternet, - editor_local_network: &mut RoutingDomainEditorLocalNetwork, + editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>, + editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>, ) -> EyreResult<()> { log_net!("UDP: registering dial info"); - let (public_address, detect_address_changes) = { - let c = self.config.get(); + let 
(public_address, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.udp.public_address.clone(), c.network.detect_address_changes, ) - }; + }); let local_dial_info_list = { let mut out = vec![]; @@ -263,14 +261,13 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn start_ws_listeners(&self) -> EyreResult { log_net!("WS: binding protocol handlers"); - let (listen_address, url, detect_address_changes) = { - let c = self.config.get(); + let (listen_address, url, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.ws.listen_address.clone(), c.network.protocol.ws.url.clone(), c.network.detect_address_changes, ) - }; + }); // Get the binding parameters from the user-specified listen address let bind_set = self @@ -313,18 +310,17 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn register_ws_dial_info( &self, - editor_public_internet: &mut RoutingDomainEditorPublicInternet, - editor_local_network: &mut RoutingDomainEditorLocalNetwork, + editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>, + editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>, ) -> EyreResult<()> { log_net!("WS: registering dial info"); - let (url, path, detect_address_changes) = { - let c = self.config.get(); + let (url, path, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.ws.url.clone(), c.network.protocol.ws.path.clone(), c.network.detect_address_changes, ) - }; + }); let mut registered_addresses: HashSet = HashSet::new(); @@ -409,14 +405,13 @@ impl Network { pub(super) async fn start_wss_listeners(&self) -> EyreResult { log_net!("WSS: binding protocol handlers"); - let (listen_address, url, detect_address_changes) = { - let c = self.config.get(); + let (listen_address, url, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.wss.listen_address.clone(), c.network.protocol.wss.url.clone(), c.network.detect_address_changes, ) - }; + }); // 
Get the binding parameters from the user-specified listen address let bind_set = self @@ -460,18 +455,17 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn register_wss_dial_info( &self, - editor_public_internet: &mut RoutingDomainEditorPublicInternet, - editor_local_network: &mut RoutingDomainEditorLocalNetwork, + editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>, + editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>, ) -> EyreResult<()> { log_net!("WSS: registering dialinfo"); - let (url, _detect_address_changes) = { - let c = self.config.get(); + let (url, _detect_address_changes) = self.config().with(|c| { ( c.network.protocol.wss.url.clone(), c.network.detect_address_changes, ) - }; + }); // NOTE: No interface dial info for WSS, as there is no way to connect to a local dialinfo via TLS // If the hostname is specified, it is the public dialinfo via the URL. If no hostname @@ -520,14 +514,13 @@ impl Network { pub(super) async fn start_tcp_listeners(&self) -> EyreResult { log_net!("TCP: binding protocol handlers"); - let (listen_address, public_address, detect_address_changes) = { - let c = self.config.get(); + let (listen_address, public_address, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.tcp.listen_address.clone(), c.network.protocol.tcp.public_address.clone(), c.network.detect_address_changes, ) - }; + }); // Get the binding parameters from the user-specified listen address let bind_set = self @@ -570,18 +563,17 @@ impl Network { #[instrument(level = "trace", skip_all)] pub(super) async fn register_tcp_dial_info( &self, - editor_public_internet: &mut RoutingDomainEditorPublicInternet, - editor_local_network: &mut RoutingDomainEditorLocalNetwork, + editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>, + editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>, ) -> EyreResult<()> { log_net!("TCP: registering dialinfo"); - let (public_address, 
detect_address_changes) = { - let c = self.config.get(); + let (public_address, detect_address_changes) = self.config().with(|c| { ( c.network.protocol.tcp.public_address.clone(), c.network.detect_address_changes, ) - }; + }); let mut registered_addresses: HashSet = HashSet::new(); diff --git a/veilid-core/src/network_manager/native/tasks/mod.rs b/veilid-core/src/network_manager/native/tasks/mod.rs index ecce2cf6..ab7caa44 100644 --- a/veilid-core/src/network_manager/native/tasks/mod.rs +++ b/veilid-core/src/network_manager/native/tasks/mod.rs @@ -7,46 +7,41 @@ use super::*; impl Network { pub fn setup_tasks(&self) { // Set update network class tick task - { - let this = self.clone(); - self.unlocked_inner - .update_network_class_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().update_network_class_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + let this = self.clone(); + self.update_network_class_task.set_routine(move |s, l, t| { + let this = this.clone(); + Box::pin(async move { + this.update_network_class_task_routine(s, Timestamp::new(l), Timestamp::new(t)) + .await + }) + }); + // Set network interfaces tick task - { - let this = self.clone(); - self.unlocked_inner - .network_interfaces_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().network_interfaces_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + let this = self.clone(); + self.network_interfaces_task.set_routine(move |s, l, t| { + let this = this.clone(); + Box::pin(async move { + this.network_interfaces_task_routine(s, Timestamp::new(l), Timestamp::new(t)) + .await + }) + }); + // Set upnp tick task { let this = self.clone(); - self.unlocked_inner.upnp_task.set_routine(move |s, l, t| { - Box::pin( - this.clone() - .upnp_task_routine(s, Timestamp::new(l), Timestamp::new(t)), - ) + self.upnp_task.set_routine(move |s, l, t| { + let this = this.clone(); + Box::pin(async move { + this.upnp_task_routine(s, Timestamp::new(l), 
Timestamp::new(t)) + .await + }) }); } } #[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)] pub async fn tick(&self) -> EyreResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return Ok(()); }; @@ -65,7 +60,7 @@ impl Network { // If we need to figure out our network class, tick the task for it if detect_address_changes { // Check our network interfaces to see if they have changed - self.unlocked_inner.network_interfaces_task.tick().await?; + self.network_interfaces_task.tick().await?; // Check our public dial info to see if it has changed let public_internet_network_class = self @@ -95,16 +90,31 @@ impl Network { } if has_at_least_two { - self.unlocked_inner.update_network_class_task.tick().await?; + self.update_network_class_task.tick().await?; } } } // If we need to tick upnp, do it if upnp { - self.unlocked_inner.upnp_task.tick().await?; + self.upnp_task.tick().await?; } Ok(()) } + + pub async fn cancel_tasks(&self) { + log_net!(debug "stopping upnp task"); + if let Err(e) = self.upnp_task.stop().await { + warn!("upnp_task not stopped: {}", e); + } + log_net!(debug "stopping network interfaces task"); + if let Err(e) = self.network_interfaces_task.stop().await { + warn!("network_interfaces_task not stopped: {}", e); + } + log_net!(debug "stopping update network class task"); + if let Err(e) = self.update_network_class_task.stop().await { + warn!("update_network_class_task not stopped: {}", e); + } + } } diff --git a/veilid-core/src/network_manager/native/tasks/network_interfaces_task.rs b/veilid-core/src/network_manager/native/tasks/network_interfaces_task.rs index 69199683..79f57b5d 100644 --- a/veilid-core/src/network_manager/native/tasks/network_interfaces_task.rs +++ b/veilid-core/src/network_manager/native/tasks/network_interfaces_task.rs @@ -3,20 +3,28 @@ use super::*; impl Network { 
#[instrument(level = "trace", target = "net", skip_all, err)] pub(super) async fn network_interfaces_task_routine( - self, - _stop_token: StopToken, + &self, + stop_token: StopToken, _l: Timestamp, _t: Timestamp, ) -> EyreResult<()> { - let _guard = self.unlocked_inner.network_task_lock.lock().await; + // Network lock ensures only one task operating on the low level network state + // can happen at the same time. + let _guard = match self.network_task_lock.try_lock() { + Ok(v) => v, + Err(_) => { + // If we can't get the lock right now, then + return Ok(()); + } + }; - self.update_network_state().await?; + self.update_network_state(stop_token).await?; Ok(()) } // See if our interface addresses have changed, if so redo public dial info if necessary - async fn update_network_state(&self) -> EyreResult { + async fn update_network_state(&self, _stop_token: StopToken) -> EyreResult { let mut local_network_changed = false; let mut public_internet_changed = false; @@ -29,7 +37,7 @@ impl Network { } }; - if new_network_state != last_network_state { + if last_network_state.is_none() || new_network_state != last_network_state.unwrap() { // Save new network state { let mut inner = self.inner.lock(); @@ -37,17 +45,13 @@ impl Network { } // network state has changed - let mut editor_local_network = self - .unlocked_inner - .routing_table - .edit_local_network_routing_domain(); + let routing_table = self.routing_table(); + + let mut editor_local_network = routing_table.edit_local_network_routing_domain(); editor_local_network.set_local_networks(new_network_state.local_networks); editor_local_network.clear_dial_info_details(None, None); - let mut editor_public_internet = self - .unlocked_inner - .routing_table - .edit_public_internet_routing_domain(); + let mut editor_public_internet = routing_table.edit_public_internet_routing_domain(); // Update protocols self.register_all_dial_info(&mut editor_public_internet, &mut editor_local_network) diff --git 
a/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs b/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs index a4c56e3a..d27e63e8 100644 --- a/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs +++ b/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs @@ -8,12 +8,20 @@ type InboundProtocolMap = HashMap<(AddressType, LowLevelProtocolType, u16), Vec< impl Network { #[instrument(parent = None, level = "trace", skip(self), err)] pub async fn update_network_class_task_routine( - self, + &self, stop_token: StopToken, l: Timestamp, t: Timestamp, ) -> EyreResult<()> { - let _guard = self.unlocked_inner.network_task_lock.lock().await; + // Network lock ensures only one task operating on the low level network state + // can happen at the same time. + let _guard = match self.network_task_lock.try_lock() { + Ok(v) => v, + Err(_) => { + // If we can't get the lock right now, then + return Ok(()); + } + }; // Do the public dial info check let finished = self.do_public_dial_info_check(stop_token, l, t).await?; @@ -125,8 +133,9 @@ impl Network { }; // Save off existing public dial info for change detection later - let existing_public_dial_info: HashSet = self - .routing_table() + let routing_table = self.routing_table(); + + let existing_public_dial_info: HashSet = routing_table .all_filtered_dial_info_details( RoutingDomain::PublicInternet.into(), &DialInfoFilter::all(), @@ -135,7 +144,7 @@ impl Network { .collect(); // Set most permissive network config and start from scratch - let mut editor = self.routing_table().edit_public_internet_routing_domain(); + let mut editor = routing_table.edit_public_internet_routing_domain(); editor.setup_network( protocol_config.outbound, protocol_config.inbound, @@ -156,7 +165,7 @@ impl Network { port, }; context_configs.insert(dcc); - let discovery_context = DiscoveryContext::new(self.routing_table(), self.clone(), dcc); + let discovery_context = 
DiscoveryContext::new(self.registry(), dcc); discovery_context.discover(&mut unord).await; } @@ -247,22 +256,18 @@ impl Network { match protocol_type { ProtocolType::UDP => DialInfo::udp(addr), ProtocolType::TCP => DialInfo::tcp(addr), - ProtocolType::WS => { - let c = self.config.get(); - DialInfo::try_ws( - addr, - format!("ws://{}/{}", addr, c.network.protocol.ws.path), - ) - .unwrap() - } - ProtocolType::WSS => { - let c = self.config.get(); - DialInfo::try_wss( - addr, - format!("wss://{}/{}", addr, c.network.protocol.wss.path), - ) - .unwrap() - } + ProtocolType::WS => DialInfo::try_ws( + addr, + self.config() + .with(|c| format!("ws://{}/{}", addr, c.network.protocol.ws.path)), + ) + .unwrap(), + ProtocolType::WSS => DialInfo::try_wss( + addr, + self.config() + .with(|c| format!("wss://{}/{}", addr, c.network.protocol.wss.path)), + ) + .unwrap(), } } } diff --git a/veilid-core/src/network_manager/native/tasks/upnp_task.rs b/veilid-core/src/network_manager/native/tasks/upnp_task.rs index 0452705a..de0f1238 100644 --- a/veilid-core/src/network_manager/native/tasks/upnp_task.rs +++ b/veilid-core/src/network_manager/native/tasks/upnp_task.rs @@ -3,12 +3,12 @@ use super::*; impl Network { #[instrument(parent = None, level = "trace", target = "net", skip_all, err)] pub(super) async fn upnp_task_routine( - self, + &self, _stop_token: StopToken, _l: Timestamp, _t: Timestamp, ) -> EyreResult<()> { - if !self.unlocked_inner.igd_manager.tick().await? { + if !self.igd_manager.tick().await? { info!("upnp failed, restarting local network"); let mut inner = self.inner.lock(); inner.network_needs_restart = true; diff --git a/veilid-core/src/network_manager/network_connection.rs b/veilid-core/src/network_manager/network_connection.rs index 674f087d..9777d51e 100644 --- a/veilid-core/src/network_manager/network_connection.rs +++ b/veilid-core/src/network_manager/network_connection.rs @@ -4,7 +4,7 @@ use std::{io, sync::Arc}; use stop_token::prelude::*; cfg_if::cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { // No accept support for WASM } else { @@ -307,8 +307,7 @@ impl NetworkConnection { flow ); - let network_manager = connection_manager.network_manager(); - let address_filter = network_manager.address_filter(); + let registry = connection_manager.registry(); let mut unord = FuturesUnordered::new(); let mut need_receiver = true; let mut need_sender = true; @@ -364,14 +363,17 @@ impl NetworkConnection { // Add another message receiver future if necessary if need_receiver { need_receiver = false; + let registry = registry.clone(); let receiver_fut = Self::recv_internal(&protocol_connection, stats.clone()) .then(|res| async { + let registry = registry; + let network_manager = registry.network_manager(); match res { Ok(v) => { let peer_address = protocol_connection.flow().remote(); // Check to see if it is punished - if address_filter.is_ip_addr_punished(peer_address.socket_addr().ip()) { + if network_manager.address_filter().is_ip_addr_punished(peer_address.socket_addr().ip()) { return RecvLoopAction::Finish; } @@ -383,7 +385,7 @@ impl NetworkConnection { // Punish invalid framing (tcp framing or websocket framing) if v.is_invalid_message() { - address_filter.punish_ip_addr(peer_address.socket_addr().ip(), PunishmentReason::InvalidFraming); + network_manager.address_filter().punish_ip_addr(peer_address.socket_addr().ip(), PunishmentReason::InvalidFraming); return RecvLoopAction::Finish; } diff --git a/veilid-core/src/network_manager/receipt_manager.rs b/veilid-core/src/network_manager/receipt_manager.rs index d3a882eb..c81ac551 100644 --- a/veilid-core/src/network_manager/receipt_manager.rs +++ b/veilid-core/src/network_manager/receipt_manager.rs @@ -309,13 +309,7 @@ impl ReceiptManager { Ok(()) } - pub async fn shutdown(&self) { - log_net!(debug "starting receipt manager shutdown"); - let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { - 
log_net!(debug "receipt manager is already shut down"); - return; - }; - + pub async fn cancel_tasks(&self) { // Stop all tasks let timeout_task = { let mut inner = self.inner.lock(); @@ -329,6 +323,14 @@ impl ReceiptManager { if timeout_task.join().await.is_err() { panic!("joining timeout task failed"); } + } + + pub async fn shutdown(&self) { + log_net!(debug "starting receipt manager shutdown"); + let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { + log_net!(debug "receipt manager is already shut down"); + return; + }; *self.inner.lock() = Self::new_inner(); diff --git a/veilid-core/src/network_manager/send_data.rs b/veilid-core/src/network_manager/send_data.rs index 7577ef4b..4bda7e08 100644 --- a/veilid-core/src/network_manager/send_data.rs +++ b/veilid-core/src/network_manager/send_data.rs @@ -40,9 +40,10 @@ impl NetworkManager { destination_node_ref: FilteredNodeRef, data: Vec, ) -> SendPinBoxFuture>> { - let this = self.clone(); + let registry = self.registry(); Box::pin( async move { + let this = registry.network_manager(); // If we need to relay, do it let (contact_method, target_node_ref, opt_relayed_contact_method) = match possibly_relayed_contact_method.clone() { @@ -652,17 +653,14 @@ impl NetworkManager { data: Vec, ) -> EyreResult> { // Detect if network is stopping so we can break out of this - let Some(stop_token) = self.unlocked_inner.startup_lock.stop_token() else { + let Some(stop_token) = self.startup_context.startup_lock.stop_token() else { return Ok(NetworkResult::service_unavailable("network is stopping")); }; // Build a return receipt for the signal let receipt_timeout = TimestampDuration::new_ms( - self.unlocked_inner - .config - .get() - .network - .reverse_connection_receipt_time_ms as u64, + self.config() + .with(|c| c.network.reverse_connection_receipt_time_ms as u64), ); let (receipt, eventual_value) = self.generate_single_shot_receipt(receipt_timeout, [])?; @@ -763,7 +761,7 @@ impl NetworkManager { data: Vec, ) 
-> EyreResult> { // Detect if network is stopping so we can break out of this - let Some(stop_token) = self.unlocked_inner.startup_lock.stop_token() else { + let Some(stop_token) = self.startup_context.startup_lock.stop_token() else { return Ok(NetworkResult::service_unavailable("network is stopping")); }; @@ -776,11 +774,8 @@ impl NetworkManager { // Build a return receipt for the signal let receipt_timeout = TimestampDuration::new_ms( - self.unlocked_inner - .config - .get() - .network - .hole_punch_receipt_time_ms as u64, + self.config() + .with(|c| c.network.hole_punch_receipt_time_ms as u64), ); let (receipt, eventual_value) = self.generate_single_shot_receipt(receipt_timeout, [])?; diff --git a/veilid-core/src/network_manager/stats.rs b/veilid-core/src/network_manager/stats.rs index f8fc7a37..ac91a41f 100644 --- a/veilid-core/src/network_manager/stats.rs +++ b/veilid-core/src/network_manager/stats.rs @@ -1,7 +1,7 @@ use super::*; // Statistics per address -#[derive(Clone, Default)] +#[derive(Clone, Debug, Default)] pub struct PerAddressStats { pub last_seen_ts: Timestamp, pub transfer_stats_accounting: TransferStatsAccounting, @@ -18,7 +18,7 @@ impl Default for PerAddressStatsKey { } // Statistics about the low-level network -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct NetworkManagerStats { pub self_stats: PerAddressStats, pub per_address_stats: LruCache, @@ -116,12 +116,10 @@ impl NetworkManager { }) } - pub(super) fn send_network_update(&self) { - let update_cb = self.unlocked_inner.update_callback.read().clone(); - if update_cb.is_none() { - return; - } + pub fn send_network_update(&self) { + let update_cb = self.update_callback(); + let state = self.get_veilid_state(); - (update_cb.unwrap())(VeilidUpdate::Network(state)); + update_cb(VeilidUpdate::Network(state)); } } diff --git a/veilid-core/src/network_manager/tasks/mod.rs b/veilid-core/src/network_manager/tasks/mod.rs index 62242fd8..55a47714 100644 --- 
a/veilid-core/src/network_manager/tasks/mod.rs +++ b/veilid-core/src/network_manager/tasks/mod.rs @@ -5,48 +5,39 @@ use super::*; impl NetworkManager { pub fn setup_tasks(&self) { // Set rolling transfers tick task - { - let this = self.clone(); - self.unlocked_inner - .rolling_transfers_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().rolling_transfers_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + rolling_transfers_task, + rolling_transfers_task_routine + ); // Set address filter task { - let this = self.clone(); - self.unlocked_inner - .address_filter_task - .set_routine(move |s, l, t| { - Box::pin(this.address_filter().address_filter_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); + let registry = self.registry(); + self.address_filter_task.set_routine(move |s, l, t| { + let registry = registry.clone(); + Box::pin(async move { + registry + .network_manager() + .address_filter() + .address_filter_task_routine(s, Timestamp::new(l), Timestamp::new(t)) + .await + }) + }); } } #[instrument(level = "trace", name = "NetworkManager::tick", skip_all, err)] pub async fn tick(&self) -> EyreResult<()> { - let routing_table = self.routing_table(); let net = self.net(); let receipt_manager = self.receipt_manager(); // Run the rolling transfers task - self.unlocked_inner.rolling_transfers_task.tick().await?; + self.rolling_transfers_task.tick().await?; // Run the address filter task - self.unlocked_inner.address_filter_task.tick().await?; - - // Run the routing table tick - routing_table.tick().await?; + self.address_filter_task.tick().await?; // Run the low level network tick net.tick().await?; @@ -61,15 +52,21 @@ impl NetworkManager { } pub async fn cancel_tasks(&self) { + log_net!(debug "stopping receipt manager tasks"); + let receipt_manager = self.receipt_manager(); + receipt_manager.cancel_tasks().await; + + let net = self.net(); + net.cancel_tasks().await; + 
log_net!(debug "stopping rolling transfers task"); - if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await { + if let Err(e) = self.rolling_transfers_task.stop().await { warn!("rolling_transfers_task not stopped: {}", e); } - log_net!(debug "stopping routing table tasks"); - let routing_table = self.routing_table(); - routing_table.cancel_tasks().await; - - // other tasks will get cancelled via the 'shutdown' mechanism + log_net!(debug "stopping address filter task"); + if let Err(e) = self.address_filter_task.stop().await { + warn!("address_filter_task not stopped: {}", e); + } } } diff --git a/veilid-core/src/network_manager/tasks/rolling_transfers.rs b/veilid-core/src/network_manager/tasks/rolling_transfers.rs index d0d7d0b8..eba2def7 100644 --- a/veilid-core/src/network_manager/tasks/rolling_transfers.rs +++ b/veilid-core/src/network_manager/tasks/rolling_transfers.rs @@ -4,7 +4,7 @@ impl NetworkManager { // Compute transfer statistics for the low level network #[instrument(level = "trace", skip(self), err)] pub async fn rolling_transfers_task_routine( - self, + &self, _stop_token: StopToken, last_ts: Timestamp, cur_ts: Timestamp, diff --git a/veilid-core/src/network_manager/tests/test_connection_table.rs b/veilid-core/src/network_manager/tests/test_connection_table.rs index 1f20df65..02625a8a 100644 --- a/veilid-core/src/network_manager/tests/test_connection_table.rs +++ b/veilid-core/src/network_manager/tests/test_connection_table.rs @@ -1,13 +1,12 @@ use super::*; use super::connection_table::*; -use crate::tests::common::test_veilid_config::*; -use crate::tests::mock_routing_table; +use crate::tests::mock_registry; pub async fn test_add_get_remove() { - let config = get_config(); - let address_filter = AddressFilter::new(config.clone(), mock_routing_table()); - let table = ConnectionTable::new(config, address_filter); + let registry = mock_registry::init("").await; + + let table = ConnectionTable::new(registry.clone()); let a1 = 
Flow::new_no_local(PeerAddress::new( SocketAddress::new(Address::IPV4(Ipv4Addr::new(192, 168, 0, 1)), 8080), @@ -122,6 +121,8 @@ pub async fn test_add_get_remove() { a4 ); assert_eq!(table.connection_count(), 0); + + mock_registry::terminate(registry).await; } pub async fn test_all() { diff --git a/veilid-core/src/network_manager/tests/test_signed_node_info.rs b/veilid-core/src/network_manager/tests/test_signed_node_info.rs index 1bcc2bb0..b9f2d255 100644 --- a/veilid-core/src/network_manager/tests/test_signed_node_info.rs +++ b/veilid-core/src/network_manager/tests/test_signed_node_info.rs @@ -30,7 +30,7 @@ pub async fn test_signed_node_info() { // Test correct validation let keypair = vcrypto.generate_keypair(); let sni = SignedDirectNodeInfo::make_signatures( - crypto.clone(), + &crypto, vec![TypedKeyPair::new(ck, keypair)], node_info.clone(), ) @@ -42,7 +42,7 @@ pub async fn test_signed_node_info() { sni.timestamp(), sni.signatures().to_vec(), ); - let tks_validated = sdni.validate(&tks, crypto.clone()).unwrap(); + let tks_validated = sdni.validate(&tks, &crypto).unwrap(); assert_eq!(tks_validated.len(), oldtkslen); assert_eq!(tks_validated.len(), sni.signatures().len()); @@ -54,7 +54,7 @@ pub async fn test_signed_node_info() { sni.timestamp(), sni.signatures().to_vec(), ); - sdni.validate(&tks1, crypto.clone()).unwrap_err(); + sdni.validate(&tks1, &crypto).unwrap_err(); // Test unsupported cryptosystem validation let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); @@ -65,7 +65,7 @@ pub async fn test_signed_node_info() { tksfake.add(TypedKey::new(ck, keypair.key)); let sdnifake = SignedDirectNodeInfo::new(node_info.clone(), sni.timestamp(), sigsfake.clone()); - let tksfake_validated = sdnifake.validate(&tksfake, crypto.clone()).unwrap(); + let tksfake_validated = sdnifake.validate(&tksfake, &crypto).unwrap(); assert_eq!(tksfake_validated.len(), 1); assert_eq!(sdnifake.signatures().len(), sigsfake.len()); @@ -89,7 +89,7 @@ pub async fn 
test_signed_node_info() { let oldtks2len = tks2.len(); let sni2 = SignedRelayedNodeInfo::make_signatures( - crypto.clone(), + &crypto, vec![TypedKeyPair::new(ck, keypair2)], node_info2.clone(), tks.clone(), @@ -103,7 +103,7 @@ pub async fn test_signed_node_info() { sni2.timestamp(), sni2.signatures().to_vec(), ); - let tks2_validated = srni.validate(&tks2, crypto.clone()).unwrap(); + let tks2_validated = srni.validate(&tks2, &crypto).unwrap(); assert_eq!(tks2_validated.len(), oldtks2len); assert_eq!(tks2_validated.len(), sni2.signatures().len()); @@ -119,7 +119,7 @@ pub async fn test_signed_node_info() { sni2.timestamp(), sni2.signatures().to_vec(), ); - srni.validate(&tks3, crypto.clone()).unwrap_err(); + srni.validate(&tks3, &crypto).unwrap_err(); // Test unsupported cryptosystem validation let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); @@ -135,7 +135,7 @@ pub async fn test_signed_node_info() { sni2.timestamp(), sigsfake3.clone(), ); - let tksfake3_validated = srnifake.validate(&tksfake3, crypto.clone()).unwrap(); + let tksfake3_validated = srnifake.validate(&tksfake3, &crypto).unwrap(); assert_eq!(tksfake3_validated.len(), 1); assert_eq!(srnifake.signatures().len(), sigsfake3.len()); } diff --git a/veilid-core/src/network_manager/types/address.rs b/veilid-core/src/network_manager/types/address.rs index 487dc515..0091f08e 100644 --- a/veilid-core/src/network_manager/types/address.rs +++ b/veilid-core/src/network_manager/types/address.rs @@ -20,7 +20,7 @@ impl Address { SocketAddr::V6(v6) => Address::IPV6(*v6.ip()), } } - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub fn from_ip_addr(addr: IpAddr) -> Address { match addr { IpAddr::V4(v4) => Address::IPV4(v4), diff --git a/veilid-core/src/network_manager/types/dial_info/mod.rs b/veilid-core/src/network_manager/types/dial_info/mod.rs index 80b5d488..a1361e3d 100644 --- 
a/veilid-core/src/network_manager/types/dial_info/mod.rs +++ b/veilid-core/src/network_manager/types/dial_info/mod.rs @@ -268,7 +268,7 @@ impl DialInfo { Self::WSS(di) => di.socket_address.port(), } } - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub fn set_port(&mut self, port: u16) { match self { Self::UDP(di) => di.socket_address.set_port(port), @@ -366,7 +366,7 @@ impl DialInfo { // This will not be used on signed dialinfo, only for bootstrapping, so we don't need to worry about // the '0.0.0.0' address being propagated across the routing table cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0,0,0,0)), port)] } else { match split_url.host { diff --git a/veilid-core/src/network_manager/types/signal_info.rs b/veilid-core/src/network_manager/types/signal_info.rs index 2f88a0df..dc11d788 100644 --- a/veilid-core/src/network_manager/types/signal_info.rs +++ b/veilid-core/src/network_manager/types/signal_info.rs @@ -21,7 +21,7 @@ pub(crate) enum SignalInfo { } impl SignalInfo { - pub fn validate(&self, crypto: Crypto) -> Result<(), RPCError> { + pub fn validate(&self, crypto: &Crypto) -> Result<(), RPCError> { match self { SignalInfo::HolePunch { receipt, peer_info } => { if receipt.len() < MIN_RECEIPT_SIZE { diff --git a/veilid-core/src/network_manager/wasm/.cargo/config.toml b/veilid-core/src/network_manager/wasm/.cargo/config.toml index f4e8c002..1edd5dff 100644 --- a/veilid-core/src/network_manager/wasm/.cargo/config.toml +++ b/veilid-core/src/network_manager/wasm/.cargo/config.toml @@ -1,2 +1,2 @@ [build] -target = "wasm32-unknown-unknown" +all(target_arch = "wasm32", target_os = "unknown") diff --git a/veilid-core/src/network_manager/wasm/mod.rs b/veilid-core/src/network_manager/wasm/mod.rs index ea051a44..3780ce60 100644 --- 
a/veilid-core/src/network_manager/wasm/mod.rs +++ b/veilid-core/src/network_manager/wasm/mod.rs @@ -3,8 +3,6 @@ mod protocol; use super::*; use crate::routing_table::*; -use connection_manager::*; -use protocol::ws::WebsocketProtocolHandler; pub use protocol::*; use std::io; @@ -64,23 +62,28 @@ struct NetworkInner { protocol_config: ProtocolConfig, } -struct NetworkUnlockedInner { +pub(super) struct NetworkUnlockedInner { // Startup lock startup_lock: StartupLock, - - // Accessors - routing_table: RoutingTable, - network_manager: NetworkManager, - connection_manager: ConnectionManager, } #[derive(Clone)] pub(super) struct Network { - config: VeilidConfig, + registry: VeilidComponentRegistry, inner: Arc>, unlocked_inner: Arc, } +impl_veilid_component_registry_accessor!(Network); + +impl core::ops::Deref for Network { + type Target = NetworkUnlockedInner; + + fn deref(&self) -> &Self::Target { + &self.unlocked_inner + } +} + impl Network { fn new_inner() -> NetworkInner { NetworkInner { @@ -89,45 +92,20 @@ impl Network { } } - fn new_unlocked_inner( - network_manager: NetworkManager, - routing_table: RoutingTable, - connection_manager: ConnectionManager, - ) -> NetworkUnlockedInner { + fn new_unlocked_inner() -> NetworkUnlockedInner { NetworkUnlockedInner { startup_lock: StartupLock::new(), - network_manager, - routing_table, - connection_manager, } } - pub fn new( - network_manager: NetworkManager, - routing_table: RoutingTable, - connection_manager: ConnectionManager, - ) -> Self { + pub fn new(registry: VeilidComponentRegistry) -> Self { Self { - config: network_manager.config(), + registry, inner: Arc::new(Mutex::new(Self::new_inner())), - unlocked_inner: Arc::new(Self::new_unlocked_inner( - network_manager, - routing_table, - connection_manager, - )), + unlocked_inner: Arc::new(Self::new_unlocked_inner()), } } - fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() - } - fn routing_table(&self) -> RoutingTable { - 
self.unlocked_inner.routing_table.clone() - } - fn connection_manager(&self) -> ConnectionManager { - self.unlocked_inner.connection_manager.clone() - } - ///////////////////////////////////////////////////////////////// // Record DialInfo failures @@ -159,10 +137,9 @@ impl Network { self.record_dial_info_failure(dial_info.clone(), async move { let data_len = data.len(); - let timeout_ms = { - let c = self.config.get(); - c.network.connection_initial_timeout_ms - }; + let timeout_ms = self + .config() + .with(|c| c.network.connection_initial_timeout_ms); if self .network_manager() @@ -180,7 +157,7 @@ impl Network { bail!("no support for TCP protocol") } ProtocolType::WS | ProtocolType::WSS => { - let pnc = network_result_try!(WebsocketProtocolHandler::connect( + let pnc = network_result_try!(ws::WebsocketProtocolHandler::connect( &dial_info, timeout_ms ) .await @@ -210,14 +187,13 @@ impl Network { data: Vec, timeout_ms: u32, ) -> EyreResult>> { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; self.record_dial_info_failure(dial_info.clone(), async move { let data_len = data.len(); - let connect_timeout_ms = { - let c = self.config.get(); - c.network.connection_initial_timeout_ms - }; + let connect_timeout_ms = self + .config() + .with(|c| c.network.connection_initial_timeout_ms); if self .network_manager() @@ -239,7 +215,7 @@ impl Network { ProtocolType::UDP => unreachable!(), ProtocolType::TCP => unreachable!(), ProtocolType::WS | ProtocolType::WSS => { - WebsocketProtocolHandler::connect(&dial_info, connect_timeout_ms) + ws::WebsocketProtocolHandler::connect(&dial_info, connect_timeout_ms) .await .wrap_err("connect failure")? 
} @@ -271,7 +247,7 @@ impl Network { flow: Flow, data: Vec, ) -> EyreResult { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; let data_len = data.len(); match flow.protocol_type() { @@ -287,7 +263,11 @@ impl Network { // Handle connection-oriented protocols // Try to send to the exact existing connection if one exists - if let Some(conn) = self.connection_manager().get_connection(flow) { + if let Some(conn) = self + .network_manager() + .connection_manager() + .get_connection(flow) + { // connection exists, send over it match conn.send_async(data).await { ConnectionHandleSendResult::Sent => { @@ -320,7 +300,7 @@ impl Network { dial_info: DialInfo, data: Vec, ) -> EyreResult> { - let _guard = self.unlocked_inner.startup_lock.enter()?; + let _guard = self.startup_lock.enter()?; self.record_dial_info_failure(dial_info.clone(), async move { let data_len = data.len(); @@ -333,7 +313,8 @@ impl Network { // Handle connection-oriented protocols let conn = network_result_try!( - self.connection_manager() + self.network_manager() + .connection_manager() .get_or_create_connection(dial_info.clone()) .await? 
); @@ -361,7 +342,8 @@ impl Network { log_net!(debug "starting network"); // get protocol config let protocol_config = { - let c = self.config.get(); + let config = self.config(); + let c = config.get(); let inbound = ProtocolTypeSet::new(); let mut outbound = ProtocolTypeSet::new(); @@ -398,10 +380,8 @@ impl Network { self.inner.lock().protocol_config = protocol_config.clone(); // Start editing routing table - let mut editor_public_internet = self - .unlocked_inner - .routing_table - .edit_public_internet_routing_domain(); + let routing_table = self.routing_table(); + let mut editor_public_internet = routing_table.edit_public_internet_routing_domain(); // set up the routing table's network config editor_public_internet.setup_network( @@ -421,7 +401,7 @@ impl Network { #[instrument(level = "debug", err, skip_all)] pub async fn startup(&self) -> EyreResult { - let guard = self.unlocked_inner.startup_lock.startup()?; + let guard = self.startup_lock.startup()?; match self.startup_internal().await { Ok(StartupDisposition::Success) => { @@ -445,7 +425,7 @@ impl Network { } pub fn is_started(&self) -> bool { - self.unlocked_inner.startup_lock.is_started() + self.startup_lock.is_started() } #[instrument(level = "debug", skip_all)] @@ -456,7 +436,7 @@ impl Network { #[instrument(level = "debug", skip_all)] pub async fn shutdown(&self) { log_net!(debug "starting low level network shutdown"); - let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { + let Ok(guard) = self.startup_lock.shutdown().await else { log_net!(debug "low level network is already shut down"); return; }; @@ -493,14 +473,14 @@ impl Network { &self, _punishment: Option>, ) { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return; }; } pub fn needs_public_dial_info_check(&self) -> bool { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) 
= self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return false; }; @@ -511,11 +491,12 @@ impl Network { ////////////////////////////////////////// #[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)] pub async fn tick(&self) -> EyreResult<()> { - let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else { + let Ok(_guard) = self.startup_lock.enter() else { log_net!(debug "ignoring due to not started up"); return Ok(()); }; Ok(()) } + pub async fn cancel_tasks(&self) {} } diff --git a/veilid-core/src/network_manager/wasm/protocol/mod.rs b/veilid-core/src/network_manager/wasm/protocol/mod.rs index ca423acd..c2b1ba24 100644 --- a/veilid-core/src/network_manager/wasm/protocol/mod.rs +++ b/veilid-core/src/network_manager/wasm/protocol/mod.rs @@ -16,7 +16,7 @@ impl ProtocolNetworkConnection { _local_address: Option, dial_info: &DialInfo, timeout_ms: u32, - address_filter: AddressFilter, + address_filter: &AddressFilter, ) -> io::Result> { if address_filter.is_ip_addr_punished(dial_info.address().ip_addr()) { return Ok(NetworkResult::no_connection_other("punished")); diff --git a/veilid-core/src/network_manager/wasm/protocol/ws.rs b/veilid-core/src/network_manager/wasm/protocol/ws.rs index 88a5ee8e..6d68acde 100644 --- a/veilid-core/src/network_manager/wasm/protocol/ws.rs +++ b/veilid-core/src/network_manager/wasm/protocol/ws.rs @@ -9,29 +9,6 @@ struct WebsocketNetworkConnectionInner { ws_stream: CloneStream, } -fn to_io(err: WsErr) -> io::Error { - match err { - WsErr::InvalidWsState { supplied: _ } => { - io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) - } - WsErr::ConnectionNotOpen => io::Error::new(io::ErrorKind::NotConnected, err.to_string()), - WsErr::InvalidUrl { supplied: _ } => { - io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) - } - WsErr::InvalidCloseCode { supplied: _ } => { - io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) - } - 
WsErr::ReasonStringToLong => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), - WsErr::ConnectionFailed { event: _ } => { - io::Error::new(io::ErrorKind::ConnectionRefused, err.to_string()) - } - WsErr::InvalidEncoding => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), - WsErr::CantDecodeBlob => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), - WsErr::UnknownDataType => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), - _ => io::Error::new(io::ErrorKind::Other, err.to_string()), - } -} - #[derive(Clone)] pub struct WebsocketNetworkConnection { flow: Flow, @@ -65,7 +42,7 @@ impl WebsocketNetworkConnection { )] pub async fn close(&self) -> io::Result> { #[allow(unused_variables)] - let x = self.inner.ws_meta.close().await.map_err(to_io); + let x = self.inner.ws_meta.close().await.map_err(ws_err_to_io_error); #[cfg(feature = "verbose-tracing")] log_net!(debug "close result: {:?}", x); Ok(NetworkResult::value(())) @@ -83,7 +60,7 @@ impl WebsocketNetworkConnection { .send(WsMessage::Binary(message)), ) .await - .map_err(to_io) + .map_err(ws_err_to_io_error) .into_network_result()?; #[cfg(feature = "verbose-tracing")] @@ -140,7 +117,9 @@ impl WebsocketProtocolHandler { } let fut = SendWrapper::new(timeout(timeout_ms, async move { - WsMeta::connect(request, None).await.map_err(to_io) + WsMeta::connect(request, None) + .await + .map_err(ws_err_to_io_error) })); let (wsmeta, wsio) = network_result_try!(network_result_try!(fut diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index c9a3628b..c2d62495 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -640,7 +640,7 @@ impl BucketEntryInner { only_live: bool, filter: NodeRefFilter, ) -> Vec<(Flow, Timestamp)> { - let opt_connection_manager = rti.unlocked_inner.network_manager.opt_connection_manager(); + let opt_connection_manager = 
rti.network_manager().opt_connection_manager(); let mut out: Vec<(Flow, Timestamp)> = self .last_flows diff --git a/veilid-core/src/routing_table/debug.rs b/veilid-core/src/routing_table/debug.rs index bbc3758b..9bb02b95 100644 --- a/veilid-core/src/routing_table/debug.rs +++ b/veilid-core/src/routing_table/debug.rs @@ -35,7 +35,6 @@ impl RoutingTable { let valid_envelope_versions = VALID_ENVELOPE_VERSIONS.map(|x| x.to_string()).join(","); let node_ids = self - .unlocked_inner .node_ids() .iter() .map(|x| x.to_string()) @@ -57,7 +56,7 @@ impl RoutingTable { pub fn debug_info_nodeid(&self) -> String { let mut out = String::new(); - for nid in self.unlocked_inner.node_ids().iter() { + for nid in self.node_ids().iter() { out += &format!("{}\n", nid); } out @@ -66,7 +65,7 @@ impl RoutingTable { pub fn debug_info_nodeinfo(&self) -> String { let mut out = String::new(); let inner = self.inner.read(); - out += &format!("Node Ids: {}\n", self.unlocked_inner.node_ids()); + out += &format!("Node Ids: {}\n", self.node_ids()); out += &format!( "Self Transfer Stats:\n{}", indent_all_string(&inner.self_transfer_stats) @@ -250,7 +249,7 @@ impl RoutingTable { out += &format!("{:?}: {}: {}\n", routing_domain, crypto_kind, count); } for ck in &VALID_CRYPTO_KINDS { - let our_node_id = self.unlocked_inner.node_id(*ck); + let our_node_id = self.node_id(*ck); let mut filtered_total = 0; let mut b = 0; @@ -319,7 +318,7 @@ impl RoutingTable { ) -> String { let cur_ts = Timestamp::now(); let relay_node_filter = self.make_public_internet_relay_node_filter(); - let our_node_ids = self.unlocked_inner.node_ids(); + let our_node_ids = self.node_ids(); let mut relay_count = 0usize; let mut relaying_count = 0usize; @@ -340,7 +339,7 @@ impl RoutingTable { node_count, filters, |_rti, entry: Option>| { - NodeRef::new(self.clone(), entry.unwrap().clone()) + NodeRef::new(self.registry(), entry.unwrap().clone()) }, ); let mut out = String::new(); @@ -376,9 +375,10 @@ impl RoutingTable { relaying_count 
+= 1; } + let best_node_id = node.best_node_id(); + out += " "; - out += &node - .operate(|_rti, e| Self::format_entry(cur_ts, node.best_node_id(), e, &relay_tag)); + out += &node.operate(|_rti, e| Self::format_entry(cur_ts, best_node_id, e, &relay_tag)); out += "\n"; } diff --git a/veilid-core/src/routing_table/find_peers.rs b/veilid-core/src/routing_table/find_peers.rs index d09799ea..71b3b073 100644 --- a/veilid-core/src/routing_table/find_peers.rs +++ b/veilid-core/src/routing_table/find_peers.rs @@ -42,10 +42,9 @@ impl RoutingTable { ) as RoutingTableEntryFilter; let filters = VecDeque::from([filter]); - let node_count = { - let c = self.config.get(); - c.network.dht.max_find_node_count as usize - }; + let node_count = self + .config() + .with(|c| c.network.dht.max_find_node_count as usize); let closest_nodes = match self.find_preferred_closest_nodes( node_count, @@ -82,11 +81,13 @@ impl RoutingTable { // find N nodes closest to the target node in our routing table // ensure the nodes returned are only the ones closer to the target node than ourself - let Some(vcrypto) = self.crypto().get(crypto_kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(crypto_kind) else { return NetworkResult::invalid_message("unsupported cryptosystem"); }; + let vcrypto = &vcrypto; + let own_distance = vcrypto.distance(&own_node_id.value, &key.value); - let vcrypto2 = vcrypto.clone(); let filter = Box::new( move |rti: &RoutingTableInner, opt_entry: Option>| { @@ -121,10 +122,9 @@ impl RoutingTable { ) as RoutingTableEntryFilter; let filters = VecDeque::from([filter]); - let node_count = { - let c = self.config.get(); - c.network.dht.max_find_node_count as usize - }; + let node_count = self + .config() + .with(|c| c.network.dht.max_find_node_count as usize); // let closest_nodes = match self.find_preferred_closest_nodes( @@ -147,7 +147,7 @@ impl RoutingTable { // Validate peers returned are, in fact, closer to the key than the node we sent this to // This 
same test is used on the other side so we vet things here - let valid = match Self::verify_peers_closer(vcrypto2, own_node_id, key, &closest_nodes) { + let valid = match Self::verify_peers_closer(vcrypto, own_node_id, key, &closest_nodes) { Ok(v) => v, Err(e) => { panic!("missing cryptosystem in peers node ids: {}", e); @@ -166,7 +166,7 @@ impl RoutingTable { /// Determine if set of peers is closer to key_near than key_far is to key_near #[instrument(level = "trace", target = "rtab", skip_all, err)] pub fn verify_peers_closer( - vcrypto: CryptoSystemVersion, + vcrypto: &crypto::CryptoSystemGuard<'_>, key_far: TypedKey, key_near: TypedKey, peers: &[Arc], diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index e032fbd1..758a7403 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -91,16 +91,12 @@ pub struct RecentPeersEntry { pub last_connection: Flow, } -pub(crate) struct RoutingTableUnlockedInner { - // Accessors - event_bus: EventBus, - config: VeilidConfig, - network_manager: NetworkManager, +pub(crate) struct RoutingTable { + registry: VeilidComponentRegistry, + inner: RwLock, - /// The current node's public DHT keys - node_id: TypedKeyGroup, - /// The current node's public DHT secrets - node_id_secret: TypedSecretGroup, + /// Route spec store + route_spec_store: RouteSpecStore, /// Buckets to kick on our next kick task kick_queue: Mutex>, /// Background process for computing statistics @@ -131,103 +127,27 @@ pub(crate) struct RoutingTableUnlockedInner { private_route_management_task: TickTask, } -impl RoutingTableUnlockedInner { - pub fn network_manager(&self) -> NetworkManager { - self.network_manager.clone() - } - pub fn crypto(&self) -> Crypto { - self.network_manager().crypto() - } - pub fn rpc_processor(&self) -> RPCProcessor { - self.network_manager().rpc_processor() - } - pub fn update_callback(&self) -> UpdateCallback { - self.network_manager().update_callback() - } - pub 
fn with_config(&self, f: F) -> R - where - F: FnOnce(&VeilidConfigInner) -> R, - { - f(&self.config.get()) - } - - pub fn node_id(&self, kind: CryptoKind) -> TypedKey { - self.node_id.get(kind).unwrap() - } - - pub fn node_id_secret_key(&self, kind: CryptoKind) -> SecretKey { - self.node_id_secret.get(kind).unwrap().value - } - - pub fn node_ids(&self) -> TypedKeyGroup { - self.node_id.clone() - } - - pub fn node_id_typed_key_pairs(&self) -> Vec { - let mut tkps = Vec::new(); - for ck in VALID_CRYPTO_KINDS { - tkps.push(TypedKeyPair::new( - ck, - KeyPair::new(self.node_id(ck).value, self.node_id_secret_key(ck)), - )); - } - tkps - } - - pub fn matches_own_node_id(&self, node_ids: &[TypedKey]) -> bool { - for ni in node_ids { - if let Some(v) = self.node_id.get(ni.kind) { - if v.value == ni.value { - return true; - } - } - } - false - } - - pub fn matches_own_node_id_key(&self, node_id_key: &PublicKey) -> bool { - for tk in self.node_id.iter() { - if tk.value == *node_id_key { - return true; - } - } - false - } - - pub fn calculate_bucket_index(&self, node_id: &TypedKey) -> BucketIndex { - let crypto = self.crypto(); - let self_node_id_key = self.node_id(node_id.kind).value; - let vcrypto = crypto.get(node_id.kind).unwrap(); - ( - node_id.kind, - vcrypto - .distance(&node_id.value, &self_node_id_key) - .first_nonzero_bit() - .unwrap(), - ) +impl fmt::Debug for RoutingTable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RoutingTable") + // .field("inner", &self.inner) + // .field("unlocked_inner", &self.unlocked_inner) + .finish() } } -#[derive(Clone)] -pub(crate) struct RoutingTable { - inner: Arc>, - unlocked_inner: Arc, -} +impl_veilid_component!(RoutingTable); impl RoutingTable { - fn new_unlocked_inner( - event_bus: EventBus, - config: VeilidConfig, - network_manager: NetworkManager, - ) -> RoutingTableUnlockedInner { + pub fn new(registry: VeilidComponentRegistry) -> Self { + let config = registry.config(); let c = 
config.get(); - - RoutingTableUnlockedInner { - event_bus, - config: config.clone(), - network_manager, - node_id: c.network.routing_table.node_id.clone(), - node_id_secret: c.network.routing_table.node_id_secret.clone(), + let inner = RwLock::new(RoutingTableInner::new(registry.clone())); + let route_spec_store = RouteSpecStore::new(registry.clone()); + let this = Self { + registry, + inner, + route_spec_store, kick_queue: Mutex::new(BTreeSet::default()), rolling_transfers_task: TickTask::new( "rolling_transfers_task", @@ -269,16 +189,6 @@ impl RoutingTable { "private_route_management_task", PRIVATE_ROUTE_MANAGEMENT_INTERVAL_SECS, ), - } - } - pub fn new(network_manager: NetworkManager) -> Self { - let event_bus = network_manager.event_bus(); - let config = network_manager.config(); - let unlocked_inner = Arc::new(Self::new_unlocked_inner(event_bus, config, network_manager)); - let inner = Arc::new(RwLock::new(RoutingTableInner::new(unlocked_inner.clone()))); - let this = Self { - inner, - unlocked_inner, }; this.setup_tasks(); @@ -290,7 +200,7 @@ impl RoutingTable { /// Initialization /// Called to initialize the routing table after it is created - pub async fn init(&self) -> EyreResult<()> { + async fn init_async(&self) -> EyreResult<()> { log_rtab!(debug "starting routing table init"); // Set up routing buckets @@ -309,42 +219,35 @@ impl RoutingTable { // Set up routespecstore log_rtab!(debug "starting route spec store init"); - let route_spec_store = match RouteSpecStore::load(self.clone()).await { - Ok(v) => v, - Err(e) => { - log_rtab!(debug "Error loading route spec store: {:#?}. Resetting.", e); - RouteSpecStore::new(self.clone()) - } + if let Err(e) = self.route_spec_store().load().await { + log_rtab!(debug "Error loading route spec store: {:#?}. 
Resetting.", e); + self.route_spec_store().reset(); }; log_rtab!(debug "finished route spec store init"); - { - let mut inner = self.inner.write(); - inner.route_spec_store = Some(route_spec_store); - } - - // Inform storage manager we are up - self.network_manager - .storage_manager() - .set_routing_table(Some(self.clone())) - .await; - log_rtab!(debug "finished routing table init"); Ok(()) } - /// Called to shut down the routing table - pub async fn terminate(&self) { - log_rtab!(debug "starting routing table terminate"); + async fn post_init_async(&self) -> EyreResult<()> { + Ok(()) + } - // Stop storage manager from using us - self.network_manager - .storage_manager() - .set_routing_table(None) - .await; + pub(crate) async fn startup(&self) -> EyreResult<()> { + Ok(()) + } + pub(crate) async fn shutdown(&self) { // Stop tasks + log_net!(debug "stopping routing table tasks"); self.cancel_tasks().await; + } + + async fn pre_terminate_async(&self) {} + + /// Called to shut down the routing table + async fn terminate_async(&self) { + log_rtab!(debug "starting routing table terminate"); // Load bucket entries from table db if possible log_rtab!(debug "saving routing table entries"); @@ -365,11 +268,73 @@ impl RoutingTable { log_rtab!(debug "shutting down routing table"); let mut inner = self.inner.write(); - *inner = RoutingTableInner::new(self.unlocked_inner.clone()); + *inner = RoutingTableInner::new(self.registry()); log_rtab!(debug "finished routing table terminate"); } + /////////////////////////////////////////////////////////////////// + + pub fn node_id(&self, kind: CryptoKind) -> TypedKey { + self.config() + .with(|c| c.network.routing_table.node_id.get(kind).unwrap()) + } + + pub fn node_id_secret_key(&self, kind: CryptoKind) -> SecretKey { + self.config() + .with(|c| c.network.routing_table.node_id_secret.get(kind).unwrap()) + .value + } + + pub fn node_ids(&self) -> TypedKeyGroup { + self.config() + .with(|c| c.network.routing_table.node_id.clone()) + } 
+ + pub fn node_id_typed_key_pairs(&self) -> Vec { + let mut tkps = Vec::new(); + for ck in VALID_CRYPTO_KINDS { + tkps.push(TypedKeyPair::new( + ck, + KeyPair::new(self.node_id(ck).value, self.node_id_secret_key(ck)), + )); + } + tkps + } + + pub fn matches_own_node_id(&self, node_ids: &[TypedKey]) -> bool { + for ni in node_ids { + if let Some(v) = self.node_ids().get(ni.kind) { + if v.value == ni.value { + return true; + } + } + } + false + } + + pub fn matches_own_node_id_key(&self, node_id_key: &PublicKey) -> bool { + for tk in self.node_ids().iter() { + if tk.value == *node_id_key { + return true; + } + } + false + } + + pub fn calculate_bucket_index(&self, node_id: &TypedKey) -> BucketIndex { + let crypto = self.crypto(); + let self_node_id_key = self.node_id(node_id.kind).value; + let vcrypto = crypto.get(node_id.kind).unwrap(); + ( + node_id.kind, + vcrypto + .distance(&node_id.value, &self_node_id_key) + .first_nonzero_bit() + .unwrap(), + ) + } + /// Serialize the routing table. 
fn serialized_buckets(&self) -> (SerializedBucketMap, SerializedBuckets) { // Since entries are shared by multiple buckets per cryptokind @@ -406,7 +371,7 @@ impl RoutingTable { async fn save_buckets(&self) -> EyreResult<()> { let (serialized_bucket_map, all_entry_bytes) = self.serialized_buckets(); - let table_store = self.unlocked_inner.network_manager().table_store(); + let table_store = self.table_store(); let tdb = table_store.open(ROUTING_TABLE, 1).await?; let dbx = tdb.transact(); if let Err(e) = dbx.store_json(0, SERIALIZED_BUCKET_MAP, &serialized_bucket_map) { @@ -420,12 +385,14 @@ impl RoutingTable { dbx.commit().await?; Ok(()) } + /// Deserialize routing table from table store async fn load_buckets(&self) -> EyreResult<()> { // Make a cache validity key of all our node ids and our bootstrap choice let mut cache_validity_key: Vec = Vec::new(); { - let c = self.unlocked_inner.config.get(); + let config = self.config(); + let c = config.get(); for ck in VALID_CRYPTO_KINDS { if let Some(nid) = c.network.routing_table.node_id.get(ck) { cache_validity_key.append(&mut nid.value.bytes.to_vec()); @@ -446,7 +413,7 @@ impl RoutingTable { }; // Deserialize bucket map and all entries from the table store - let table_store = self.unlocked_inner.network_manager().table_store(); + let table_store = self.table_store(); let db = table_store.open(ROUTING_TABLE, 1).await?; let caches_valid = match db.load(0, CACHE_VALIDITY_KEY).await? { @@ -479,14 +446,13 @@ impl RoutingTable { // Reconstruct all entries let inner = &mut *self.inner.write(); - self.populate_routing_table(inner, serialized_bucket_map, all_entry_bytes)?; + Self::populate_routing_table_inner(inner, serialized_bucket_map, all_entry_bytes)?; Ok(()) } /// Write the deserialized table store data to the routing table. 
- pub fn populate_routing_table( - &self, + pub fn populate_routing_table_inner( inner: &mut RoutingTableInner, serialized_bucket_map: SerializedBucketMap, all_entry_bytes: SerializedBuckets, @@ -542,8 +508,8 @@ impl RoutingTable { self.inner.read().routing_domain_for_address(address) } - pub fn route_spec_store(&self) -> RouteSpecStore { - self.inner.read().route_spec_store.as_ref().unwrap().clone() + pub fn route_spec_store(&self) -> &RouteSpecStore { + &self.route_spec_store } pub fn relay_node(&self, domain: RoutingDomain) -> Option { @@ -600,12 +566,12 @@ impl RoutingTable { /// Edit the PublicInternet RoutingDomain pub fn edit_public_internet_routing_domain(&self) -> RoutingDomainEditorPublicInternet { - RoutingDomainEditorPublicInternet::new(self.clone()) + RoutingDomainEditorPublicInternet::new(self) } /// Edit the LocalNetwork RoutingDomain pub fn edit_local_network_routing_domain(&self) -> RoutingDomainEditorLocalNetwork { - RoutingDomainEditorLocalNetwork::new(self.clone()) + RoutingDomainEditorLocalNetwork::new(self) } /// Return a copy of our node's peerinfo (may not yet be published) @@ -619,7 +585,7 @@ impl RoutingTable { } /// Return the domain's currently registered network class - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option { self.inner.read().get_network_class(routing_domain) } @@ -656,7 +622,7 @@ impl RoutingTable { ) -> Vec { self.inner .read() - .get_nodes_needing_ping(self.clone(), routing_domain, cur_ts) + .get_nodes_needing_ping(routing_domain, cur_ts) } fn queue_bucket_kicks(&self, node_ids: TypedKeyGroup) { @@ -667,21 +633,19 @@ impl RoutingTable { } // Put it in the kick queue - let x = self.unlocked_inner.calculate_bucket_index(node_id); - self.unlocked_inner.kick_queue.lock().insert(x); + let x = self.calculate_bucket_index(node_id); + self.kick_queue.lock().insert(x); 
} } /// Resolve an existing routing table entry using any crypto kind and return a reference to it pub fn lookup_any_node_ref(&self, node_id_key: PublicKey) -> EyreResult> { - self.inner - .read() - .lookup_any_node_ref(self.clone(), node_id_key) + self.inner.read().lookup_any_node_ref(node_id_key) } /// Resolve an existing routing table entry and return a reference to it pub fn lookup_node_ref(&self, node_id: TypedKey) -> EyreResult> { - self.inner.read().lookup_node_ref(self.clone(), node_id) + self.inner.read().lookup_node_ref(node_id) } /// Resolve an existing routing table entry and return a filtered reference to it @@ -692,12 +656,9 @@ impl RoutingTable { routing_domain_set: RoutingDomainSet, dial_info_filter: DialInfoFilter, ) -> EyreResult> { - self.inner.read().lookup_and_filter_noderef( - self.clone(), - node_id, - routing_domain_set, - dial_info_filter, - ) + self.inner + .read() + .lookup_and_filter_noderef(node_id, routing_domain_set, dial_info_filter) } /// Shortcut function to add a node to our routing table if it doesn't exist @@ -711,7 +672,7 @@ impl RoutingTable { ) -> EyreResult { self.inner .write() - .register_node_with_peer_info(self.clone(), peer_info, allow_invalid) + .register_node_with_peer_info(peer_info, allow_invalid) } /// Shortcut function to add a node to our routing table if it doesn't exist @@ -726,7 +687,7 @@ impl RoutingTable { ) -> EyreResult { self.inner .write() - .register_node_with_id(self.clone(), routing_domain, node_id, timestamp) + .register_node_with_id(routing_domain, node_id, timestamp) } ////////////////////////////////////////////////////////////////////// @@ -824,7 +785,7 @@ impl RoutingTable { } /// Makes a filter that finds nodes with a matching inbound dialinfo - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub fn make_inbound_dial_info_entry_filter<'a>( routing_domain: RoutingDomain, dial_info_filter: DialInfoFilter, 
@@ -885,7 +846,7 @@ impl RoutingTable { filters: VecDeque, ) -> Vec { self.inner.read().find_fast_non_local_nodes_filtered( - self.clone(), + self.registry(), routing_domain, node_count, filters, @@ -971,7 +932,7 @@ impl RoutingTable { protocol_types_len * 2 * max_per_type, filters, |_rti, entry: Option>| { - NodeRef::new(self.clone(), entry.unwrap().clone()) + NodeRef::new(self.registry(), entry.unwrap().clone()) }, ) } @@ -1073,7 +1034,6 @@ impl RoutingTable { let res = network_result_try!( rpc_processor - .clone() .rpc_call_find_node( Destination::direct(node_ref.default_filtered()), node_id, @@ -1162,11 +1122,3 @@ impl RoutingTable { } } } - -impl core::ops::Deref for RoutingTable { - type Target = RoutingTableUnlockedInner; - - fn deref(&self) -> &Self::Target { - &self.unlocked_inner - } -} diff --git a/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs b/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs index 9cdc4e57..a6c21e07 100644 --- a/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs +++ b/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs @@ -1,7 +1,7 @@ use super::*; pub(crate) struct FilteredNodeRef { - routing_table: RoutingTable, + registry: VeilidComponentRegistry, entry: Arc, filter: NodeRefFilter, sequencing: Sequencing, @@ -9,9 +9,11 @@ pub(crate) struct FilteredNodeRef { track_id: usize, } +impl_veilid_component_registry_accessor!(FilteredNodeRef); + impl FilteredNodeRef { pub fn new( - routing_table: RoutingTable, + registry: VeilidComponentRegistry, entry: Arc, filter: NodeRefFilter, sequencing: Sequencing, @@ -19,7 +21,7 @@ impl FilteredNodeRef { entry.ref_count.fetch_add(1u32, Ordering::AcqRel); Self { - routing_table, + registry, entry, filter, sequencing, @@ -29,7 +31,7 @@ impl FilteredNodeRef { } pub fn unfiltered(&self) -> NodeRef { - NodeRef::new(self.routing_table.clone(), self.entry.clone()) + NodeRef::new(self.registry(), self.entry.clone()) } pub fn filtered_clone(&self, filter: 
NodeRefFilter) -> FilteredNodeRef { @@ -40,7 +42,7 @@ impl FilteredNodeRef { pub fn sequencing_clone(&self, sequencing: Sequencing) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), self.filter(), sequencing, @@ -70,9 +72,6 @@ impl FilteredNodeRef { } impl NodeRefAccessorsTrait for FilteredNodeRef { - fn routing_table(&self) -> RoutingTable { - self.routing_table.clone() - } fn entry(&self) -> Arc { self.entry.clone() } @@ -105,7 +104,8 @@ impl NodeRefOperateTrait for FilteredNodeRef { where F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T, { - let inner = &*self.routing_table.inner.read(); + let routing_table = self.registry.routing_table(); + let inner = &*routing_table.inner.read(); self.entry.with(inner, f) } @@ -113,7 +113,8 @@ impl NodeRefOperateTrait for FilteredNodeRef { where F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T, { - let inner = &mut *self.routing_table.inner.write(); + let routing_table = self.registry.routing_table(); + let inner = &mut *routing_table.inner.write(); self.entry.with_mut(inner, f) } } @@ -125,7 +126,7 @@ impl Clone for FilteredNodeRef { self.entry.ref_count.fetch_add(1u32, Ordering::AcqRel); Self { - routing_table: self.routing_table.clone(), + registry: self.registry.clone(), entry: self.entry.clone(), filter: self.filter, sequencing: self.sequencing, @@ -162,7 +163,7 @@ impl Drop for FilteredNodeRef { // get node ids with inner unlocked because nothing could be referencing this entry now // and we don't know when it will get dropped, possibly inside a lock let node_ids = self.entry.with_inner(|e| e.node_ids()); - self.routing_table.queue_bucket_kicks(node_ids); + self.routing_table().queue_bucket_kicks(node_ids); } } } diff --git a/veilid-core/src/routing_table/node_ref/mod.rs b/veilid-core/src/routing_table/node_ref/mod.rs index 9be39762..b168e2e5 100644 --- a/veilid-core/src/routing_table/node_ref/mod.rs +++ 
b/veilid-core/src/routing_table/node_ref/mod.rs @@ -16,18 +16,20 @@ pub(crate) use traits::*; // Default NodeRef pub(crate) struct NodeRef { - routing_table: RoutingTable, + registry: VeilidComponentRegistry, entry: Arc, #[cfg(feature = "tracking")] track_id: usize, } +impl_veilid_component_registry_accessor!(NodeRef); + impl NodeRef { - pub fn new(routing_table: RoutingTable, entry: Arc) -> Self { + pub fn new(registry: VeilidComponentRegistry, entry: Arc) -> Self { entry.ref_count.fetch_add(1u32, Ordering::AcqRel); Self { - routing_table, + registry, entry, #[cfg(feature = "tracking")] track_id: entry.track(), @@ -36,7 +38,7 @@ impl NodeRef { pub fn default_filtered(&self) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), NodeRefFilter::new(), Sequencing::default(), @@ -45,7 +47,7 @@ impl NodeRef { pub fn sequencing_filtered(&self, sequencing: Sequencing) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), NodeRefFilter::new(), sequencing, @@ -57,7 +59,7 @@ impl NodeRef { routing_domain_set: R, ) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), NodeRefFilter::new().with_routing_domain_set(routing_domain_set.into()), Sequencing::default(), @@ -66,7 +68,7 @@ impl NodeRef { pub fn custom_filtered(&self, filter: NodeRefFilter) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), filter, Sequencing::default(), @@ -76,7 +78,7 @@ impl NodeRef { #[expect(dead_code)] pub fn dial_info_filtered(&self, filter: DialInfoFilter) -> FilteredNodeRef { FilteredNodeRef::new( - self.routing_table.clone(), + self.registry.clone(), self.entry.clone(), NodeRefFilter::new().with_dial_info_filter(filter), Sequencing::default(), @@ -92,9 +94,6 @@ impl NodeRef { } impl NodeRefAccessorsTrait for NodeRef { - fn 
routing_table(&self) -> RoutingTable { - self.routing_table.clone() - } fn entry(&self) -> Arc { self.entry.clone() } @@ -125,7 +124,8 @@ impl NodeRefOperateTrait for NodeRef { where F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T, { - let inner = &*self.routing_table.inner.read(); + let routing_table = self.routing_table(); + let inner = &*routing_table.inner.read(); self.entry.with(inner, f) } @@ -133,7 +133,8 @@ impl NodeRefOperateTrait for NodeRef { where F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T, { - let inner = &mut *self.routing_table.inner.write(); + let routing_table = self.routing_table(); + let inner = &mut *routing_table.inner.write(); self.entry.with_mut(inner, f) } } @@ -145,7 +146,7 @@ impl Clone for NodeRef { self.entry.ref_count.fetch_add(1u32, Ordering::AcqRel); Self { - routing_table: self.routing_table.clone(), + registry: self.registry.clone(), entry: self.entry.clone(), #[cfg(feature = "tracking")] track_id: self.entry.write().track(), @@ -178,7 +179,7 @@ impl Drop for NodeRef { // get node ids with inner unlocked because nothing could be referencing this entry now // and we don't know when it will get dropped, possibly inside a lock let node_ids = self.entry.with_inner(|e| e.node_ids()); - self.routing_table.queue_bucket_kicks(node_ids); + self.routing_table().queue_bucket_kicks(node_ids); } } } diff --git a/veilid-core/src/routing_table/node_ref/node_ref_lock.rs b/veilid-core/src/routing_table/node_ref/node_ref_lock.rs index c38598b7..b8e33f38 100644 --- a/veilid-core/src/routing_table/node_ref/node_ref_lock.rs +++ b/veilid-core/src/routing_table/node_ref/node_ref_lock.rs @@ -15,6 +15,21 @@ pub(crate) struct NodeRefLock< nr: N, } +impl< + 'a, + N: NodeRefAccessorsTrait + + NodeRefOperateTrait + + VeilidComponentRegistryAccessor + + fmt::Debug + + fmt::Display + + Clone, + > VeilidComponentRegistryAccessor for NodeRefLock<'a, N> +{ + fn registry(&self) -> VeilidComponentRegistry { + self.nr.registry() + } +} + 
impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> NodeRefLock<'a, N> { @@ -33,9 +48,6 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> NodeRefAccessorsTrait for NodeRefLock<'a, N> { - fn routing_table(&self) -> RoutingTable { - self.nr.routing_table() - } fn entry(&self) -> Arc { self.nr.entry() } diff --git a/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs b/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs index 9b725ab6..c132e85c 100644 --- a/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs +++ b/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs @@ -15,6 +15,21 @@ pub(crate) struct NodeRefLockMut< nr: N, } +impl< + 'a, + N: NodeRefAccessorsTrait + + NodeRefOperateTrait + + VeilidComponentRegistryAccessor + + fmt::Debug + + fmt::Display + + Clone, + > VeilidComponentRegistryAccessor for NodeRefLockMut<'a, N> +{ + fn registry(&self) -> VeilidComponentRegistry { + self.nr.registry() + } +} + impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> NodeRefLockMut<'a, N> { @@ -34,9 +49,6 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> NodeRefAccessorsTrait for NodeRefLockMut<'a, N> { - fn routing_table(&self) -> RoutingTable { - self.nr.routing_table() - } fn entry(&self) -> Arc { self.nr.entry() } diff --git a/veilid-core/src/routing_table/node_ref/traits.rs b/veilid-core/src/routing_table/node_ref/traits.rs index 4ddecf23..a931d134 100644 --- a/veilid-core/src/routing_table/node_ref/traits.rs +++ b/veilid-core/src/routing_table/node_ref/traits.rs @@ -2,7 +2,6 @@ use super::*; // Field accessors pub(crate) trait NodeRefAccessorsTrait { - fn routing_table(&self) -> RoutingTable; fn entry(&self) 
-> Arc; fn sequencing(&self) -> Sequencing; fn routing_domain_set(&self) -> RoutingDomainSet; @@ -125,12 +124,12 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait }; // If relay is ourselves, then return None, because we can't relay through ourselves // and to contact this node we should have had an existing inbound connection - if rti.unlocked_inner.matches_own_node_id(rpi.node_ids()) { + if rti.routing_table().matches_own_node_id(rpi.node_ids()) { bail!("Can't relay though ourselves"); } // Register relay node and return noderef - let nr = rti.register_node_with_peer_info(self.routing_table(), rpi, false)?; + let nr = rti.register_node_with_peer_info(rpi, false)?; Ok(Some(nr)) }) } @@ -253,7 +252,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait else { return false; }; - let our_node_ids = rti.unlocked_inner.node_ids(); + let our_node_ids = rti.routing_table().node_ids(); our_node_ids.contains_any(&relay_ids) }) } diff --git a/veilid-core/src/routing_table/privacy.rs b/veilid-core/src/routing_table/privacy.rs index b15f9b8f..feaf1edf 100644 --- a/veilid-core/src/routing_table/privacy.rs +++ b/veilid-core/src/routing_table/privacy.rs @@ -31,7 +31,7 @@ pub(crate) enum RouteNode { } impl RouteNode { - pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> { match self { RouteNode::NodeId(_) => Ok(()), RouteNode::PeerInfo(pi) => pi.validate(crypto), @@ -40,7 +40,7 @@ impl RouteNode { pub fn node_ref( &self, - routing_table: RoutingTable, + routing_table: &RoutingTable, crypto_kind: CryptoKind, ) -> Option { match self { @@ -91,7 +91,7 @@ pub(crate) struct RouteHop { pub next_hop: Option, } impl RouteHop { - pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> { self.node.validate(crypto) } } @@ -108,7 +108,7 @@ pub(crate) enum PrivateRouteHops { } 
impl PrivateRouteHops { - pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> { match self { PrivateRouteHops::FirstHop(rh) => rh.validate(crypto), PrivateRouteHops::Data(_) => Ok(()), @@ -138,7 +138,7 @@ impl PrivateRoute { } } - pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> { self.hops.validate(crypto) } diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 529c2d10..568c1964 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -34,85 +34,71 @@ struct RouteSpecStoreInner { cache: RouteSpecStoreCache, } -struct RouteSpecStoreUnlockedInner { - /// Handle to routing table - routing_table: RoutingTable, +/// The routing table's storage for private/safety routes +#[derive(Debug)] +pub(crate) struct RouteSpecStore { + registry: VeilidComponentRegistry, + inner: Mutex, + /// Maximum number of hops in a route max_route_hop_count: usize, /// Default number of hops in a route default_route_hop_count: usize, } -impl fmt::Debug for RouteSpecStoreUnlockedInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RouteSpecStoreUnlockedInner") - .field("max_route_hop_count", &self.max_route_hop_count) - .field("default_route_hop_count", &self.default_route_hop_count) - .finish() - } -} - -/// The routing table's storage for private/safety routes -#[derive(Clone, Debug)] -pub(crate) struct RouteSpecStore { - inner: Arc>, - unlocked_inner: Arc, -} +impl_veilid_component_registry_accessor!(RouteSpecStore); impl RouteSpecStore { - pub fn new(routing_table: RoutingTable) -> Self { - let config = routing_table.network_manager().config(); + pub fn new(registry: VeilidComponentRegistry) -> Self { + let config = registry.config(); let c = config.get(); 
Self { - unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { - max_route_hop_count: c.network.rpc.max_route_hop_count.into(), - default_route_hop_count: c.network.rpc.default_route_hop_count.into(), - routing_table, - }), - inner: Arc::new(Mutex::new(RouteSpecStoreInner { + registry, + inner: Mutex::new(RouteSpecStoreInner { content: RouteSpecStoreContent::new(), cache: Default::default(), - })), + }), + max_route_hop_count: c.network.rpc.max_route_hop_count.into(), + default_route_hop_count: c.network.rpc.default_route_hop_count.into(), } } - #[instrument(level = "trace", target = "route", skip(routing_table), err)] - pub async fn load(routing_table: RoutingTable) -> EyreResult { - let (max_route_hop_count, default_route_hop_count) = { - let config = routing_table.network_manager().config(); - let c = config.get(); - ( - c.network.rpc.max_route_hop_count as usize, - c.network.rpc.default_route_hop_count as usize, - ) - }; - - // Get frozen blob from table store - let content = RouteSpecStoreContent::load(routing_table.clone()).await?; - - let mut inner = RouteSpecStoreInner { - content, + #[instrument(level = "trace", target = "route", skip_all)] + pub fn reset(&self) { + *self.inner.lock() = RouteSpecStoreInner { + content: RouteSpecStoreContent::new(), cache: Default::default(), }; + } - // Rebuild the routespecstore cache - let rti = &*routing_table.inner.read(); - for (_, rssd) in inner.content.iter_details() { - inner.cache.add_to_cache(rti, rssd); - } + #[instrument(level = "trace", target = "route", skip_all, err)] + pub async fn load(&self) -> EyreResult<()> { + let inner = { + let table_store = self.table_store(); + let routing_table = self.routing_table(); - // Return the loaded RouteSpecStore - let rss = RouteSpecStore { - unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { - max_route_hop_count, - default_route_hop_count, - routing_table: routing_table.clone(), - }), - inner: Arc::new(Mutex::new(inner)), + // Get frozen blob from table store + 
let content = RouteSpecStoreContent::load(&table_store, &routing_table).await?; + + let mut inner = RouteSpecStoreInner { + content, + cache: Default::default(), + }; + + // Rebuild the routespecstore cache + let rti = &*routing_table.inner.read(); + for (_, rssd) in inner.content.iter_details() { + inner.cache.add_to_cache(rti, rssd); + } + + inner }; - Ok(rss) + // Return the loaded RouteSpecStore + *self.inner.lock() = inner; + + Ok(()) } #[instrument(level = "trace", target = "route", skip(self), err)] @@ -123,9 +109,8 @@ impl RouteSpecStore { }; // Save our content - content - .save(self.unlocked_inner.routing_table.clone()) - .await?; + let table_store = self.table_store(); + content.save(&table_store).await?; Ok(()) } @@ -146,16 +131,17 @@ impl RouteSpecStore { dead_remote_routes, })); - let update_callback = self.unlocked_inner.routing_table.update_callback(); + let update_callback = self.registry.update_callback(); update_callback(update); } /// Purge the route spec store pub async fn purge(&self) -> VeilidAPIResult<()> { // Briefly pause routing table ticker while changes are made - let _tick_guard = self.unlocked_inner.routing_table.pause_tasks().await; - self.unlocked_inner.routing_table.cancel_tasks().await; + let routing_table = self.routing_table(); + let _tick_guard = routing_table.pause_tasks().await; + routing_table.cancel_tasks().await; { let inner = &mut *self.inner.lock(); inner.content = Default::default(); @@ -181,7 +167,7 @@ impl RouteSpecStore { automatic: bool, ) -> VeilidAPIResult { let inner = &mut *self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); + let routing_table = self.routing_table(); let rti = &mut *routing_table.inner.write(); self.allocate_route_inner( @@ -213,12 +199,10 @@ impl RouteSpecStore { apibail_generic!("safety_spec.preferred_route must be empty when allocating new route"); } - let ip6_prefix_size = rti - .unlocked_inner - .config - .get() - .network - 
.max_connections_per_ip6_prefix_size as usize; + let ip6_prefix_size = self + .registry() + .config() + .with(|c| c.network.max_connections_per_ip6_prefix_size as usize); if safety_spec.hop_count < 1 { apibail_invalid_argument!( @@ -228,7 +212,7 @@ impl RouteSpecStore { ); } - if safety_spec.hop_count > self.unlocked_inner.max_route_hop_count { + if safety_spec.hop_count > self.max_route_hop_count { apibail_invalid_argument!( "Not allocating route longer than max route hop count", "hop_count", @@ -492,9 +476,8 @@ impl RouteSpecStore { }) }; - let routing_table = self.unlocked_inner.routing_table.clone(); let transform = |_rti: &RoutingTableInner, entry: Option>| -> NodeRef { - NodeRef::new(routing_table.clone(), entry.unwrap()) + NodeRef::new(self.registry(), entry.unwrap()) }; // Pull the whole routing table in sorted order @@ -667,13 +650,9 @@ impl RouteSpecStore { // Got a unique route, lets build the details, register it, and return it let hop_node_refs: Vec = route_nodes.iter().map(|k| nodes[*k].clone()).collect(); let mut route_set = BTreeMap::::new(); + let crypto = self.crypto(); for crypto_kind in crypto_kinds.iter().copied() { - let vcrypto = self - .unlocked_inner - .routing_table - .crypto() - .get(crypto_kind) - .unwrap(); + let vcrypto = crypto.get(crypto_kind).unwrap(); let keypair = vcrypto.generate_keypair(); let hops: Vec = route_nodes .iter() @@ -734,7 +713,7 @@ impl RouteSpecStore { R: fmt::Debug, { let inner = &*self.inner.lock(); - let crypto = self.unlocked_inner.routing_table.crypto(); + let crypto = self.crypto(); let Some(vcrypto) = crypto.get(public_key.kind) else { log_rpc!(debug "can't handle route with public key: {:?}", public_key); return None; @@ -852,7 +831,7 @@ impl RouteSpecStore { }; // Test with double-round trip ping to self - let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); + let rpc_processor = self.rpc_processor(); let _res = match rpc_processor.rpc_call_status(dest).await? 
{ NetworkResult::Value(v) => v, _ => { @@ -886,7 +865,7 @@ impl RouteSpecStore { // Get a safety route that is good enough let safety_spec = SafetySpec { preferred_route: None, - hop_count: self.unlocked_inner.default_route_hop_count, + hop_count: self.default_route_hop_count, stability, sequencing, }; @@ -900,8 +879,7 @@ impl RouteSpecStore { }; // Test with double-round trip ping to self - let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); - let _res = match rpc_processor.rpc_call_status(dest).await? { + let _res = match self.rpc_processor().rpc_call_status(dest).await? { NetworkResult::Value(v) => v, _ => { // Did not error, but did not come back, just return false @@ -921,7 +899,8 @@ impl RouteSpecStore { }; // Remove from hop cache - let rti = &*self.unlocked_inner.routing_table.inner.read(); + let routing_table = self.routing_table(); + let rti = &*routing_table.inner.read(); if !inner.cache.remove_from_cache(rti, id, &rssd) { panic!("hop cache should have contained cache key"); } @@ -1097,7 +1076,7 @@ impl RouteSpecStore { ) -> VeilidAPIResult { // let profile_start_ts = get_timestamp(); let inner = &mut *self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); + let routing_table = self.routing_table(); let rti = &mut *routing_table.inner.write(); // Get useful private route properties @@ -1108,7 +1087,7 @@ impl RouteSpecStore { }; let pr_pubkey = private_route.public_key.value; let pr_hopcount = private_route.hop_count as usize; - let max_route_hop_count = self.unlocked_inner.max_route_hop_count; + let max_route_hop_count = self.max_route_hop_count; // Check private route hop count isn't larger than the max route hop count plus one for the 'first hop' header if pr_hopcount > (max_route_hop_count + 1) { @@ -1130,10 +1109,10 @@ impl RouteSpecStore { let opt_first_hop = match pr_first_hop_node { RouteNode::NodeId(id) => rti - .lookup_node_ref(routing_table.clone(), TypedKey::new(crypto_kind, id)) + 
.lookup_node_ref(TypedKey::new(crypto_kind, id)) .map_err(VeilidAPIError::internal)?, RouteNode::PeerInfo(pi) => Some( - rti.register_node_with_peer_info(routing_table.clone(), pi, false) + rti.register_node_with_peer_info(pi, false) .map_err(VeilidAPIError::internal)? .unfiltered(), ), @@ -1362,7 +1341,7 @@ impl RouteSpecStore { avoid_nodes: &[TypedKey], ) -> VeilidAPIResult { // Ensure the total hop count isn't too long for our config - let max_route_hop_count = self.unlocked_inner.max_route_hop_count; + let max_route_hop_count = self.max_route_hop_count; if safety_spec.hop_count == 0 { apibail_invalid_argument!( "safety route hop count is zero", @@ -1438,7 +1417,7 @@ impl RouteSpecStore { avoid_nodes: &[TypedKey], ) -> VeilidAPIResult { let inner = &mut *self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); + let routing_table = self.routing_table(); let rti = &mut *routing_table.inner.write(); self.get_route_for_safety_spec_inner( @@ -1457,7 +1436,7 @@ impl RouteSpecStore { rsd: &RouteSpecDetail, optimized: bool, ) -> VeilidAPIResult { - let routing_table = self.unlocked_inner.routing_table.clone(); + let routing_table = self.routing_table(); let rti = &*routing_table.inner.read(); // Ensure we get the crypto for it @@ -1732,8 +1711,7 @@ impl RouteSpecStore { cur_ts: Timestamp, ) -> VeilidAPIResult<()> { let Some(our_node_info_ts) = self - .unlocked_inner - .routing_table + .routing_table() .get_published_peer_info(RoutingDomain::PublicInternet) .map(|pi| pi.signed_node_info().timestamp()) else { @@ -1767,11 +1745,7 @@ impl RouteSpecStore { let inner = &mut *self.inner.lock(); // Check for stub route - if self - .unlocked_inner - .routing_table - .matches_own_node_id_key(key) - { + if self.routing_table().matches_own_node_id_key(key) { return None; } @@ -1869,7 +1843,7 @@ impl RouteSpecStore { /// Convert binary blob to private route vector pub fn blob_to_private_routes(&self, blob: Vec) -> VeilidAPIResult> { // Get crypto - let 
crypto = self.unlocked_inner.routing_table.crypto(); + let crypto = self.crypto(); // Deserialize count if blob.is_empty() { @@ -1904,7 +1878,7 @@ impl RouteSpecStore { let private_route = decode_private_route(&decode_context, &pr_reader).map_err(|e| { VeilidAPIError::invalid_argument("failed to decode private route", "e", e) })?; - private_route.validate(crypto.clone()).map_err(|e| { + private_route.validate(&crypto).map_err(|e| { VeilidAPIError::invalid_argument("failed to validate private route", "e", e) })?; @@ -1920,7 +1894,7 @@ impl RouteSpecStore { /// Generate RouteId from typed key set of route public keys fn generate_allocated_route_id(&self, rssd: &RouteSetSpecDetail) -> VeilidAPIResult { let route_set_keys = rssd.get_route_set_keys(); - let crypto = self.unlocked_inner.routing_table.crypto(); + let crypto = self.crypto(); let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * route_set_keys.len()); let mut best_kind: Option = None; @@ -1945,7 +1919,7 @@ impl RouteSpecStore { &self, private_routes: &[PrivateRoute], ) -> VeilidAPIResult { - let crypto = self.unlocked_inner.routing_table.crypto(); + let crypto = self.crypto(); let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * private_routes.len()); let mut best_kind: Option = None; diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs index f46b54dd..0ec61aa2 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs @@ -17,9 +17,11 @@ impl RouteSpecStoreContent { } } - pub async fn load(routing_table: RoutingTable) -> EyreResult { + pub async fn load( + table_store: &TableStore, + routing_table: &RoutingTable, + ) -> EyreResult { // Deserialize what we can - let table_store = routing_table.network_manager().table_store(); let rsstdb = table_store.open("RouteSpecStore", 1).await?; 
let mut content: RouteSpecStoreContent = rsstdb.load_json(0, b"content").await?.unwrap_or_default(); @@ -59,10 +61,9 @@ impl RouteSpecStoreContent { Ok(content) } - pub async fn save(&self, routing_table: RoutingTable) -> EyreResult<()> { + pub async fn save(&self, table_store: &TableStore) -> EyreResult<()> { // Save all the fields we care about to the frozen blob in table storage // This skips #[with(Skip)] saving the secret keys, we save them in the protected store instead - let table_store = routing_table.network_manager().table_store(); let rsstdb = table_store.open("RouteSpecStore", 1).await?; rsstdb.store_json(0, b"content", self).await?; diff --git a/veilid-core/src/routing_table/routing_table_inner/mod.rs b/veilid-core/src/routing_table/routing_table_inner/mod.rs index 9e3dae90..300ab068 100644 --- a/veilid-core/src/routing_table/routing_table_inner/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/mod.rs @@ -15,8 +15,8 @@ pub type EntryCounts = BTreeMap<(RoutingDomain, CryptoKind), usize>; /// RoutingTable rwlock-internal data pub struct RoutingTableInner { - /// Extra pointer to unlocked members to simplify access - pub(super) unlocked_inner: Arc, + /// Convenience accessor for the global component registry + pub(super) registry: VeilidComponentRegistry, /// Routing table buckets that hold references to entries, per crypto kind pub(super) buckets: BTreeMap>, /// A weak set of all the entries we have in the buckets for faster iteration @@ -44,10 +44,12 @@ pub struct RoutingTableInner { pub(super) opt_active_watch_keepalive_ts: Option, } +impl_veilid_component_registry_accessor!(RoutingTableInner); + impl RoutingTableInner { - pub(super) fn new(unlocked_inner: Arc) -> RoutingTableInner { + pub(super) fn new(registry: VeilidComponentRegistry) -> RoutingTableInner { RoutingTableInner { - unlocked_inner, + registry, buckets: BTreeMap::new(), public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(), 
local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(), @@ -458,7 +460,6 @@ impl RoutingTableInner { // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow pub(super) fn get_nodes_needing_ping( &self, - outer_self: RoutingTable, routing_domain: RoutingDomain, cur_ts: Timestamp, ) -> Vec { @@ -559,7 +560,7 @@ impl RoutingTableInner { let transform = |_rti: &RoutingTableInner, v: Option>| { FilteredNodeRef::new( - outer_self.clone(), + self.registry.clone(), v.unwrap().clone(), NodeRefFilter::new().with_routing_domain(routing_domain), Sequencing::default(), @@ -570,10 +571,10 @@ impl RoutingTableInner { } #[expect(dead_code)] - pub fn get_all_alive_nodes(&self, outer_self: RoutingTable, cur_ts: Timestamp) -> Vec { + pub fn get_all_alive_nodes(&self, cur_ts: Timestamp) -> Vec { let mut node_refs = Vec::::with_capacity(self.bucket_entry_count()); self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, entry| { - node_refs.push(NodeRef::new(outer_self.clone(), entry)); + node_refs.push(NodeRef::new(self.registry(), entry)); Option::<()>::None }); node_refs @@ -601,6 +602,8 @@ impl RoutingTableInner { entry: Arc, node_ids: &[TypedKey], ) -> EyreResult<()> { + let routing_table = self.routing_table(); + entry.with_mut_inner(|e| { let mut existing_node_ids = e.node_ids(); @@ -631,21 +634,21 @@ impl RoutingTableInner { if let Some(old_node_id) = e.add_node_id(*node_id)? 
{ // Remove any old node id for this crypto kind if VALID_CRYPTO_KINDS.contains(&ck) { - let bucket_index = self.unlocked_inner.calculate_bucket_index(&old_node_id); + let bucket_index = routing_table.calculate_bucket_index(&old_node_id); let bucket = self.get_bucket_mut(bucket_index); bucket.remove_entry(&old_node_id.value); - self.unlocked_inner.kick_queue.lock().insert(bucket_index); + routing_table.kick_queue.lock().insert(bucket_index); } } // Bucket the entry appropriately if VALID_CRYPTO_KINDS.contains(&ck) { - let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id); + let bucket_index = routing_table.calculate_bucket_index(node_id); let bucket = self.get_bucket_mut(bucket_index); bucket.add_existing_entry(node_id.value, entry.clone()); // Kick bucket - self.unlocked_inner.kick_queue.lock().insert(bucket_index); + routing_table.kick_queue.lock().insert(bucket_index); } } @@ -653,7 +656,7 @@ impl RoutingTableInner { for node_id in existing_node_ids.iter() { let ck = node_id.kind; if VALID_CRYPTO_KINDS.contains(&ck) { - let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id); + let bucket_index = routing_table.calculate_bucket_index(node_id); let bucket = self.get_bucket_mut(bucket_index); bucket.remove_entry(&node_id.value); entry.with_mut_inner(|e| e.remove_node_id(ck)); @@ -687,15 +690,16 @@ impl RoutingTableInner { #[instrument(level = "trace", skip_all, err)] fn create_node_ref( &mut self, - outer_self: RoutingTable, node_ids: &TypedKeyGroup, update_func: F, ) -> EyreResult where F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner), { + let routing_table = self.routing_table(); + // Ensure someone isn't trying register this node itself - if self.unlocked_inner.matches_own_node_id(node_ids) { + if routing_table.matches_own_node_id(node_ids) { bail!("can't register own node"); } @@ -708,7 +712,7 @@ impl RoutingTableInner { continue; } // Find the first in crypto sort order - let bucket_index = 
self.unlocked_inner.calculate_bucket_index(node_id); + let bucket_index = routing_table.calculate_bucket_index(node_id); let bucket = self.get_bucket(bucket_index); if let Some(entry) = bucket.entry(&node_id.value) { // Best entry is the first one in sorted order that exists from the node id list @@ -730,7 +734,7 @@ impl RoutingTableInner { } // Make a noderef to return - let nr = NodeRef::new(outer_self.clone(), best_entry.clone()); + let nr = NodeRef::new(self.registry(), best_entry.clone()); // Update the entry with the update func best_entry.with_mut_inner(|e| update_func(self, e)); @@ -741,11 +745,11 @@ impl RoutingTableInner { // If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member let first_node_id = node_ids[0]; - let bucket_entry = self.unlocked_inner.calculate_bucket_index(&first_node_id); + let bucket_entry = routing_table.calculate_bucket_index(&first_node_id); let bucket = self.get_bucket_mut(bucket_entry); let new_entry = bucket.add_new_entry(first_node_id.value); self.all_entries.insert(new_entry.clone()); - self.unlocked_inner.kick_queue.lock().insert(bucket_entry); + routing_table.kick_queue.lock().insert(bucket_entry); // Update the other bucket entries with the remaining node ids if let Err(e) = self.update_bucket_entry_node_ids(new_entry.clone(), node_ids) { @@ -753,7 +757,7 @@ impl RoutingTableInner { } // Make node ref to return - let nr = NodeRef::new(outer_self.clone(), new_entry.clone()); + let nr = NodeRef::new(self.registry(), new_entry.clone()); // Update the entry with the update func new_entry.with_mut_inner(|e| update_func(self, e)); @@ -766,15 +770,9 @@ impl RoutingTableInner { /// Resolve an existing routing table entry using any crypto kind and return a reference to it #[instrument(level = "trace", skip_all, err)] - pub fn lookup_any_node_ref( - &self, - outer_self: RoutingTable, - node_id_key: PublicKey, - ) -> EyreResult> { + pub fn lookup_any_node_ref(&self, node_id_key: PublicKey) -> 
EyreResult> { for ck in VALID_CRYPTO_KINDS { - if let Some(nr) = - self.lookup_node_ref(outer_self.clone(), TypedKey::new(ck, node_id_key))? - { + if let Some(nr) = self.lookup_node_ref(TypedKey::new(ck, node_id_key))? { return Ok(Some(nr)); } } @@ -783,35 +781,30 @@ impl RoutingTableInner { /// Resolve an existing routing table entry and return a reference to it #[instrument(level = "trace", skip_all, err)] - pub fn lookup_node_ref( - &self, - outer_self: RoutingTable, - node_id: TypedKey, - ) -> EyreResult> { - if self.unlocked_inner.matches_own_node_id(&[node_id]) { + pub fn lookup_node_ref(&self, node_id: TypedKey) -> EyreResult> { + if self.routing_table().matches_own_node_id(&[node_id]) { bail!("can't look up own node id in routing table"); } if !VALID_CRYPTO_KINDS.contains(&node_id.kind) { bail!("can't look up node id with invalid crypto kind"); } - let bucket_index = self.unlocked_inner.calculate_bucket_index(&node_id); + let bucket_index = self.routing_table().calculate_bucket_index(&node_id); let bucket = self.get_bucket(bucket_index); Ok(bucket .entry(&node_id.value) - .map(|e| NodeRef::new(outer_self, e))) + .map(|e| NodeRef::new(self.registry(), e))) } /// Resolve an existing routing table entry and return a filtered reference to it #[instrument(level = "trace", skip_all, err)] pub fn lookup_and_filter_noderef( &self, - outer_self: RoutingTable, node_id: TypedKey, routing_domain_set: RoutingDomainSet, dial_info_filter: DialInfoFilter, ) -> EyreResult> { - let nr = self.lookup_node_ref(outer_self, node_id)?; + let nr = self.lookup_node_ref(node_id)?; Ok(nr.map(|nr| { nr.custom_filtered( NodeRefFilter::new() @@ -826,7 +819,7 @@ impl RoutingTableInner { where F: FnOnce(Arc) -> R, { - if self.unlocked_inner.matches_own_node_id(&[node_id]) { + if self.routing_table().matches_own_node_id(&[node_id]) { log_rtab!(error "can't look up own node id in routing table"); return None; } @@ -834,7 +827,7 @@ impl RoutingTableInner { log_rtab!(error "can't look up node 
id with invalid crypto kind"); return None; } - let bucket_entry = self.unlocked_inner.calculate_bucket_index(&node_id); + let bucket_entry = self.routing_table().calculate_bucket_index(&node_id); let bucket = self.get_bucket(bucket_entry); bucket.entry(&node_id.value).map(f) } @@ -845,7 +838,6 @@ impl RoutingTableInner { #[instrument(level = "trace", skip_all, err)] pub fn register_node_with_peer_info( &mut self, - outer_self: RoutingTable, peer_info: Arc, allow_invalid: bool, ) -> EyreResult { @@ -853,7 +845,7 @@ impl RoutingTableInner { // if our own node is in the list, then ignore it as we don't add ourselves to our own routing table if self - .unlocked_inner + .routing_table() .matches_own_node_id(peer_info.node_ids()) { bail!("can't register own node id in routing table"); @@ -891,10 +883,10 @@ impl RoutingTableInner { if let Some(relay_peer_info) = peer_info.signed_node_info().relay_peer_info(routing_domain) { if !self - .unlocked_inner + .routing_table() .matches_own_node_id(relay_peer_info.node_ids()) { - self.register_node_with_peer_info(outer_self.clone(), relay_peer_info, false)?; + self.register_node_with_peer_info(relay_peer_info, false)?; } } @@ -902,7 +894,7 @@ impl RoutingTableInner { Arc::unwrap_or_clone(peer_info).destructure(); let mut updated = false; let mut old_peer_info = None; - let nr = self.create_node_ref(outer_self, &node_ids, |_rti, e| { + let nr = self.create_node_ref(&node_ids, |_rti, e| { old_peer_info = e.make_peer_info(routing_domain); updated = e.update_signed_node_info(routing_domain, &signed_node_info); })?; @@ -922,12 +914,11 @@ impl RoutingTableInner { #[instrument(level = "trace", skip_all, err)] pub fn register_node_with_id( &mut self, - outer_self: RoutingTable, routing_domain: RoutingDomain, node_id: TypedKey, timestamp: Timestamp, ) -> EyreResult { - let nr = self.create_node_ref(outer_self, &TypedKeyGroup::from(node_id), |_rti, e| { + let nr = self.create_node_ref(&TypedKeyGroup::from(node_id), |_rti, e| { 
//e.make_not_dead(timestamp); e.touch_last_seen(timestamp); })?; @@ -1057,7 +1048,7 @@ impl RoutingTableInner { #[instrument(level = "trace", skip_all)] pub fn find_fast_non_local_nodes_filtered( &self, - outer_self: RoutingTable, + registry: VeilidComponentRegistry, routing_domain: RoutingDomain, node_count: usize, mut filters: VecDeque, @@ -1089,7 +1080,7 @@ impl RoutingTableInner { node_count, filters, |_rti: &RoutingTableInner, v: Option>| { - NodeRef::new(outer_self.clone(), v.unwrap().clone()) + NodeRef::new(registry.clone(), v.unwrap().clone()) }, ) } @@ -1283,10 +1274,12 @@ impl RoutingTableInner { T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O, { let cur_ts = Timestamp::now(); + let routing_table = self.routing_table(); // Get the crypto kind let crypto_kind = node_id.kind; - let Some(vcrypto) = self.unlocked_inner.crypto().get(crypto_kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(crypto_kind) else { apibail_generic!("invalid crypto kind"); }; @@ -1338,12 +1331,12 @@ impl RoutingTableInner { let a_key = if let Some(a_entry) = a_entry { a_entry.with_inner(|e| e.node_ids().get(crypto_kind).unwrap()) } else { - self.unlocked_inner.node_id(crypto_kind) + routing_table.node_id(crypto_kind) }; let b_key = if let Some(b_entry) = b_entry { b_entry.with_inner(|e| e.node_ids().get(crypto_kind).unwrap()) } else { - self.unlocked_inner.node_id(crypto_kind) + routing_table.node_id(crypto_kind) }; // distance is the next metric, closer nodes first @@ -1379,7 +1372,8 @@ impl RoutingTableInner { .collect(); // Sort closest - let sort = make_closest_noderef_sort(self.unlocked_inner.crypto(), node_id); + let crypto = self.crypto(); + let sort = make_closest_noderef_sort(&crypto, node_id); closest_nodes_locked.sort_by(sort); // Unlock noderefs @@ -1388,10 +1382,10 @@ impl RoutingTableInner { } #[instrument(level = "trace", skip_all)] -pub fn make_closest_noderef_sort( - crypto: Crypto, +pub fn make_closest_noderef_sort<'a>( + crypto: &'a 
Crypto, node_id: TypedKey, -) -> impl Fn(&LockedNodeRef, &LockedNodeRef) -> core::cmp::Ordering { +) -> impl Fn(&LockedNodeRef, &LockedNodeRef) -> core::cmp::Ordering + 'a { let kind = node_id.kind; // Get cryptoversion to check distance with let vcrypto = crypto.get(kind).unwrap(); @@ -1418,9 +1412,9 @@ pub fn make_closest_noderef_sort( } pub fn make_closest_node_id_sort( - crypto: Crypto, + crypto: &Crypto, node_id: TypedKey, -) -> impl Fn(&CryptoKey, &CryptoKey) -> core::cmp::Ordering { +) -> impl Fn(&CryptoKey, &CryptoKey) -> core::cmp::Ordering + '_ { let kind = node_id.kind; // Get cryptoversion to check distance with let vcrypto = crypto.get(kind).unwrap(); diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs index 1544d2f9..3372ad74 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs @@ -7,7 +7,7 @@ pub trait RoutingDomainEditorCommonTrait { protocol_type: Option, ) -> &mut Self; fn set_relay_node(&mut self, relay_node: Option) -> &mut Self; - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self; fn setup_network( &mut self, @@ -83,7 +83,7 @@ pub(super) enum RoutingDomainChangeCommon { AddDialInfo { dial_info_detail: DialInfoDetail, }, - // #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + // #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] // RemoveDialInfoDetail { // dial_info_detail: DialInfoDetail, // }, diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs index 
38b69ab0..7cfc4f76 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs @@ -1,4 +1,4 @@ -#![cfg_attr(target_arch = "wasm32", expect(dead_code))] +#![cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] use super::*; @@ -10,15 +10,15 @@ enum RoutingDomainChangeLocalNetwork { Common(RoutingDomainChangeCommon), } -pub struct RoutingDomainEditorLocalNetwork { - routing_table: RoutingTable, +pub struct RoutingDomainEditorLocalNetwork<'a> { + routing_table: &'a RoutingTable, changes: Vec, } -impl RoutingDomainEditorLocalNetwork { - pub(in crate::routing_table) fn new(routing_table: RoutingTable) -> Self { +impl<'a> RoutingDomainEditorLocalNetwork<'a> { + pub(in crate::routing_table) fn new(routing_table: &'a RoutingTable) -> Self { Self { - routing_table: routing_table.clone(), + routing_table, changes: Vec::new(), } } @@ -30,7 +30,7 @@ impl RoutingDomainEditorLocalNetwork { } } -impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork { +impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a> { #[instrument(level = "debug", skip(self))] fn clear_dial_info_details( &mut self, diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs index 09d51b32..98305320 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs @@ -144,11 +144,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { pi }; - if let Err(e) = rti - .unlocked_inner - .event_bus - .post(PeerInfoChangeEvent { peer_info }) - { + if let Err(e) = rti.event_bus().post(PeerInfoChangeEvent { peer_info }) { log_rtab!(debug "Failed to 
post event: {}", e); } diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs index cabf87cd..f5050003 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs @@ -143,7 +143,7 @@ impl RoutingDomainDetailCommon { pub fn network_class(&self) -> Option { cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { Some(NetworkClass::WebApp) } else { if self.address_types.is_empty() { @@ -312,6 +312,9 @@ impl RoutingDomainDetailCommon { // Internal functions fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo { + let crypto = rti.crypto(); + let routing_table = rti.routing_table(); + let node_info = NodeInfo::new( self.network_class().unwrap_or(NetworkClass::Invalid), self.outbound_protocols, @@ -343,8 +346,8 @@ impl RoutingDomainDetailCommon { let signed_node_info = match relay_info { Some((relay_ids, relay_sdni)) => SignedNodeInfo::Relayed( SignedRelayedNodeInfo::make_signatures( - rti.unlocked_inner.crypto(), - rti.unlocked_inner.node_id_typed_key_pairs(), + &crypto, + routing_table.node_id_typed_key_pairs(), node_info, relay_ids, relay_sdni, @@ -353,8 +356,8 @@ impl RoutingDomainDetailCommon { ), None => SignedNodeInfo::Direct( SignedDirectNodeInfo::make_signatures( - rti.unlocked_inner.crypto(), - rti.unlocked_inner.node_id_typed_key_pairs(), + &crypto, + routing_table.node_id_typed_key_pairs(), node_info, ) .unwrap(), @@ -363,7 +366,7 @@ impl RoutingDomainDetailCommon { PeerInfo::new( self.routing_domain, - rti.unlocked_inner.node_ids(), + routing_table.node_ids(), signed_node_info, ) } diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs index 
dd644395..b3c75323 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs @@ -5,13 +5,13 @@ enum RoutingDomainChangePublicInternet { Common(RoutingDomainChangeCommon), } -pub struct RoutingDomainEditorPublicInternet { - routing_table: RoutingTable, +pub struct RoutingDomainEditorPublicInternet<'a> { + routing_table: &'a RoutingTable, changes: Vec, } -impl RoutingDomainEditorPublicInternet { - pub(in crate::routing_table) fn new(routing_table: RoutingTable) -> Self { +impl<'a> RoutingDomainEditorPublicInternet<'a> { + pub(in crate::routing_table) fn new(routing_table: &'a RoutingTable) -> Self { Self { routing_table, changes: Vec::new(), @@ -41,7 +41,7 @@ impl RoutingDomainEditorPublicInternet { } } -impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { +impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a> { #[instrument(level = "debug", skip(self))] fn clear_dial_info_details( &mut self, @@ -263,8 +263,7 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { if changed { // Clear the routespecstore cache if our PublicInternet dial info has changed - let rss = self.routing_table.route_spec_store(); - rss.reset_cache(); + self.routing_table.route_spec_store().reset_cache(); } } diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs index 44fa267c..261f845c 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs @@ -122,11 +122,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { pi }; - if let Err(e) = rti - .unlocked_inner - .event_bus - 
.post(PeerInfoChangeEvent { peer_info }) - { + if let Err(e) = rti.event_bus().post(PeerInfoChangeEvent { peer_info }) { log_rtab!(debug "Failed to post event: {}", e); } @@ -167,11 +163,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { dif_sort: Option>, ) -> ContactMethod { let ip6_prefix_size = rti - .unlocked_inner - .config - .get() - .network - .max_connections_per_ip6_prefix_size as usize; + .config() + .with(|c| c.network.max_connections_per_ip6_prefix_size as usize); // Get the nodeinfos for convenience let node_a = peer_a.signed_node_info().node_info(); diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index aa7a4c1b..c5f30b2f 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -81,7 +81,7 @@ impl RoutingTable { } // If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node - if self.unlocked_inner.matches_own_node_id(&node_ids) { + if self.matches_own_node_id(&node_ids) { return Ok(None); } @@ -255,7 +255,7 @@ impl RoutingTable { //#[instrument(level = "trace", skip(self), err)] pub fn bootstrap_with_peer( - self, + &self, crypto_kinds: Vec, pi: Arc, unord: &FuturesUnordered>, @@ -280,19 +280,20 @@ impl RoutingTable { for crypto_kind in crypto_kinds { // Bootstrap this crypto kind let nr = nr.unfiltered(); - let routing_table = self.clone(); unord.push(Box::pin( async move { + let network_manager = nr.network_manager(); + let routing_table = nr.routing_table(); + // Get what contact method would be used for contacting the bootstrap - let bsdi = match routing_table - .network_manager() + let bsdi = match network_manager .get_node_contact_method(nr.default_filtered()) { Ok(NodeContactMethod::Direct(v)) => v, Ok(v) => { log_rtab!(debug "invalid contact method for bootstrap, ignoring peer: {:?}", v); - // let _ = routing_table - // .network_manager() + // let _ = + // 
network_manager // .get_node_contact_method(nr.clone()); return; } @@ -312,7 +313,7 @@ impl RoutingTable { log_rtab!(debug "bootstrap server is not responding for dialinfo: {}", bsdi); // Try a different dialinfo next time - routing_table.network_manager().address_filter().set_dial_info_failed(bsdi); + network_manager.address_filter().set_dial_info_failed(bsdi); } else { // otherwise this bootstrap is valid, lets ask it to find ourselves now routing_table.reverse_find_node(crypto_kind, nr, true, vec![]).await @@ -325,7 +326,7 @@ impl RoutingTable { #[instrument(level = "trace", skip(self), err)] pub async fn bootstrap_with_peer_list( - self, + &self, peers: Vec>, stop_token: StopToken, ) -> EyreResult<()> { @@ -339,8 +340,7 @@ impl RoutingTable { // Run all bootstrap operations concurrently let mut unord = FuturesUnordered::>::new(); for peer in peers { - self.clone() - .bootstrap_with_peer(crypto_kinds.clone(), peer, &unord); + self.bootstrap_with_peer(crypto_kinds.clone(), peer, &unord); } // Wait for all bootstrap operations to complete before we complete the singlefuture @@ -364,10 +364,15 @@ impl RoutingTable { } #[instrument(level = "trace", skip(self), err)] - pub async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> { + pub async fn bootstrap_task_routine( + &self, + stop_token: StopToken, + _last_ts: Timestamp, + _cur_ts: Timestamp, + ) -> EyreResult<()> { let bootstrap = self - .unlocked_inner - .with_config(|c| c.network.routing_table.bootstrap.clone()); + .config() + .with(|c| c.network.routing_table.bootstrap.clone()); // Don't bother if bootstraps aren't configured if bootstrap.is_empty() { @@ -445,8 +450,6 @@ impl RoutingTable { peers }; - self.clone() - .bootstrap_with_peer_list(peers, stop_token) - .await + self.bootstrap_with_peer_list(peers, stop_token).await } } diff --git a/veilid-core/src/routing_table/tasks/closest_peers_refresh.rs b/veilid-core/src/routing_table/tasks/closest_peers_refresh.rs index 3da4e3cf..e860c0eb 
100644 --- a/veilid-core/src/routing_table/tasks/closest_peers_refresh.rs +++ b/veilid-core/src/routing_table/tasks/closest_peers_refresh.rs @@ -10,14 +10,18 @@ impl RoutingTable { /// Ask our closest peers to give us more peers close to ourselves. This will /// assist with the DHT and other algorithms that utilize the distance metric. #[instrument(level = "trace", skip(self), err)] - pub async fn closest_peers_refresh_task_routine(self, stop_token: StopToken) -> EyreResult<()> { + pub async fn closest_peers_refresh_task_routine( + &self, + stop_token: StopToken, + _last_ts: Timestamp, + _cur_ts: Timestamp, + ) -> EyreResult<()> { let mut unord = FuturesUnordered::new(); for crypto_kind in VALID_CRYPTO_KINDS { // Get our node id for this cryptokind let self_node_id = self.node_id(crypto_kind); - let routing_table = self.clone(); let mut filters = VecDeque::new(); let filter = Box::new( move |rti: &RoutingTableInner, opt_entry: Option>| { @@ -47,24 +51,23 @@ impl RoutingTable { ) as RoutingTableEntryFilter; filters.push_front(filter); - let noderefs = routing_table + let noderefs = self .find_preferred_closest_nodes( CLOSEST_PEERS_REQUEST_COUNT, self_node_id, filters, |_rti, entry: Option>| { - NodeRef::new(routing_table.clone(), entry.unwrap().clone()) + NodeRef::new(self.registry(), entry.unwrap().clone()) }, ) .unwrap(); for nr in noderefs { - let routing_table = self.clone(); unord.push( async move { // This would be better if it were 'any' instead of 'all' capabilities // but that requires extending the capnp to support it. 
- routing_table + nr.routing_table() .reverse_find_node( crypto_kind, nr, diff --git a/veilid-core/src/routing_table/tasks/kick_buckets.rs b/veilid-core/src/routing_table/tasks/kick_buckets.rs index e46a12e6..72f3b644 100644 --- a/veilid-core/src/routing_table/tasks/kick_buckets.rs +++ b/veilid-core/src/routing_table/tasks/kick_buckets.rs @@ -11,15 +11,15 @@ impl RoutingTable { // Attempts to keep the size of the routing table down to the bucket depth #[instrument(level = "trace", skip(self), err)] pub async fn kick_buckets_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, ) -> EyreResult<()> { - let kick_queue: Vec = - core::mem::take(&mut *self.unlocked_inner.kick_queue.lock()) - .into_iter() - .collect(); + let crypto = self.crypto(); + let kick_queue: Vec = core::mem::take(&mut *self.kick_queue.lock()) + .into_iter() + .collect(); let mut inner = self.inner.write(); // Get our exempt nodes for each crypto kind @@ -30,7 +30,7 @@ impl RoutingTable { let Some(buckets) = inner.buckets.get(&kind) else { continue; }; - let sort = make_closest_node_id_sort(self.crypto(), our_node_id); + let sort = make_closest_node_id_sort(&crypto, our_node_id); let mut closest_peers = BTreeSet::::new(); let mut closest_unreliable_count = 0usize; diff --git a/veilid-core/src/routing_table/tasks/mod.rs b/veilid-core/src/routing_table/tasks/mod.rs index 268c8b76..936497ae 100644 --- a/veilid-core/src/routing_table/tasks/mod.rs +++ b/veilid-core/src/routing_table/tasks/mod.rs @@ -12,175 +12,98 @@ use super::*; impl RoutingTable { pub fn setup_tasks(&self) { // Set rolling transfers tick task - { - let this = self.clone(); - self.unlocked_inner - .rolling_transfers_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().rolling_transfers_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + rolling_transfers_task, + rolling_transfers_task_routine + ); // Set update state stats 
tick task - { - let this = self.clone(); - self.unlocked_inner - .update_state_stats_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().update_state_stats_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + update_state_stats_task, + update_state_stats_task_routine + ); // Set rolling answers tick task - { - let this = self.clone(); - self.unlocked_inner - .rolling_answers_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().rolling_answers_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + rolling_answers_task, + rolling_answers_task_routine + ); // Set kick buckets tick task - { - let this = self.clone(); - self.unlocked_inner - .kick_buckets_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().kick_buckets_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!(self, Self, kick_buckets_task, kick_buckets_task_routine); // Set bootstrap tick task - { - let this = self.clone(); - self.unlocked_inner - .bootstrap_task - .set_routine(move |s, _l, _t| Box::pin(this.clone().bootstrap_task_routine(s))); - } + impl_setup_task!(self, Self, bootstrap_task, bootstrap_task_routine); // Set peer minimum refresh tick task - { - let this = self.clone(); - self.unlocked_inner - .peer_minimum_refresh_task - .set_routine(move |s, _l, _t| { - Box::pin(this.clone().peer_minimum_refresh_task_routine(s)) - }); - } + impl_setup_task!( + self, + Self, + peer_minimum_refresh_task, + peer_minimum_refresh_task_routine + ); // Set closest peers refresh tick task - { - let this = self.clone(); - self.unlocked_inner - .closest_peers_refresh_task - .set_routine(move |s, _l, _t| { - Box::pin(this.clone().closest_peers_refresh_task_routine(s)) - }); - } + impl_setup_task!( + self, + Self, + closest_peers_refresh_task, + closest_peers_refresh_task_routine + ); // Set ping validator PublicInternet tick task - 
{ - let this = self.clone(); - self.unlocked_inner - .ping_validator_public_internet_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().ping_validator_public_internet_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + ping_validator_public_internet_task, + ping_validator_public_internet_task_routine + ); // Set ping validator LocalNetwork tick task - { - let this = self.clone(); - self.unlocked_inner - .ping_validator_local_network_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().ping_validator_local_network_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + ping_validator_local_network_task, + ping_validator_local_network_task_routine + ); // Set ping validator PublicInternet Relay tick task - { - let this = self.clone(); - self.unlocked_inner - .ping_validator_public_internet_relay_task - .set_routine(move |s, l, t| { - Box::pin( - this.clone() - .ping_validator_public_internet_relay_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - ), - ) - }); - } + impl_setup_task!( + self, + Self, + ping_validator_public_internet_relay_task, + ping_validator_public_internet_relay_task_routine + ); // Set ping validator Active Watch tick task - { - let this = self.clone(); - self.unlocked_inner - .ping_validator_active_watch_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().ping_validator_active_watch_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + ping_validator_active_watch_task, + ping_validator_active_watch_task_routine + ); // Set relay management tick task - { - let this = self.clone(); - self.unlocked_inner - .relay_management_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().relay_management_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + 
relay_management_task, + relay_management_task_routine + ); // Set private route management tick task - { - let this = self.clone(); - self.unlocked_inner - .private_route_management_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().private_route_management_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + private_route_management_task, + private_route_management_task_routine + ); } /// Ticks about once per second @@ -197,18 +120,18 @@ impl RoutingTable { }; // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs - self.unlocked_inner.rolling_transfers_task.tick().await?; + self.rolling_transfers_task.tick().await?; // Do state stats update every UPDATE_STATE_STATS_INTERVAL_SECS secs - self.unlocked_inner.update_state_stats_task.tick().await?; + self.update_state_stats_task.tick().await?; // Do rolling answers every ROLLING_ANSWER_INTERVAL_SECS secs - self.unlocked_inner.rolling_answers_task.tick().await?; + self.rolling_answers_task.tick().await?; // Kick buckets task - let kick_bucket_queue_count = self.unlocked_inner.kick_queue.lock().len(); + let kick_bucket_queue_count = self.kick_queue.lock().len(); if kick_bucket_queue_count > 0 { - self.unlocked_inner.kick_buckets_task.tick().await?; + self.kick_buckets_task.tick().await?; } // Refresh entry counts @@ -222,7 +145,9 @@ impl RoutingTable { return Ok(()); } - let min_peer_count = self.with_config(|c| c.network.dht.min_peer_count as usize); + let min_peer_count = self + .config() + .with(|c| c.network.dht.min_peer_count as usize); // Figure out which tables need bootstrap or peer minimum refresh let mut needs_bootstrap = false; @@ -237,40 +162,27 @@ impl RoutingTable { } } if needs_bootstrap { - self.unlocked_inner.bootstrap_task.tick().await?; + self.bootstrap_task.tick().await?; } if needs_peer_minimum_refresh { - self.unlocked_inner.peer_minimum_refresh_task.tick().await?; + self.peer_minimum_refresh_task.tick().await?; } 
// Ping validate some nodes to groom the table - self.unlocked_inner - .ping_validator_public_internet_task - .tick() - .await?; - self.unlocked_inner - .ping_validator_local_network_task - .tick() - .await?; - self.unlocked_inner - .ping_validator_public_internet_relay_task - .tick() - .await?; - self.unlocked_inner - .ping_validator_active_watch_task + self.ping_validator_public_internet_task.tick().await?; + self.ping_validator_local_network_task.tick().await?; + self.ping_validator_public_internet_relay_task .tick() .await?; + self.ping_validator_active_watch_task.tick().await?; // Run the relay management task - self.unlocked_inner.relay_management_task.tick().await?; + self.relay_management_task.tick().await?; // Get more nodes if we need to if !needs_bootstrap && !needs_peer_minimum_refresh { // Run closest peers refresh task - self.unlocked_inner - .closest_peers_refresh_task - .tick() - .await?; + self.closest_peers_refresh_task.tick().await?; } // Only perform these operations if we already have a published peer info @@ -279,10 +191,7 @@ impl RoutingTable { .is_some() { // Run the private route management task - self.unlocked_inner - .private_route_management_task - .tick() - .await?; + self.private_route_management_task.tick().await?; } Ok(()) @@ -295,82 +204,57 @@ impl RoutingTable { pub async fn cancel_tasks(&self) { // Cancel all tasks being ticked log_rtab!(debug "stopping rolling transfers task"); - if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await { + if let Err(e) = self.rolling_transfers_task.stop().await { error!("rolling_transfers_task not stopped: {}", e); } log_rtab!(debug "stopping update state stats task"); - if let Err(e) = self.unlocked_inner.update_state_stats_task.stop().await { + if let Err(e) = self.update_state_stats_task.stop().await { error!("update_state_stats_task not stopped: {}", e); } log_rtab!(debug "stopping rolling answers task"); - if let Err(e) = self.unlocked_inner.rolling_answers_task.stop().await 
{ + if let Err(e) = self.rolling_answers_task.stop().await { error!("rolling_answers_task not stopped: {}", e); } log_rtab!(debug "stopping kick buckets task"); - if let Err(e) = self.unlocked_inner.kick_buckets_task.stop().await { + if let Err(e) = self.kick_buckets_task.stop().await { error!("kick_buckets_task not stopped: {}", e); } log_rtab!(debug "stopping bootstrap task"); - if let Err(e) = self.unlocked_inner.bootstrap_task.stop().await { + if let Err(e) = self.bootstrap_task.stop().await { error!("bootstrap_task not stopped: {}", e); } log_rtab!(debug "stopping peer minimum refresh task"); - if let Err(e) = self.unlocked_inner.peer_minimum_refresh_task.stop().await { + if let Err(e) = self.peer_minimum_refresh_task.stop().await { error!("peer_minimum_refresh_task not stopped: {}", e); } log_rtab!(debug "stopping ping_validator tasks"); - if let Err(e) = self - .unlocked_inner - .ping_validator_public_internet_task - .stop() - .await - { + if let Err(e) = self.ping_validator_public_internet_task.stop().await { error!("ping_validator_public_internet_task not stopped: {}", e); } - if let Err(e) = self - .unlocked_inner - .ping_validator_local_network_task - .stop() - .await - { + if let Err(e) = self.ping_validator_local_network_task.stop().await { error!("ping_validator_local_network_task not stopped: {}", e); } - if let Err(e) = self - .unlocked_inner - .ping_validator_public_internet_relay_task - .stop() - .await - { + if let Err(e) = self.ping_validator_public_internet_relay_task.stop().await { error!( "ping_validator_public_internet_relay_task not stopped: {}", e ); } - if let Err(e) = self - .unlocked_inner - .ping_validator_active_watch_task - .stop() - .await - { + if let Err(e) = self.ping_validator_active_watch_task.stop().await { error!("ping_validator_active_watch_task not stopped: {}", e); } log_rtab!(debug "stopping relay management task"); - if let Err(e) = self.unlocked_inner.relay_management_task.stop().await { + if let Err(e) = 
self.relay_management_task.stop().await { warn!("relay_management_task not stopped: {}", e); } log_rtab!(debug "stopping private route management task"); - if let Err(e) = self - .unlocked_inner - .private_route_management_task - .stop() - .await - { + if let Err(e) = self.private_route_management_task.stop().await { warn!("private_route_management_task not stopped: {}", e); } log_rtab!(debug "stopping closest peers refresh task"); - if let Err(e) = self.unlocked_inner.closest_peers_refresh_task.stop().await { + if let Err(e) = self.closest_peers_refresh_task.stop().await { warn!("closest_peers_refresh_task not stopped: {}", e); } } diff --git a/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs b/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs index f4176e75..febd66ce 100644 --- a/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs +++ b/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs @@ -12,11 +12,16 @@ impl RoutingTable { // nodes for their PublicInternet peers, which is a very fast way to get // a new node online. 
#[instrument(level = "trace", skip(self), err)] - pub async fn peer_minimum_refresh_task_routine(self, stop_token: StopToken) -> EyreResult<()> { + pub async fn peer_minimum_refresh_task_routine( + &self, + stop_token: StopToken, + _last_ts: Timestamp, + _cur_ts: Timestamp, + ) -> EyreResult<()> { // Get counts by crypto kind let entry_count = self.inner.read().cached_entry_counts(); - let (min_peer_count, min_peer_refresh_time_ms) = self.with_config(|c| { + let (min_peer_count, min_peer_refresh_time_ms) = self.config().with(|c| { ( c.network.dht.min_peer_count as usize, c.network.dht.min_peer_refresh_time_ms, @@ -39,7 +44,6 @@ impl RoutingTable { continue; } - let routing_table = self.clone(); let mut filters = VecDeque::new(); let filter = Box::new( move |rti: &RoutingTableInner, opt_entry: Option>| { @@ -64,23 +68,18 @@ impl RoutingTable { ) as RoutingTableEntryFilter; filters.push_front(filter); - let noderefs = routing_table.find_preferred_fastest_nodes( + let noderefs = self.find_preferred_fastest_nodes( min_peer_count, filters, |_rti, entry: Option>| { - NodeRef::new(routing_table.clone(), entry.unwrap().clone()) + NodeRef::new(self.registry(), entry.unwrap().clone()) }, ); for nr in noderefs { - let routing_table = self.clone(); ord.push_back( - async move { - routing_table - .reverse_find_node(crypto_kind, nr, false, vec![]) - .await - } - .instrument(Span::current()), + async move { self.reverse_find_node(crypto_kind, nr, false, vec![]).await } + .instrument(Span::current()), ); } } diff --git a/veilid-core/src/routing_table/tasks/ping_validator.rs b/veilid-core/src/routing_table/tasks/ping_validator.rs index fcceca69..834731b5 100644 --- a/veilid-core/src/routing_table/tasks/ping_validator.rs +++ b/veilid-core/src/routing_table/tasks/ping_validator.rs @@ -18,7 +18,7 @@ impl RoutingTable { // Task routine for PublicInternet status pings #[instrument(level = "trace", skip(self), err)] pub async fn ping_validator_public_internet_task_routine( - self, + 
&self, stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -37,7 +37,7 @@ impl RoutingTable { // Task routine for LocalNetwork status pings #[instrument(level = "trace", skip(self), err)] pub async fn ping_validator_local_network_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -56,7 +56,7 @@ impl RoutingTable { // Task routine for PublicInternet relay keepalive pings #[instrument(level = "trace", skip(self), err)] pub async fn ping_validator_public_internet_relay_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -75,7 +75,7 @@ impl RoutingTable { // Task routine for active watch keepalive pings #[instrument(level = "trace", skip(self), err)] pub async fn ping_validator_active_watch_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -105,7 +105,6 @@ impl RoutingTable { return Ok(()); }; - let rpc = self.rpc_processor(); // Get our publicinternet dial info let dids = self.all_filtered_dial_info_details( RoutingDomain::PublicInternet.into(), @@ -180,11 +179,11 @@ impl RoutingTable { } for relay_nr_filtered in relay_noderefs { - let rpc = rpc.clone(); futurequeue.push_back( async move { log_rtab!("--> PublicInternet Relay ping to {:?}", relay_nr_filtered); - let _ = rpc + let rpc_processor = relay_nr_filtered.rpc_processor(); + let _ = rpc_processor .rpc_call_status(Destination::direct(relay_nr_filtered)) .await?; Ok(()) @@ -202,8 +201,6 @@ impl RoutingTable { cur_ts: Timestamp, futurequeue: &mut VecDeque, ) -> EyreResult<()> { - let rpc = self.rpc_processor(); - let watches_need_keepalive = { let mut inner = self.inner.write(); let need = inner @@ -224,15 +221,15 @@ impl RoutingTable { } // Get all the active watches from the storage manager - let storage_manager = self.unlocked_inner.network_manager.storage_manager(); - let watch_destinations = storage_manager.get_active_watch_nodes().await; + let 
watch_destinations = self.storage_manager().get_active_watch_nodes().await; for watch_destination in watch_destinations { - let rpc = rpc.clone(); + let registry = self.registry(); futurequeue.push_back( async move { log_rtab!("--> Watch Keepalive ping to {:?}", watch_destination); - let _ = rpc.rpc_call_status(watch_destination).await?; + let rpc_processor = registry.rpc_processor(); + let _ = rpc_processor.rpc_call_status(watch_destination).await?; Ok(()) } .boxed(), @@ -249,8 +246,6 @@ impl RoutingTable { cur_ts: Timestamp, futurequeue: &mut VecDeque, ) -> EyreResult<()> { - let rpc = self.rpc_processor(); - // Get all nodes needing pings in the PublicInternet routing domain let node_refs = self.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts); @@ -258,12 +253,14 @@ impl RoutingTable { for nr in node_refs { let nr = nr.sequencing_clone(Sequencing::PreferOrdered); - let rpc = rpc.clone(); futurequeue.push_back( async move { #[cfg(feature = "verbose-tracing")] log_rtab!(debug "--> PublicInternet Validator ping to {:?}", nr); - let _ = rpc.rpc_call_status(Destination::direct(nr)).await?; + let rpc_processor = nr.rpc_processor(); + let _ = rpc_processor + .rpc_call_status(Destination::direct(nr)) + .await?; Ok(()) } .boxed(), @@ -281,8 +278,6 @@ impl RoutingTable { cur_ts: Timestamp, futurequeue: &mut VecDeque, ) -> EyreResult<()> { - let rpc = self.rpc_processor(); - // Get all nodes needing pings in the LocalNetwork routing domain let node_refs = self.get_nodes_needing_ping(RoutingDomain::LocalNetwork, cur_ts); @@ -290,14 +285,15 @@ impl RoutingTable { for nr in node_refs { let nr = nr.sequencing_clone(Sequencing::PreferOrdered); - let rpc = rpc.clone(); - // Just do a single ping with the best protocol for all the nodes futurequeue.push_back( async move { #[cfg(feature = "verbose-tracing")] log_rtab!(debug "--> LocalNetwork Validator ping to {:?}", nr); - let _ = rpc.rpc_call_status(Destination::direct(nr)).await?; + let rpc_processor = 
nr.rpc_processor(); + let _ = rpc_processor + .rpc_call_status(Destination::direct(nr)) + .await?; Ok(()) } .boxed(), diff --git a/veilid-core/src/routing_table/tasks/private_route_management.rs b/veilid-core/src/routing_table/tasks/private_route_management.rs index 4b708b80..942d3c39 100644 --- a/veilid-core/src/routing_table/tasks/private_route_management.rs +++ b/veilid-core/src/routing_table/tasks/private_route_management.rs @@ -8,12 +8,13 @@ const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2; impl RoutingTable { fn get_background_safety_route_count(&self) -> usize { - let c = self.config.get(); - if c.capabilities.disable.contains(&CAP_ROUTE) { - 0 - } else { - BACKGROUND_SAFETY_ROUTE_COUNT - } + self.config().with(|c| { + if c.capabilities.disable.contains(&CAP_ROUTE) { + 0 + } else { + BACKGROUND_SAFETY_ROUTE_COUNT + } + }) } /// Fastest routes sort fn route_sort_latency_fn(a: &(RouteId, u64), b: &(RouteId, u64)) -> cmp::Ordering { @@ -44,14 +45,14 @@ impl RoutingTable { /// If a route doesn't 'need_testing', then we neither test nor drop it #[instrument(level = "trace", skip(self))] fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec { - let default_route_hop_count = - self.with_config(|c| c.network.rpc.default_route_hop_count as usize); + let default_route_hop_count = self + .config() + .with(|c| c.network.rpc.default_route_hop_count as usize); - let rss = self.route_spec_store(); let mut must_test_routes = Vec::::new(); let mut unpublished_routes = Vec::<(RouteId, u64)>::new(); let mut expired_routes = Vec::::new(); - rss.list_allocated_routes(|k, v| { + self.route_spec_store().list_allocated_routes(|k, v| { let stats = v.get_stats(); // Ignore nodes that don't need testing if !stats.needs_testing(cur_ts) { @@ -95,7 +96,7 @@ impl RoutingTable { // Process dead routes for r in expired_routes { log_rtab!(debug "Expired route: {}", r); - rss.release_route(r); + self.route_spec_store().release_route(r); } // return routes to test @@ -114,8 +115,6 @@ 
impl RoutingTable { } log_rtab!("Testing routes: {:?}", routes_needing_testing); - // Test all the routes that need testing at the same time - let rss = self.route_spec_store(); #[derive(Default, Debug)] struct TestRouteContext { dead_routes: Vec, @@ -125,11 +124,10 @@ impl RoutingTable { { let mut unord = FuturesUnordered::new(); for r in routes_needing_testing { - let rss = rss.clone(); let ctx = ctx.clone(); unord.push( async move { - let success = match rss.test_route(r).await { + let success = match self.route_spec_store().test_route(r).await { // Test had result Ok(Some(v)) => v, // Test could not be performed at this time @@ -160,7 +158,7 @@ impl RoutingTable { let ctx = Arc::try_unwrap(ctx).unwrap().into_inner(); for r in ctx.dead_routes { log_rtab!(debug "Dead route failed to test: {}", r); - rss.release_route(r); + self.route_spec_store().release_route(r); } Ok(()) @@ -169,7 +167,7 @@ impl RoutingTable { /// Keep private routes assigned and accessible #[instrument(level = "trace", skip(self, stop_token), err)] pub async fn private_route_management_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -183,11 +181,12 @@ impl RoutingTable { } // Ensure we have a minimum of N allocated local, unpublished routes with the default number of hops and all our supported crypto kinds - let default_route_hop_count = - self.with_config(|c| c.network.rpc.default_route_hop_count as usize); + let default_route_hop_count = self + .config() + .with(|c| c.network.rpc.default_route_hop_count as usize); let mut local_unpublished_route_count = 0usize; - let rss = self.route_spec_store(); - rss.list_allocated_routes(|_k, v| { + + self.route_spec_store().list_allocated_routes(|_k, v| { if !v.is_published() && v.hop_count() == default_route_hop_count && v.get_route_set_keys().kinds() == VALID_CRYPTO_KINDS @@ -213,7 +212,7 @@ impl RoutingTable { stability: Stability::Reliable, sequencing: Sequencing::PreferOrdered, }; - match 
rss.allocate_route( + match self.route_spec_store().allocate_route( &VALID_CRYPTO_KINDS, &safety_spec, DirectionSet::all(), @@ -238,7 +237,7 @@ impl RoutingTable { } // Test remote routes next - let remote_routes_needing_testing = rss.list_remote_routes(|k, v| { + let remote_routes_needing_testing = self.route_spec_store().list_remote_routes(|k, v| { let stats = v.get_stats(); if stats.needs_testing(cur_ts) { Some(*k) @@ -252,7 +251,7 @@ impl RoutingTable { } // Send update (also may send updates for released routes done by other parts of the program) - rss.send_route_update(); + self.route_spec_store().send_route_update(); Ok(()) } diff --git a/veilid-core/src/routing_table/tasks/relay_management.rs b/veilid-core/src/routing_table/tasks/relay_management.rs index 4d9a5f30..9d35d911 100644 --- a/veilid-core/src/routing_table/tasks/relay_management.rs +++ b/veilid-core/src/routing_table/tasks/relay_management.rs @@ -54,7 +54,7 @@ impl RoutingTable { // Keep relays assigned and accessible #[instrument(level = "trace", skip_all, err)] pub async fn relay_management_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, @@ -162,11 +162,8 @@ impl RoutingTable { .node_info() .clone(); let ip6_prefix_size = self - .unlocked_inner - .config - .get() - .network - .max_connections_per_ip6_prefix_size as usize; + .config() + .with(|c| c.network.max_connections_per_ip6_prefix_size as usize); move |e: &BucketEntryInner| { // Ensure this node is not on the local network and is on the public internet @@ -285,6 +282,6 @@ impl RoutingTable { Option::<()>::None }); // Return the best inbound relay noderef - best_inbound_relay.map(|e| NodeRef::new(self.clone(), e)) + best_inbound_relay.map(|e| NodeRef::new(self.registry(), e)) } } diff --git a/veilid-core/src/routing_table/tasks/update_statistics.rs b/veilid-core/src/routing_table/tasks/update_statistics.rs index c165b630..8706ec88 100644 --- 
a/veilid-core/src/routing_table/tasks/update_statistics.rs +++ b/veilid-core/src/routing_table/tasks/update_statistics.rs @@ -4,7 +4,7 @@ impl RoutingTable { // Compute transfer statistics to determine how 'fast' a node is #[instrument(level = "trace", skip(self), err)] pub async fn rolling_transfers_task_routine( - self, + &self, _stop_token: StopToken, last_ts: Timestamp, cur_ts: Timestamp, @@ -27,8 +27,7 @@ impl RoutingTable { } // Roll all route transfers - let rss = self.route_spec_store(); - rss.roll_transfers(last_ts, cur_ts); + self.route_spec_store().roll_transfers(last_ts, cur_ts); Ok(()) } @@ -36,7 +35,7 @@ impl RoutingTable { // Update state statistics in PeerStats #[instrument(level = "trace", skip(self), err)] pub async fn update_state_stats_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, @@ -57,7 +56,7 @@ impl RoutingTable { // Update rolling answers in PeerStats #[instrument(level = "trace", skip(self), err)] pub async fn rolling_answers_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, cur_ts: Timestamp, diff --git a/veilid-core/src/routing_table/tests/mod.rs b/veilid-core/src/routing_table/tests/mod.rs index 23aadf01..a855c58c 100644 --- a/veilid-core/src/routing_table/tests/mod.rs +++ b/veilid-core/src/routing_table/tests/mod.rs @@ -1,39 +1,32 @@ -use super::*; - pub mod test_serialize_routing_table; -pub(crate) fn mock_routing_table() -> routing_table::RoutingTable { - let event_bus = EventBus::new(); - let veilid_config = VeilidConfig::new(); - #[cfg(feature = "unstable-blockstore")] - let block_store = BlockStore::new(event_bus.clone(), veilid_config.clone()); - let protected_store = ProtectedStore::new(event_bus.clone(), veilid_config.clone()); - let table_store = TableStore::new( - event_bus.clone(), - veilid_config.clone(), - protected_store.clone(), - ); - let crypto = Crypto::new( - event_bus.clone(), - veilid_config.clone(), - table_store.clone(), - ); - let 
storage_manager = storage_manager::StorageManager::new( - event_bus.clone(), - veilid_config.clone(), - crypto.clone(), - table_store.clone(), - #[cfg(feature = "unstable-blockstore")] - block_store.clone(), - ); - let network_manager = network_manager::NetworkManager::new( - event_bus.clone(), - veilid_config.clone(), - storage_manager, - table_store.clone(), - #[cfg(feature = "unstable-blockstore")] - block_store.clone(), - crypto.clone(), - ); - RoutingTable::new(network_manager) +pub(crate) mod mock_registry { + use super::super::*; + use crate::tests::test_veilid_config::setup_veilid_core_with_namespace; + use crate::{network_manager::NetworkManagerStartupContext, storage_manager::StorageManager}; + + pub(crate) async fn init>(namespace: S) -> VeilidComponentRegistry { + let (update_callback, config_callback) = setup_veilid_core_with_namespace(namespace); + let veilid_config = + VeilidConfig::new_from_callback(config_callback, update_callback).unwrap(); + let registry = VeilidComponentRegistry::new(veilid_config); + registry.enable_mock(); + registry.register(ProtectedStore::new); + registry.register(TableStore::new); + registry.register(Crypto::new); + registry.register(StorageManager::new); + registry.register(RoutingTable::new); + let startup_context = NetworkManagerStartupContext::default(); + registry.register_with_context(NetworkManager::new, startup_context); + + registry.init().await.expect("should init"); + registry.post_init().await.expect("should post init"); + + registry + } + + pub(crate) async fn terminate(registry: VeilidComponentRegistry) { + registry.pre_terminate().await; + registry.terminate().await; + } } diff --git a/veilid-core/src/routing_table/tests/test_serialize_routing_table.rs b/veilid-core/src/routing_table/tests/test_serialize_routing_table.rs index 91576a4e..f64cb9bf 100644 --- a/veilid-core/src/routing_table/tests/test_serialize_routing_table.rs +++ b/veilid-core/src/routing_table/tests/test_serialize_routing_table.rs @@ -1,25 
+1,27 @@ use super::*; +use crate::{routing_table::*, RegisteredComponents}; pub async fn test_routingtable_buckets_round_trip() { - let original = mock_routing_table(); - let copy = mock_routing_table(); - original.init().await.unwrap(); - copy.init().await.unwrap(); - - // Add lots of routes to `original` here to exercise all various types. - - let (serialized_bucket_map, all_entry_bytes) = original.serialized_buckets(); - - copy.populate_routing_table( - &mut copy.inner.write(), - serialized_bucket_map, - all_entry_bytes, - ) - .unwrap(); + let original_registry = mock_registry::init("a").await; + let copy_registry = mock_registry::init("b").await; // Wrap to close lifetime of 'inner' which is borrowed here so terminate() can succeed // (it also .write() locks routing table inner) { + let original = original_registry.routing_table(); + let copy = copy_registry.routing_table(); + + // Add lots of routes to `original` here to exercise all various types. + + let (serialized_bucket_map, all_entry_bytes) = original.serialized_buckets(); + + RoutingTable::populate_routing_table_inner( + &mut copy.inner.write(), + serialized_bucket_map, + all_entry_bytes, + ) + .unwrap(); + let original_inner = &*original.inner.read(); let copy_inner = &*copy.inner.read(); @@ -51,8 +53,8 @@ pub async fn test_routingtable_buckets_round_trip() { } // Even if these are mocks, we should still practice good hygiene. 
- original.terminate().await; - copy.terminate().await; + mock_registry::terminate(original_registry).await; + mock_registry::terminate(copy_registry).await; } pub async fn test_round_trip_peerinfo() { diff --git a/veilid-core/src/routing_table/types/peer_info.rs b/veilid-core/src/routing_table/types/peer_info.rs index 1199d14c..2d9e319f 100644 --- a/veilid-core/src/routing_table/types/peer_info.rs +++ b/veilid-core/src/routing_table/types/peer_info.rs @@ -43,7 +43,7 @@ impl PeerInfo { } } - pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> { let validated_node_ids = self.signed_node_info.validate(&self.node_ids, crypto)?; if validated_node_ids.is_empty() { // Shouldn't get here because signed node info validation also checks this @@ -65,11 +65,11 @@ impl PeerInfo { (self.routing_domain, self.node_ids, self.signed_node_info) } - pub fn validate_vec(peer_info_vec: &mut Vec>, crypto: Crypto) { + pub fn validate_vec(peer_info_vec: &mut Vec>, crypto: &Crypto) { let mut n = 0usize; while n < peer_info_vec.len() { let pi = peer_info_vec.get(n).unwrap(); - if pi.validate(crypto.clone()).is_err() { + if pi.validate(crypto).is_err() { peer_info_vec.remove(n); } else { n += 1; diff --git a/veilid-core/src/routing_table/types/signed_direct_node_info.rs b/veilid-core/src/routing_table/types/signed_direct_node_info.rs index e4587262..011aab45 100644 --- a/veilid-core/src/routing_table/types/signed_direct_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_direct_node_info.rs @@ -36,7 +36,7 @@ impl SignedDirectNodeInfo { pub fn validate( &self, node_ids: &TypedKeyGroup, - crypto: Crypto, + crypto: &Crypto, ) -> VeilidAPIResult { let node_info_bytes = Self::make_signature_bytes(&self.node_info, self.timestamp)?; @@ -54,7 +54,7 @@ impl SignedDirectNodeInfo { } pub fn make_signatures( - crypto: Crypto, + crypto: &Crypto, typed_key_pairs: Vec, node_info: NodeInfo, ) -> VeilidAPIResult { diff 
--git a/veilid-core/src/routing_table/types/signed_node_info.rs b/veilid-core/src/routing_table/types/signed_node_info.rs index e4adfc1e..b59e5e67 100644 --- a/veilid-core/src/routing_table/types/signed_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_node_info.rs @@ -27,7 +27,7 @@ impl SignedNodeInfo { pub fn validate( &self, node_ids: &TypedKeyGroup, - crypto: Crypto, + crypto: &Crypto, ) -> VeilidAPIResult { match self { SignedNodeInfo::Direct(d) => d.validate(node_ids, crypto), diff --git a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs index 275cb0d0..a0950912 100644 --- a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs @@ -49,7 +49,7 @@ impl SignedRelayedNodeInfo { pub fn validate( &self, node_ids: &TypedKeyGroup, - crypto: Crypto, + crypto: &Crypto, ) -> VeilidAPIResult { // Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying if common_crypto_kinds( @@ -81,7 +81,7 @@ impl SignedRelayedNodeInfo { } pub fn make_signatures( - crypto: Crypto, + crypto: &Crypto, typed_key_pairs: Vec, node_info: NodeInfo, relay_ids: TypedKeyGroup, diff --git a/veilid-core/src/rpc_processor/coders/mod.rs b/veilid-core/src/rpc_processor/coders/mod.rs index c1fb6ff7..84542843 100644 --- a/veilid-core/src/rpc_processor/coders/mod.rs +++ b/veilid-core/src/rpc_processor/coders/mod.rs @@ -68,10 +68,10 @@ pub enum QuestionContext { #[derive(Clone)] pub struct RPCValidateContext { - pub crypto: Crypto, - // pub rpc_processor: RPCProcessor, + pub registry: VeilidComponentRegistry, pub question_context: Option, } +impl_veilid_component_registry_accessor!(RPCValidateContext); #[derive(Clone)] pub struct RPCDecodeContext { diff --git a/veilid-core/src/rpc_processor/coders/operations/operation.rs b/veilid-core/src/rpc_processor/coders/operations/operation.rs index 
4a1c2947..4ce3916d 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation.rs @@ -100,8 +100,9 @@ impl RPCOperation { pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { // Validate sender peer info if let Some(sender_peer_info) = &self.sender_peer_info.opt_peer_info { + let crypto = validate_context.crypto(); sender_peer_info - .validate(validate_context.crypto.clone()) + .validate(&crypto) .map_err(RPCError::protocol)?; } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs index cba032ca..8e01280b 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs @@ -95,7 +95,8 @@ impl RPCOperationFindNodeA { } pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { - PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + let crypto = validate_context.crypto(); + PeerInfo::validate_vec(&mut self.peers, &crypto); Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs index 35366884..1c95cac2 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs @@ -7,7 +7,7 @@ const MAX_GET_VALUE_A_PEERS_LEN: usize = 20; pub(in crate::rpc_processor) struct ValidateGetValueContext { pub last_descriptor: Option, pub subkey: ValueSubkey, - pub vcrypto: CryptoSystemVersion, + pub crypto_kind: CryptoKind, } impl fmt::Debug for ValidateGetValueContext { @@ -15,7 +15,7 @@ impl fmt::Debug for ValidateGetValueContext { f.debug_struct("ValidateGetValueContext") .field("last_descriptor", 
&self.last_descriptor) .field("subkey", &self.subkey) - .field("vcrypto", &self.vcrypto.kind().to_string()) + .field("crypto_kind", &self.crypto_kind) .finish() } } @@ -114,12 +114,15 @@ impl RPCOperationGetValueA { panic!("Wrong context type for GetValueA"); }; + let crypto = validate_context.crypto(); + let Some(vcrypto) = crypto.get(get_value_context.crypto_kind) else { + return Err(RPCError::protocol("unsupported cryptosystem")); + }; + // Validate descriptor if let Some(descriptor) = &self.descriptor { // Ensure the descriptor itself validates - descriptor - .validate(get_value_context.vcrypto.clone()) - .map_err(RPCError::protocol)?; + descriptor.validate(&vcrypto).map_err(RPCError::protocol)?; // Ensure descriptor matches last one if let Some(last_descriptor) = &get_value_context.last_descriptor { @@ -146,18 +149,14 @@ impl RPCOperationGetValueA { // And the signed value data if !value - .validate( - descriptor.owner(), - get_value_context.subkey, - get_value_context.vcrypto.clone(), - ) + .validate(descriptor.owner(), get_value_context.subkey, &vcrypto) .map_err(RPCError::protocol)? 
{ return Err(RPCError::protocol("signed value data did not validate")); } } - PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + PeerInfo::validate_vec(&mut self.peers, &crypto); Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_inspect_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_inspect_value.rs index 2310456a..a730ec8a 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_inspect_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_inspect_value.rs @@ -9,14 +9,14 @@ const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20; pub(in crate::rpc_processor) struct ValidateInspectValueContext { pub last_descriptor: Option, pub subkeys: ValueSubkeyRangeSet, - pub vcrypto: CryptoSystemVersion, + pub crypto_kind: CryptoKind, } impl fmt::Debug for ValidateInspectValueContext { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidateInspectValueContext") .field("last_descriptor", &self.last_descriptor) - .field("vcrypto", &self.vcrypto.kind().to_string()) + .field("crypto_kind", &self.crypto_kind) .finish() } } @@ -155,6 +155,11 @@ impl RPCOperationInspectValueA { panic!("Wrong context type for InspectValueA"); }; + let crypto = validate_context.crypto(); + let Some(vcrypto) = crypto.get(inspect_value_context.crypto_kind) else { + return Err(RPCError::protocol("unsupported cryptosystem")); + }; + // Ensure seqs returned does not exceeed subkeys requested #[allow(clippy::unnecessary_cast)] if self.seqs.len() as u64 > inspect_value_context.subkeys.len() as u64 { @@ -168,9 +173,7 @@ impl RPCOperationInspectValueA { // Validate descriptor if let Some(descriptor) = &self.descriptor { // Ensure the descriptor itself validates - descriptor - .validate(inspect_value_context.vcrypto.clone()) - .map_err(RPCError::protocol)?; + descriptor.validate(&vcrypto).map_err(RPCError::protocol)?; // Ensure descriptor matches last one if let Some(last_descriptor) 
= &inspect_value_context.last_descriptor { @@ -182,7 +185,7 @@ impl RPCOperationInspectValueA { } } - PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + PeerInfo::validate_vec(&mut self.peers, &crypto); Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs index 3c1aedcb..0a0198e5 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs @@ -7,7 +7,7 @@ const MAX_SET_VALUE_A_PEERS_LEN: usize = 20; pub(in crate::rpc_processor) struct ValidateSetValueContext { pub descriptor: SignedValueDescriptor, pub subkey: ValueSubkey, - pub vcrypto: CryptoSystemVersion, + pub crypto_kind: CryptoKind, } impl fmt::Debug for ValidateSetValueContext { @@ -15,7 +15,7 @@ impl fmt::Debug for ValidateSetValueContext { f.debug_struct("ValidateSetValueContext") .field("descriptor", &self.descriptor) .field("subkey", &self.subkey) - .field("vcrypto", &self.vcrypto.kind().to_string()) + .field("crypto_kind", &self.crypto_kind) .finish() } } @@ -144,10 +144,15 @@ impl RPCOperationSetValueA { panic!("Wrong context type for SetValueA"); }; + let crypto = validate_context.crypto(); + let Some(vcrypto) = crypto.get(set_value_context.crypto_kind) else { + return Err(RPCError::protocol("unsupported cryptosystem")); + }; + // Ensure the descriptor itself validates set_value_context .descriptor - .validate(set_value_context.vcrypto.clone()) + .validate(&vcrypto) .map_err(RPCError::protocol)?; if let Some(value) = &self.value { @@ -156,7 +161,7 @@ impl RPCOperationSetValueA { .validate( set_value_context.descriptor.owner(), set_value_context.subkey, - set_value_context.vcrypto.clone(), + &vcrypto, ) .map_err(RPCError::protocol)? 
{ @@ -164,7 +169,7 @@ impl RPCOperationSetValueA { } } - PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + PeerInfo::validate_vec(&mut self.peers, &crypto); Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs index 79fc9b63..b0534157 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs @@ -10,7 +10,8 @@ impl RPCOperationSignal { Self { signal_info } } pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { - self.signal_info.validate(validate_context.crypto.clone()) + let crypto = validate_context.crypto(); + self.signal_info.validate(&crypto) } // pub fn signal_info(&self) -> &SignalInfo { // &self.signal_info diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs index aaddcf00..68dcad35 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs @@ -22,7 +22,7 @@ impl RPCOperationWatchValueQ { count: u32, watch_id: Option, watcher: KeyPair, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, ) -> Result { if subkeys.ranges_len() > MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN { return Err(RPCError::protocol("WatchValueQ subkeys length too long")); @@ -76,7 +76,8 @@ impl RPCOperationWatchValueQ { } pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { - let Some(vcrypto) = validate_context.crypto.get(self.key.kind) else { + let crypto = validate_context.crypto(); + let Some(vcrypto) = crypto.get(self.key.kind) else { return Err(RPCError::protocol("unsupported cryptosystem")); }; @@ -270,7 +271,8 @@ impl RPCOperationWatchValueA { } pub fn 
validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { - PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + let crypto = validate_context.crypto(); + PeerInfo::validate_vec(&mut self.peers, &crypto); Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/operations/question.rs b/veilid-core/src/rpc_processor/coders/operations/question.rs index da9bd51c..4434ba8d 100644 --- a/veilid-core/src/rpc_processor/coders/operations/question.rs +++ b/veilid-core/src/rpc_processor/coders/operations/question.rs @@ -11,7 +11,8 @@ impl RPCQuestion { Self { respond_to, detail } } pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { - self.respond_to.validate(validate_context.crypto.clone())?; + let crypto = validate_context.crypto(); + self.respond_to.validate(&crypto)?; self.detail.validate(validate_context) } pub fn respond_to(&self) -> &RespondTo { diff --git a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs index dbe13649..2cf3e053 100644 --- a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs +++ b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs @@ -7,7 +7,7 @@ pub(in crate::rpc_processor) enum RespondTo { } impl RespondTo { - pub fn validate(&mut self, crypto: Crypto) -> Result<(), RPCError> { + pub fn validate(&mut self, crypto: &Crypto) -> Result<(), RPCError> { match self { RespondTo::Sender => Ok(()), RespondTo::PrivateRoute(pr) => pr.validate(crypto).map_err(RPCError::protocol), diff --git a/veilid-core/src/rpc_processor/destination.rs b/veilid-core/src/rpc_processor/destination.rs index 77425ed7..643bd0fe 100644 --- a/veilid-core/src/rpc_processor/destination.rs +++ b/veilid-core/src/rpc_processor/destination.rs @@ -123,7 +123,7 @@ impl Destination { } } - pub fn get_target(&self, rss: RouteSpecStore) -> Result { + pub fn get_target(&self, routing_table: &RoutingTable) 
-> Result { match self { Destination::Direct { node, @@ -139,7 +139,8 @@ impl Destination { safety_selection: _, } => { // Add the remote private route if we're going to keep the id - let route_id = rss + let route_id = routing_table + .route_spec_store() .add_remote_private_route(private_route.clone()) .map_err(RPCError::protocol)?; @@ -150,7 +151,7 @@ impl Destination { pub fn get_unsafe_routing_info( &self, - routing_table: RoutingTable, + routing_table: &RoutingTable, ) -> Option { // If there's a safety route in use, the safety route will be responsible for the routing match self.get_safety_selection() { @@ -298,9 +299,11 @@ impl RPCProcessor { } Target::PrivateRoute(rsid) => { // Get remote private route - let rss = self.routing_table().route_spec_store(); - - let Some(private_route) = rss.best_remote_private_route(&rsid) else { + let Some(private_route) = self + .routing_table() + .route_spec_store() + .best_remote_private_route(&rsid) + else { return Err(RPCError::network("could not get remote private route")); }; diff --git a/veilid-core/src/rpc_processor/error.rs b/veilid-core/src/rpc_processor/error.rs index c2030fa7..25fac16d 100644 --- a/veilid-core/src/rpc_processor/error.rs +++ b/veilid-core/src/rpc_processor/error.rs @@ -52,7 +52,7 @@ impl RPCError { pub fn map_network(message: M) -> impl FnOnce(X) -> Self { move |x| Self::Network(format!("{}: {}", message.to_string(), x.to_string())) } - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))] pub fn try_again(x: X) -> Self { Self::TryAgain(x.to_string()) } diff --git a/veilid-core/src/rpc_processor/fanout/fanout_call.rs b/veilid-core/src/rpc_processor/fanout/fanout_call.rs index 3daaca87..0b793673 100644 --- a/veilid-core/src/rpc_processor/fanout/fanout_call.rs +++ b/veilid-core/src/rpc_processor/fanout/fanout_call.rs @@ -91,14 +91,14 @@ pub fn capability_fanout_node_info_filter(caps: Vec) -> FanoutNodeIn /// If the 
algorithm times out, a Timeout result is returned, however operations will still have been performed and a /// timeout is not necessarily indicative of an algorithmic 'failure', just that no definitive stopping condition was found /// in the given time -pub(crate) struct FanoutCall +pub(crate) struct FanoutCall<'a, R, F, C, D> where R: Unpin, F: Future, C: Fn(NodeRef) -> F, D: Fn(&[NodeRef]) -> Option, { - routing_table: RoutingTable, + routing_table: &'a RoutingTable, node_id: TypedKey, context: Mutex>, node_count: usize, @@ -109,7 +109,7 @@ where check_done: D, } -impl FanoutCall +impl<'a, R, F, C, D> FanoutCall<'a, R, F, C, D> where R: Unpin, F: Future, @@ -118,7 +118,7 @@ where { #[allow(clippy::too_many_arguments)] pub fn new( - routing_table: RoutingTable, + routing_table: &'a RoutingTable, node_id: TypedKey, node_count: usize, fanout: usize, @@ -126,13 +126,13 @@ where node_info_filter: FanoutNodeInfoFilter, call_routine: C, check_done: D, - ) -> Arc { + ) -> Self { let context = Mutex::new(FanoutContext { fanout_queue: FanoutQueue::new(node_id.kind), result: None, }); - Arc::new(Self { + Self { routing_table, node_id, context, @@ -142,11 +142,11 @@ where node_info_filter, call_routine, check_done, - }) + } } #[instrument(level = "trace", target = "fanout", skip_all)] - fn evaluate_done(self: Arc, ctx: &mut FanoutContext) -> bool { + fn evaluate_done(&self, ctx: &mut FanoutContext) -> bool { // If we have a result, then we're done if ctx.result.is_some() { return true; @@ -158,7 +158,7 @@ where } #[instrument(level = "trace", target = "fanout", skip_all)] - fn add_to_fanout_queue(self: Arc, new_nodes: &[NodeRef]) { + fn add_to_fanout_queue(&self, new_nodes: &[NodeRef]) { event!(target: "fanout", Level::DEBUG, "FanoutCall::add_to_fanout_queue:\n new_nodes={{\n{}}}\n", new_nodes @@ -169,24 +169,23 @@ where ); let ctx = &mut *self.context.lock(); - let this = self.clone(); ctx.fanout_queue.add(new_nodes, |current_nodes| { - let mut current_nodes_vec = this + 
let mut current_nodes_vec = self .routing_table - .sort_and_clean_closest_noderefs(this.node_id, current_nodes); + .sort_and_clean_closest_noderefs(self.node_id, current_nodes); current_nodes_vec.truncate(self.node_count); current_nodes_vec }); } #[instrument(level = "trace", target = "fanout", skip_all)] - async fn fanout_processor(self: Arc) -> bool { + async fn fanout_processor(&self) -> bool { // Loop until we have a result or are done loop { // Get the closest node we haven't processed yet if we're not done yet let next_node = { let mut ctx = self.context.lock(); - if self.clone().evaluate_done(&mut ctx) { + if self.evaluate_done(&mut ctx) { break true; } ctx.fanout_queue.next() @@ -221,7 +220,7 @@ where let new_nodes = self .routing_table .register_nodes_with_peer_info_list(filtered_v); - self.clone().add_to_fanout_queue(&new_nodes); + self.add_to_fanout_queue(&new_nodes); } #[allow(unused_variables)] Ok(x) => { @@ -239,10 +238,9 @@ where } #[instrument(level = "trace", target = "fanout", skip_all)] - fn init_closest_nodes(self: Arc) -> Result<(), RPCError> { + fn init_closest_nodes(&self) -> Result<(), RPCError> { // Get the 'node_count' closest nodes to the key out of our routing table let closest_nodes = { - let routing_table = self.routing_table.clone(); let node_info_filter = self.node_info_filter.clone(); let filter = Box::new( move |rti: &RoutingTableInner, opt_entry: Option>| { @@ -277,20 +275,20 @@ where let filters = VecDeque::from([filter]); let transform = |_rti: &RoutingTableInner, v: Option>| { - NodeRef::new(routing_table.clone(), v.unwrap().clone()) + NodeRef::new(self.routing_table.registry(), v.unwrap().clone()) }; - routing_table + self.routing_table .find_preferred_closest_nodes(self.node_count, self.node_id, filters, transform) .map_err(RPCError::invalid_format)? 
}; - self.clone().add_to_fanout_queue(&closest_nodes); + self.add_to_fanout_queue(&closest_nodes); Ok(()) } #[instrument(level = "trace", target = "fanout", skip_all)] pub async fn run( - self: Arc, + &self, init_fanout_queue: Vec, ) -> TimeoutOr, RPCError>> { // Get timeout in milliseconds @@ -302,17 +300,17 @@ where }; // Initialize closest nodes list - if let Err(e) = self.clone().init_closest_nodes() { + if let Err(e) = self.init_closest_nodes() { return TimeoutOr::value(Err(e)); } // Ensure we include the most recent nodes - self.clone().add_to_fanout_queue(&init_fanout_queue); + self.add_to_fanout_queue(&init_fanout_queue); // Do a quick check to see if we're already done { let mut ctx = self.context.lock(); - if self.clone().evaluate_done(&mut ctx) { + if self.evaluate_done(&mut ctx) { return TimeoutOr::value(ctx.result.take().transpose()); } } @@ -322,7 +320,7 @@ where { // Spin up 'fanout' tasks to process the fanout for _ in 0..self.fanout { - let h = self.clone().fanout_processor(); + let h = self.fanout_processor(); unord.push(h); } } diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index 5259ba47..f447b90d 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -1,3 +1,5 @@ +use super::*; + mod answer; mod coders; mod destination; @@ -46,7 +48,6 @@ pub(crate) use error::*; pub(crate) use fanout::*; pub(crate) use sender_info::*; -use super::*; use futures_util::StreamExt; use stop_token::future::FutureExt as _; @@ -88,29 +89,46 @@ enum RPCKind { ///////////////////////////////////////////////////////////////////// +#[derive(Debug, Clone)] +pub struct RPCProcessorStartupContext { + pub startup_lock: Arc, +} +impl RPCProcessorStartupContext { + pub fn new() -> Self { + Self { + startup_lock: Arc::new(StartupLock::new()), + } + } +} +impl Default for RPCProcessorStartupContext { + fn default() -> Self { + Self::new() + } +} + 
+///////////////////////////////////////////////////////////////////// + +#[derive(Debug)] struct RPCProcessorInner { send_channel: Option>, stop_source: Option, worker_join_handles: Vec>, } -struct RPCProcessorUnlockedInner { - network_manager: NetworkManager, +#[derive(Debug)] +pub(crate) struct RPCProcessor { + registry: VeilidComponentRegistry, + inner: Mutex, timeout_us: TimestampDuration, queue_size: u32, concurrency: u32, max_route_hop_count: usize, - update_callback: UpdateCallback, waiting_rpc_table: OperationWaiter>, waiting_app_call_table: OperationWaiter, ()>, - startup_lock: StartupLock, + startup_context: RPCProcessorStartupContext, } -#[derive(Clone)] -pub(crate) struct RPCProcessor { - inner: Arc>, - unlocked_inner: Arc, -} +impl_veilid_component!(RPCProcessor); impl RPCProcessor { fn new_inner() -> RPCProcessorInner { @@ -120,13 +138,14 @@ impl RPCProcessor { worker_join_handles: Vec::new(), } } - fn new_unlocked_inner( - network_manager: NetworkManager, - update_callback: UpdateCallback, - ) -> RPCProcessorUnlockedInner { + + pub fn new( + registry: VeilidComponentRegistry, + startup_context: RPCProcessorStartupContext, + ) -> Self { // make local copy of node id for easy access let (concurrency, queue_size, max_route_hop_count, timeout_us) = { - let config = network_manager.config(); + let config = registry.config(); let c = config.get(); // set up channel @@ -146,99 +165,83 @@ impl RPCProcessor { (concurrency, queue_size, max_route_hop_count, timeout_us) }; - RPCProcessorUnlockedInner { - network_manager, + Self { + registry, + inner: Mutex::new(Self::new_inner()), timeout_us, queue_size, concurrency, max_route_hop_count, - update_callback, waiting_rpc_table: OperationWaiter::new(), waiting_app_call_table: OperationWaiter::new(), - startup_lock: StartupLock::new(), - } - } - pub fn new(network_manager: NetworkManager, update_callback: UpdateCallback) -> Self { - Self { - inner: Arc::new(Mutex::new(Self::new_inner())), - unlocked_inner: 
Arc::new(Self::new_unlocked_inner(network_manager, update_callback)), + startup_context, } } - pub fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() + ///////////////////////////////////// + /// Initialization + + async fn init_async(&self) -> EyreResult<()> { + Ok(()) } - pub fn crypto(&self) -> Crypto { - self.unlocked_inner.network_manager.crypto() + async fn post_init_async(&self) -> EyreResult<()> { + Ok(()) } - pub fn event_bus(&self) -> EventBus { - self.unlocked_inner.network_manager.event_bus() + async fn pre_terminate_async(&self) { + // Ensure things have shut down + assert!( + self.startup_context.startup_lock.is_shut_down(), + "should have shut down by now" + ); } - pub fn routing_table(&self) -> RoutingTable { - self.unlocked_inner.network_manager.routing_table() - } - - pub fn storage_manager(&self) -> StorageManager { - self.unlocked_inner.network_manager.storage_manager() - } - - pub fn with_config R>(&self, func: F) -> R { - let config = self.unlocked_inner.network_manager.config(); - let c = config.get(); - func(&c) - } + async fn terminate_async(&self) {} ////////////////////////////////////////////////////////////////////// #[instrument(level = "debug", skip_all, err)] pub async fn startup(&self) -> EyreResult<()> { - log_rpc!(debug "startup rpc processor"); - let guard = self.unlocked_inner.startup_lock.startup()?; + log_rpc!(debug "starting rpc processor startup"); + + let guard = self.startup_context.startup_lock.startup()?; { let mut inner = self.inner.lock(); - let channel = flume::bounded(self.unlocked_inner.queue_size as usize); + let channel = flume::bounded(self.queue_size as usize); inner.send_channel = Some(channel.0.clone()); inner.stop_source = Some(StopSource::new()); // spin up N workers - log_rpc!( - "Spinning up {} RPC workers", - self.unlocked_inner.concurrency - ); - for task_n in 0..self.unlocked_inner.concurrency { - let this = self.clone(); + log_rpc!("Spinning up {} RPC workers", 
self.concurrency); + for task_n in 0..self.concurrency { + let registry = self.registry(); let receiver = channel.1.clone(); - let jh = spawn( - &format!("rpc worker {}", task_n), - Self::rpc_worker(this, inner.stop_source.as_ref().unwrap().token(), receiver), - ); + let stop_token = inner.stop_source.as_ref().unwrap().token(); + let jh = spawn(&format!("rpc worker {}", task_n), async move { + let this = registry.rpc_processor(); + this.rpc_worker(stop_token, receiver).await + }); inner.worker_join_handles.push(jh); } } - - // Inform storage manager we are up - self.storage_manager() - .set_rpc_processor(Some(self.clone())) - .await; - guard.success(); + + log_rpc!(debug "finished rpc processor startup"); + Ok(()) } #[instrument(level = "debug", skip_all)] pub async fn shutdown(&self) { log_rpc!(debug "starting rpc processor shutdown"); - let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else { - log_rpc!(debug "rpc processor already shut down"); - return; - }; - - // Stop storage manager from using us - self.storage_manager().set_rpc_processor(None).await; + let guard = self + .startup_context + .startup_lock + .shutdown() + .await + .expect("should be started up"); // Stop the rpc workers let mut unord = FuturesUnordered::new(); @@ -269,9 +272,7 @@ impl RPCProcessor { /// Get waiting app call id for debugging purposes pub fn get_app_call_ids(&self) -> Vec { - self.unlocked_inner - .waiting_app_call_table - .get_operation_ids() + self.waiting_app_call_table.get_operation_ids() } /// Determine if a SignedNodeInfo can be placed into the specified routing domain @@ -300,12 +301,13 @@ impl RPCProcessor { let Some(peer_info) = sender_peer_info.opt_peer_info.clone() else { return Ok(NetworkResult::value(None)); }; - let address_filter = self.network_manager().address_filter(); // Ensure the sender peer info is for the actual sender specified in the envelope if !peer_info.node_ids().contains(&sender_node_id) { // Attempted to update peer info for the 
wrong node id - address_filter.punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo); + self.network_manager() + .address_filter() + .punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo); return Ok(NetworkResult::invalid_message( "attempt to update peer info for non-sender node id", @@ -318,7 +320,7 @@ impl RPCProcessor { // Don't punish for this because in the case of hairpin NAT // you can legally get LocalNetwork PeerInfo when you expect PublicInternet PeerInfo // - // address_filter.punish_node_id( + // self.network_manager().address_filter().punish_node_id( // sender_node_id, // PunishmentReason::FailedToVerifySenderPeerInfo, // ); @@ -330,7 +332,7 @@ impl RPCProcessor { { Ok(v) => v.unfiltered(), Err(e) => { - address_filter.punish_node_id( + self.network_manager().address_filter().punish_node_id( sender_node_id, PunishmentReason::FailedToRegisterSenderPeerInfo, ); @@ -365,17 +367,17 @@ impl RPCProcessor { // Routine to call to generate fanout let call_routine = |next_node: NodeRef| { - let this = self.clone(); + let registry = self.registry(); async move { + let this = registry.rpc_processor(); let v = network_result_try!( - this.clone() - .rpc_call_find_node( - Destination::direct(next_node.routing_domain_filtered(routing_domain)) - .with_safety(safety_selection), - node_id, - vec![], - ) - .await? + this.rpc_call_find_node( + Destination::direct(next_node.routing_domain_filtered(routing_domain)) + .with_safety(safety_selection), + node_id, + vec![], + ) + .await? 
); Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: v.answer, @@ -400,8 +402,9 @@ impl RPCProcessor { }; // Call the fanout + let routing_table = self.routing_table(); let fanout_call = FanoutCall::new( - routing_table.clone(), + &routing_table, node_id, count, fanout, @@ -423,11 +426,13 @@ impl RPCProcessor { node_id: TypedKey, safety_selection: SafetySelection, ) -> SendPinBoxFuture, RPCError>> { - let this = self.clone(); + let registry = self.registry(); Box::pin( async move { + let this = registry.rpc_processor(); + let _guard = this - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -451,7 +456,7 @@ impl RPCProcessor { } // If nobody knows where this node is, ask the DHT for it - let (node_count, _consensus_count, fanout, timeout) = this.with_config(|c| { + let (node_count, _consensus_count, fanout, timeout) = this.config().with(|c| { ( c.network.dht.max_find_node_count as usize, c.network.dht.resolve_node_count as usize, @@ -494,7 +499,6 @@ impl RPCProcessor { ) -> Result, RPCError> { let id = waitable_reply.handle.id(); let out = self - .unlocked_inner .waiting_rpc_table .wait_for_op(waitable_reply.handle, waitable_reply.timeout_us) .await; @@ -577,6 +581,7 @@ impl RPCProcessor { message_data: Vec, ) -> RPCNetworkResult { let routing_table = self.routing_table(); + let crypto = self.crypto(); let rss = routing_table.route_spec_store(); // Get useful private route properties @@ -584,7 +589,7 @@ impl RPCProcessor { let pr_hop_count = remote_private_route.hop_count; let pr_pubkey = remote_private_route.public_key.value; let crypto_kind = remote_private_route.crypto_kind(); - let Some(vcrypto) = self.crypto().get(crypto_kind) else { + let Some(vcrypto) = crypto.get(crypto_kind) else { return Err(RPCError::internal( "crypto not available for selected private route", )); @@ -786,6 +791,7 @@ impl RPCProcessor { /// And send our timestamp of the target's node info so they can determine if 
they should update us on their next rpc #[instrument(level = "trace", target = "rpc", skip_all)] fn get_sender_peer_info(&self, dest: &Destination) -> SenderPeerInfo { + let routing_table = self.routing_table(); // Don't do this if the sender is to remain private // Otherwise we would be attaching the original sender's identity to the final destination, // thus defeating the purpose of the safety route entirely :P @@ -793,7 +799,7 @@ impl RPCProcessor { opt_node, opt_relay: _, opt_routing_domain, - }) = dest.get_unsafe_routing_info(self.routing_table()) + }) = dest.get_unsafe_routing_info(&routing_table) else { return SenderPeerInfo::default(); }; @@ -849,13 +855,15 @@ impl RPCProcessor { // If safety route was in use, record failure to send there if let Some(sr_pubkey) = &safety_route { - let rss = self.routing_table().route_spec_store(); - rss.with_route_stats_mut(send_ts, sr_pubkey, |s| s.record_send_failed()); + self.routing_table() + .route_spec_store() + .with_route_stats_mut(send_ts, sr_pubkey, |s| s.record_send_failed()); } else { // If no safety route was in use, then it's the private route's fault if we have one if let Some(pr_pubkey) = &remote_private_route { - let rss = self.routing_table().route_spec_store(); - rss.with_route_stats_mut(send_ts, pr_pubkey, |s| s.record_send_failed()); + self.routing_table() + .route_spec_store() + .with_route_stats_mut(send_ts, pr_pubkey, |s| s.record_send_failed()); } } } @@ -880,7 +888,8 @@ impl RPCProcessor { return; } // Get route spec store - let rss = self.routing_table().route_spec_store(); + let routing_table = self.routing_table(); + let rss = routing_table.route_spec_store(); // If safety route was used, record question lost there if let Some(sr_pubkey) = &safety_route { @@ -927,7 +936,8 @@ impl RPCProcessor { } // Get route spec store - let rss = self.routing_table().route_spec_store(); + let routing_table = self.routing_table(); + let rss = routing_table.route_spec_store(); // If safety route was used, 
record send there if let Some(sr_pubkey) = &safety_route { @@ -964,7 +974,8 @@ impl RPCProcessor { return; } // Get route spec store - let rss = self.routing_table().route_spec_store(); + let routing_table = self.routing_table(); + let rss = routing_table.route_spec_store(); // Get latency for all local routes let mut total_local_latency = TimestampDuration::new(0u64); @@ -1018,7 +1029,6 @@ impl RPCProcessor { // This is fine because if we sent with a local safety route, // then we must have received with a local private route too, per the design rules if let Some(sr_pubkey) = &safety_route { - let rss = self.routing_table().route_spec_store(); rss.with_route_stats_mut(send_ts, sr_pubkey, |s| { s.record_latency(total_latency / 2u64); }); @@ -1037,6 +1047,9 @@ impl RPCProcessor { let recv_ts = msg.header.timestamp; let bytes = msg.header.body_len; + let routing_table = self.routing_table(); + let rss = routing_table.route_spec_store(); + // Process messages based on how they were received match &msg.header.detail { // Process direct messages @@ -1047,8 +1060,6 @@ impl RPCProcessor { } // Process messages that arrived with no private route (private route stub) RPCMessageHeaderDetail::SafetyRouted(d) => { - let rss = self.routing_table().route_spec_store(); - // This may record nothing if the remote safety route is not also // a remote private route that been imported, but that's okay rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| { @@ -1057,8 +1068,6 @@ impl RPCProcessor { } // Process messages that arrived to our private route RPCMessageHeaderDetail::PrivateRouted(d) => { - let rss = self.routing_table().route_spec_store(); - // This may record nothing if the remote safety route is not also // a remote private route that been imported, but that's okay // it could also be a node id if no remote safety route was used @@ -1107,13 +1116,10 @@ impl RPCProcessor { // Calculate answer timeout // Timeout is number of hops times the timeout per hop - let 
timeout_us = self.unlocked_inner.timeout_us * (hop_count as u64); + let timeout_us = self.timeout_us * (hop_count as u64); // Set up op id eventual - let handle = self - .unlocked_inner - .waiting_rpc_table - .add_op_waiter(op_id, context); + let handle = self.waiting_rpc_table.add_op_waiter(op_id, context); // Send question let bytes: ByteCount = (message.len() as u64).into(); @@ -1351,16 +1357,14 @@ impl RPCProcessor { // If we received an answer for a question we did not ask, this will return an error let question_context = if let RPCOperationKind::Answer(_) = operation.kind() { let op_id = operation.op_id(); - self.unlocked_inner - .waiting_rpc_table - .get_op_context(op_id)? + self.waiting_rpc_table.get_op_context(op_id)? } else { None }; // Validate the RPC operation let validate_context = RPCValidateContext { - crypto: self.crypto(), + registry: self.registry(), // rpc_processor: self.clone(), question_context, }; @@ -1372,8 +1376,6 @@ impl RPCProcessor { ////////////////////////////////////////////////////////////////////// #[instrument(level = "trace", target = "rpc", skip_all)] async fn process_rpc_message(&self, encoded_msg: MessageEncoded) -> RPCNetworkResult<()> { - let address_filter = self.network_manager().address_filter(); - // Decode operation appropriately based on header detail let msg = match &encoded_msg.header.detail { RPCMessageHeaderDetail::Direct(detail) => { @@ -1391,7 +1393,7 @@ impl RPCProcessor { log_rpc!(debug "Invalid RPC Operation: {}", e); // Punish nodes that send direct undecodable crap - address_filter.punish_node_id( + self.network_manager().address_filter().punish_node_id( sender_node_id, PunishmentReason::FailedToDecodeOperation, ); @@ -1458,7 +1460,7 @@ impl RPCProcessor { log_rpc!(debug "Dropping routed RPC: {}", e); // XXX: Punish routes that send routed undecodable crap - // address_filter.punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage); + // self.network_manager().address_filter().punish_route_id(xxx, 
PunishmentReason::FailedToDecodeRoutedMessage); return Ok(NetworkResult::invalid_message(e)); } }; @@ -1533,11 +1535,7 @@ impl RPCProcessor { }, RPCOperationKind::Answer(_) => { let op_id = msg.operation.op_id(); - if let Err(e) = self - .unlocked_inner - .waiting_rpc_table - .complete_op_waiter(op_id, msg) - { + if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) { match e { RPCError::Unimplemented(_) | RPCError::Internal(_) => { log_rpc!(error "Could not complete rpc operation: id = {}: {}", op_id, e); @@ -1560,7 +1558,7 @@ impl RPCProcessor { } async fn rpc_worker( - self, + &self, stop_token: StopToken, receiver: flume::Receiver<(Span, MessageEncoded)>, ) { @@ -1596,10 +1594,10 @@ impl RPCProcessor { body: Vec, ) -> EyreResult<()> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() - .map_err(RPCError::map_try_again("not started up"))?; + .wrap_err("not started up")?; if peer_noderef.routing_domain_set() != routing_domain { bail!("routing domain should match peer noderef filter"); @@ -1642,6 +1640,12 @@ impl RPCProcessor { sequencing: Sequencing, body: Vec, ) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + let header = MessageHeader { detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted { direct, @@ -1678,6 +1682,12 @@ impl RPCProcessor { safety_spec: SafetySpec, body: Vec, ) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + let header = MessageHeader { detail: RPCMessageHeaderDetail::PrivateRouted(RPCMessageHeaderDetailPrivateRouted { direct, diff --git a/veilid-core/src/rpc_processor/rpc_app_call.rs b/veilid-core/src/rpc_processor/rpc_app_call.rs index 07651fd1..2700ab05 100644 --- a/veilid-core/src/rpc_processor/rpc_app_call.rs +++ b/veilid-core/src/rpc_processor/rpc_app_call.rs @@ -5,12 +5,12 @@ impl RPCProcessor { // Can be sent via all methods 
including relays and routes #[instrument(level = "trace", target = "rpc", skip(self, message), fields(message.len = message.len(), ret.latency, ret.len), err)] pub async fn rpc_call_app_call( - self, + &self, dest: Destination, message: Vec, ) -> RPCNetworkResult>> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -117,22 +117,18 @@ impl RPCProcessor { .map(|nr| nr.node_ids().get(crypto_kind).unwrap()); // Register a waiter for this app call - let handle = self - .unlocked_inner - .waiting_app_call_table - .add_op_waiter(op_id, ()); + let handle = self.waiting_app_call_table.add_op_waiter(op_id, ()); // Pass the call up through the update callback let message_q = app_call_q.destructure(); - (self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(Box::new(VeilidAppCall::new( + (self.update_callback())(VeilidUpdate::AppCall(Box::new(VeilidAppCall::new( sender, route_id, message_q, op_id, )))); // Wait for an app call answer to come back from the app let res = self - .unlocked_inner .waiting_app_call_table - .wait_for_op(handle, self.unlocked_inner.timeout_us) + .wait_for_op(handle, self.timeout_us) .await?; let (message_a, _latency) = match res { TimeoutOr::Timeout => { @@ -158,12 +154,11 @@ impl RPCProcessor { #[instrument(level = "trace", target = "rpc", skip_all)] pub fn app_call_reply(&self, call_id: OperationId, message: Vec) -> Result<(), RPCError> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; - self.unlocked_inner - .waiting_app_call_table + self.waiting_app_call_table .complete_op_waiter(call_id, message) .map_err(RPCError::ignore) } diff --git a/veilid-core/src/rpc_processor/rpc_app_message.rs b/veilid-core/src/rpc_processor/rpc_app_message.rs index e9da1f0e..93da6adb 100644 --- a/veilid-core/src/rpc_processor/rpc_app_message.rs +++ b/veilid-core/src/rpc_processor/rpc_app_message.rs 
@@ -5,12 +5,12 @@ impl RPCProcessor { // Can be sent via all methods including relays and routes #[instrument(level = "trace", target = "rpc", skip(self, message), fields(message.len = message.len()), err)] pub async fn rpc_call_app_message( - self, + &self, dest: Destination, message: Vec, ) -> RPCNetworkResult<()> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -81,9 +81,9 @@ impl RPCProcessor { // Pass the message up through the update callback let message = app_message.destructure(); - (self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(Box::new( - VeilidAppMessage::new(sender, route_id, message), - ))); + (self.update_callback())(VeilidUpdate::AppMessage(Box::new(VeilidAppMessage::new( + sender, route_id, message, + )))); Ok(NetworkResult::value(())) } diff --git a/veilid-core/src/rpc_processor/rpc_find_node.rs b/veilid-core/src/rpc_processor/rpc_find_node.rs index 73a66a3e..31bf3ee5 100644 --- a/veilid-core/src/rpc_processor/rpc_find_node.rs +++ b/veilid-core/src/rpc_processor/rpc_find_node.rs @@ -9,13 +9,13 @@ impl RPCProcessor { /// the identity of the node and defeat the private route. 
#[instrument(level = "trace", target = "rpc", skip(self), err)] pub async fn rpc_call_find_node( - self, + &self, dest: Destination, node_id: TypedKey, capabilities: Vec, ) -> RPCNetworkResult>>> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; diff --git a/veilid-core/src/rpc_processor/rpc_get_value.rs b/veilid-core/src/rpc_processor/rpc_get_value.rs index cbba2b8e..639eeb5f 100644 --- a/veilid-core/src/rpc_processor/rpc_get_value.rs +++ b/veilid-core/src/rpc_processor/rpc_get_value.rs @@ -24,14 +24,14 @@ impl RPCProcessor { ret.latency ),err)] pub async fn rpc_call_get_value( - self, + &self, dest: Destination, key: TypedKey, subkey: ValueSubkey, last_descriptor: Option, ) -> RPCNetworkResult> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -45,7 +45,8 @@ impl RPCProcessor { }; // Get the target node id - let Some(vcrypto) = self.crypto().get(key.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(key.kind) else { return Err(RPCError::internal("unsupported cryptosystem")); }; let Some(target_node_id) = target_node_ids.get(key.kind) else { @@ -74,7 +75,7 @@ impl RPCProcessor { let question_context = QuestionContext::GetValue(ValidateGetValueContext { last_descriptor, subkey, - vcrypto: vcrypto.clone(), + crypto_kind: vcrypto.kind(), }); log_dht!(debug "{}", debug_string); @@ -137,7 +138,7 @@ impl RPCProcessor { } // Validate peers returned are, in fact, closer to the key than the node we sent this to - let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) { + let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) { Ok(v) => v, Err(e) => { return Ok(NetworkResult::invalid_message(format!( @@ -231,7 +232,9 @@ impl RPCProcessor { } // See if we would have accepted this as a set - let set_value_count = 
self.with_config(|c| c.network.dht.set_value_count as usize); + let set_value_count = self + .config() + .with(|c| c.network.dht.set_value_count as usize); let (get_result_value, get_result_descriptor) = if closer_to_key_peers.len() >= set_value_count { // Not close enough diff --git a/veilid-core/src/rpc_processor/rpc_inspect_value.rs b/veilid-core/src/rpc_processor/rpc_inspect_value.rs index 8ddc5203..83ac73ad 100644 --- a/veilid-core/src/rpc_processor/rpc_inspect_value.rs +++ b/veilid-core/src/rpc_processor/rpc_inspect_value.rs @@ -26,14 +26,14 @@ impl RPCProcessor { ),err) ] pub async fn rpc_call_inspect_value( - self, + &self, dest: Destination, key: TypedKey, subkeys: ValueSubkeyRangeSet, last_descriptor: Option, ) -> RPCNetworkResult> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -47,7 +47,8 @@ impl RPCProcessor { }; // Get the target node id - let Some(vcrypto) = self.crypto().get(key.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(key.kind) else { return Err(RPCError::internal("unsupported cryptosystem")); }; let Some(target_node_id) = target_node_ids.get(key.kind) else { @@ -77,7 +78,7 @@ impl RPCProcessor { let question_context = QuestionContext::InspectValue(ValidateInspectValueContext { last_descriptor, subkeys, - vcrypto: vcrypto.clone(), + crypto_kind: vcrypto.kind(), }); log_dht!(debug "{}", debug_string); @@ -127,7 +128,7 @@ impl RPCProcessor { } // Validate peers returned are, in fact, closer to the key than the node we sent this to - let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) { + let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) { Ok(v) => v, Err(e) => { return Ok(NetworkResult::invalid_message(format!( @@ -212,7 +213,9 @@ impl RPCProcessor { } // See if we would have accepted this as a set - let set_value_count = self.with_config(|c| 
c.network.dht.set_value_count as usize); + let set_value_count = self + .config() + .with(|c| c.network.dht.set_value_count as usize); let (inspect_result_seqs, inspect_result_descriptor) = if closer_to_key_peers.len() >= set_value_count { diff --git a/veilid-core/src/rpc_processor/rpc_return_receipt.rs b/veilid-core/src/rpc_processor/rpc_return_receipt.rs index 11dd8473..d9a20702 100644 --- a/veilid-core/src/rpc_processor/rpc_return_receipt.rs +++ b/veilid-core/src/rpc_processor/rpc_return_receipt.rs @@ -5,12 +5,12 @@ impl RPCProcessor { // Can be sent via all methods including relays and routes #[instrument(level = "trace", target = "rpc", skip(self, receipt), ret, err)] pub async fn rpc_call_return_receipt>( - self, + &self, dest: Destination, receipt: D, ) -> RPCNetworkResult<()> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; diff --git a/veilid-core/src/rpc_processor/rpc_route.rs b/veilid-core/src/rpc_processor/rpc_route.rs index d061207d..9a1a47c6 100644 --- a/veilid-core/src/rpc_processor/rpc_route.rs +++ b/veilid-core/src/rpc_processor/rpc_route.rs @@ -9,7 +9,7 @@ impl RPCProcessor { safety_route: SafetyRoute, ) -> RPCNetworkResult<()> { // Make sure hop count makes sense - if safety_route.hop_count as usize > self.unlocked_inner.max_route_hop_count { + if safety_route.hop_count as usize > self.max_route_hop_count { return Ok(NetworkResult::invalid_message( "Safety route hop count too high to process", )); @@ -26,9 +26,10 @@ impl RPCProcessor { } // Get next hop node ref + let routing_table = self.routing_table(); let Some(next_hop_nr) = route_hop .node - .node_ref(self.routing_table(), safety_route.public_key.kind) + .node_ref(&routing_table, safety_route.public_key.kind) else { return Ok(NetworkResult::invalid_message(format!( "could not get route node hop ref: {}", @@ -65,15 +66,16 @@ impl RPCProcessor { next_private_route: PrivateRoute, ) -> RPCNetworkResult<()> { // 
Make sure hop count makes sense - if next_private_route.hop_count as usize > self.unlocked_inner.max_route_hop_count { + if next_private_route.hop_count as usize > self.max_route_hop_count { return Ok(NetworkResult::invalid_message( "Private route hop count too high to process", )); } // Get next hop node ref + let routing_table = self.routing_table(); let Some(next_hop_nr) = - next_route_node.node_ref(self.routing_table(), safety_route_public_key.kind) + next_route_node.node_ref(&routing_table, safety_route_public_key.kind) else { return Ok(NetworkResult::invalid_message(format!( "could not get route node hop ref: {}", @@ -110,7 +112,7 @@ impl RPCProcessor { fn process_safety_routed_operation( &self, detail: RPCMessageHeaderDetailDirect, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, routed_operation: RoutedOperation, remote_sr_pubkey: TypedKey, ) -> RPCNetworkResult<()> { @@ -156,7 +158,7 @@ impl RPCProcessor { fn process_private_routed_operation( &self, detail: RPCMessageHeaderDetailDirect, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, routed_operation: RoutedOperation, remote_sr_pubkey: TypedKey, pr_pubkey: TypedKey, @@ -170,7 +172,8 @@ impl RPCProcessor { // Look up the private route and ensure it's one in our spec store // Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences - let rss = self.routing_table().route_spec_store(); + let routing_table = self.routing_table(); + let rss = routing_table.route_spec_store(); let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value); let Some((secret_key, safety_spec)) = rss.with_signature_validated_route( @@ -230,7 +233,7 @@ impl RPCProcessor { fn process_routed_operation( &self, detail: RPCMessageHeaderDetailDirect, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, routed_operation: RoutedOperation, remote_sr_pubkey: TypedKey, pr_pubkey: TypedKey, @@ -330,8 +333,9 @@ impl RPCProcessor { routed_operation: 
&mut RoutedOperation, ) -> RPCNetworkResult { // Get crypto kind + let crypto = self.crypto(); let crypto_kind = pr_pubkey.kind; - let Some(vcrypto) = self.crypto().get(crypto_kind) else { + let Some(vcrypto) = crypto.get(crypto_kind) else { return Ok(NetworkResult::invalid_message( "private route hop data crypto is not supported", )); @@ -370,9 +374,7 @@ impl RPCProcessor { }; // Validate the RouteHop - route_hop - .validate(self.crypto()) - .map_err(RPCError::protocol)?; + route_hop.validate(&crypto).map_err(RPCError::protocol)?; // Sign the operation if this is not our last hop // as the last hop is already signed by the envelope @@ -392,6 +394,7 @@ impl RPCProcessor { pub(super) async fn process_route(&self, msg: Message) -> RPCNetworkResult<()> { // Ignore if disabled let routing_table = self.routing_table(); + let crypto = self.crypto(); let Some(published_peer_info) = routing_table.get_published_peer_info(msg.header.routing_domain()) @@ -431,7 +434,7 @@ impl RPCProcessor { // Get crypto kind let crypto_kind = route.safety_route().crypto_kind(); - let Some(vcrypto) = self.crypto().get(crypto_kind) else { + let Some(vcrypto) = crypto.get(crypto_kind) else { return Ok(NetworkResult::invalid_message( "routed operation crypto is not supported", )); @@ -497,7 +500,7 @@ impl RPCProcessor { }; // Validate the private route - if private_route.validate(self.crypto()).is_err() { + if private_route.validate(&crypto).is_err() { return Ok(NetworkResult::invalid_message( "failed to validate private route", )); @@ -534,7 +537,7 @@ impl RPCProcessor { }; // Validate the route hop - if route_hop.validate(self.crypto()).is_err() { + if route_hop.validate(&crypto).is_err() { return Ok(NetworkResult::invalid_message( "failed to validate route hop", )); @@ -617,7 +620,7 @@ impl RPCProcessor { // No hops left, time to process the routed operation network_result_try!(self.process_routed_operation( detail, - vcrypto, + &vcrypto, routed_operation, safety_route.public_key, 
private_route.public_key, diff --git a/veilid-core/src/rpc_processor/rpc_set_value.rs b/veilid-core/src/rpc_processor/rpc_set_value.rs index c8ab866c..d6f55db4 100644 --- a/veilid-core/src/rpc_processor/rpc_set_value.rs +++ b/veilid-core/src/rpc_processor/rpc_set_value.rs @@ -26,7 +26,7 @@ impl RPCProcessor { ret.latency ), err)] pub async fn rpc_call_set_value( - self, + &self, dest: Destination, key: TypedKey, subkey: ValueSubkey, @@ -35,7 +35,7 @@ impl RPCProcessor { send_descriptor: bool, ) -> RPCNetworkResult> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -49,7 +49,8 @@ impl RPCProcessor { }; // Get the target node id - let Some(vcrypto) = self.crypto().get(key.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(key.kind) else { return Err(RPCError::internal("unsupported cryptosystem")); }; let Some(target_node_id) = target_node_ids.get(key.kind) else { @@ -84,7 +85,7 @@ impl RPCProcessor { let question_context = QuestionContext::SetValue(ValidateSetValueContext { descriptor, subkey, - vcrypto: vcrypto.clone(), + crypto_kind: vcrypto.kind(), }); if debug_target_enabled!("dht") { @@ -149,7 +150,7 @@ impl RPCProcessor { } // Validate peers returned are, in fact, closer to the key than the node we sent this to - let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) { + let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) { Ok(v) => v, Err(e) => { return Ok(NetworkResult::invalid_message(format!( @@ -225,8 +226,7 @@ impl RPCProcessor { // Get target for ValueChanged notifications let dest = network_result_try!(self.get_respond_to_destination(&msg)); - let rss = routing_table.route_spec_store(); - let target = dest.get_target(rss)?; + let target = dest.get_target(&routing_table)?; // Get the nodes that we know about that are closer to the the key than our own node let 
closer_to_key_peers = network_result_try!( @@ -247,7 +247,9 @@ impl RPCProcessor { log_rpc!(debug "{}", debug_string); // If there are less than 'set_value_count' peers that are closer, then store here too - let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize); + let set_value_count = self + .config() + .with(|c| c.network.dht.set_value_count as usize); let (set, new_value) = if closer_to_key_peers.len() >= set_value_count { // Not close enough diff --git a/veilid-core/src/rpc_processor/rpc_signal.rs b/veilid-core/src/rpc_processor/rpc_signal.rs index 5f1ead15..24fa55c0 100644 --- a/veilid-core/src/rpc_processor/rpc_signal.rs +++ b/veilid-core/src/rpc_processor/rpc_signal.rs @@ -5,12 +5,12 @@ impl RPCProcessor { // Can be sent via relays but not routes. For routed 'signal' like capabilities, use AppMessage. #[instrument(level = "trace", target = "rpc", skip(self), ret, err)] pub async fn rpc_call_signal( - self, + &self, dest: Destination, signal_info: SignalInfo, ) -> RPCNetworkResult<()> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; diff --git a/veilid-core/src/rpc_processor/rpc_status.rs b/veilid-core/src/rpc_processor/rpc_status.rs index 63b26f17..2e325f61 100644 --- a/veilid-core/src/rpc_processor/rpc_status.rs +++ b/veilid-core/src/rpc_processor/rpc_status.rs @@ -18,22 +18,23 @@ impl RPCProcessor { // private -> nothing #[instrument(level = "trace", target = "rpc", skip(self), ret, err)] pub async fn rpc_call_status( - self, + &self, dest: Destination, ) -> RPCNetworkResult> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; // Determine routing domain and node status to send + let routing_table = self.routing_table(); let (opt_target_nr, routing_domain, node_status) = if let Some(UnsafeRoutingInfo { opt_node, opt_relay, opt_routing_domain, }) = - 
dest.get_unsafe_routing_info(self.routing_table()) + dest.get_unsafe_routing_info(&routing_table) { let Some(routing_domain) = opt_routing_domain else { // Because this exits before calling 'question()', diff --git a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs index 86d228ab..926dcd65 100644 --- a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs +++ b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs @@ -4,26 +4,27 @@ impl RPCProcessor { // Can only be sent directly, not via relays or routes #[instrument(level = "trace", target = "rpc", skip(self), ret, err)] pub async fn rpc_call_validate_dial_info( - self, + &self, peer: NodeRef, dial_info: DialInfo, redirect: bool, ) -> Result { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; let stop_token = self - .unlocked_inner + .startup_context .startup_lock .stop_token() .ok_or(RPCError::try_again("not started up"))?; let network_manager = self.network_manager(); - let validate_dial_info_receipt_time_ms = - self.with_config(|c| c.network.dht.validate_dial_info_receipt_time_ms as u64); + let validate_dial_info_receipt_time_ms = self + .config() + .with(|c| c.network.dht.validate_dial_info_receipt_time_ms as u64); let receipt_time = TimestampDuration::new_ms(validate_dial_info_receipt_time_ms); @@ -130,7 +131,9 @@ impl RPCProcessor { // an ipv6 address let sender_node_id = detail.envelope.get_sender_typed_id(); let routing_domain = detail.routing_domain; - let node_count = self.with_config(|c| c.network.dht.max_find_node_count as usize); + let node_count = self + .config() + .with(|c| c.network.dht.max_find_node_count as usize); // Filter on nodes that can validate dial info, and can reach a specific dial info let outbound_dial_info_entry_filter = diff --git a/veilid-core/src/rpc_processor/rpc_value_changed.rs 
b/veilid-core/src/rpc_processor/rpc_value_changed.rs index 95f22f47..4abf8603 100644 --- a/veilid-core/src/rpc_processor/rpc_value_changed.rs +++ b/veilid-core/src/rpc_processor/rpc_value_changed.rs @@ -5,7 +5,7 @@ impl RPCProcessor { // Can be sent via all methods including relays and routes but never over a safety route #[instrument(level = "trace", target = "rpc", skip(self, value), err)] pub async fn rpc_call_value_changed( - self, + &self, dest: Destination, key: TypedKey, subkeys: ValueSubkeyRangeSet, @@ -14,7 +14,7 @@ impl RPCProcessor { value: Option, ) -> RPCNetworkResult<()> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; diff --git a/veilid-core/src/rpc_processor/rpc_watch_value.rs b/veilid-core/src/rpc_processor/rpc_watch_value.rs index 4ebb2c14..af5e036c 100644 --- a/veilid-core/src/rpc_processor/rpc_watch_value.rs +++ b/veilid-core/src/rpc_processor/rpc_watch_value.rs @@ -23,7 +23,7 @@ impl RPCProcessor { ),err)] #[allow(clippy::too_many_arguments)] pub async fn rpc_call_watch_value( - self, + &self, dest: Destination, key: TypedKey, subkeys: ValueSubkeyRangeSet, @@ -33,7 +33,7 @@ impl RPCProcessor { watch_id: Option, ) -> RPCNetworkResult> { let _guard = self - .unlocked_inner + .startup_context .startup_lock .enter() .map_err(RPCError::map_try_again("not started up"))?; @@ -47,7 +47,8 @@ impl RPCProcessor { }; // Get the target node id - let Some(vcrypto) = self.crypto().get(key.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(key.kind) else { return Err(RPCError::internal("unsupported cryptosystem")); }; let Some(target_node_id) = target_node_ids.get(key.kind) else { @@ -77,7 +78,7 @@ impl RPCProcessor { count, watch_id, watcher, - vcrypto.clone(), + &vcrypto, )?; let question = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), @@ -150,7 +151,7 @@ impl RPCProcessor { } // Validate peers returned are, in 
fact, closer to the key than the node we sent this to - let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) { + let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) { Ok(v) => v, Err(e) => { return Ok(NetworkResult::invalid_message(format!( @@ -230,8 +231,7 @@ impl RPCProcessor { // Get target for ValueChanged notifications let dest = network_result_try!(self.get_respond_to_destination(&msg)); - let rss = routing_table.route_spec_store(); - let target = dest.get_target(rss)?; + let target = dest.get_target(&routing_table)?; if debug_target_enabled!("dht") { let debug_string = format!( @@ -257,7 +257,9 @@ impl RPCProcessor { .find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT, CAP_DHT_WATCH])); // See if we would have accepted this as a set, same set_value_count for watches - let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize); + let set_value_count = self + .config() + .with(|c| c.network.dht.set_value_count as usize); let (ret_accepted, ret_expiration, ret_watch_id) = if closer_to_key_peers.len() >= set_value_count { // Not close enough, not accepted diff --git a/veilid-core/src/storage_manager/get_value.rs b/veilid-core/src/storage_manager/get_value.rs index c466af4e..e454492c 100644 --- a/veilid-core/src/storage_manager/get_value.rs +++ b/veilid-core/src/storage_manager/get_value.rs @@ -28,31 +28,27 @@ impl StorageManager { #[instrument(level = "trace", target = "dht", skip_all, err)] pub(super) async fn outbound_get_value( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkey: ValueSubkey, safety_selection: SafetySelection, last_get_result: GetResult, ) -> VeilidAPIResult>> { - let routing_table = rpc_processor.routing_table(); let routing_domain = RoutingDomain::PublicInternet; // Get the DHT parameters for 'GetValue' - let (key_count, consensus_count, fanout, timeout_us) = { - let c = self.unlocked_inner.config.get(); + let 
(key_count, consensus_count, fanout, timeout_us) = self.config().with(|c| { ( c.network.dht.max_find_node_count as usize, c.network.dht.get_value_count as usize, c.network.dht.get_value_fanout as usize, TimestampDuration::from(ms_to_us(c.network.dht.get_value_timeout_ms)), ) - }; + }); // Get the nodes we know are caching this value to seed the fanout let init_fanout_queue = { - let inner = self.inner.lock().await; - inner - .get_value_nodes(key)? + self.get_value_nodes(key) + .await? .unwrap_or_default() .into_iter() .filter(|x| { @@ -85,15 +81,15 @@ impl StorageManager { // Routine to call to generate fanout let call_routine = { let context = context.clone(); - let rpc_processor = rpc_processor.clone(); + let registry = self.registry(); move |next_node: NodeRef| { let context = context.clone(); - let rpc_processor = rpc_processor.clone(); + let registry = registry.clone(); let last_descriptor = last_get_result.opt_descriptor.clone(); async move { + let rpc_processor = registry.rpc_processor(); let gva = network_result_try!( rpc_processor - .clone() .rpc_call_get_value( Destination::direct(next_node.routing_domain_filtered(routing_domain)) .with_safety(safety_selection), @@ -234,12 +230,14 @@ impl StorageManager { }; // Call the fanout in a spawned task + let registry = self.registry(); spawn( "outbound_get_value fanout", Box::pin( async move { + let routing_table = registry.routing_table(); let fanout_call = FanoutCall::new( - routing_table.clone(), + &routing_table, key, key_count, fanout, @@ -293,21 +291,21 @@ impl StorageManager { } #[instrument(level = "trace", target = "dht", skip_all)] - pub(super) fn process_deferred_outbound_get_value_result_inner( + pub(super) fn process_deferred_outbound_get_value_result( &self, - inner: &mut StorageManagerInner, res_rx: flume::Receiver>, key: TypedKey, subkey: ValueSubkey, last_seq: ValueSeqNum, ) { - let this = self.clone(); - inner.process_deferred_results( + let registry = self.registry(); + 
self.process_deferred_results( res_rx, Box::new( move |result: VeilidAPIResult| -> SendPinBoxFuture { - let this = this.clone(); + let registry=registry.clone(); Box::pin(async move { + let this = registry.storage_manager(); let result = match result { Ok(v) => v, Err(e) => { @@ -330,13 +328,11 @@ impl StorageManager { // If more partial results show up, don't send an update until we're done return true; } - // If we processed the final result, possibly send an update + // If we processed the final result, possibly send an update // if the sequence number changed since our first partial update // Send with a max count as this is not attached to any watch if last_seq != value_data.seq() { - if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await { - log_rtab!(debug "Failed sending deferred fanout value change: {}", e); - } + this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)); } // Return done @@ -362,24 +358,27 @@ impl StorageManager { }; // Keep the list of nodes that returned a value for later reference - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; - inner.process_fanout_results( + Self::process_fanout_results_inner( + &mut inner, key, core::iter::once((subkey, &result.fanout_result)), false, + self.config() + .with(|c| c.network.dht.set_value_count as usize), ); // If we got a new value back then write it to the opened record if Some(get_result_value.value_data().seq()) != opt_last_seq { - inner - .handle_set_local_value( - key, - subkey, - get_result_value.clone(), - WatchUpdateMode::UpdateAll, - ) - .await?; + Self::handle_set_local_value_inner( + &mut inner, + key, + subkey, + get_result_value.clone(), + WatchUpdateMode::UpdateAll, + ) + .await?; } Ok(Some(get_result_value.value_data().clone())) } @@ -392,12 +391,13 @@ impl StorageManager { subkey: ValueSubkey, want_descriptor: bool, ) -> VeilidAPIResult> { 
- let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // See if this is a remote or local value let (_is_local, last_get_result) = { // See if the subkey we are getting has a last known local value - let mut last_get_result = inner.handle_get_local_value(key, subkey, true).await?; + let mut last_get_result = + Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await?; // If this is local, it must have a descriptor already if last_get_result.opt_descriptor.is_some() { if !want_descriptor { @@ -406,9 +406,9 @@ impl StorageManager { (true, last_get_result) } else { // See if the subkey we are getting has a last known remote value - let last_get_result = inner - .handle_get_remote_value(key, subkey, want_descriptor) - .await?; + let last_get_result = + Self::handle_get_remote_value_inner(&mut inner, key, subkey, want_descriptor) + .await?; (false, last_get_result) } }; diff --git a/veilid-core/src/storage_manager/inspect_value.rs b/veilid-core/src/storage_manager/inspect_value.rs index 42eeb0b0..289c634d 100644 --- a/veilid-core/src/storage_manager/inspect_value.rs +++ b/veilid-core/src/storage_manager/inspect_value.rs @@ -52,21 +52,17 @@ impl StorageManager { #[instrument(level = "trace", target = "dht", skip_all, err)] pub(super) async fn outbound_inspect_value( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkeys: ValueSubkeyRangeSet, safety_selection: SafetySelection, local_inspect_result: InspectResult, use_set_scope: bool, ) -> VeilidAPIResult { - let routing_table = rpc_processor.routing_table(); let routing_domain = RoutingDomain::PublicInternet; // Get the DHT parameters for 'InspectValue' // Can use either 'get scope' or 'set scope' depending on the purpose of the inspection - let (key_count, consensus_count, fanout, timeout_us) = { - let c = self.unlocked_inner.config.get(); - + let (key_count, consensus_count, fanout, timeout_us) = self.config().with(|c| { if use_set_scope { ( 
c.network.dht.max_find_node_count as usize, @@ -82,13 +78,12 @@ impl StorageManager { TimestampDuration::from(ms_to_us(c.network.dht.get_value_timeout_ms)), ) } - }; + }); // Get the nodes we know are caching this value to seed the fanout let init_fanout_queue = { - let inner = self.inner.lock().await; - inner - .get_value_nodes(key)? + self.get_value_nodes(key) + .await? .unwrap_or_default() .into_iter() .filter(|x| { @@ -120,125 +115,130 @@ impl StorageManager { })); // Routine to call to generate fanout - let call_routine = |next_node: NodeRef| { - let rpc_processor = rpc_processor.clone(); + let call_routine = { let context = context.clone(); - let opt_descriptor = local_inspect_result.opt_descriptor.clone(); - let subkeys = subkeys.clone(); - async move { - let iva = network_result_try!( - rpc_processor - .clone() - .rpc_call_inspect_value( - Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection), - key, - subkeys.clone(), - opt_descriptor.map(|x| (*x).clone()), - ) - .await? - ); - let answer = iva.answer; + let registry = self.registry(); + move |next_node: NodeRef| { + let context = context.clone(); + let registry = registry.clone(); + let opt_descriptor = local_inspect_result.opt_descriptor.clone(); + let subkeys = subkeys.clone(); + async move { + let rpc_processor = registry.rpc_processor(); - // Keep the descriptor if we got one. If we had a last_descriptor it will - // already be validated by rpc_call_inspect_value - if let Some(descriptor) = answer.descriptor { - let mut ctx = context.lock(); - if ctx.opt_descriptor_info.is_none() { - // Get the descriptor info. This also truncates the subkeys list to what can be returned from the network. 
- let descriptor_info = - match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) { - Ok(v) => v, - Err(e) => { - return Ok(NetworkResult::invalid_message(e)); - } - }; - ctx.opt_descriptor_info = Some(descriptor_info); - } - } + let iva = network_result_try!( + rpc_processor + .rpc_call_inspect_value( + Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection), + key, + subkeys.clone(), + opt_descriptor.map(|x| (*x).clone()), + ) + .await? + ); + let answer = iva.answer; - // Keep the value if we got one and it is newer and it passes schema validation - if !answer.seqs.is_empty() { - log_dht!(debug "Got seqs back: len={}", answer.seqs.len()); - let mut ctx = context.lock(); - - // Ensure we have a schema and descriptor etc - let Some(descriptor_info) = &ctx.opt_descriptor_info else { - // Got a value but no descriptor for it - // Move to the next node - return Ok(NetworkResult::invalid_message( - "Got inspection with no descriptor", - )); - }; - - // Get number of subkeys from schema and ensure we are getting the - // right number of sequence numbers betwen that and what we asked for - #[allow(clippy::unnecessary_cast)] - if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 { - // Not the right number of sequence numbers - // Move to the next node - return Ok(NetworkResult::invalid_message(format!( - "wrong number of seqs returned {} (wanted {})", - answer.seqs.len(), - descriptor_info.subkeys.len() - ))); + // Keep the descriptor if we got one. If we had a last_descriptor it will + // already be validated by rpc_call_inspect_value + if let Some(descriptor) = answer.descriptor { + let mut ctx = context.lock(); + if ctx.opt_descriptor_info.is_none() { + // Get the descriptor info. This also truncates the subkeys list to what can be returned from the network. 
+ let descriptor_info = + match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) { + Ok(v) => v, + Err(e) => { + return Ok(NetworkResult::invalid_message(e)); + } + }; + ctx.opt_descriptor_info = Some(descriptor_info); + } } - // If we have a prior seqs list, merge in the new seqs - if ctx.seqcounts.is_empty() { - ctx.seqcounts = answer - .seqs - .iter() - .map(|s| SubkeySeqCount { - seq: *s, - // One node has shown us the newest sequence numbers so far - value_nodes: if *s == ValueSeqNum::MAX { - vec![] - } else { - vec![next_node.clone()] - }, - }) - .collect(); - } else { - if ctx.seqcounts.len() != answer.seqs.len() { - return Err(RPCError::internal( - "seqs list length should always be equal by now", + // Keep the value if we got one and it is newer and it passes schema validation + if !answer.seqs.is_empty() { + log_dht!(debug "Got seqs back: len={}", answer.seqs.len()); + let mut ctx = context.lock(); + + // Ensure we have a schema and descriptor etc + let Some(descriptor_info) = &ctx.opt_descriptor_info else { + // Got a value but no descriptor for it + // Move to the next node + return Ok(NetworkResult::invalid_message( + "Got inspection with no descriptor", )); + }; + + // Get number of subkeys from schema and ensure we are getting the + // right number of sequence numbers betwen that and what we asked for + #[allow(clippy::unnecessary_cast)] + if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 { + // Not the right number of sequence numbers + // Move to the next node + return Ok(NetworkResult::invalid_message(format!( + "wrong number of seqs returned {} (wanted {})", + answer.seqs.len(), + descriptor_info.subkeys.len() + ))); } - for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) { - let ctx_seqcnt = pair.0; - let answer_seq = *pair.1; - // If we already have consensus for this subkey, don't bother updating it any more - // While we may find a better sequence number if we keep looking, this does not mimic the 
behavior - // of get and set unless we stop here - if ctx_seqcnt.value_nodes.len() >= consensus_count { - continue; + // If we have a prior seqs list, merge in the new seqs + if ctx.seqcounts.is_empty() { + ctx.seqcounts = answer + .seqs + .iter() + .map(|s| SubkeySeqCount { + seq: *s, + // One node has shown us the newest sequence numbers so far + value_nodes: if *s == ValueSeqNum::MAX { + vec![] + } else { + vec![next_node.clone()] + }, + }) + .collect(); + } else { + if ctx.seqcounts.len() != answer.seqs.len() { + return Err(RPCError::internal( + "seqs list length should always be equal by now", + )); } + for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) { + let ctx_seqcnt = pair.0; + let answer_seq = *pair.1; - // If the new seq isn't undefined and is better than the old seq (either greater or old is undefined) - // Then take that sequence number and note that we have gotten newer sequence numbers so we keep - // looking for consensus - // If the sequence number matches the old sequence number, then we keep the value node for reference later - if answer_seq != ValueSeqNum::MAX { - if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq - { - // One node has shown us the latest sequence numbers so far - ctx_seqcnt.seq = answer_seq; - ctx_seqcnt.value_nodes = vec![next_node.clone()]; - } else if answer_seq == ctx_seqcnt.seq { - // Keep the nodes that showed us the latest values - ctx_seqcnt.value_nodes.push(next_node.clone()); + // If we already have consensus for this subkey, don't bother updating it any more + // While we may find a better sequence number if we keep looking, this does not mimic the behavior + // of get and set unless we stop here + if ctx_seqcnt.value_nodes.len() >= consensus_count { + continue; + } + + // If the new seq isn't undefined and is better than the old seq (either greater or old is undefined) + // Then take that sequence number and note that we have gotten newer sequence numbers so we keep + // looking for 
consensus + // If the sequence number matches the old sequence number, then we keep the value node for reference later + if answer_seq != ValueSeqNum::MAX { + if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq + { + // One node has shown us the latest sequence numbers so far + ctx_seqcnt.seq = answer_seq; + ctx_seqcnt.value_nodes = vec![next_node.clone()]; + } else if answer_seq == ctx_seqcnt.seq { + // Keep the nodes that showed us the latest values + ctx_seqcnt.value_nodes.push(next_node.clone()); + } } } } } - } - // Return peers if we have some - log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len()); + // Return peers if we have some + log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len()); - Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers})) - }.instrument(tracing::trace_span!("outbound_inspect_value fanout call")) + Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers})) + }.instrument(tracing::trace_span!("outbound_inspect_value fanout call")) + } }; // Routine to call to check if we're done at each step @@ -259,8 +259,9 @@ impl StorageManager { }; // Call the fanout + let routing_table = self.routing_table(); let fanout_call = FanoutCall::new( - routing_table.clone(), + &routing_table, key, key_count, fanout, @@ -327,14 +328,14 @@ impl StorageManager { subkeys: ValueSubkeyRangeSet, want_descriptor: bool, ) -> VeilidAPIResult> { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // See if this is a remote or local value let (_is_local, inspect_result) = { // See if the subkey we are getting has a last known local value - let mut local_inspect_result = inner - .handle_inspect_local_value(key, subkeys.clone(), true) - .await?; + let mut local_inspect_result = + Self::handle_inspect_local_value_inner(&mut inner, key, subkeys.clone(), true) + .await?; // If this is local, it must have a descriptor 
already if local_inspect_result.opt_descriptor.is_some() { if !want_descriptor { @@ -343,9 +344,13 @@ impl StorageManager { (true, local_inspect_result) } else { // See if the subkey we are getting has a last known remote value - let remote_inspect_result = inner - .handle_inspect_remote_value(key, subkeys, want_descriptor) - .await?; + let remote_inspect_result = Self::handle_inspect_remote_value_inner( + &mut inner, + key, + subkeys, + want_descriptor, + ) + .await?; (false, remote_inspect_result) } }; diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 8ac58f4b..8c9e7c74 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -3,7 +3,6 @@ mod get_value; mod inspect_value; mod record_store; mod set_value; -mod storage_manager_inner; mod tasks; mod types; mod watch_value; @@ -12,9 +11,9 @@ use super::*; use record_store::*; use routing_table::*; use rpc_processor::*; -use storage_manager_inner::*; pub use record_store::{WatchParameters, WatchResult}; + pub use types::*; /// The maximum size of a single subkey @@ -31,6 +30,10 @@ const SEND_VALUE_CHANGES_INTERVAL_SECS: u32 = 1; const CHECK_ACTIVE_WATCHES_INTERVAL_SECS: u32 = 1; /// Frequency to check for expired server-side watched records const CHECK_WATCHED_RECORDS_INTERVAL_SECS: u32 = 1; +/// Table store table for storage manager metadata +const STORAGE_MANAGER_METADATA: &str = "storage_manager_metadata"; +/// Storage manager metadata key name for offline subkey write persistence +const OFFLINE_SUBKEY_WRITES: &[u8] = b"offline_subkey_writes"; #[derive(Debug, Clone)] /// A single 'value changed' message to send @@ -43,13 +46,40 @@ struct ValueChangedInfo { value: Option>, } -struct StorageManagerUnlockedInner { - _event_bus: EventBus, - config: VeilidConfig, - crypto: Crypto, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] - block_store: BlockStore, +/// Locked structure for storage manager 
+#[derive(Default)] +struct StorageManagerInner { + /// Records that have been 'opened' and are not yet closed + pub opened_records: HashMap, + /// Records that have ever been 'created' or 'opened' by this node, things we care about that we must republish to keep alive + pub local_record_store: Option>, + /// Records that have been pushed to this node for distribution by other nodes, that we make an effort to republish + pub remote_record_store: Option>, + /// Record subkeys that have not been pushed to the network because they were written to offline + pub offline_subkey_writes: HashMap, + /// Storage manager metadata that is persistent, including copy of offline subkey writes + pub metadata_db: Option, + /// Background processing task (not part of attachment manager tick tree so it happens when detached too) + pub tick_future: Option>, +} + +impl fmt::Debug for StorageManagerInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StorageManagerInner") + // .field("unlocked_inner", &self.unlocked_inner) + .field("opened_records", &self.opened_records) + .field("local_record_store", &self.local_record_store) + .field("remote_record_store", &self.remote_record_store) + .field("offline_subkey_writes", &self.offline_subkey_writes) + //.field("metadata_db", &self.metadata_db) + //.field("tick_future", &self.tick_future) + .finish() + } +} + +pub(crate) struct StorageManager { + registry: VeilidComponentRegistry, + inner: AsyncMutex, // Background processes flush_record_stores_task: TickTask, @@ -60,22 +90,43 @@ struct StorageManagerUnlockedInner { // Anonymous watch keys anonymous_watch_keys: TypedKeyPairGroup, + + /// Deferred result processor + deferred_result_processor: DeferredStreamProcessor, } -#[derive(Clone)] -pub(crate) struct StorageManager { - unlocked_inner: Arc, - inner: Arc>, +impl fmt::Debug for StorageManager { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StorageManager") + 
.field("registry", &self.registry) + .field("inner", &self.inner) + // .field("flush_record_stores_task", &self.flush_record_stores_task) + // .field( + // "offline_subkey_writes_task", + // &self.offline_subkey_writes_task, + // ) + // .field("send_value_changes_task", &self.send_value_changes_task) + // .field("check_active_watches_task", &self.check_active_watches_task) + // .field( + // "check_watched_records_task", + // &self.check_watched_records_task, + // ) + .field("deferred_result_processor", &self.deferred_result_processor) + .field("anonymous_watch_keys", &self.anonymous_watch_keys) + .finish() + } } +impl_veilid_component!(StorageManager); + impl StorageManager { - fn new_unlocked_inner( - event_bus: EventBus, - config: VeilidConfig, - crypto: Crypto, - table_store: TableStore, - #[cfg(feature = "unstable-blockstore")] block_store: BlockStore, - ) -> StorageManagerUnlockedInner { + fn new_inner() -> StorageManagerInner { + StorageManagerInner::default() + } + + pub fn new(registry: VeilidComponentRegistry) -> StorageManager { + let crypto = registry.crypto(); + // Generate keys to use for anonymous watches let mut anonymous_watch_keys = TypedKeyPairGroup::new(); for ck in VALID_CRYPTO_KINDS { @@ -84,13 +135,11 @@ impl StorageManager { anonymous_watch_keys.add(TypedKeyPair::new(ck, kp)); } - StorageManagerUnlockedInner { - _event_bus: event_bus, - config, - crypto, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, + let inner = Self::new_inner(); + let this = StorageManager { + registry, + inner: AsyncMutex::new(inner), + flush_record_stores_task: TickTask::new( "flush_record_stores_task", FLUSH_RECORD_STORES_INTERVAL_SECS, @@ -113,30 +162,7 @@ impl StorageManager { ), anonymous_watch_keys, - } - } - fn new_inner(unlocked_inner: Arc) -> StorageManagerInner { - StorageManagerInner::new(unlocked_inner) - } - - pub fn new( - event_bus: EventBus, - config: VeilidConfig, - crypto: Crypto, - table_store: TableStore, - #[cfg(feature = 
"unstable-blockstore")] block_store: BlockStore, - ) -> StorageManager { - let unlocked_inner = Arc::new(Self::new_unlocked_inner( - event_bus, - config, - crypto, - table_store, - #[cfg(feature = "unstable-blockstore")] - block_store, - )); - let this = StorageManager { - unlocked_inner: unlocked_inner.clone(), - inner: Arc::new(AsyncMutex::new(Self::new_inner(unlocked_inner))), + deferred_result_processor: DeferredStreamProcessor::new(), }; this.setup_tasks(); @@ -144,71 +170,190 @@ impl StorageManager { this } - #[instrument(level = "debug", skip_all, err)] - pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> { - log_stor!(debug "startup storage manager"); + fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { + let c = config.get(); + RecordStoreLimits { + subkey_cache_size: c.network.dht.local_subkey_cache_size as usize, + max_subkey_size: MAX_SUBKEY_SIZE, + max_record_total_size: MAX_RECORD_DATA_SIZE, + max_records: None, + max_subkey_cache_memory_mb: Some( + c.network.dht.local_max_subkey_cache_memory_mb as usize, + ), + max_storage_space_mb: None, + public_watch_limit: c.network.dht.public_watch_limit as usize, + member_watch_limit: c.network.dht.member_watch_limit as usize, + max_watch_expiration: TimestampDuration::new(ms_to_us( + c.network.dht.max_watch_expiration_ms, + )), + min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)), + } + } - let mut inner = self.inner.lock().await; - inner.init(self.clone(), update_callback).await?; + fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { + let c = config.get(); + RecordStoreLimits { + subkey_cache_size: c.network.dht.remote_subkey_cache_size as usize, + max_subkey_size: MAX_SUBKEY_SIZE, + max_record_total_size: MAX_RECORD_DATA_SIZE, + max_records: Some(c.network.dht.remote_max_records as usize), + max_subkey_cache_memory_mb: Some( + c.network.dht.remote_max_subkey_cache_memory_mb as usize, + ), + 
max_storage_space_mb: Some(c.network.dht.remote_max_storage_space_mb as usize), + public_watch_limit: c.network.dht.public_watch_limit as usize, + member_watch_limit: c.network.dht.member_watch_limit as usize, + max_watch_expiration: TimestampDuration::new(ms_to_us( + c.network.dht.max_watch_expiration_ms, + )), + min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)), + } + } + + #[instrument(level = "debug", skip_all, err)] + async fn init_async(&self) -> EyreResult<()> { + log_stor!(debug "startup storage manager"); + let table_store = self.table_store(); + let config = self.config(); + + let metadata_db = table_store.open(STORAGE_MANAGER_METADATA, 1).await?; + + let local_limits = Self::local_limits_from_config(config.clone()); + let remote_limits = Self::remote_limits_from_config(config.clone()); + + let local_record_store = + RecordStore::try_create(&table_store, "local", local_limits).await?; + let remote_record_store = + RecordStore::try_create(&table_store, "remote", remote_limits).await?; + + { + let mut inner = self.inner.lock().await; + inner.metadata_db = Some(metadata_db); + inner.local_record_store = Some(local_record_store); + inner.remote_record_store = Some(remote_record_store); + Self::load_metadata(&mut inner).await?; + } + + // Start deferred results processors + self.deferred_result_processor.init().await; Ok(()) } - #[instrument(level = "debug", skip_all)] - pub async fn terminate(&self) { - log_stor!(debug "starting storage manager shutdown"); + #[instrument(level = "trace", target = "tstore", skip_all)] + async fn post_init_async(&self) -> EyreResult<()> { + let mut inner = self.inner.lock().await; + // Schedule tick + let registry = self.registry(); + let tick_future = interval("storage manager tick", 1000, move || { + let registry = registry.clone(); + async move { + let this = registry.storage_manager(); + if let Err(e) = this.tick().await { + log_stor!(warn "storage manager tick failed: {}", e); + } + } + }); + 
inner.tick_future = Some(tick_future); + + Ok(()) + } + + #[instrument(level = "trace", target = "tstore", skip_all)] + async fn pre_terminate_async(&self) { // Stop the background ticker process { let mut inner = self.inner.lock().await; - inner.stop_ticker().await; + // Stop ticker + let tick_future = inner.tick_future.take(); + if let Some(f) = tick_future { + f.await; + } } - // Cancel all tasks + // Cancel all tasks associated with the tick future self.cancel_tasks().await; + } + + #[instrument(level = "debug", skip_all)] + async fn terminate_async(&self) { + log_stor!(debug "starting storage manager shutdown"); + + // Stop deferred result processor + self.deferred_result_processor.terminate().await; // Terminate and release the storage manager { let mut inner = self.inner.lock().await; - inner.terminate().await; - *inner = Self::new_inner(self.unlocked_inner.clone()); + + // Final flush on record stores + if let Some(mut local_record_store) = inner.local_record_store.take() { + if let Err(e) = local_record_store.flush().await { + log_stor!(error "termination local record store tick failed: {}", e); + } + } + if let Some(mut remote_record_store) = inner.remote_record_store.take() { + if let Err(e) = remote_record_store.flush().await { + log_stor!(error "termination remote record store tick failed: {}", e); + } + } + + // Save metadata + if let Err(e) = Self::save_metadata(&mut inner).await { + log_stor!(error "termination metadata save failed: {}", e); + } + + // Reset inner state + *inner = Self::new_inner(); } log_stor!(debug "finished storage manager shutdown"); } - pub async fn set_rpc_processor(&self, opt_rpc_processor: Option) { - let mut inner = self.inner.lock().await; - inner.opt_rpc_processor = opt_rpc_processor - } - - pub async fn set_routing_table(&self, opt_routing_table: Option) { - let mut inner = self.inner.lock().await; - inner.opt_routing_table = opt_routing_table - } - - async fn lock(&self) -> VeilidAPIResult> { - let inner = 
asyncmutex_lock_arc!(&self.inner); - if !inner.initialized { - apibail_not_initialized!(); + async fn save_metadata(inner: &mut StorageManagerInner) -> EyreResult<()> { + if let Some(metadata_db) = &inner.metadata_db { + let tx = metadata_db.transact(); + tx.store_json(0, OFFLINE_SUBKEY_WRITES, &inner.offline_subkey_writes)?; + tx.commit().await.wrap_err("failed to commit")? } - Ok(inner) + Ok(()) } - fn online_ready_inner(inner: &StorageManagerInner) -> Option { - let routing_table = inner.opt_routing_table.clone()?; - routing_table.get_published_peer_info(RoutingDomain::PublicInternet)?; - inner.opt_rpc_processor.clone() + async fn load_metadata(inner: &mut StorageManagerInner) -> EyreResult<()> { + if let Some(metadata_db) = &inner.metadata_db { + inner.offline_subkey_writes = match metadata_db + .load_json(0, OFFLINE_SUBKEY_WRITES) + .await + { + Ok(v) => v.unwrap_or_default(), + Err(_) => { + if let Err(e) = metadata_db.delete(0, OFFLINE_SUBKEY_WRITES).await { + log_stor!(debug "offline_subkey_writes format changed, clearing: {}", e); + } + Default::default() + } + } + } + Ok(()) } - async fn online_writes_ready(&self) -> EyreResult> { - let inner = self.lock().await?; - Ok(Self::online_ready_inner(&inner)) + pub(super) async fn has_offline_subkey_writes(&self) -> bool { + !self.inner.lock().await.offline_subkey_writes.is_empty() } - async fn has_offline_subkey_writes(&self) -> EyreResult { - let inner = self.lock().await?; - Ok(!inner.offline_subkey_writes.is_empty()) + pub(super) fn dht_is_online(&self) -> bool { + // Check if we have published peer info + // Note, this is a best-effort check, subject to race conditions on the network's state + if self + .routing_table() + .get_published_peer_info(RoutingDomain::PublicInternet) + .is_none() + { + return false; + } + + true } /// Get the set of nodes in our active watches @@ -231,16 +376,23 @@ impl StorageManager { /// Builds the record key for a given schema and owner #[instrument(level = "trace", target = 
"stor", skip_all)] - pub async fn get_record_key( + pub fn get_record_key( &self, kind: CryptoKind, schema: DHTSchema, owner_key: &PublicKey, ) -> VeilidAPIResult { - let inner = self.lock().await?; - schema.validate()?; + // Get cryptosystem + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(kind) else { + apibail_generic!("unsupported cryptosystem"); + }; - inner.get_record_key(kind, owner_key, schema).await + // Validate schema + schema.validate()?; + let schema_data = schema.compile(); + + Ok(Self::get_key(&vcrypto, owner_key, &schema_data)) } /// Create a local record from scratch with a new owner key, open it, and return the opened descriptor @@ -251,18 +403,20 @@ impl StorageManager { owner: Option, safety_selection: SafetySelection, ) -> VeilidAPIResult { - let mut inner = self.lock().await?; + // Validate schema schema.validate()?; + // Lock access to the record stores + let mut inner = self.inner.lock().await; + // Create a new owned local record from scratch - let (key, owner) = inner - .create_new_owned_local_record(kind, schema, owner, safety_selection) + let (key, owner) = self + .create_new_owned_local_record_inner(&mut inner, kind, schema, owner, safety_selection) .await?; // Now that the record is made we should always succeed to open the existing record // The initial writer is the owner of the record - inner - .open_existing_record(key, Some(owner), safety_selection) + Self::open_existing_record_inner(&mut inner, key, Some(owner), safety_selection) .await .map(|r| r.unwrap()) } @@ -275,20 +429,17 @@ impl StorageManager { writer: Option, safety_selection: SafetySelection, ) -> VeilidAPIResult { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // See if we have a local record already or not - if let Some(res) = inner - .open_existing_record(key, writer, safety_selection) - .await? + if let Some(res) = + Self::open_existing_record_inner(&mut inner, key, writer, safety_selection).await? 
{ return Ok(res); } // No record yet, try to get it from the network - - // Get rpc processor and drop mutex so we don't block while getting the value from the network - let Some(rpc_processor) = Self::online_ready_inner(&inner) else { + if !self.dht_is_online() { apibail_try_again!("offline, try again later"); }; @@ -299,13 +450,7 @@ impl StorageManager { // Use the safety selection we opened the record with let subkey: ValueSubkey = 0; let res_rx = self - .outbound_get_value( - rpc_processor, - key, - subkey, - safety_selection, - GetResult::default(), - ) + .outbound_get_value(key, subkey, safety_selection, GetResult::default()) .await?; // Wait for the first result let Ok(result) = res_rx.recv_async().await else { @@ -325,29 +470,34 @@ impl StorageManager { .map(|s| s.value_data().seq()); // Reopen inner to store value we just got - let mut inner = self.lock().await?; + let out = { + let mut inner = self.inner.lock().await; - // Check again to see if we have a local record already or not - // because waiting for the outbound_get_value action could result in the key being opened - // via some parallel process + // Check again to see if we have a local record already or not + // because waiting for the outbound_get_value action could result in the key being opened + // via some parallel process - if let Some(res) = inner - .open_existing_record(key, writer, safety_selection) - .await? - { - return Ok(res); - } + if let Some(res) = + Self::open_existing_record_inner(&mut inner, key, writer, safety_selection).await? 
+ { + return Ok(res); + } - // Open the new record - let out = inner - .open_new_record(key, writer, subkey, result.get_result, safety_selection) - .await; + // Open the new record + Self::open_new_record_inner( + &mut inner, + key, + writer, + subkey, + result.get_result, + safety_selection, + ) + .await + }; if out.is_ok() { if let Some(last_seq) = opt_last_seq { - self.process_deferred_outbound_get_value_result_inner( - &mut inner, res_rx, key, subkey, last_seq, - ); + self.process_deferred_outbound_get_value_result(res_rx, key, subkey, last_seq); } } out @@ -356,50 +506,54 @@ impl StorageManager { /// Close an opened local record #[instrument(level = "trace", target = "stor", skip_all)] pub async fn close_record(&self, key: TypedKey) -> VeilidAPIResult<()> { - let (opt_opened_record, opt_rpc_processor) = { - let mut inner = self.lock().await?; - (inner.close_record(key)?, Self::online_ready_inner(&inner)) + // Attempt to close the record, returning the opened record if it wasn't already closed + let opened_record = { + let mut inner = self.inner.lock().await; + let Some(opened_record) = Self::close_record_inner(&mut inner, key)? 
else { + return Ok(()); + }; + opened_record + }; + + // See if we have an active watch on the closed record + let Some(active_watch) = opened_record.active_watch() else { + return Ok(()); }; // Send a one-time cancel request for the watch if we have one and we're online - if let Some(opened_record) = opt_opened_record { - if let Some(active_watch) = opened_record.active_watch() { - if let Some(rpc_processor) = opt_rpc_processor { - // Use the safety selection we opened the record with - // Use the writer we opened with as the 'watcher' as well - let opt_owvresult = match self - .outbound_watch_value_cancel( - rpc_processor, - key, - ValueSubkeyRangeSet::full(), - opened_record.safety_selection(), - opened_record.writer().cloned(), - active_watch.id, - active_watch.watch_node, - ) - .await - { - Ok(v) => v, - Err(e) => { - log_stor!(debug - "close record watch cancel failed: {}", e - ); - None - } - }; - if let Some(owvresult) = opt_owvresult { - if owvresult.expiration_ts.as_u64() != 0 { - log_stor!(debug - "close record watch cancel should have zero expiration" - ); - } - } else { - log_stor!(debug "close record watch cancel unsuccessful"); - } - } else { - log_stor!(debug "skipping last-ditch watch cancel because we are offline"); - } + if !self.dht_is_online() { + log_stor!(debug "skipping last-ditch watch cancel because we are offline"); + return Ok(()); + } + // Use the safety selection we opened the record with + // Use the writer we opened with as the 'watcher' as well + let opt_owvresult = match self + .outbound_watch_value_cancel( + key, + ValueSubkeyRangeSet::full(), + opened_record.safety_selection(), + opened_record.writer().cloned(), + active_watch.id, + active_watch.watch_node, + ) + .await + { + Ok(v) => v, + Err(e) => { + log_stor!(debug + "close record watch cancel failed: {}", e + ); + None } + }; + if let Some(owvresult) = opt_owvresult { + if owvresult.expiration_ts.as_u64() != 0 { + log_stor!(debug + "close record watch cancel should have zero 
expiration" + ); + } + } else { + log_stor!(debug "close record watch cancel unsuccessful"); } Ok(()) @@ -412,7 +566,7 @@ impl StorageManager { self.close_record(key).await?; // Get record from the local store - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; let Some(local_record_store) = inner.local_record_store.as_mut() else { apibail_not_initialized!(); }; @@ -429,7 +583,7 @@ impl StorageManager { subkey: ValueSubkey, force_refresh: bool, ) -> VeilidAPIResult> { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; let safety_selection = { let Some(opened_record) = inner.opened_records.get(&key) else { apibail_generic!("record not open"); @@ -438,7 +592,8 @@ impl StorageManager { }; // See if the requested subkey is our local record store - let last_get_result = inner.handle_get_local_value(key, subkey, true).await?; + let last_get_result = + Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await?; // Return the existing value if we have one unless we are forcing a refresh if !force_refresh { @@ -448,9 +603,7 @@ impl StorageManager { } // Refresh if we can - - // Get rpc processor and drop mutex so we don't block while getting the value from the network - let Some(rpc_processor) = Self::online_ready_inner(&inner) else { + if !self.dht_is_online() { // Return the existing value if we have one if we aren't online if let Some(last_get_result_value) = last_get_result.opt_value { return Ok(Some(last_get_result_value.value_data().clone())); @@ -468,13 +621,7 @@ impl StorageManager { .as_ref() .map(|v| v.value_data().seq()); let res_rx = self - .outbound_get_value( - rpc_processor, - key, - subkey, - safety_selection, - last_get_result, - ) + .outbound_get_value(key, subkey, safety_selection, last_get_result) .await?; // Wait for the first result @@ -492,14 +639,7 @@ impl StorageManager { if let Some(out) = &out { // If there's more to process, do it in the background if partial { - let mut 
inner = self.lock().await?; - self.process_deferred_outbound_get_value_result_inner( - &mut inner, - res_rx, - key, - subkey, - out.seq(), - ); + self.process_deferred_outbound_get_value_result(res_rx, key, subkey, out.seq()); } } @@ -515,10 +655,11 @@ impl StorageManager { data: Vec, writer: Option, ) -> VeilidAPIResult> { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // Get cryptosystem - let Some(vcrypto) = self.unlocked_inner.crypto.get(key.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(key.kind) else { apibail_generic!("unsupported cryptosystem"); }; @@ -541,7 +682,8 @@ impl StorageManager { }; // See if the subkey we are modifying has a last known local value - let last_get_result = inner.handle_get_local_value(key, subkey, true).await?; + let last_get_result = + Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await?; // Get the descriptor and schema for the key let Some(descriptor) = last_get_result.opt_descriptor else { @@ -575,26 +717,25 @@ impl StorageManager { value_data, descriptor.owner(), subkey, - vcrypto, + &vcrypto, writer.secret, )?); // Write the value locally first log_stor!(debug "Writing subkey locally: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() ); - inner - .handle_set_local_value( - key, - subkey, - signed_value_data.clone(), - WatchUpdateMode::NoUpdate, - ) - .await?; + Self::handle_set_local_value_inner( + &mut inner, + key, + subkey, + signed_value_data.clone(), + WatchUpdateMode::NoUpdate, + ) + .await?; - // Get rpc processor and drop mutex so we don't block while getting the value from the network - let Some(rpc_processor) = Self::online_ready_inner(&inner) else { + if !self.dht_is_online() { log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() ); // Add to offline writes to flush - inner.add_offline_subkey_write(key, subkey, safety_selection); + 
Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection); return Ok(None); }; @@ -606,7 +747,6 @@ impl StorageManager { // Use the safety selection we opened the record with let res_rx = match self .outbound_set_value( - rpc_processor, key, subkey, safety_selection, @@ -618,8 +758,8 @@ impl StorageManager { Ok(v) => v, Err(e) => { // Failed to write, try again later - let mut inner = self.lock().await?; - inner.add_offline_subkey_write(key, subkey, safety_selection); + let mut inner = self.inner.lock().await; + Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection); return Err(e); } }; @@ -644,9 +784,7 @@ impl StorageManager { // If there's more to process, do it in the background if partial { - let mut inner = self.lock().await?; - self.process_deferred_outbound_set_value_result_inner( - &mut inner, + self.process_deferred_outbound_set_value_result( res_rx, key, subkey, @@ -668,7 +806,7 @@ impl StorageManager { expiration: Timestamp, count: u32, ) -> VeilidAPIResult { - let inner = self.lock().await?; + let inner = self.inner.lock().await; // Get the safety selection and the writer we opened this record // and whatever active watch id and watch node we may have in case this is a watch update @@ -703,7 +841,7 @@ impl StorageManager { let subkeys = schema.truncate_subkeys(&subkeys, None); // Get rpc processor and drop mutex so we don't block while requesting the watch from the network - let Some(rpc_processor) = Self::online_ready_inner(&inner) else { + if !self.dht_is_online() { apibail_try_again!("offline, try again later"); }; @@ -714,7 +852,6 @@ impl StorageManager { // Use the writer we opened with as the 'watcher' as well let opt_owvresult = self .outbound_watch_value( - rpc_processor, key, subkeys.clone(), expiration, @@ -731,20 +868,19 @@ impl StorageManager { }; // Clear any existing watch if the watch succeeded or got cancelled - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; 
let Some(opened_record) = inner.opened_records.get_mut(&key) else { apibail_generic!("record not open"); }; opened_record.clear_active_watch(); // Get the minimum expiration timestamp we will accept - let (rpc_timeout_us, max_watch_expiration_us) = { - let c = self.unlocked_inner.config.get(); + let (rpc_timeout_us, max_watch_expiration_us) = self.config().with(|c| { ( TimestampDuration::from(ms_to_us(c.network.rpc.timeout_ms)), TimestampDuration::from(ms_to_us(c.network.dht.max_watch_expiration_ms)), ) - }; + }); let cur_ts = get_timestamp(); let min_expiration_ts = cur_ts + rpc_timeout_us.as_u64(); let max_expiration_ts = if expiration.as_u64() == 0 { @@ -793,7 +929,7 @@ impl StorageManager { subkeys: ValueSubkeyRangeSet, ) -> VeilidAPIResult { let (subkeys, active_watch) = { - let inner = self.lock().await?; + let inner = self.inner.lock().await; let Some(opened_record) = inner.opened_records.get(&key) else { apibail_generic!("record not open"); }; @@ -855,7 +991,7 @@ impl StorageManager { subkeys }; - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; let safety_selection = { let Some(opened_record) = inner.opened_records.get(&key) else { apibail_generic!("record not open"); @@ -864,9 +1000,8 @@ impl StorageManager { }; // See if the requested record is our local record store - let mut local_inspect_result = inner - .handle_inspect_local_value(key, subkeys.clone(), true) - .await?; + let mut local_inspect_result = + Self::handle_inspect_local_value_inner(&mut inner, key, subkeys.clone(), true).await?; #[allow(clippy::unnecessary_cast)] { @@ -899,7 +1034,7 @@ impl StorageManager { } // Get rpc processor and drop mutex so we don't block while getting the value from the network - let Some(rpc_processor) = Self::online_ready_inner(&inner) else { + if !self.dht_is_online() { apibail_try_again!("offline, try again later"); }; @@ -916,7 +1051,6 @@ impl StorageManager { // Get the inspect record report from the network let result = self 
.outbound_inspect_value( - rpc_processor, key, subkeys, safety_selection, @@ -947,14 +1081,21 @@ impl StorageManager { } // Keep the list of nodes that returned a value for later reference - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; let results_iter = result .inspect_result .subkeys .iter() .zip(result.fanout_results.iter()); - inner.process_fanout_results(key, results_iter, false); + Self::process_fanout_results_inner( + &mut inner, + key, + results_iter, + false, + self.config() + .with(|c| c.network.dht.set_value_count as usize), + ); Ok(DHTRecordReport::new( result.inspect_result.subkeys, @@ -967,15 +1108,12 @@ impl StorageManager { // Send single value change out to the network #[instrument(level = "trace", target = "stor", skip(self), err)] async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> { - let rpc_processor = { - let inner = self.inner.lock().await; - if let Some(rpc_processor) = Self::online_ready_inner(&inner) { - rpc_processor.clone() - } else { - apibail_try_again!("network is not available"); - } + if !self.dht_is_online() { + apibail_try_again!("network is not available"); }; + let rpc_processor = self.rpc_processor(); + let dest = rpc_processor .resolve_target_to_destination( vc.target, @@ -993,28 +1131,21 @@ impl StorageManager { } // Send a value change up through the callback - #[instrument(level = "trace", target = "stor", skip(self, value), err)] - async fn update_callback_value_change( + #[instrument(level = "trace", target = "stor", skip(self, value))] + fn update_callback_value_change( &self, key: TypedKey, subkeys: ValueSubkeyRangeSet, count: u32, value: Option, - ) -> Result<(), VeilidAPIError> { - let opt_update_callback = { - let inner = self.lock().await?; - inner.update_callback.clone() - }; - - if let Some(update_callback) = opt_update_callback { - update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { - key, - subkeys, - count, - value, - }))); - } - 
Ok(()) + ) { + let update_callback = self.update_callback(); + update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { + key, + subkeys, + count, + value, + }))); } #[instrument(level = "trace", target = "stor", skip_all)] @@ -1027,8 +1158,9 @@ impl StorageManager { match fanout_result.kind { FanoutResultKind::Partial => false, FanoutResultKind::Timeout => { - let get_consensus = - self.unlocked_inner.config.get().network.dht.get_value_count as usize; + let get_consensus = self + .config() + .with(|c| c.network.dht.get_value_count as usize); let value_node_count = fanout_result.value_nodes.len(); if value_node_count < get_consensus { log_stor!(debug "timeout with insufficient consensus ({}<{}), adding offline subkey: {}:{}", @@ -1043,8 +1175,9 @@ impl StorageManager { } } FanoutResultKind::Exhausted => { - let get_consensus = - self.unlocked_inner.config.get().network.dht.get_value_count as usize; + let get_consensus = self + .config() + .with(|c| c.network.dht.get_value_count as usize); let value_node_count = fanout_result.value_nodes.len(); if value_node_count < get_consensus { log_stor!(debug "exhausted with insufficient consensus ({}<{}), adding offline subkey: {}:{}", @@ -1061,4 +1194,547 @@ impl StorageManager { FanoutResultKind::Finished => false, } } + + //////////////////////////////////////////////////////////////////////// + #[instrument(level = "trace", target = "stor", skip_all, err)] + async fn create_new_owned_local_record_inner( + &self, + inner: &mut StorageManagerInner, + kind: CryptoKind, + schema: DHTSchema, + owner: Option, + safety_selection: SafetySelection, + ) -> VeilidAPIResult<(TypedKey, KeyPair)> { + // Get cryptosystem + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get(kind) else { + apibail_generic!("unsupported cryptosystem"); + }; + + // Get local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Verify the dht schema does not 
contain the node id + { + let config = self.config(); + let cfg = config.get(); + if let Some(node_id) = cfg.network.routing_table.node_id.get(kind) { + if schema.is_member(&node_id.value) { + apibail_invalid_argument!( + "node id can not be schema member", + "schema", + node_id.value + ); + } + } + } + + // Compile the dht schema + let schema_data = schema.compile(); + + // New values require a new owner key if not given + let owner = owner.unwrap_or_else(|| vcrypto.generate_keypair()); + + // Calculate dht key + let dht_key = Self::get_key(&vcrypto, &owner.key, &schema_data); + + // Make a signed value descriptor for this dht value + let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature( + owner.key, + schema_data, + &vcrypto, + owner.secret, + )?); + + // Add new local value record + let cur_ts = Timestamp::now(); + let local_record_detail = LocalRecordDetail::new(safety_selection); + let record = + Record::::new(cur_ts, signed_value_descriptor, local_record_detail)?; + + local_record_store.new_record(dht_key, record).await?; + + Ok((dht_key, owner)) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + async fn move_remote_record_to_local_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + safety_selection: SafetySelection, + ) -> VeilidAPIResult> { + // Get local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Get remote record store + let Some(remote_record_store) = inner.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + let rcb = |r: &Record| { + // Return record details + r.clone() + }; + let Some(remote_record) = remote_record_store.with_record(key, rcb) else { + // No local or remote record found, return None + return Ok(None); + }; + + // Make local record + let cur_ts = Timestamp::now(); + let local_record = Record::new( + cur_ts, + remote_record.descriptor().clone(), + 
LocalRecordDetail::new(safety_selection), + )?; + local_record_store.new_record(key, local_record).await?; + + // Move copy subkey data from remote to local store + for subkey in remote_record.stored_subkeys().iter() { + let Some(get_result) = remote_record_store.get_subkey(key, subkey, false).await? else { + // Subkey was missing + warn!("Subkey was missing: {} #{}", key, subkey); + continue; + }; + let Some(subkey_data) = get_result.opt_value else { + // Subkey was missing + warn!("Subkey data was missing: {} #{}", key, subkey); + continue; + }; + local_record_store + .set_subkey(key, subkey, subkey_data, WatchUpdateMode::NoUpdate) + .await?; + } + + // Move watches + local_record_store.move_watches(key, remote_record_store.move_watches(key, None)); + + // Delete remote record from store + remote_record_store.delete_record(key).await?; + + // Return record information as transferred to local record + Ok(Some((*remote_record.owner(), remote_record.schema()))) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub async fn open_existing_record_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + writer: Option, + safety_selection: SafetySelection, + ) -> VeilidAPIResult> { + // Get local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // See if we have a local record already or not + let cb = |r: &mut Record| { + // Process local record + + // Keep the safety selection we opened the record with + r.detail_mut().safety_selection = safety_selection; + + // Return record details + (*r.owner(), r.schema()) + }; + let (owner, schema) = match local_record_store.with_record_mut(key, cb) { + Some(v) => v, + None => { + // If we don't have a local record yet, check to see if we have a remote record + // if so, migrate it to a local record + let Some(v) = + Self::move_remote_record_to_local_inner(&mut *inner, key, safety_selection) + .await? 
+ else { + // No remote record either + return Ok(None); + }; + v + } + }; + // Had local record + + // If the writer we chose is also the owner, we have the owner secret + // Otherwise this is just another subkey writer + let owner_secret = if let Some(writer) = writer { + if writer.key == owner { + Some(writer.secret) + } else { + None + } + } else { + None + }; + + // Write open record + inner + .opened_records + .entry(key) + .and_modify(|e| { + e.set_writer(writer); + e.set_safety_selection(safety_selection); + }) + .or_insert_with(|| OpenedRecord::new(writer, safety_selection)); + + // Make DHT Record Descriptor to return + let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); + Ok(Some(descriptor)) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub async fn open_new_record_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + writer: Option, + subkey: ValueSubkey, + get_result: GetResult, + safety_selection: SafetySelection, + ) -> VeilidAPIResult { + // Ensure the record is closed + if inner.opened_records.contains_key(&key) { + panic!("new record should never be opened at this point"); + } + + // Must have descriptor + let Some(signed_value_descriptor) = get_result.opt_descriptor else { + // No descriptor for new record, can't store this + apibail_generic!("no descriptor"); + }; + // Get owner + let owner = *signed_value_descriptor.owner(); + + // If the writer we chose is also the owner, we have the owner secret + // Otherwise this is just another subkey writer + let owner_secret = if let Some(writer) = writer { + if writer.key == owner { + Some(writer.secret) + } else { + None + } + } else { + None + }; + let schema = signed_value_descriptor.schema()?; + + // Get local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Make and store a new record for this descriptor + let record = Record::::new( + Timestamp::now(), + 
signed_value_descriptor, + LocalRecordDetail::new(safety_selection), + )?; + local_record_store.new_record(key, record).await?; + + // If we got a subkey with the getvalue, it has already been validated against the schema, so store it + if let Some(signed_value_data) = get_result.opt_value { + // Write subkey to local store + local_record_store + .set_subkey(key, subkey, signed_value_data, WatchUpdateMode::NoUpdate) + .await?; + } + + // Write open record + inner + .opened_records + .insert(key, OpenedRecord::new(writer, safety_selection)); + + // Make DHT Record Descriptor to return + let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); + Ok(descriptor) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub async fn get_value_nodes(&self, key: TypedKey) -> VeilidAPIResult>> { + let inner = self.inner.lock().await; + // Get local record store + let Some(local_record_store) = inner.local_record_store.as_ref() else { + apibail_not_initialized!(); + }; + + // Get routing table to see if we still know about these nodes + let routing_table = self.routing_table(); + + let opt_value_nodes = local_record_store.peek_record(key, |r| { + let d = r.detail(); + d.nodes + .keys() + .copied() + .filter_map(|x| { + routing_table + .lookup_node_ref(TypedKey::new(key.kind, x)) + .ok() + .flatten() + }) + .collect() + }); + + Ok(opt_value_nodes) + } + + #[instrument(level = "trace", target = "stor", skip_all)] + pub(super) fn process_fanout_results_inner< + 'a, + I: IntoIterator, + >( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey_results_iter: I, + is_set: bool, + consensus_count: usize, + ) { + // Get local record store + let local_record_store = inner.local_record_store.as_mut().unwrap(); + + let cur_ts = Timestamp::now(); + local_record_store.with_record_mut(key, |r| { + let d = r.detail_mut(); + + for (subkey, fanout_result) in subkey_results_iter { + for node_id in fanout_result + .value_nodes + .iter() + 
.filter_map(|x| x.node_ids().get(key.kind).map(|k| k.value)) + { + let pnd = d.nodes.entry(node_id).or_default(); + if is_set || pnd.last_set == Timestamp::default() { + pnd.last_set = cur_ts; + } + pnd.last_seen = cur_ts; + pnd.subkeys.insert(subkey); + } + } + + // Purge nodes down to the N most recently seen, where N is the consensus count for a set operation + let mut nodes_ts = d + .nodes + .iter() + .map(|kv| (*kv.0, kv.1.last_seen)) + .collect::>(); + nodes_ts.sort_by(|a, b| b.1.cmp(&a.1)); + + for dead_node_key in nodes_ts.iter().skip(consensus_count) { + d.nodes.remove(&dead_node_key.0); + } + }); + } + + fn close_record_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + ) -> VeilidAPIResult> { + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if local_record_store.peek_record(key, |_| {}).is_none() { + return Err(VeilidAPIError::key_not_found(key)); + } + + Ok(inner.opened_records.remove(&key)) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + async fn handle_get_local_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(get_result) = local_record_store + .get_subkey(key, subkey, want_descriptor) + .await? 
+ { + return Ok(get_result); + } + + Ok(GetResult { + opt_value: None, + opt_descriptor: None, + }) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub(super) async fn handle_set_local_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey: ValueSubkey, + signed_value_data: Arc, + watch_update_mode: WatchUpdateMode, + ) -> VeilidAPIResult<()> { + // See if it's in the local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Write subkey to local store + local_record_store + .set_subkey(key, subkey, signed_value_data, watch_update_mode) + .await?; + + Ok(()) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub(super) async fn handle_inspect_local_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the local record store + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(inspect_result) = local_record_store + .inspect_record(key, subkeys, want_descriptor) + .await? + { + return Ok(inspect_result); + } + + Ok(InspectResult { + subkeys: ValueSubkeyRangeSet::new(), + seqs: vec![], + opt_descriptor: None, + }) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub(super) async fn handle_get_remote_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the remote record store + let Some(remote_record_store) = inner.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(get_result) = remote_record_store + .get_subkey(key, subkey, want_descriptor) + .await? 
+ { + return Ok(get_result); + } + + Ok(GetResult { + opt_value: None, + opt_descriptor: None, + }) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub(super) async fn handle_set_remote_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey: ValueSubkey, + signed_value_data: Arc, + signed_value_descriptor: Arc, + watch_update_mode: WatchUpdateMode, + ) -> VeilidAPIResult<()> { + // See if it's in the remote record store + let Some(remote_record_store) = inner.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // See if we have a remote record already or not + if remote_record_store.with_record(key, |_| {}).is_none() { + // record didn't exist, make it + let cur_ts = Timestamp::now(); + let remote_record_detail = RemoteRecordDetail {}; + let record = Record::::new( + cur_ts, + signed_value_descriptor, + remote_record_detail, + )?; + remote_record_store.new_record(key, record).await? + }; + + // Write subkey to remote store + remote_record_store + .set_subkey(key, subkey, signed_value_data, watch_update_mode) + .await?; + + Ok(()) + } + + #[instrument(level = "trace", target = "stor", skip_all, err)] + pub(super) async fn handle_inspect_remote_value_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the local record store + let Some(remote_record_store) = inner.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(inspect_result) = remote_record_store + .inspect_record(key, subkeys, want_descriptor) + .await? 
+ { + return Ok(inspect_result); + } + + Ok(InspectResult { + subkeys: ValueSubkeyRangeSet::new(), + seqs: vec![], + opt_descriptor: None, + }) + } + + fn get_key( + vcrypto: &CryptoSystemGuard<'_>, + owner_key: &PublicKey, + schema_data: &[u8], + ) -> TypedKey { + let mut hash_data = Vec::::with_capacity(PUBLIC_KEY_LENGTH + 4 + schema_data.len()); + hash_data.extend_from_slice(&vcrypto.kind().0); + hash_data.extend_from_slice(&owner_key.bytes); + hash_data.extend_from_slice(schema_data); + let hash = vcrypto.generate_hash(&hash_data); + TypedKey::new(vcrypto.kind(), hash) + } + + #[instrument(level = "trace", target = "stor", skip_all)] + pub(super) fn add_offline_subkey_write_inner( + inner: &mut StorageManagerInner, + key: TypedKey, + subkey: ValueSubkey, + safety_selection: SafetySelection, + ) { + inner + .offline_subkey_writes + .entry(key) + .and_modify(|x| { + x.subkeys.insert(subkey); + }) + .or_insert(tasks::offline_subkey_writes::OfflineSubkeyWrite { + safety_selection, + subkeys: ValueSubkeyRangeSet::single(subkey), + subkeys_in_flight: ValueSubkeyRangeSet::new(), + }); + } + + #[instrument(level = "trace", target = "stor", skip_all)] + pub(super) fn process_deferred_results( + &self, + receiver: flume::Receiver, + handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, + ) -> bool { + self.deferred_result_processor + .add(receiver.into_stream(), handler) + } } diff --git a/veilid-core/src/storage_manager/record_store/inspect_cache.rs b/veilid-core/src/storage_manager/record_store/inspect_cache.rs index d3b09189..12e2136e 100644 --- a/veilid-core/src/storage_manager/record_store/inspect_cache.rs +++ b/veilid-core/src/storage_manager/record_store/inspect_cache.rs @@ -20,6 +20,7 @@ impl InspectCacheL2 { } } +#[derive(Debug)] pub struct InspectCache { cache: LruCache, } diff --git a/veilid-core/src/storage_manager/record_store/mod.rs b/veilid-core/src/storage_manager/record_store/mod.rs index 0fbc818f..b99659b7 100644 --- 
a/veilid-core/src/storage_manager/record_store/mod.rs +++ b/veilid-core/src/storage_manager/record_store/mod.rs @@ -50,14 +50,13 @@ pub(super) struct RecordStore where D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>, { - table_store: TableStore, name: String, limits: RecordStoreLimits, /// The tabledb used for record data - record_table: Option, + record_table: TableDB, /// The tabledb used for subkey data - subkey_table: Option, + subkey_table: TableDB, /// The in-memory index that keeps track of what records are in the tabledb record_index: LruCache>, /// The in-memory cache of commonly accessed subkey data so we don't have to keep hitting the db @@ -80,6 +79,30 @@ where purge_dead_records_mutex: Arc>, } +impl fmt::Debug for RecordStore +where + D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RecordStore") + .field("name", &self.name) + .field("limits", &self.limits) + .field("record_table", &self.record_table) + .field("subkey_table", &self.subkey_table) + .field("record_index", &self.record_index) + .field("subkey_cache", &self.subkey_cache) + .field("inspect_cache", &self.inspect_cache) + .field("subkey_cache_total_size", &self.subkey_cache_total_size) + .field("total_storage_space", &self.total_storage_space) + .field("dead_records", &self.dead_records) + .field("changed_records", &self.changed_records) + .field("watched_records", &self.watched_records) + .field("changed_watched_values", &self.changed_watched_values) + .field("purge_dead_records_mutex", &self.purge_dead_records_mutex) + .finish() + } +} + /// The result of the do_get_value_operation #[derive(Default, Clone, Debug)] pub struct GetResult { @@ -104,7 +127,11 @@ impl RecordStore where D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>, { - pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self { + pub async fn try_create( + table_store: &TableStore, + 
name: &str, + limits: RecordStoreLimits, + ) -> EyreResult { let subkey_cache_size = limits.subkey_cache_size; let limit_subkey_cache_total_size = limits .max_subkey_cache_memory_mb @@ -113,12 +140,14 @@ where .max_storage_space_mb .map(|mb| mb as u64 * 1_048_576u64); - Self { - table_store, + let record_table = table_store.open(&format!("{}_records", name), 1).await?; + let subkey_table = table_store.open(&format!("{}_subkeys", name), 1).await?; + + let mut out = Self { name: name.to_owned(), limits, - record_table: None, - subkey_table: None, + record_table, + subkey_table, record_index: LruCache::new(limits.max_records.unwrap_or(usize::MAX)), subkey_cache: LruCache::new(subkey_cache_size), inspect_cache: InspectCache::new(subkey_cache_size), @@ -137,25 +166,20 @@ where watched_records: HashMap::new(), purge_dead_records_mutex: Arc::new(AsyncMutex::new(())), changed_watched_values: HashSet::new(), - } + }; + + out.setup().await?; + + Ok(out) } - pub async fn init(&mut self) -> EyreResult<()> { - let record_table = self - .table_store - .open(&format!("{}_records", self.name), 1) - .await?; - let subkey_table = self - .table_store - .open(&format!("{}_subkeys", self.name), 1) - .await?; - + async fn setup(&mut self) -> EyreResult<()> { // Pull record index from table into a vector to ensure we sort them - let record_table_keys = record_table.get_keys(0).await?; + let record_table_keys = self.record_table.get_keys(0).await?; let mut record_index_saved: Vec<(RecordTableKey, Record)> = Vec::with_capacity(record_table_keys.len()); for rtk in record_table_keys { - if let Some(vr) = record_table.load_json::>(0, &rtk).await? { + if let Some(vr) = self.record_table.load_json::>(0, &rtk).await? 
{ let rik = RecordTableKey::try_from(rtk.as_ref())?; record_index_saved.push((rik, vr)); } @@ -204,8 +228,6 @@ where self.dead_records.push(dr); } - self.record_table = Some(record_table); - self.subkey_table = Some(subkey_table); Ok(()) } @@ -284,11 +306,8 @@ where return; } - let record_table = self.record_table.clone().unwrap(); - let subkey_table = self.subkey_table.clone().unwrap(); - - let rt_xact = record_table.transact(); - let st_xact = subkey_table.transact(); + let rt_xact = self.record_table.transact(); + let st_xact = self.subkey_table.transact(); let dead_records = mem::take(&mut self.dead_records); for dr in dead_records { // Record should already be gone from index @@ -350,9 +369,7 @@ where return; } - let record_table = self.record_table.clone().unwrap(); - - let rt_xact = record_table.transact(); + let rt_xact = self.record_table.transact(); let changed_records = mem::take(&mut self.changed_records); for rtk in changed_records { // Get the changed record and save it to the table @@ -381,11 +398,6 @@ where apibail_internal!("record already exists"); } - // Get record table - let Some(record_table) = self.record_table.clone() else { - apibail_internal!("record store not initialized"); - }; - // If over size limit, dont create record self.total_storage_space .add((mem::size_of::() + record.total_size()) as u64) @@ -396,7 +408,7 @@ where } // Save to record table - record_table + self.record_table .store_json(0, &rtk.bytes(), &record) .await .map_err(VeilidAPIError::internal)?; @@ -552,11 +564,6 @@ where })); } - // Get subkey table - let Some(subkey_table) = self.subkey_table.clone() else { - apibail_internal!("record store not initialized"); - }; - // If subkey exists in subkey cache, use that let stk = SubkeyTableKey { key, subkey }; if let Some(record_data) = self.subkey_cache.get(&stk) { @@ -568,7 +575,8 @@ where })); } // If not in cache, try to pull from table store if it is in our stored subkey set - let Some(record_data) = subkey_table + let 
Some(record_data) = self + .subkey_table .load_json::(0, &stk.bytes()) .await .map_err(VeilidAPIError::internal)? @@ -624,11 +632,6 @@ where })); } - // Get subkey table - let Some(subkey_table) = self.subkey_table.clone() else { - apibail_internal!("record store not initialized"); - }; - // If subkey exists in subkey cache, use that let stk = SubkeyTableKey { key, subkey }; if let Some(record_data) = self.subkey_cache.peek(&stk) { @@ -640,7 +643,8 @@ where })); } // If not in cache, try to pull from table store if it is in our stored subkey set - let Some(record_data) = subkey_table + let Some(record_data) = self + .subkey_table .load_json::(0, &stk.bytes()) .await .map_err(VeilidAPIError::internal)? @@ -724,11 +728,6 @@ where apibail_invalid_argument!("subkey out of range", "subkey", subkey); } - // Get subkey table - let Some(subkey_table) = self.subkey_table.clone() else { - apibail_internal!("record store not initialized"); - }; - // Get the previous subkey and ensure we aren't going over the record size limit let mut prior_subkey_size = 0usize; @@ -740,7 +739,8 @@ where prior_subkey_size = record_data.data_size(); } else { // If not in cache, try to pull from table store - if let Some(record_data) = subkey_table + if let Some(record_data) = self + .subkey_table .load_json::(0, &stk_bytes) .await .map_err(VeilidAPIError::internal)? 
@@ -771,7 +771,7 @@ where } // Write subkey - subkey_table + self.subkey_table .store_json(0, &stk_bytes, &subkey_record_data) .await .map_err(VeilidAPIError::internal)?; @@ -810,11 +810,6 @@ where subkeys: ValueSubkeyRangeSet, want_descriptor: bool, ) -> VeilidAPIResult> { - // Get subkey table - let Some(subkey_table) = self.subkey_table.clone() else { - apibail_internal!("record store not initialized"); - }; - // Get record from index let Some((subkeys, opt_descriptor)) = self.with_record(key, |record| { // Get number of subkeys from schema and ensure we are getting the @@ -859,7 +854,8 @@ where } else { // If not in cache, try to pull from table store if it is in our stored subkey set // XXX: This would be better if it didn't have to pull the whole record data to get the seq. - if let Some(record_data) = subkey_table + if let Some(record_data) = self + .subkey_table .load_json::(0, &stk.bytes()) .await .map_err(VeilidAPIError::internal)? diff --git a/veilid-core/src/storage_manager/set_value.rs b/veilid-core/src/storage_manager/set_value.rs index 69bc968f..98f1f54a 100644 --- a/veilid-core/src/storage_manager/set_value.rs +++ b/veilid-core/src/storage_manager/set_value.rs @@ -28,33 +28,30 @@ impl StorageManager { #[instrument(level = "trace", target = "dht", skip_all, err)] pub(super) async fn outbound_set_value( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkey: ValueSubkey, safety_selection: SafetySelection, value: Arc, descriptor: Arc, ) -> VeilidAPIResult>> { - let routing_table = rpc_processor.routing_table(); let routing_domain = RoutingDomain::PublicInternet; // Get the DHT parameters for 'SetValue' - let (key_count, get_consensus_count, set_consensus_count, fanout, timeout_us) = { - let c = self.unlocked_inner.config.get(); - ( - c.network.dht.max_find_node_count as usize, - c.network.dht.get_value_count as usize, - c.network.dht.set_value_count as usize, - c.network.dht.set_value_fanout as usize, - 
TimestampDuration::from(ms_to_us(c.network.dht.set_value_timeout_ms)), - ) - }; + let (key_count, get_consensus_count, set_consensus_count, fanout, timeout_us) = + self.config().with(|c| { + ( + c.network.dht.max_find_node_count as usize, + c.network.dht.get_value_count as usize, + c.network.dht.set_value_count as usize, + c.network.dht.set_value_fanout as usize, + TimestampDuration::from(ms_to_us(c.network.dht.set_value_timeout_ms)), + ) + }); // Get the nodes we know are caching this value to seed the fanout let init_fanout_queue = { - let inner = self.inner.lock().await; - inner - .get_value_nodes(key)? + self.get_value_nodes(key) + .await? .unwrap_or_default() .into_iter() .filter(|x| { @@ -81,13 +78,15 @@ impl StorageManager { // Routine to call to generate fanout let call_routine = { let context = context.clone(); - let rpc_processor = rpc_processor.clone(); + let registry = self.registry(); move |next_node: NodeRef| { - let rpc_processor = rpc_processor.clone(); + let registry = registry.clone(); let context = context.clone(); let descriptor = descriptor.clone(); async move { + let rpc_processor = registry.rpc_processor(); + let send_descriptor = true; // xxx check if next_node needs the descriptor or not, see issue #203 // get most recent value to send @@ -99,7 +98,6 @@ impl StorageManager { // send across the wire let sva = network_result_try!( rpc_processor - .clone() .rpc_call_set_value( Destination::direct(next_node.routing_domain_filtered(routing_domain)) .with_safety(safety_selection), @@ -236,12 +234,14 @@ impl StorageManager { }; // Call the fanout in a spawned task + let registry = self.registry(); spawn( "outbound_set_value fanout", Box::pin( async move { + let routing_table = registry.routing_table(); let fanout_call = FanoutCall::new( - routing_table.clone(), + &routing_table, key, key_count, fanout, @@ -292,24 +292,25 @@ impl StorageManager { } #[instrument(level = "trace", target = "dht", skip_all)] - pub(super) fn 
process_deferred_outbound_set_value_result_inner( + pub(super) fn process_deferred_outbound_set_value_result( &self, - inner: &mut StorageManagerInner, res_rx: flume::Receiver>, key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, ) { - let this = self.clone(); + let registry = self.registry(); let last_value_data = Arc::new(Mutex::new(last_value_data)); - inner.process_deferred_results( + self.process_deferred_results( res_rx, Box::new( move |result: VeilidAPIResult| -> SendPinBoxFuture { - let this = this.clone(); + let registry = registry.clone(); let last_value_data = last_value_data.clone(); Box::pin(async move { + let this = registry.storage_manager(); + let result = match result { Ok(v) => v, Err(e) => { @@ -333,7 +334,7 @@ impl StorageManager { // If more partial results show up, don't send an update until we're done return true; } - // If we processed the final result, possibly send an update + // If we processed the final result, possibly send an update // if the sequence number changed since our first partial update // Send with a max count as this is not attached to any watch let changed = { @@ -346,9 +347,7 @@ impl StorageManager { } }; if changed { - if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await { - log_rtab!(debug "Failed sending deferred fanout value change: {}", e); - } + this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)); } // Return done @@ -369,29 +368,37 @@ impl StorageManager { result: set_value::OutboundSetValueResult, ) -> Result, VeilidAPIError> { // Regain the lock after network access - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // Report on fanout result offline let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result); if was_offline { // Failed to write, try again later - 
inner.add_offline_subkey_write(key, subkey, safety_selection); + Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection); } // Keep the list of nodes that returned a value for later reference - inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true); + Self::process_fanout_results_inner( + &mut inner, + key, + core::iter::once((subkey, &result.fanout_result)), + true, + self.config() + .with(|c| c.network.dht.set_value_count as usize), + ); // Return the new value if it differs from what was asked to set if result.signed_value_data.value_data() != &last_value_data { // Record the newer value and send and update since it is different than what we just set - inner - .handle_set_local_value( - key, - subkey, - result.signed_value_data.clone(), - WatchUpdateMode::UpdateAll, - ) - .await?; + + Self::handle_set_local_value_inner( + &mut inner, + key, + subkey, + result.signed_value_data.clone(), + WatchUpdateMode::UpdateAll, + ) + .await?; return Ok(Some(result.signed_value_data.value_data().clone())); } @@ -412,18 +419,20 @@ impl StorageManager { descriptor: Option>, target: Target, ) -> VeilidAPIResult>>> { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // See if this is a remote or local value let (is_local, last_get_result) = { // See if the subkey we are modifying has a last known local value - let last_get_result = inner.handle_get_local_value(key, subkey, true).await?; + let last_get_result = + Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await?; // If this is local, it must have a descriptor already if last_get_result.opt_descriptor.is_some() { (true, last_get_result) } else { // See if the subkey we are modifying has a last known remote value - let last_get_result = inner.handle_get_remote_value(key, subkey, true).await?; + let last_get_result = + Self::handle_get_remote_value_inner(&mut inner, key, subkey, true).await?; (false, last_get_result) } }; 
@@ -483,19 +492,24 @@ impl StorageManager { // Do the set and return no new value let res = if is_local { - inner - .handle_set_local_value(key, subkey, value, WatchUpdateMode::ExcludeTarget(target)) - .await + Self::handle_set_local_value_inner( + &mut inner, + key, + subkey, + value, + WatchUpdateMode::ExcludeTarget(target), + ) + .await } else { - inner - .handle_set_remote_value( - key, - subkey, - value, - actual_descriptor, - WatchUpdateMode::ExcludeTarget(target), - ) - .await + Self::handle_set_remote_value_inner( + &mut inner, + key, + subkey, + value, + actual_descriptor, + WatchUpdateMode::ExcludeTarget(target), + ) + .await }; match res { Ok(()) => {} diff --git a/veilid-core/src/storage_manager/storage_manager_inner.rs b/veilid-core/src/storage_manager/storage_manager_inner.rs deleted file mode 100644 index 2907d977..00000000 --- a/veilid-core/src/storage_manager/storage_manager_inner.rs +++ /dev/null @@ -1,762 +0,0 @@ -use super::*; - -const STORAGE_MANAGER_METADATA: &str = "storage_manager_metadata"; -const OFFLINE_SUBKEY_WRITES: &[u8] = b"offline_subkey_writes"; - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub(super) struct OfflineSubkeyWrite { - pub safety_selection: SafetySelection, - pub subkeys: ValueSubkeyRangeSet, - #[serde(default)] - pub subkeys_in_flight: ValueSubkeyRangeSet, -} - -/// Locked structure for storage manager -pub(super) struct StorageManagerInner { - unlocked_inner: Arc, - /// If we are started up - pub initialized: bool, - /// Records that have been 'opened' and are not yet closed - pub opened_records: HashMap, - /// Records that have ever been 'created' or 'opened' by this node, things we care about that we must republish to keep alive - pub local_record_store: Option>, - /// Records that have been pushed to this node for distribution by other nodes, that we make an effort to republish - pub remote_record_store: Option>, - /// Record subkeys that have not been pushed to the network because they were 
written to offline - pub offline_subkey_writes: HashMap, - /// Storage manager metadata that is persistent, including copy of offline subkey writes - pub metadata_db: Option, - /// RPC processor if it is available - pub opt_rpc_processor: Option, - /// Routing table if it is available - pub opt_routing_table: Option, - /// Background processing task (not part of attachment manager tick tree so it happens when detached too) - pub tick_future: Option>, - /// Update callback to send ValueChanged notification to - pub update_callback: Option, - /// Deferred result processor - pub deferred_result_processor: DeferredStreamProcessor, - - /// The maximum consensus count - set_consensus_count: usize, -} - -fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { - let c = config.get(); - RecordStoreLimits { - subkey_cache_size: c.network.dht.local_subkey_cache_size as usize, - max_subkey_size: MAX_SUBKEY_SIZE, - max_record_total_size: MAX_RECORD_DATA_SIZE, - max_records: None, - max_subkey_cache_memory_mb: Some(c.network.dht.local_max_subkey_cache_memory_mb as usize), - max_storage_space_mb: None, - public_watch_limit: c.network.dht.public_watch_limit as usize, - member_watch_limit: c.network.dht.member_watch_limit as usize, - max_watch_expiration: TimestampDuration::new(ms_to_us( - c.network.dht.max_watch_expiration_ms, - )), - min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)), - } -} - -fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { - let c = config.get(); - RecordStoreLimits { - subkey_cache_size: c.network.dht.remote_subkey_cache_size as usize, - max_subkey_size: MAX_SUBKEY_SIZE, - max_record_total_size: MAX_RECORD_DATA_SIZE, - max_records: Some(c.network.dht.remote_max_records as usize), - max_subkey_cache_memory_mb: Some(c.network.dht.remote_max_subkey_cache_memory_mb as usize), - max_storage_space_mb: Some(c.network.dht.remote_max_storage_space_mb as usize), - public_watch_limit: 
c.network.dht.public_watch_limit as usize, - member_watch_limit: c.network.dht.member_watch_limit as usize, - max_watch_expiration: TimestampDuration::new(ms_to_us( - c.network.dht.max_watch_expiration_ms, - )), - min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)), - } -} - -impl StorageManagerInner { - pub fn new(unlocked_inner: Arc) -> Self { - let set_consensus_count = unlocked_inner.config.get().network.dht.set_value_count as usize; - Self { - unlocked_inner, - initialized: false, - opened_records: Default::default(), - local_record_store: Default::default(), - remote_record_store: Default::default(), - offline_subkey_writes: Default::default(), - metadata_db: Default::default(), - opt_rpc_processor: Default::default(), - opt_routing_table: Default::default(), - tick_future: Default::default(), - update_callback: None, - deferred_result_processor: DeferredStreamProcessor::default(), - set_consensus_count, - } - } - - pub async fn init( - &mut self, - outer_self: StorageManager, - update_callback: UpdateCallback, - ) -> EyreResult<()> { - let metadata_db = self - .unlocked_inner - .table_store - .open(STORAGE_MANAGER_METADATA, 1) - .await?; - - let local_limits = local_limits_from_config(self.unlocked_inner.config.clone()); - let remote_limits = remote_limits_from_config(self.unlocked_inner.config.clone()); - - let mut local_record_store = RecordStore::new( - self.unlocked_inner.table_store.clone(), - "local", - local_limits, - ); - local_record_store.init().await?; - - let mut remote_record_store = RecordStore::new( - self.unlocked_inner.table_store.clone(), - "remote", - remote_limits, - ); - remote_record_store.init().await?; - - self.metadata_db = Some(metadata_db); - self.local_record_store = Some(local_record_store); - self.remote_record_store = Some(remote_record_store); - - self.load_metadata().await?; - - // Start deferred results processors - self.deferred_result_processor.init().await; - - // Schedule tick - let 
tick_future = interval("storage manager tick", 1000, move || { - let this = outer_self.clone(); - async move { - if let Err(e) = this.tick().await { - log_stor!(warn "storage manager tick failed: {}", e); - } - } - }); - self.tick_future = Some(tick_future); - self.update_callback = Some(update_callback); - self.initialized = true; - - Ok(()) - } - - pub async fn stop_ticker(&mut self) { - // Stop ticker - let tick_future = self.tick_future.take(); - if let Some(f) = tick_future { - f.await; - } - } - - pub async fn terminate(&mut self) { - self.update_callback = None; - - // Stop deferred result processor - self.deferred_result_processor.terminate().await; - - // Final flush on record stores - if let Some(mut local_record_store) = self.local_record_store.take() { - if let Err(e) = local_record_store.flush().await { - log_stor!(error "termination local record store tick failed: {}", e); - } - } - if let Some(mut remote_record_store) = self.remote_record_store.take() { - if let Err(e) = remote_record_store.flush().await { - log_stor!(error "termination remote record store tick failed: {}", e); - } - } - - // Save metadata - if self.metadata_db.is_some() { - if let Err(e) = self.save_metadata().await { - log_stor!(error "termination metadata save failed: {}", e); - } - self.metadata_db = None; - } - self.offline_subkey_writes.clear(); - - // Mark not initialized - self.initialized = false; - } - - async fn save_metadata(&mut self) -> EyreResult<()> { - if let Some(metadata_db) = &self.metadata_db { - let tx = metadata_db.transact(); - tx.store_json(0, OFFLINE_SUBKEY_WRITES, &self.offline_subkey_writes)?; - tx.commit().await.wrap_err("failed to commit")? 
- } - Ok(()) - } - - async fn load_metadata(&mut self) -> EyreResult<()> { - if let Some(metadata_db) = &self.metadata_db { - self.offline_subkey_writes = match metadata_db.load_json(0, OFFLINE_SUBKEY_WRITES).await - { - Ok(v) => v.unwrap_or_default(), - Err(_) => { - if let Err(e) = metadata_db.delete(0, OFFLINE_SUBKEY_WRITES).await { - log_stor!(debug "offline_subkey_writes format changed, clearing: {}", e); - } - Default::default() - } - } - } - Ok(()) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub async fn create_new_owned_local_record( - &mut self, - kind: CryptoKind, - schema: DHTSchema, - owner: Option, - safety_selection: SafetySelection, - ) -> VeilidAPIResult<(TypedKey, KeyPair)> { - // Get cryptosystem - let Some(vcrypto) = self.unlocked_inner.crypto.get(kind) else { - apibail_generic!("unsupported cryptosystem"); - }; - - // Get local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // Verify the dht schema does not contain the node id - { - let cfg = self.unlocked_inner.config.get(); - if let Some(node_id) = cfg.network.routing_table.node_id.get(kind) { - if schema.is_member(&node_id.value) { - apibail_invalid_argument!( - "node id can not be schema member", - "schema", - node_id.value - ); - } - } - } - - // Compile the dht schema - let schema_data = schema.compile(); - - // New values require a new owner key if not given - let owner = owner.unwrap_or_else(|| vcrypto.generate_keypair()); - - // Calculate dht key - let dht_key = Self::get_key(vcrypto.clone(), &owner.key, &schema_data); - - // Make a signed value descriptor for this dht value - let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature( - owner.key, - schema_data, - vcrypto.clone(), - owner.secret, - )?); - // Add new local value record - let cur_ts = Timestamp::now(); - let local_record_detail = LocalRecordDetail::new(safety_selection); - let record = - 
Record::::new(cur_ts, signed_value_descriptor, local_record_detail)?; - - local_record_store.new_record(dht_key, record).await?; - - Ok((dht_key, owner)) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - async fn move_remote_record_to_local( - &mut self, - key: TypedKey, - safety_selection: SafetySelection, - ) -> VeilidAPIResult> { - // Get local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // Get remote record store - let Some(remote_record_store) = self.remote_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - let rcb = |r: &Record| { - // Return record details - r.clone() - }; - let Some(remote_record) = remote_record_store.with_record(key, rcb) else { - // No local or remote record found, return None - return Ok(None); - }; - - // Make local record - let cur_ts = Timestamp::now(); - let local_record = Record::new( - cur_ts, - remote_record.descriptor().clone(), - LocalRecordDetail::new(safety_selection), - )?; - local_record_store.new_record(key, local_record).await?; - - // Move copy subkey data from remote to local store - for subkey in remote_record.stored_subkeys().iter() { - let Some(get_result) = remote_record_store.get_subkey(key, subkey, false).await? 
else { - // Subkey was missing - warn!("Subkey was missing: {} #{}", key, subkey); - continue; - }; - let Some(subkey_data) = get_result.opt_value else { - // Subkey was missing - warn!("Subkey data was missing: {} #{}", key, subkey); - continue; - }; - local_record_store - .set_subkey(key, subkey, subkey_data, WatchUpdateMode::NoUpdate) - .await?; - } - - // Move watches - local_record_store.move_watches(key, remote_record_store.move_watches(key, None)); - - // Delete remote record from store - remote_record_store.delete_record(key).await?; - - // Return record information as transferred to local record - Ok(Some((*remote_record.owner(), remote_record.schema()))) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub async fn open_existing_record( - &mut self, - key: TypedKey, - writer: Option, - safety_selection: SafetySelection, - ) -> VeilidAPIResult> { - // Get local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // See if we have a local record already or not - let cb = |r: &mut Record| { - // Process local record - - // Keep the safety selection we opened the record with - r.detail_mut().safety_selection = safety_selection; - - // Return record details - (*r.owner(), r.schema()) - }; - let (owner, schema) = match local_record_store.with_record_mut(key, cb) { - Some(v) => v, - None => { - // If we don't have a local record yet, check to see if we have a remote record - // if so, migrate it to a local record - let Some(v) = self - .move_remote_record_to_local(key, safety_selection) - .await? 
- else { - // No remote record either - return Ok(None); - }; - v - } - }; - // Had local record - - // If the writer we chose is also the owner, we have the owner secret - // Otherwise this is just another subkey writer - let owner_secret = if let Some(writer) = writer { - if writer.key == owner { - Some(writer.secret) - } else { - None - } - } else { - None - }; - - // Write open record - self.opened_records - .entry(key) - .and_modify(|e| { - e.set_writer(writer); - e.set_safety_selection(safety_selection); - }) - .or_insert_with(|| OpenedRecord::new(writer, safety_selection)); - - // Make DHT Record Descriptor to return - let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); - Ok(Some(descriptor)) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub async fn open_new_record( - &mut self, - key: TypedKey, - writer: Option, - subkey: ValueSubkey, - get_result: GetResult, - safety_selection: SafetySelection, - ) -> VeilidAPIResult { - // Ensure the record is closed - if self.opened_records.contains_key(&key) { - panic!("new record should never be opened at this point"); - } - - // Must have descriptor - let Some(signed_value_descriptor) = get_result.opt_descriptor else { - // No descriptor for new record, can't store this - apibail_generic!("no descriptor"); - }; - // Get owner - let owner = *signed_value_descriptor.owner(); - - // If the writer we chose is also the owner, we have the owner secret - // Otherwise this is just another subkey writer - let owner_secret = if let Some(writer) = writer { - if writer.key == owner { - Some(writer.secret) - } else { - None - } - } else { - None - }; - let schema = signed_value_descriptor.schema()?; - - // Get local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // Make and store a new record for this descriptor - let record = Record::::new( - Timestamp::now(), - signed_value_descriptor, - 
LocalRecordDetail::new(safety_selection), - )?; - local_record_store.new_record(key, record).await?; - - // If we got a subkey with the getvalue, it has already been validated against the schema, so store it - if let Some(signed_value_data) = get_result.opt_value { - // Write subkey to local store - local_record_store - .set_subkey(key, subkey, signed_value_data, WatchUpdateMode::NoUpdate) - .await?; - } - - // Write open record - self.opened_records - .insert(key, OpenedRecord::new(writer, safety_selection)); - - // Make DHT Record Descriptor to return - let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); - Ok(descriptor) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub fn get_value_nodes(&self, key: TypedKey) -> VeilidAPIResult>> { - // Get local record store - let Some(local_record_store) = self.local_record_store.as_ref() else { - apibail_not_initialized!(); - }; - - // Get routing table to see if we still know about these nodes - let Some(routing_table) = self.opt_rpc_processor.as_ref().map(|r| r.routing_table()) else { - apibail_try_again!("offline, try again later"); - }; - - let opt_value_nodes = local_record_store.peek_record(key, |r| { - let d = r.detail(); - d.nodes - .keys() - .copied() - .filter_map(|x| { - routing_table - .lookup_node_ref(TypedKey::new(key.kind, x)) - .ok() - .flatten() - }) - .collect() - }); - - Ok(opt_value_nodes) - } - - #[instrument(level = "trace", target = "stor", skip_all)] - pub(super) fn process_fanout_results< - 'a, - I: IntoIterator, - >( - &mut self, - key: TypedKey, - subkey_results_iter: I, - is_set: bool, - ) { - // Get local record store - let local_record_store = self.local_record_store.as_mut().unwrap(); - - let cur_ts = Timestamp::now(); - local_record_store.with_record_mut(key, |r| { - let d = r.detail_mut(); - - for (subkey, fanout_result) in subkey_results_iter { - for node_id in fanout_result - .value_nodes - .iter() - .filter_map(|x| 
x.node_ids().get(key.kind).map(|k| k.value)) - { - let pnd = d.nodes.entry(node_id).or_default(); - if is_set || pnd.last_set == Timestamp::default() { - pnd.last_set = cur_ts; - } - pnd.last_seen = cur_ts; - pnd.subkeys.insert(subkey); - } - } - - // Purge nodes down to the N most recently seen, where N is the consensus count for a set operation - let mut nodes_ts = d - .nodes - .iter() - .map(|kv| (*kv.0, kv.1.last_seen)) - .collect::>(); - nodes_ts.sort_by(|a, b| b.1.cmp(&a.1)); - - for dead_node_key in nodes_ts.iter().skip(self.set_consensus_count) { - d.nodes.remove(&dead_node_key.0); - } - }); - } - - pub fn close_record(&mut self, key: TypedKey) -> VeilidAPIResult> { - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - if local_record_store.peek_record(key, |_| {}).is_none() { - return Err(VeilidAPIError::key_not_found(key)); - } - - Ok(self.opened_records.remove(&key)) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_get_local_value( - &mut self, - key: TypedKey, - subkey: ValueSubkey, - want_descriptor: bool, - ) -> VeilidAPIResult { - // See if it's in the local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - if let Some(get_result) = local_record_store - .get_subkey(key, subkey, want_descriptor) - .await? 
- { - return Ok(get_result); - } - - Ok(GetResult { - opt_value: None, - opt_descriptor: None, - }) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_set_local_value( - &mut self, - key: TypedKey, - subkey: ValueSubkey, - signed_value_data: Arc, - watch_update_mode: WatchUpdateMode, - ) -> VeilidAPIResult<()> { - // See if it's in the local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // Write subkey to local store - local_record_store - .set_subkey(key, subkey, signed_value_data, watch_update_mode) - .await?; - - Ok(()) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_inspect_local_value( - &mut self, - key: TypedKey, - subkeys: ValueSubkeyRangeSet, - want_descriptor: bool, - ) -> VeilidAPIResult { - // See if it's in the local record store - let Some(local_record_store) = self.local_record_store.as_mut() else { - apibail_not_initialized!(); - }; - if let Some(inspect_result) = local_record_store - .inspect_record(key, subkeys, want_descriptor) - .await? - { - return Ok(inspect_result); - } - - Ok(InspectResult { - subkeys: ValueSubkeyRangeSet::new(), - seqs: vec![], - opt_descriptor: None, - }) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_get_remote_value( - &mut self, - key: TypedKey, - subkey: ValueSubkey, - want_descriptor: bool, - ) -> VeilidAPIResult { - // See if it's in the remote record store - let Some(remote_record_store) = self.remote_record_store.as_mut() else { - apibail_not_initialized!(); - }; - if let Some(get_result) = remote_record_store - .get_subkey(key, subkey, want_descriptor) - .await? 
- { - return Ok(get_result); - } - - Ok(GetResult { - opt_value: None, - opt_descriptor: None, - }) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_set_remote_value( - &mut self, - key: TypedKey, - subkey: ValueSubkey, - signed_value_data: Arc, - signed_value_descriptor: Arc, - watch_update_mode: WatchUpdateMode, - ) -> VeilidAPIResult<()> { - // See if it's in the remote record store - let Some(remote_record_store) = self.remote_record_store.as_mut() else { - apibail_not_initialized!(); - }; - - // See if we have a remote record already or not - if remote_record_store.with_record(key, |_| {}).is_none() { - // record didn't exist, make it - let cur_ts = Timestamp::now(); - let remote_record_detail = RemoteRecordDetail {}; - let record = Record::::new( - cur_ts, - signed_value_descriptor, - remote_record_detail, - )?; - remote_record_store.new_record(key, record).await? - }; - - // Write subkey to remote store - remote_record_store - .set_subkey(key, subkey, signed_value_data, watch_update_mode) - .await?; - - Ok(()) - } - - #[instrument(level = "trace", target = "stor", skip_all, err)] - pub(super) async fn handle_inspect_remote_value( - &mut self, - key: TypedKey, - subkeys: ValueSubkeyRangeSet, - want_descriptor: bool, - ) -> VeilidAPIResult { - // See if it's in the local record store - let Some(remote_record_store) = self.remote_record_store.as_mut() else { - apibail_not_initialized!(); - }; - if let Some(inspect_result) = remote_record_store - .inspect_record(key, subkeys, want_descriptor) - .await? 
- { - return Ok(inspect_result); - } - - Ok(InspectResult { - subkeys: ValueSubkeyRangeSet::new(), - seqs: vec![], - opt_descriptor: None, - }) - } - - pub async fn get_record_key( - &self, - kind: CryptoKind, - owner_key: &PublicKey, - schema: DHTSchema, - ) -> VeilidAPIResult { - // Get cryptosystem - let Some(vcrypto) = self.unlocked_inner.crypto.get(kind) else { - apibail_generic!("unsupported cryptosystem"); - }; - - Ok(Self::get_key(vcrypto, owner_key, &schema.compile())) - } - - fn get_key( - vcrypto: CryptoSystemVersion, - owner_key: &PublicKey, - schema_data: &[u8], - ) -> TypedKey { - let mut hash_data = Vec::::with_capacity(PUBLIC_KEY_LENGTH + 4 + schema_data.len()); - hash_data.extend_from_slice(&vcrypto.kind().0); - hash_data.extend_from_slice(&owner_key.bytes); - hash_data.extend_from_slice(schema_data); - let hash = vcrypto.generate_hash(&hash_data); - TypedKey::new(vcrypto.kind(), hash) - } - - #[instrument(level = "trace", target = "stor", skip_all)] - pub(super) fn add_offline_subkey_write( - &mut self, - key: TypedKey, - subkey: ValueSubkey, - safety_selection: SafetySelection, - ) { - self.offline_subkey_writes - .entry(key) - .and_modify(|x| { - x.subkeys.insert(subkey); - }) - .or_insert(OfflineSubkeyWrite { - safety_selection, - subkeys: ValueSubkeyRangeSet::single(subkey), - subkeys_in_flight: ValueSubkeyRangeSet::new(), - }); - } - - #[instrument(level = "trace", target = "stor", skip_all)] - pub fn process_deferred_results( - &mut self, - receiver: flume::Receiver, - handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, - ) -> bool { - self.deferred_result_processor - .add(receiver.into_stream(), handler) - } -} diff --git a/veilid-core/src/storage_manager/tasks/check_active_watches.rs b/veilid-core/src/storage_manager/tasks/check_active_watches.rs index 5500cd6b..5d6b3545 100644 --- a/veilid-core/src/storage_manager/tasks/check_active_watches.rs +++ b/veilid-core/src/storage_manager/tasks/check_active_watches.rs @@ -4,19 +4,16 @@ 
impl StorageManager { // Check if client-side watches on opened records either have dead nodes or if the watch has expired #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn check_active_watches_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, ) -> EyreResult<()> { { let mut inner = self.inner.lock().await; - let Some(routing_table) = inner.opt_routing_table.clone() else { - return Ok(()); - }; - let rss = routing_table.route_spec_store(); - let opt_update_callback = inner.update_callback.clone(); + let routing_table = self.routing_table(); + let update_callback = self.update_callback(); let cur_ts = Timestamp::now(); for (k, v) in inner.opened_records.iter_mut() { @@ -35,7 +32,11 @@ impl StorageManager { // See if the private route we're using is dead if !is_dead { if let Some(value_changed_route) = active_watch.opt_value_changed_route { - if rss.get_route_id_for_key(&value_changed_route).is_none() { + if routing_table + .route_spec_store() + .get_route_id_for_key(&value_changed_route) + .is_none() + { // Route we would receive value changes on is dead is_dead = true; } @@ -50,15 +51,13 @@ impl StorageManager { if is_dead { v.clear_active_watch(); - if let Some(update_callback) = opt_update_callback.clone() { - // Send valuechange with dead count and no subkeys - update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { - key: *k, - subkeys: ValueSubkeyRangeSet::new(), - count: 0, - value: None, - }))); - } + // Send valuechange with dead count and no subkeys + update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { + key: *k, + subkeys: ValueSubkeyRangeSet::new(), + count: 0, + value: None, + }))); } } } diff --git a/veilid-core/src/storage_manager/tasks/check_watched_records.rs b/veilid-core/src/storage_manager/tasks/check_watched_records.rs index 792e5df5..c784555f 100644 --- a/veilid-core/src/storage_manager/tasks/check_watched_records.rs +++ 
b/veilid-core/src/storage_manager/tasks/check_watched_records.rs @@ -4,7 +4,7 @@ impl StorageManager { // Check if server-side watches have expired #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn check_watched_records_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, diff --git a/veilid-core/src/storage_manager/tasks/flush_record_stores.rs b/veilid-core/src/storage_manager/tasks/flush_record_stores.rs index 8dbb3d41..295c1fb9 100644 --- a/veilid-core/src/storage_manager/tasks/flush_record_stores.rs +++ b/veilid-core/src/storage_manager/tasks/flush_record_stores.rs @@ -4,7 +4,7 @@ impl StorageManager { // Flush records stores to disk and remove dead records #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn flush_record_stores_task_routine( - self, + &self, _stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, diff --git a/veilid-core/src/storage_manager/tasks/mod.rs b/veilid-core/src/storage_manager/tasks/mod.rs index d946ff16..18b83225 100644 --- a/veilid-core/src/storage_manager/tasks/mod.rs +++ b/veilid-core/src/storage_manager/tasks/mod.rs @@ -10,102 +10,70 @@ impl StorageManager { pub(super) fn setup_tasks(&self) { // Set flush records tick task log_stor!(debug "starting flush record stores task"); - { - let this = self.clone(); - self.unlocked_inner - .flush_record_stores_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().flush_record_stores_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + flush_record_stores_task, + flush_record_stores_task_routine + ); + // Set offline subkey writes tick task log_stor!(debug "starting offline subkey writes task"); - { - let this = self.clone(); - self.unlocked_inner - .offline_subkey_writes_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().offline_subkey_writes_task_routine( - s, - Timestamp::new(l), - 
Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + offline_subkey_writes_task, + offline_subkey_writes_task_routine + ); + // Set send value changes tick task log_stor!(debug "starting send value changes task"); - { - let this = self.clone(); - self.unlocked_inner - .send_value_changes_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().send_value_changes_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + send_value_changes_task, + send_value_changes_task_routine + ); + // Set check active watches tick task log_stor!(debug "starting check active watches task"); - { - let this = self.clone(); - self.unlocked_inner - .check_active_watches_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().check_active_watches_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + check_active_watches_task, + check_active_watches_task_routine + ); + // Set check watched records tick task log_stor!(debug "starting checked watched records task"); - { - let this = self.clone(); - self.unlocked_inner - .check_watched_records_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().check_watched_records_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } + impl_setup_task!( + self, + Self, + check_watched_records_task, + check_watched_records_task_routine + ); } #[instrument(parent = None, level = "trace", target = "stor", name = "StorageManager::tick", skip_all, err)] pub async fn tick(&self) -> EyreResult<()> { // Run the flush stores task - self.unlocked_inner.flush_record_stores_task.tick().await?; + self.flush_record_stores_task.tick().await?; // Check active watches - self.unlocked_inner.check_active_watches_task.tick().await?; + self.check_active_watches_task.tick().await?; // Check watched records - self.unlocked_inner - .check_watched_records_task - .tick() - .await?; + 
self.check_watched_records_task.tick().await?; // Run online-only tasks - if self.online_writes_ready().await?.is_some() { + if self.dht_is_online() { // Run offline subkey writes task if there's work to be done - if self.has_offline_subkey_writes().await? { - self.unlocked_inner - .offline_subkey_writes_task - .tick() - .await?; + if self.has_offline_subkey_writes().await { + self.offline_subkey_writes_task.tick().await?; } // Send value changed notifications - self.unlocked_inner.send_value_changes_task.tick().await?; + self.send_value_changes_task.tick().await?; } Ok(()) } @@ -113,23 +81,23 @@ impl StorageManager { #[instrument(level = "trace", target = "stor", skip_all)] pub(super) async fn cancel_tasks(&self) { log_stor!(debug "stopping check watched records task"); - if let Err(e) = self.unlocked_inner.check_watched_records_task.stop().await { + if let Err(e) = self.check_watched_records_task.stop().await { warn!("check_watched_records_task not stopped: {}", e); } log_stor!(debug "stopping check active watches task"); - if let Err(e) = self.unlocked_inner.check_active_watches_task.stop().await { + if let Err(e) = self.check_active_watches_task.stop().await { warn!("check_active_watches_task not stopped: {}", e); } log_stor!(debug "stopping send value changes task"); - if let Err(e) = self.unlocked_inner.send_value_changes_task.stop().await { + if let Err(e) = self.send_value_changes_task.stop().await { warn!("send_value_changes_task not stopped: {}", e); } log_stor!(debug "stopping flush record stores task"); - if let Err(e) = self.unlocked_inner.flush_record_stores_task.stop().await { + if let Err(e) = self.flush_record_stores_task.stop().await { warn!("flush_record_stores_task not stopped: {}", e); } log_stor!(debug "stopping offline subkey writes task"); - if let Err(e) = self.unlocked_inner.offline_subkey_writes_task.stop().await { + if let Err(e) = self.offline_subkey_writes_task.stop().await { warn!("offline_subkey_writes_task not stopped: {}", e); } } 
diff --git a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs index 680bce8e..8a5a4b77 100644 --- a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs +++ b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs @@ -2,6 +2,14 @@ use super::*; use futures_util::*; use stop_token::future::FutureExt as _; +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OfflineSubkeyWrite { + pub safety_selection: SafetySelection, + pub subkeys: ValueSubkeyRangeSet, + #[serde(default)] + pub subkeys_in_flight: ValueSubkeyRangeSet, +} + #[derive(Debug)] enum OfflineSubkeyWriteResult { Finished(set_value::OutboundSetValueResult), @@ -27,19 +35,19 @@ impl StorageManager { // Write a single offline subkey #[instrument(level = "trace", target = "stor", skip_all, err)] async fn write_single_offline_subkey( - self, + &self, stop_token: StopToken, key: TypedKey, subkey: ValueSubkey, safety_selection: SafetySelection, ) -> EyreResult { - let Some(rpc_processor) = self.online_writes_ready().await? 
else { + if !self.dht_is_online() { // Cancel this operation because we're offline return Ok(OfflineSubkeyWriteResult::Cancelled); }; let get_result = { - let mut inner = self.lock().await?; - inner.handle_get_local_value(key, subkey, true).await + let mut inner = self.inner.lock().await; + Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await }; let Ok(get_result) = get_result else { log_stor!(debug "Offline subkey write had no subkey result: {}:{}", key, subkey); @@ -57,14 +65,7 @@ impl StorageManager { }; log_stor!(debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len()); let osvres = self - .outbound_set_value( - rpc_processor, - key, - subkey, - safety_selection, - value.clone(), - descriptor, - ) + .outbound_set_value(key, subkey, safety_selection, value.clone(), descriptor) .await; match osvres { Ok(res_rx) => { @@ -80,15 +81,16 @@ impl StorageManager { // Set the new value if it differs from what was asked to set if result.signed_value_data.value_data() != value.value_data() { // Record the newer value and send and update since it is different than what we just set - let mut inner = self.lock().await?; - inner - .handle_set_local_value( - key, - subkey, - result.signed_value_data.clone(), - WatchUpdateMode::UpdateAll, - ) - .await?; + let mut inner = self.inner.lock().await; + + Self::handle_set_local_value_inner( + &mut inner, + key, + subkey, + result.signed_value_data.clone(), + WatchUpdateMode::UpdateAll, + ) + .await?; } return Ok(OfflineSubkeyWriteResult::Finished(result)); @@ -112,7 +114,7 @@ impl StorageManager { // Write a set of subkeys of the same key #[instrument(level = "trace", target = "stor", skip_all, err)] async fn process_work_item( - self, + &self, stop_token: StopToken, work_item: WorkItem, ) -> EyreResult { @@ -125,7 +127,6 @@ impl StorageManager { } let result = match self - .clone() .write_single_offline_subkey( stop_token.clone(), work_item.key, @@ -178,7 +179,13 @@ impl 
StorageManager { // Process all results #[instrument(level = "trace", target = "stor", skip_all)] - fn process_single_result_inner(inner: &mut StorageManagerInner, result: WorkItemResult) { + async fn process_single_result(&self, result: WorkItemResult) { + let consensus_count = self + .config() + .with(|c| c.network.dht.set_value_count as usize); + + let mut inner = self.inner.lock().await; + // Debug print the result log_stor!(debug "Offline write result: {:?}", result); @@ -209,16 +216,18 @@ impl StorageManager { } // Keep the list of nodes that returned a value for later reference - inner.process_fanout_results( + Self::process_fanout_results_inner( + &mut inner, result.key, result.fanout_results.iter().map(|x| (x.0, &x.1)), true, + consensus_count, ); } #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn process_offline_subkey_writes( - self, + &self, stop_token: StopToken, work_items: Arc>>, ) -> EyreResult<()> { @@ -228,12 +237,10 @@ impl StorageManager { break; }; let result = self - .clone() .process_work_item(stop_token.clone(), work_item) .await?; { - let mut inner = self.lock().await?; - Self::process_single_result_inner(&mut inner, result); + self.process_single_result(result).await; } } @@ -243,14 +250,14 @@ impl StorageManager { // Best-effort write subkeys to the network that were written offline #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn offline_subkey_writes_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, ) -> EyreResult<()> { // Operate on a copy of the offline subkey writes map let work_items = { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // Move the current set of writes to 'in flight' for osw in &mut inner.offline_subkey_writes { osw.1.subkeys_in_flight = mem::take(&mut osw.1.subkeys); @@ -264,13 +271,12 @@ impl StorageManager { // Process everything let res = self - .clone() 
.process_offline_subkey_writes(stop_token, work_items) .await; // Ensure nothing is left in-flight when returning even due to an error { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; for osw in &mut inner.offline_subkey_writes { osw.1.subkeys = osw .1 diff --git a/veilid-core/src/storage_manager/tasks/send_value_changes.rs b/veilid-core/src/storage_manager/tasks/send_value_changes.rs index 98a43c24..08e13486 100644 --- a/veilid-core/src/storage_manager/tasks/send_value_changes.rs +++ b/veilid-core/src/storage_manager/tasks/send_value_changes.rs @@ -6,7 +6,7 @@ impl StorageManager { // Send value change notifications across the network #[instrument(level = "trace", target = "stor", skip_all, err)] pub(super) async fn send_value_changes_task_routine( - self, + &self, stop_token: StopToken, _last_ts: Timestamp, _cur_ts: Timestamp, @@ -31,10 +31,9 @@ impl StorageManager { // Add a future for each value change for vc in value_changes { - let this = self.clone(); unord.push( async move { - if let Err(e) = this.send_value_change(vc).await { + if let Err(e) = self.send_value_change(vc).await { log_stor!(debug "Failed to send value change: {}", e); } } diff --git a/veilid-core/src/storage_manager/types/signed_value_data.rs b/veilid-core/src/storage_manager/types/signed_value_data.rs index e89354f0..3cfcc989 100644 --- a/veilid-core/src/storage_manager/types/signed_value_data.rs +++ b/veilid-core/src/storage_manager/types/signed_value_data.rs @@ -19,7 +19,7 @@ impl SignedValueData { &self, owner: &PublicKey, subkey: ValueSubkey, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, ) -> VeilidAPIResult { let node_info_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?; // validate signature @@ -30,7 +30,7 @@ impl SignedValueData { value_data: ValueData, owner: &PublicKey, subkey: ValueSubkey, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, writer_secret: SecretKey, ) -> VeilidAPIResult { let 
node_info_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?; diff --git a/veilid-core/src/storage_manager/types/signed_value_descriptor.rs b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs index 10832c38..48bf2a6a 100644 --- a/veilid-core/src/storage_manager/types/signed_value_descriptor.rs +++ b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs @@ -17,7 +17,7 @@ impl SignedValueDescriptor { } } - pub fn validate(&self, vcrypto: CryptoSystemVersion) -> VeilidAPIResult<()> { + pub fn validate(&self, vcrypto: &CryptoSystemGuard<'_>) -> VeilidAPIResult<()> { // validate signature if !vcrypto.verify(&self.owner, &self.schema_data, &self.signature)? { apibail_parse_error!( @@ -49,7 +49,7 @@ impl SignedValueDescriptor { pub fn make_signature( owner: PublicKey, schema_data: Vec, - vcrypto: CryptoSystemVersion, + vcrypto: &CryptoSystemGuard<'_>, owner_secret: SecretKey, ) -> VeilidAPIResult { // create signature diff --git a/veilid-core/src/storage_manager/watch_value.rs b/veilid-core/src/storage_manager/watch_value.rs index 561d0ad8..b1044e51 100644 --- a/veilid-core/src/storage_manager/watch_value.rs +++ b/veilid-core/src/storage_manager/watch_value.rs @@ -25,7 +25,6 @@ impl StorageManager { #[instrument(level = "trace", target = "dht", skip_all, err)] pub(super) async fn outbound_watch_value_cancel( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkeys: ValueSubkeyRangeSet, safety_selection: SafetySelection, @@ -37,17 +36,11 @@ impl StorageManager { // Get the appropriate watcher key, if anonymous use a static anonymous watch key // which lives for the duration of the app's runtime - let watcher = opt_watcher.unwrap_or_else(|| { - self.unlocked_inner - .anonymous_watch_keys - .get(key.kind) - .unwrap() - .value - }); + let watcher = + opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value); let wva = VeilidAPIError::from_network_result( - rpc_processor - .clone() + self.rpc_processor() 
.rpc_call_watch_value( Destination::direct(watch_node.routing_domain_filtered(routing_domain)) .with_safety(safety_selection), @@ -80,7 +73,6 @@ impl StorageManager { #[instrument(target = "dht", level = "debug", skip_all, err)] pub(super) async fn outbound_watch_value_change( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkeys: ValueSubkeyRangeSet, expiration: Timestamp, @@ -101,17 +93,11 @@ impl StorageManager { // Get the appropriate watcher key, if anonymous use a static anonymous watch key // which lives for the duration of the app's runtime - let watcher = opt_watcher.unwrap_or_else(|| { - self.unlocked_inner - .anonymous_watch_keys - .get(key.kind) - .unwrap() - .value - }); + let watcher = + opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value); let wva = VeilidAPIError::from_network_result( - rpc_processor - .clone() + self.rpc_processor() .rpc_call_watch_value( Destination::direct(watch_node.routing_domain_filtered(routing_domain)) .with_safety(safety_selection), @@ -149,7 +135,6 @@ impl StorageManager { #[instrument(level = "trace", target = "dht", skip_all, err)] pub(super) async fn outbound_watch_value( &self, - rpc_processor: RPCProcessor, key: TypedKey, subkeys: ValueSubkeyRangeSet, expiration: Timestamp, @@ -171,7 +156,6 @@ impl StorageManager { }; return self .outbound_watch_value_cancel( - rpc_processor, key, subkeys, safety_selection, @@ -190,7 +174,6 @@ impl StorageManager { }; if let Some(res) = self .outbound_watch_value_change( - rpc_processor.clone(), key, subkeys.clone(), expiration, @@ -209,34 +192,26 @@ impl StorageManager { // Otherwise, treat this like a new watch } - let routing_table = rpc_processor.routing_table(); let routing_domain = RoutingDomain::PublicInternet; // Get the DHT parameters for 'WatchValue', some of which are the same for 'SetValue' operations - let (key_count, timeout_us, set_value_count) = { - let c = self.unlocked_inner.config.get(); + let (key_count, timeout_us, 
set_value_count) = self.config().with(|c| { ( c.network.dht.max_find_node_count as usize, TimestampDuration::from(ms_to_us(c.network.dht.set_value_timeout_ms)), c.network.dht.set_value_count as usize, ) - }; + }); // Get the appropriate watcher key, if anonymous use a static anonymous watch key // which lives for the duration of the app's runtime - let watcher = opt_watcher.unwrap_or_else(|| { - self.unlocked_inner - .anonymous_watch_keys - .get(key.kind) - .unwrap() - .value - }); + let watcher = + opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value); // Get the nodes we know are caching this value to seed the fanout let init_fanout_queue = { - let inner = self.inner.lock().await; - inner - .get_value_nodes(key)? + self.get_value_nodes(key) + .await? .unwrap_or_default() .into_iter() .filter(|x| { @@ -253,55 +228,60 @@ impl StorageManager { })); // Routine to call to generate fanout - let call_routine = |next_node: NodeRef| { - let rpc_processor = rpc_processor.clone(); + let call_routine = { let context = context.clone(); - let subkeys = subkeys.clone(); + let registry = self.registry(); + move |next_node: NodeRef| { + let context = context.clone(); + let registry = registry.clone(); - async move { - let wva = network_result_try!( - rpc_processor - .clone() - .rpc_call_watch_value( - Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection), - key, - subkeys, - expiration, - count, - watcher, - None - ) - .await? 
- ); + let subkeys = subkeys.clone(); - // Keep answer if we got one - // (accepted means the node could provide an answer, not that the watch is active) - if wva.answer.accepted { - let mut done = false; - if wva.answer.expiration_ts.as_u64() > 0 { - // If the expiration time is greater than zero this watch is active - log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node); - done = true; - } else { - // If the returned expiration time is zero, this watch was cancelled or rejected - // If we are asking to cancel then check_done will stop after the first node + async move { + let rpc_processor = registry.rpc_processor(); + let wva = network_result_try!( + rpc_processor + .rpc_call_watch_value( + Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection), + key, + subkeys, + expiration, + count, + watcher, + None + ) + .await? + ); + + // Keep answer if we got one + // (accepted means the node could provide an answer, not that the watch is active) + if wva.answer.accepted { + let mut done = false; + if wva.answer.expiration_ts.as_u64() > 0 { + // If the expiration time is greater than zero this watch is active + log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node); + done = true; + } else { + // If the returned expiration time is zero, this watch was cancelled or rejected + // If we are asking to cancel then check_done will stop after the first node + } + if done { + let mut ctx = context.lock(); + ctx.opt_watch_value_result = Some(OutboundWatchValueResult { + expiration_ts: wva.answer.expiration_ts, + watch_id: wva.answer.watch_id, + watch_node: next_node.clone(), + opt_value_changed_route: wva.reply_private_route, + }); + } } - if done { - let mut ctx = context.lock(); - ctx.opt_watch_value_result = Some(OutboundWatchValueResult { - expiration_ts: 
wva.answer.expiration_ts, - watch_id: wva.answer.watch_id, - watch_node: next_node.clone(), - opt_value_changed_route: wva.reply_private_route, - }); - } - } - // Return peers if we have some - log_network_result!(debug "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node); + // Return peers if we have some + log_network_result!(debug "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node); - Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers})) - }.instrument(tracing::trace_span!("outbound_watch_value call routine")) + Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers})) + }.instrument(tracing::trace_span!("outbound_watch_value call routine")) + } }; // Routine to call to check if we're done at each step @@ -318,8 +298,9 @@ impl StorageManager { // Use a fixed fanout concurrency of 1 because we only want one watch // Use a longer timeout (timeout_us * set_value_count) because we may need to try multiple nodes // and each one might take timeout_us time. 
+ let routing_table = self.routing_table(); let fanout_call = FanoutCall::new( - routing_table.clone(), + &routing_table, key, key_count, 1, @@ -381,7 +362,7 @@ impl StorageManager { params: WatchParameters, watch_id: Option, ) -> VeilidAPIResult> { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // Validate input if params.count == 0 && (watch_id.unwrap_or_default() == 0) { @@ -427,7 +408,7 @@ impl StorageManager { ) -> VeilidAPIResult> { // Update local record store with new value let (is_value_seq_newer, value) = { - let mut inner = self.lock().await?; + let mut inner = self.inner.lock().await; // Don't process update if the record is closed let Some(opened_record) = inner.opened_records.get_mut(&key) else { @@ -484,9 +465,8 @@ impl StorageManager { apibail_internal!("should not have value without first subkey"); }; - let last_get_result = inner - .handle_get_local_value(key, first_subkey, true) - .await?; + let last_get_result = + Self::handle_get_local_value_inner(&mut inner, key, first_subkey, true).await?; let descriptor = last_get_result.opt_descriptor.unwrap(); let schema = descriptor.schema()?; @@ -514,14 +494,14 @@ impl StorageManager { } } if is_value_seq_newer { - inner - .handle_set_local_value( - key, - first_subkey, - value.clone(), - WatchUpdateMode::NoUpdate, - ) - .await?; + Self::handle_set_local_value_inner( + &mut inner, + key, + first_subkey, + value.clone(), + WatchUpdateMode::NoUpdate, + ) + .await?; } } @@ -540,8 +520,7 @@ impl StorageManager { } else { None }; - self.update_callback_value_change(key, subkeys, count, value) - .await?; + self.update_callback_value_change(key, subkeys, count, value); } Ok(NetworkResult::value(())) diff --git a/veilid-core/src/table_store/mod.rs b/veilid-core/src/table_store/mod.rs index eb016890..72f0825a 100644 --- a/veilid-core/src/table_store/mod.rs +++ b/veilid-core/src/table_store/mod.rs @@ -5,13 +5,13 @@ pub use table_db::*; pub mod tests; -#[cfg(target_arch = 
"wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] mod wasm; -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] use wasm::*; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod native; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] use native::*; use keyvaluedb::*; @@ -20,14 +20,14 @@ const ALL_TABLE_NAMES: &[u8] = b"all_table_names"; /// Description of column #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct ColumnInfo { pub key_count: AlignedU64, } /// IO Stats for table #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct IOStatsInfo { /// Number of transaction. pub transactions: AlignedU64, @@ -51,7 +51,7 @@ pub struct IOStatsInfo { /// Description of table #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct TableInfo { /// Internal table name pub table_name: String, @@ -70,21 +70,41 @@ struct TableStoreInner { encryption_key: Option, all_table_names: HashMap, all_tables_db: Option, - crypto: Option, +} + +impl fmt::Debug for TableStoreInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TableStoreInner") + .field("opened", &self.opened) + .field("encryption_key", &self.encryption_key) + .field("all_table_names", &self.all_table_names) + //.field("all_tables_db", &self.all_tables_db) + .finish() + } } /// Veilid Table Storage. 
/// Database for storing key value pairs persistently and securely across runs. -#[derive(Clone)] pub struct TableStore { - _event_bus: EventBus, - config: VeilidConfig, - protected_store: ProtectedStore, + registry: VeilidComponentRegistry, + inner: Mutex, // Sync mutex here because TableDB drops can happen at any time table_store_driver: TableStoreDriver, - inner: Arc>, // Sync mutex here because TableDB drops can happen at any time - async_lock: Arc>, // Async mutex for operations + async_lock: Arc>, // Async mutex for operations } +impl fmt::Debug for TableStore { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TableStore") + .field("registry", &self.registry) + .field("inner", &self.inner) + //.field("table_store_driver", &self.table_store_driver) + .field("async_lock", &self.async_lock) + .finish() + } +} + +impl_veilid_component!(TableStore); + impl TableStore { fn new_inner() -> TableStoreInner { TableStoreInner { @@ -92,33 +112,21 @@ impl TableStore { encryption_key: None, all_table_names: HashMap::new(), all_tables_db: None, - crypto: None, } } - pub(crate) fn new( - event_bus: EventBus, - config: VeilidConfig, - protected_store: ProtectedStore, - ) -> Self { + pub(crate) fn new(registry: VeilidComponentRegistry) -> Self { let inner = Self::new_inner(); - let table_store_driver = TableStoreDriver::new(config.clone()); + let table_store_driver = TableStoreDriver::new(registry.config()); Self { - _event_bus: event_bus, - config, - protected_store, - inner: Arc::new(Mutex::new(inner)), + registry, + inner: Mutex::new(inner), table_store_driver, async_lock: Arc::new(AsyncMutex::new(())), } } - pub(crate) fn set_crypto(&self, crypto: Crypto) { - let mut inner = self.inner.lock(); - inner.crypto = Some(crypto); - } - - // Flush internal control state (must not use crypto) + // Flush internal control state async fn flush(&self) { let (all_table_names_value, all_tables_db) = { let inner = self.inner.lock(); @@ -142,8 +150,7 @@ impl 
TableStore { { apibail_invalid_argument!("table name is invalid", "table", table); } - let c = self.config.get(); - let namespace = c.namespace.clone(); + let namespace = self.config().with(|c| c.namespace.clone()); Ok(if namespace.is_empty() { table.to_string() } else { @@ -248,7 +255,7 @@ impl TableStore { } #[instrument(level = "trace", target = "tstore", skip_all)] - pub(crate) fn maybe_unprotect_device_encryption_key( + pub(crate) async fn maybe_unprotect_device_encryption_key( &self, dek_bytes: &[u8], device_encryption_key_password: &str, @@ -260,8 +267,8 @@ impl TableStore { // Get cryptosystem let kind = FourCC::try_from(&dek_bytes[0..4]).unwrap(); - let crypto = self.inner.lock().crypto.as_ref().unwrap().clone(); - let Some(vcrypto) = crypto.get(kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get_async(kind) else { bail!("unsupported cryptosystem '{kind}'"); }; @@ -278,10 +285,14 @@ impl TableStore { let shared_secret = vcrypto .derive_shared_secret(device_encryption_key_password.as_bytes(), &nonce.bytes) + .await .wrap_err("failed to derive shared secret")?; + let unprotected_key = vcrypto .decrypt_aead(protected_key, &nonce, &shared_secret, None) + .await .wrap_err("failed to decrypt device encryption key")?; + return Ok(TypedSharedSecret::new( kind, SharedSecret::try_from(unprotected_key.as_slice()) @@ -300,7 +311,7 @@ impl TableStore { } #[instrument(level = "trace", target = "tstore", skip_all)] - pub(crate) fn maybe_protect_device_encryption_key( + pub(crate) async fn maybe_protect_device_encryption_key( &self, dek: TypedSharedSecret, device_encryption_key_password: &str, @@ -316,17 +327,19 @@ impl TableStore { } // Get cryptosystem - let crypto = self.inner.lock().crypto.as_ref().unwrap().clone(); - let Some(vcrypto) = crypto.get(dek.kind) else { + let crypto = self.crypto(); + let Some(vcrypto) = crypto.get_async(dek.kind) else { bail!("unsupported cryptosystem '{}'", dek.kind); }; - let nonce = vcrypto.random_nonce(); + let 
nonce = vcrypto.random_nonce().await; let shared_secret = vcrypto .derive_shared_secret(device_encryption_key_password.as_bytes(), &nonce.bytes) + .await .wrap_err("failed to derive shared secret")?; let mut protected_key = vcrypto .encrypt_aead(&dek.value.bytes, &nonce, &shared_secret, None) + .await .wrap_err("failed to decrypt device encryption key")?; let mut out = Vec::with_capacity(4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead() + NONCE_LENGTH); @@ -340,7 +353,7 @@ impl TableStore { #[instrument(level = "trace", target = "tstore", skip_all)] async fn load_device_encryption_key(&self) -> EyreResult> { let dek_bytes: Option> = self - .protected_store + .protected_store() .load_user_secret("device_encryption_key") .await?; let Some(dek_bytes) = dek_bytes else { @@ -349,15 +362,14 @@ impl TableStore { }; // Get device encryption key protection password if we have it - let device_encryption_key_password = { - let c = self.config.get(); - c.protected_store.device_encryption_key_password.clone() - }; + let device_encryption_key_password = self + .config() + .with(|c| c.protected_store.device_encryption_key_password.clone()); - Ok(Some(self.maybe_unprotect_device_encryption_key( - &dek_bytes, - &device_encryption_key_password, - )?)) + Ok(Some( + self.maybe_unprotect_device_encryption_key(&dek_bytes, &device_encryption_key_password) + .await?, + )) } #[instrument(level = "trace", target = "tstore", skip_all)] @@ -368,7 +380,7 @@ impl TableStore { let Some(device_encryption_key) = device_encryption_key else { // Remove the device encryption key let existed = self - .protected_store + .protected_store() .remove_user_secret("device_encryption_key") .await?; log_tstore!(debug "removed device encryption key. 
existed: {}", existed); @@ -377,15 +389,15 @@ impl TableStore { // Get new device encryption key protection password if we are changing it let new_device_encryption_key_password = { - let c = self.config.get(); - c.protected_store.new_device_encryption_key_password.clone() + self.config() + .with(|c| c.protected_store.new_device_encryption_key_password.clone()) }; let device_encryption_key_password = if let Some(new_device_encryption_key_password) = new_device_encryption_key_password { // Change password log_tstore!(debug "changing dek password"); - self.config - .with_mut(|c| { + self.config() + .try_with_mut(|c| { c.protected_store .device_encryption_key_password .clone_from(&new_device_encryption_key_password); @@ -395,18 +407,20 @@ impl TableStore { } else { // Get device encryption key protection password if we have it log_tstore!(debug "saving with existing dek password"); - let c = self.config.get(); - c.protected_store.device_encryption_key_password.clone() + self.config() + .with(|c| c.protected_store.device_encryption_key_password.clone()) }; - let dek_bytes = self.maybe_protect_device_encryption_key( - device_encryption_key, - &device_encryption_key_password, - )?; + let dek_bytes = self + .maybe_protect_device_encryption_key( + device_encryption_key, + &device_encryption_key_password, + ) + .await?; // Save the new device encryption key let existed = self - .protected_store + .protected_store() .save_user_secret("device_encryption_key", &dek_bytes) .await?; log_tstore!(debug "saving device encryption key. 
existed: {}", existed); @@ -414,87 +428,97 @@ impl TableStore { } #[instrument(level = "trace", target = "tstore", skip_all)] - pub(crate) async fn init(&self) -> EyreResult<()> { - let _async_guard = self.async_lock.lock().await; + async fn init_async(&self) -> EyreResult<()> { + { + let _async_guard = self.async_lock.lock().await; - // Get device encryption key from protected store - let mut device_encryption_key = self.load_device_encryption_key().await?; - let mut device_encryption_key_changed = false; - if let Some(device_encryption_key) = device_encryption_key { - // If encryption in current use is not the best encryption, then run table migration - let best_kind = best_crypto_kind(); - if device_encryption_key.kind != best_kind { - // XXX: Run migration. See issue #209 + // Get device encryption key from protected store + let mut device_encryption_key = self.load_device_encryption_key().await?; + let mut device_encryption_key_changed = false; + if let Some(device_encryption_key) = device_encryption_key { + // If encryption in current use is not the best encryption, then run table migration + let best_kind = best_crypto_kind(); + if device_encryption_key.kind != best_kind { + // XXX: Run migration. 
See issue #209 + } + } else { + // If we don't have an encryption key yet, then make one with the best cryptography and save it + let best_kind = best_crypto_kind(); + let mut shared_secret = SharedSecret::default(); + random_bytes(&mut shared_secret.bytes); + + device_encryption_key = Some(TypedSharedSecret::new(best_kind, shared_secret)); + device_encryption_key_changed = true; } - } else { - // If we don't have an encryption key yet, then make one with the best cryptography and save it - let best_kind = best_crypto_kind(); - let mut shared_secret = SharedSecret::default(); - random_bytes(&mut shared_secret.bytes); - device_encryption_key = Some(TypedSharedSecret::new(best_kind, shared_secret)); - device_encryption_key_changed = true; - } + // Check for password change + let changing_password = self.config().with(|c| { + c.protected_store + .new_device_encryption_key_password + .is_some() + }); - // Check for password change - let changing_password = self - .config - .get() - .protected_store - .new_device_encryption_key_password - .is_some(); + // Save encryption key if it has changed or if the protecting password wants to change + if device_encryption_key_changed || changing_password { + self.save_device_encryption_key(device_encryption_key) + .await?; + } - // Save encryption key if it has changed or if the protecting password wants to change - if device_encryption_key_changed || changing_password { - self.save_device_encryption_key(device_encryption_key) - .await?; - } - - // Deserialize all table names - let all_tables_db = self - .table_store_driver - .open("__veilid_all_tables", 1) - .await - .wrap_err("failed to create all tables table")?; - match all_tables_db.get(0, ALL_TABLE_NAMES).await { - Ok(Some(v)) => match deserialize_json_bytes::>(&v) { - Ok(all_table_names) => { - let mut inner = self.inner.lock(); - inner.all_table_names = all_table_names; + // Deserialize all table names + let all_tables_db = self + .table_store_driver + 
.open("__veilid_all_tables", 1) + .await + .wrap_err("failed to create all tables table")?; + match all_tables_db.get(0, ALL_TABLE_NAMES).await { + Ok(Some(v)) => match deserialize_json_bytes::>(&v) { + Ok(all_table_names) => { + let mut inner = self.inner.lock(); + inner.all_table_names = all_table_names; + } + Err(e) => { + error!("could not deserialize __veilid_all_tables: {}", e); + } + }, + Ok(None) => { + // No table names yet, that's okay + log_tstore!("__veilid_all_tables is empty"); } Err(e) => { - error!("could not deserialize __veilid_all_tables: {}", e); + error!("could not get __veilid_all_tables: {}", e); } - }, - Ok(None) => { - // No table names yet, that's okay - log_tstore!("__veilid_all_tables is empty"); - } - Err(e) => { - error!("could not get __veilid_all_tables: {}", e); - } - }; + }; - { - let mut inner = self.inner.lock(); - inner.encryption_key = device_encryption_key; - inner.all_tables_db = Some(all_tables_db); + { + let mut inner = self.inner.lock(); + inner.encryption_key = device_encryption_key; + inner.all_tables_db = Some(all_tables_db); + } + + let do_delete = self.config().with(|c| c.table_store.delete); + + if do_delete { + self.delete_all().await; + } } - let do_delete = { - let c = self.config.get(); - c.table_store.delete - }; - - if do_delete { - self.delete_all().await; - } + // Set up crypto + let crypto = self.crypto(); + crypto.table_store_setup(self).await?; Ok(()) } #[instrument(level = "trace", target = "tstore", skip_all)] - pub(crate) async fn terminate(&self) { + async fn post_init_async(&self) -> EyreResult<()> { + Ok(()) + } + + #[instrument(level = "trace", target = "tstore", skip_all)] + async fn pre_terminate_async(&self) {} + + #[instrument(level = "trace", target = "tstore", skip_all)] + async fn terminate_async(&self) { let _async_guard = self.async_lock.lock().await; self.flush().await; @@ -599,8 +623,7 @@ impl TableStore { let mut inner = self.inner.lock(); let table_db = TableDB::new( table_name.clone(), 
- self.clone(), - inner.crypto.as_ref().unwrap().clone(), + self.registry(), db, inner.encryption_key, inner.encryption_key, diff --git a/veilid-core/src/table_store/table_db.rs b/veilid-core/src/table_store/table_db.rs index f26e68db..146a9250 100644 --- a/veilid-core/src/table_store/table_db.rs +++ b/veilid-core/src/table_store/table_db.rs @@ -1,7 +1,7 @@ use crate::*; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use keyvaluedb_web::*; use keyvaluedb::*; } else { @@ -11,35 +11,34 @@ cfg_if! { } struct CryptInfo { - vcrypto: CryptoSystemVersion, - key: SharedSecret, + typed_key: TypedSharedSecret, } impl CryptInfo { - pub fn new(crypto: Crypto, typed_key: TypedSharedSecret) -> Self { - let vcrypto = crypto.get(typed_key.kind).unwrap(); - let key = typed_key.value; - Self { vcrypto, key } + pub fn new(typed_key: TypedSharedSecret) -> Self { + Self { typed_key } } } pub struct TableDBUnlockedInner { + registry: VeilidComponentRegistry, table: String, - table_store: TableStore, database: Database, // Encryption and decryption key will be the same unless configured for an in-place migration encrypt_info: Option, decrypt_info: Option, } +impl_veilid_component_registry_accessor!(TableDBUnlockedInner); impl fmt::Debug for TableDBUnlockedInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "TableDBInner(table={})", self.table) + write!(f, "TableDBUnlockedInner(table={})", self.table) } } impl Drop for TableDBUnlockedInner { fn drop(&mut self) { - self.table_store.on_table_db_drop(self.table.clone()); + let table_store = self.table_store(); + table_store.on_table_db_drop(self.table.clone()); } } @@ -52,15 +51,14 @@ pub struct TableDB { impl TableDB { pub(super) fn new( table: String, - table_store: TableStore, - crypto: Crypto, + registry: VeilidComponentRegistry, database: Database, encryption_key: Option, decryption_key: Option, opened_column_count: u32, ) -> Self { - let 
encrypt_info = encryption_key.map(|ek| CryptInfo::new(crypto.clone(), ek)); - let decrypt_info = decryption_key.map(|dk| CryptInfo::new(crypto.clone(), dk)); + let encrypt_info = encryption_key.map(CryptInfo::new); + let decrypt_info = decryption_key.map(CryptInfo::new); let total_columns = database.num_columns().unwrap(); @@ -72,7 +70,7 @@ impl TableDB { }, unlocked_inner: Arc::new(TableDBUnlockedInner { table, - table_store, + registry, database, encrypt_info, decrypt_info, @@ -102,6 +100,10 @@ impl TableDB { Arc::downgrade(&self.unlocked_inner) } + pub(super) fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> { + self.unlocked_inner.crypto() + } + /// Get the internal name of the table pub fn table_name(&self) -> String { self.unlocked_inner.table.clone() @@ -131,14 +133,16 @@ impl TableDB { fn maybe_encrypt(&self, data: &[u8], keyed_nonce: bool) -> Vec { let data = compress_prepend_size(data); if let Some(ei) = &self.unlocked_inner.encrypt_info { + let crypto = self.crypto(); + let vcrypto = crypto.get(ei.typed_key.kind).unwrap(); let mut out = unsafe { unaligned_u8_vec_uninit(NONCE_LENGTH + data.len()) }; if keyed_nonce { // Key content nonce let mut noncedata = Vec::with_capacity(data.len() + PUBLIC_KEY_LENGTH); noncedata.extend_from_slice(&data); - noncedata.extend_from_slice(&ei.key.bytes); - let noncehash = ei.vcrypto.generate_hash(&noncedata); + noncedata.extend_from_slice(&ei.typed_key.value.bytes); + let noncehash = vcrypto.generate_hash(&noncedata); out[0..NONCE_LENGTH].copy_from_slice(&noncehash[0..NONCE_LENGTH]) } else { // Random nonce @@ -146,11 +150,11 @@ impl TableDB { } let (nonce, encout) = out.split_at_mut(NONCE_LENGTH); - ei.vcrypto.crypt_b2b_no_auth( + vcrypto.crypt_b2b_no_auth( &data, encout, (nonce as &[u8]).try_into().unwrap(), - &ei.key, + &ei.typed_key.value, ); out } else { @@ -162,6 +166,8 @@ impl TableDB { #[instrument(level = "trace", target = "tstore", skip_all)] fn maybe_decrypt(&self, data: &[u8]) -> std::io::Result> { if let 
Some(di) = &self.unlocked_inner.decrypt_info { + let crypto = self.crypto(); + let vcrypto = crypto.get(di.typed_key.kind).unwrap(); assert!(data.len() >= NONCE_LENGTH); if data.len() == NONCE_LENGTH { return Ok(Vec::new()); @@ -169,11 +175,11 @@ impl TableDB { let mut out = unsafe { unaligned_u8_vec_uninit(data.len() - NONCE_LENGTH) }; - di.vcrypto.crypt_b2b_no_auth( + vcrypto.crypt_b2b_no_auth( &data[NONCE_LENGTH..], &mut out, (&data[0..NONCE_LENGTH]).try_into().unwrap(), - &di.key, + &di.typed_key.value, ); decompress_size_prepended(&out, None) .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string())) diff --git a/veilid-core/src/table_store/tests/test_table_store.rs b/veilid-core/src/table_store/tests/test_table_store.rs index e0331470..edf72ae0 100644 --- a/veilid-core/src/table_store/tests/test_table_store.rs +++ b/veilid-core/src/table_store/tests/test_table_store.rs @@ -15,7 +15,7 @@ async fn shutdown(api: VeilidAPI) { trace!("test_table_store: finished"); } -pub async fn test_delete_open_delete(ts: TableStore) { +pub async fn test_delete_open_delete(ts: &TableStore) { trace!("test_delete_open_delete"); let _ = ts.delete("test").await; @@ -47,7 +47,7 @@ pub async fn test_delete_open_delete(ts: TableStore) { ); } -pub async fn test_store_delete_load(ts: TableStore) { +pub async fn test_store_delete_load(ts: &TableStore) { trace!("test_store_delete_load"); let _ = ts.delete("test").await; @@ -132,7 +132,7 @@ pub async fn test_store_delete_load(ts: TableStore) { assert_eq!(db.load(2, b"baz").await.unwrap(), Some(b"QWERTY".to_vec())); } -pub async fn test_transaction(ts: TableStore) { +pub async fn test_transaction(ts: &TableStore) { trace!("test_transaction"); let _ = ts.delete("test").await; @@ -162,12 +162,12 @@ pub async fn test_transaction(ts: TableStore) { assert_eq!(db.load(0, b"ddd").await, Ok(None)); } -pub async fn test_json(vcrypto: CryptoSystemVersion, ts: TableStore) { +pub async fn test_json(vcrypto: &AsyncCryptoSystemGuard<'_>, 
ts: &TableStore) { trace!("test_json"); let _ = ts.delete("test").await; let db = ts.open("test", 3).await.expect("should have opened"); - let keypair = vcrypto.generate_keypair(); + let keypair = vcrypto.generate_keypair().await; assert!(db.store_json(0, b"asdf", &keypair).await.is_ok()); @@ -200,7 +200,7 @@ pub async fn test_json(vcrypto: CryptoSystemVersion, ts: TableStore) { ); } -pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore) { +pub async fn test_protect_unprotect(vcrypto: &AsyncCryptoSystemGuard<'_>, ts: &TableStore) { trace!("test_protect_unprotect"); let dek1 = TypedSharedSecret::new( @@ -237,16 +237,21 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore for dek in deks { for password in passwords { + trace!("testing dek {} with password {}", dek, password); let dek_bytes = ts .maybe_protect_device_encryption_key(dek, password) + .await .unwrap_or_else(|_| panic!("protect: dek: '{}' pw: '{}'", dek, password)); + let unprotected = ts .maybe_unprotect_device_encryption_key(&dek_bytes, password) + .await .unwrap_or_else(|_| panic!("unprotect: dek: '{}' pw: '{}'", dek, password)); assert_eq!(unprotected, dek); let invalid_password = format!("{}x", password); let _ = ts .maybe_unprotect_device_encryption_key(&dek_bytes, &invalid_password) + .await .expect_err(&format!( "invalid_password: dek: '{}' pw: '{}'", dek, &invalid_password @@ -254,6 +259,7 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore if !password.is_empty() { let _ = ts .maybe_unprotect_device_encryption_key(&dek_bytes, "") + .await .expect_err(&format!("empty_password: dek: '{}' pw: ''", dek)); } } @@ -266,12 +272,12 @@ pub async fn test_all() { let ts = api.table_store().unwrap(); for ck in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(ck).unwrap(); - test_protect_unprotect(vcrypto.clone(), ts.clone()).await; - test_delete_open_delete(ts.clone()).await; - test_store_delete_load(ts.clone()).await; 
- test_transaction(ts.clone()).await; - test_json(vcrypto, ts.clone()).await; + let vcrypto = crypto.get_async(ck).unwrap(); + test_protect_unprotect(&vcrypto, &ts).await; + test_delete_open_delete(&ts).await; + test_store_delete_load(&ts).await; + test_transaction(&ts).await; + test_json(&vcrypto, &ts).await; let _ = ts.delete("test").await; } diff --git a/veilid-core/src/tests/android/gradle/wrapper/gradle-wrapper.properties b/veilid-core/src/tests/android/gradle/wrapper/gradle-wrapper.properties index 609ab8e6..18330fcb 100644 --- a/veilid-core/src/tests/android/gradle/wrapper/gradle-wrapper.properties +++ b/veilid-core/src/tests/android/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,5 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/veilid-core/src/tests/android/veilid_core_android_tests/app/build.gradle b/veilid-core/src/tests/android/veilid_core_android_tests/app/build.gradle index e3e31a9d..514ad111 100644 --- a/veilid-core/src/tests/android/veilid_core_android_tests/app/build.gradle +++ b/veilid-core/src/tests/android/veilid_core_android_tests/app/build.gradle @@ -35,10 +35,10 @@ android { } } compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } - ndkVersion '26.3.11579264' + ndkVersion '27.0.12077973' // Required to copy libc++_shared.so externalNativeBuild { diff --git a/veilid-core/src/tests/android/veilid_core_android_tests/build.gradle b/veilid-core/src/tests/android/veilid_core_android_tests/build.gradle index cab50d59..a85d413a 100644 --- a/veilid-core/src/tests/android/veilid_core_android_tests/build.gradle +++ 
b/veilid-core/src/tests/android/veilid_core_android_tests/build.gradle @@ -13,7 +13,7 @@ buildscript { } plugins { - id "org.mozilla.rust-android-gradle.rust-android" version "0.9.3" + id "org.mozilla.rust-android-gradle.rust-android" version "0.9.6" } allprojects { diff --git a/veilid-core/src/tests/android/veilid_core_android_tests/gradle/wrapper/gradle-wrapper.properties b/veilid-core/src/tests/android/veilid_core_android_tests/gradle/wrapper/gradle-wrapper.properties index 83146d1a..2c32f5b0 100644 --- a/veilid-core/src/tests/android/veilid_core_android_tests/gradle/wrapper/gradle-wrapper.properties +++ b/veilid-core/src/tests/android/veilid_core_android_tests/gradle/wrapper/gradle-wrapper.properties @@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip diff --git a/veilid-core/src/tests/common/test_dht.rs b/veilid-core/src/tests/common/test_dht.rs index f2aeb20b..26346579 100644 --- a/veilid-core/src/tests/common/test_dht.rs +++ b/veilid-core/src/tests/common/test_dht.rs @@ -78,7 +78,8 @@ pub async fn test_create_dht_record_with_owner(api: VeilidAPI) { .with_safety(SafetySelection::Unsafe(Sequencing::EnsureOrdered)) .unwrap(); - let cs = api.crypto().unwrap().get(CRYPTO_KIND_VLD0).unwrap(); + let crypto = api.crypto().unwrap(); + let cs = crypto.get(CRYPTO_KIND_VLD0).unwrap(); let owner_keypair = cs.generate_keypair(); let rec = rc @@ -104,7 +105,8 @@ pub async fn test_get_dht_record_key(api: VeilidAPI) { .with_safety(SafetySelection::Unsafe(Sequencing::EnsureOrdered)) .unwrap(); - let cs = api.crypto().unwrap().get(CRYPTO_KIND_VLD0).unwrap(); + let crypto = api.crypto().unwrap(); + let cs = crypto.get(CRYPTO_KIND_VLD0).unwrap(); let owner_keypair = cs.generate_keypair(); let schema = DHTSchema::dflt(1).unwrap(); @@ 
-117,7 +119,6 @@ pub async fn test_get_dht_record_key(api: VeilidAPI) { // recreate the record key from the metadata alone let key = rc .get_dht_record_key(schema.clone(), &owner_keypair.key, Some(CRYPTO_KIND_VLD0)) - .await .unwrap(); // keys should be the same @@ -332,7 +333,8 @@ pub async fn test_open_writer_dht_value(api: VeilidAPI) { // 3. Try writing to subkey 1, expect error // 4. Try writing to subkey 0, expect error - let cs = api.crypto().unwrap().get(key.kind).unwrap(); + let crypto = api.crypto().unwrap(); + let cs = crypto.get(key.kind).unwrap(); assert!(cs.validate_keypair(owner, secret)); let other_keypair = cs.generate_keypair(); diff --git a/veilid-core/src/tests/common/test_protected_store.rs b/veilid-core/src/tests/common/test_protected_store.rs index ac6bf5ec..4347e105 100644 --- a/veilid-core/src/tests/common/test_protected_store.rs +++ b/veilid-core/src/tests/common/test_protected_store.rs @@ -15,7 +15,7 @@ async fn shutdown(api: VeilidAPI) { trace!("test_table_store: finished"); } -pub async fn test_protected_store(ps: ProtectedStore) { +pub async fn test_protected_store(ps: &ProtectedStore) { info!("testing protected store"); let _ = ps.remove_user_secret("_test_key").await; @@ -81,7 +81,7 @@ pub async fn test_protected_store(ps: ProtectedStore) { pub async fn test_all() { let api = startup().await; let ps = api.protected_store().unwrap(); - test_protected_store(ps.clone()).await; + test_protected_store(&ps).await; shutdown(api).await; } diff --git a/veilid-core/src/tests/common/test_veilid_config.rs b/veilid-core/src/tests/common/test_veilid_config.rs index dbabbf69..d5f7a44e 100644 --- a/veilid-core/src/tests/common/test_veilid_config.rs +++ b/veilid-core/src/tests/common/test_veilid_config.rs @@ -1,7 +1,7 @@ use crate::*; cfg_if! 
{ - if #[cfg(not(target_arch = "wasm32"))] { + if #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] { use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; @@ -61,7 +61,7 @@ wFAbkZY9eS/x6P7qrpd7dUA= cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { pub fn get_table_store_path() -> String { String::new() } @@ -154,8 +154,8 @@ cfg_if! { } } -fn update_callback(_update: VeilidUpdate) { - // info!("update_callback: {:?}", update); +fn update_callback(update: VeilidUpdate) { + info!("update_callback: {:?}", update); } pub fn setup_veilid_core() -> (UpdateCallback, ConfigCallback) { @@ -207,9 +207,9 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn { "network.routing_table.node_id" => Ok(Box::new(TypedKeyGroup::new())), "network.routing_table.node_id_secret" => Ok(Box::new(TypedSecretGroup::new())), // "network.routing_table.bootstrap" => Ok(Box::new(Vec::::new())), - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] "network.routing_table.bootstrap" => Ok(Box::new(vec!["bootstrap.veilid.net".to_string()])), - #[cfg(target_arch = "wasm32")] + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] "network.routing_table.bootstrap" => Ok(Box::new(vec![ "ws://bootstrap.veilid.net:5150/ws".to_string(), ])), @@ -284,6 +284,11 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn { "network.protocol.wss.url" => Ok(Box::new(Option::::None)), #[cfg(feature = "geolocation")] "network.privacy.country_code_denylist" => Ok(Box::new(Vec::::new())), + #[cfg(feature = "virtual-network")] + "network.virtual_network.enabled" => Ok(Box::new(false)), + #[cfg(feature = "virtual-network")] + "network.virtual_network.server_address" => Ok(Box::new("".to_owned())), + _ => { let err = format!("config key '{}' doesn't exist", key); debug!("{}", err); @@ -293,26 +298,17 @@ pub fn config_callback(key: String) -> 
ConfigCallbackReturn { } pub fn get_config() -> VeilidConfig { - let mut vc = VeilidConfig::new(); - match vc.setup(Arc::new(config_callback), Arc::new(update_callback)) { - Ok(()) => (), - Err(e) => { - error!("Error: {}", e); - unreachable!(); - } - }; - vc -} - -pub async fn test_config() { - let mut vc = VeilidConfig::new(); - match vc.setup(Arc::new(config_callback), Arc::new(update_callback)) { - Ok(()) => (), + match VeilidConfig::new_from_callback(Arc::new(config_callback), Arc::new(update_callback)) { + Ok(vc) => vc, Err(e) => { error!("Error: {}", e); unreachable!(); } } +} + +pub async fn test_config() { + let vc = get_config(); let inner = vc.get(); assert_eq!(inner.program_name, String::from("VeilidCoreTests")); @@ -351,12 +347,12 @@ pub async fn test_config() { assert_eq!(inner.network.rpc.default_route_hop_count, 1u8); assert_eq!(inner.network.routing_table.node_id.len(), 0); assert_eq!(inner.network.routing_table.node_id_secret.len(), 0); - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] assert_eq!( inner.network.routing_table.bootstrap, vec!["bootstrap.veilid.net"], ); - #[cfg(target_arch = "wasm32")] + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] assert_eq!( inner.network.routing_table.bootstrap, vec!["ws://bootstrap.veilid.net:5150/ws"], @@ -424,6 +420,10 @@ pub async fn test_config() { #[cfg(feature = "geolocation")] assert_eq!(inner.network.privacy.country_code_denylist, Vec::new()); + #[cfg(feature = "virtual-network")] + assert!(!inner.network.virtual_network.enabled); + #[cfg(feature = "virtual-network")] + assert_eq!(inner.network.virtual_network.server_address, ""); } pub async fn test_all() { diff --git a/veilid-core/src/tests/mod.rs b/veilid-core/src/tests/mod.rs index e82fa3cc..f507a62d 100644 --- a/veilid-core/src/tests/mod.rs +++ b/veilid-core/src/tests/mod.rs @@ -3,7 +3,7 @@ mod android; pub mod common; #[cfg(all(target_os = "ios", feature = 
"veilid_core_ios_tests"))] mod ios; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod native; #[allow(unused_imports)] diff --git a/veilid-core/src/tests/native/mod.rs b/veilid-core/src/tests/native/mod.rs index da3b577a..84d96ebf 100644 --- a/veilid-core/src/tests/native/mod.rs +++ b/veilid-core/src/tests/native/mod.rs @@ -1,5 +1,5 @@ //! Test suite for Native -#![cfg(not(target_arch = "wasm32"))] +#![cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] use crate::tests::*; use crate::*; diff --git a/veilid-core/src/veilid_api/api.rs b/veilid-core/src/veilid_api/api.rs index 77e346eb..2cc2e768 100644 --- a/veilid-core/src/veilid_api/api.rs +++ b/veilid-core/src/veilid_api/api.rs @@ -23,7 +23,7 @@ impl Drop for VeilidAPIInner { /// The primary developer entrypoint into `veilid-core` functionality. /// -/// From [VeilidAPI] one can access: +/// From [VeilidAPI] one can access various components: /// /// * [VeilidConfig] - The Veilid configuration specified at startup time. /// * [Crypto] - The available set of cryptosystems provided by Veilid. @@ -36,13 +36,13 @@ impl Drop for VeilidAPIInner { /// * Reply to `AppCall` RPCs. #[derive(Clone, Debug)] pub struct VeilidAPI { - pub(super) inner: Arc>, + inner: Arc>, } impl VeilidAPI { #[instrument(target = "veilid_api", level = "debug", skip_all)] pub(crate) fn new(context: VeilidCoreContext) -> Self { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::new()"); Self { inner: Arc::new(Mutex::new(VeilidAPIInner { @@ -60,7 +60,7 @@ impl VeilidAPI { /// Shut down Veilid and terminate the API. 
#[instrument(target = "veilid_api", level = "debug", skip_all)] pub async fn shutdown(self) { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::shutdown()"); let context = { self.inner.lock().context.take() }; if let Some(context) = context { @@ -79,83 +79,75 @@ impl VeilidAPI { /// Access the configuration that Veilid was initialized with. pub fn config(&self) -> VeilidAPIResult { let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.config.clone()); - } - Err(VeilidAPIError::NotInitialized) + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + Ok(context.registry().config()) } - /// Get the cryptosystem manager. - pub fn crypto(&self) -> VeilidAPIResult { + /// Get the cryptosystem component. + pub fn crypto(&self) -> VeilidAPIResult> { let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.crypto.clone()); - } - Err(VeilidAPIError::NotInitialized) + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + context + .registry() + .lookup::() + .ok_or(VeilidAPIError::NotInitialized) } - /// Get the TableStore manager. - pub fn table_store(&self) -> VeilidAPIResult { + /// Get the TableStore component. + pub fn table_store(&self) -> VeilidAPIResult> { let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.table_store.clone()); - } - Err(VeilidAPIError::not_initialized()) + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + context + .registry() + .lookup::() + .ok_or(VeilidAPIError::NotInitialized) } - /// Get the ProtectedStore manager. - pub fn protected_store(&self) -> VeilidAPIResult { + /// Get the ProtectedStore component. 
+ pub fn protected_store(&self) -> VeilidAPIResult> { let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.protected_store.clone()); - } - Err(VeilidAPIError::not_initialized()) + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + context + .registry() + .lookup::() + .ok_or(VeilidAPIError::NotInitialized) + } + + /// Get the BlockStore component. + #[cfg(feature = "unstable-blockstore")] + pub fn block_store(&self) -> VeilidAPIResult> { + let inner = self.inner.lock(); + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + context + .registry() + .lookup::() + .ok_or(VeilidAPIError::NotInitialized) } //////////////////////////////////////////////////////////////// // Internal Accessors - pub(crate) fn attachment_manager(&self) -> VeilidAPIResult { + + pub(crate) fn core_context(&self) -> VeilidAPIResult { let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.attachment_manager.clone()); - } - Err(VeilidAPIError::not_initialized()) + let Some(context) = &inner.context else { + return Err(VeilidAPIError::NotInitialized); + }; + Ok(context.clone()) } - pub(crate) fn network_manager(&self) -> VeilidAPIResult { - let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.attachment_manager.network_manager()); - } - Err(VeilidAPIError::not_initialized()) - } - pub(crate) fn rpc_processor(&self) -> VeilidAPIResult { - let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.attachment_manager.network_manager().rpc_processor()); - } - Err(VeilidAPIError::NotInitialized) - } - pub(crate) fn routing_table(&self) -> VeilidAPIResult { - let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.attachment_manager.network_manager().routing_table()); - } - Err(VeilidAPIError::NotInitialized) - } - pub(crate) fn 
storage_manager(&self) -> VeilidAPIResult { - let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.storage_manager.clone()); - } - Err(VeilidAPIError::NotInitialized) - } - #[cfg(feature = "unstable-blockstore")] - pub(crate) fn block_store(&self) -> VeilidAPIResult { - let inner = self.inner.lock(); - if let Some(context) = &inner.context { - return Ok(context.block_store.clone()); - } - Err(VeilidAPIError::not_initialized()) + + pub(crate) fn with_debug_cache R>(&self, callback: F) -> R { + let mut inner = self.inner.lock(); + callback(&mut inner.debug_cache) } //////////////////////////////////////////////////////////////// @@ -163,7 +155,7 @@ impl VeilidAPI { /// Get a full copy of the current state of Veilid. pub async fn get_state(&self) -> VeilidAPIResult { - let attachment_manager = self.attachment_manager()?; + let attachment_manager = self.core_context()?.attachment_manager(); let network_manager = attachment_manager.network_manager(); let config = self.config()?; @@ -181,10 +173,10 @@ impl VeilidAPI { /// Connect to the network. #[instrument(target = "veilid_api", level = "debug", skip_all, ret, err)] pub async fn attach(&self) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::attach()"); - let attachment_manager = self.attachment_manager()?; + let attachment_manager = self.core_context()?.attachment_manager(); if !attachment_manager.attach().await { apibail_generic!("Already attached"); } @@ -194,10 +186,10 @@ impl VeilidAPI { /// Disconnect from the network. 
#[instrument(target = "veilid_api", level = "debug", skip_all, ret, err)] pub async fn detach(&self) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::detach()"); - let attachment_manager = self.attachment_manager()?; + let attachment_manager = self.core_context()?.attachment_manager(); if !attachment_manager.detach().await { apibail_generic!("Already detached"); } @@ -210,7 +202,7 @@ impl VeilidAPI { /// Get a new `RoutingContext` object to use to send messages over the Veilid network with default safety, sequencing, and stability parameters. #[instrument(target = "veilid_api", level = "debug", skip_all, err, ret)] pub fn routing_context(&self) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::routing_context()"); RoutingContext::try_new(self.clone()) @@ -227,12 +219,12 @@ impl VeilidAPI { pub fn parse_as_target(&self, s: S) -> VeilidAPIResult { let s = s.to_string(); - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::parse_as_target(s: {:?})", s); // Is this a route id? if let Ok(rrid) = RouteId::from_str(&s) { - let routing_table = self.routing_table()?; + let routing_table = self.core_context()?.routing_table(); let rss = routing_table.route_spec_store(); // Is this a valid remote route id? 
(can't target allocated routes) @@ -284,7 +276,7 @@ impl VeilidAPI { stability: Stability, sequencing: Sequencing, ) -> VeilidAPIResult<(RouteId, Vec)> { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::new_custom_private_route(crypto_kinds: {:?}, stability: {:?}, sequencing: {:?})", crypto_kinds, stability, @@ -307,7 +299,8 @@ impl VeilidAPI { sequencing, }; - let rss = self.routing_table()?.route_spec_store(); + let routing_table = self.core_context()?.routing_table(); + let rss = routing_table.route_spec_store(); let route_id = rss.allocate_route(crypto_kinds, &safety_spec, DirectionSet::all(), &[], false)?; match rss.test_route(route_id).await? { @@ -342,9 +335,10 @@ impl VeilidAPI { /// Returns a route id that can be used to send private messages to the node creating this route. #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] pub fn import_remote_private_route(&self, blob: Vec) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::import_remote_private_route(blob: {:?})", blob); - let rss = self.routing_table()?.route_spec_store(); + let routing_table = self.core_context()?.routing_table(); + let rss = routing_table.route_spec_store(); rss.import_remote_private_route_blob(blob) } @@ -354,9 +348,10 @@ impl VeilidAPI { /// or received from. 
#[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] pub fn release_private_route(&self, route_id: RouteId) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::release_private_route(route_id: {:?})", route_id); - let rss = self.routing_table()?.route_spec_store(); + let routing_table = self.core_context()?.routing_table(); + let rss = routing_table.route_spec_store(); if !rss.release_route(route_id) { apibail_invalid_argument!("release_private_route", "key", route_id); } @@ -376,10 +371,10 @@ impl VeilidAPI { call_id: OperationId, message: Vec, ) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + event!(target: "veilid_api", Level::DEBUG, "VeilidAPI::app_call_reply(call_id: {:?}, message: {:?})", call_id, message); - let rpc_processor = self.rpc_processor()?; + let rpc_processor = self.core_context()?.rpc_processor(); rpc_processor .app_call_reply(call_id, message) .map_err(|e| e.into()) diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index 66e93a38..ae7b836f 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -95,7 +95,7 @@ fn get_dht_report_scope(text: &str) -> Option { } fn get_route_id( - rss: RouteSpecStore, + registry: VeilidComponentRegistry, allow_allocated: bool, allow_remote: bool, ) -> impl Fn(&str) -> Option { @@ -103,6 +103,9 @@ fn get_route_id( if text.is_empty() { return None; } + let routing_table = registry.routing_table(); + let rss = routing_table.route_spec_store(); + match RouteId::from_str(text).ok() { Some(key) => { if allow_allocated { @@ -153,11 +156,13 @@ fn get_dht_schema(text: &str) -> Option> { Some(deserialize_json::(text)) } -fn get_safety_selection(routing_table: RoutingTable) -> impl Fn(&str) -> Option { +fn get_safety_selection( + registry: VeilidComponentRegistry, +) -> impl Fn(&str) -> Option { move |text| { - let rss = 
routing_table.route_spec_store(); - let default_route_hop_count = - routing_table.with_config(|c| c.network.rpc.default_route_hop_count as usize); + let default_route_hop_count = registry + .config() + .with(|c| c.network.rpc.default_route_hop_count as usize); if !text.is_empty() && &text[0..1] == "-" { // Unsafe @@ -172,7 +177,7 @@ fn get_safety_selection(routing_table: RoutingTable) -> impl Fn(&str) -> Option< let mut sequencing = Sequencing::default(); for x in text.split(',') { let x = x.trim(); - if let Some(pr) = get_route_id(rss.clone(), true, false)(x) { + if let Some(pr) = get_route_id(registry.clone(), true, false)(x) { preferred_route = Some(pr) } if let Some(n) = get_number(x) { @@ -229,7 +234,9 @@ fn get_keypair(text: &str) -> Option { KeyPair::from_str(text).ok() } -fn get_crypto_system_version(crypto: Crypto) -> impl FnOnce(&str) -> Option { +fn get_crypto_system_version<'a>( + crypto: &'a Crypto, +) -> impl FnOnce(&str) -> Option> { move |text| { let kindstr = get_string(text)?; let kind = CryptoKind::from_str(&kindstr).ok()?; @@ -250,12 +257,12 @@ fn get_dht_key_no_safety(text: &str) -> Option { } fn get_dht_key( - routing_table: RoutingTable, + registry: VeilidComponentRegistry, ) -> impl FnOnce(&str) -> Option<(TypedKey, Option)> { move |text| { // Safety selection let (text, ss) = if let Some((first, second)) = text.split_once('+') { - let ss = get_safety_selection(routing_table.clone())(second)?; + let ss = get_safety_selection(registry)(second)?; (first, Some(ss)) } else { (text, None) @@ -277,7 +284,7 @@ fn get_dht_key( } fn resolve_filtered_node_ref( - routing_table: RoutingTable, + registry: VeilidComponentRegistry, safety_selection: SafetySelection, ) -> impl FnOnce(&str) -> SendPinBoxFuture> { move |text| { @@ -290,14 +297,14 @@ fn resolve_filtered_node_ref( let nr = if let Some(key) = get_public_key(text) { let node_id = TypedKey::new(best_crypto_kind(), key); - routing_table + registry .rpc_processor() .resolve_node(node_id, 
safety_selection) .await .ok() .flatten()? } else if let Some(node_id) = get_typed_key(text) { - routing_table + registry .rpc_processor() .resolve_node(node_id, safety_selection) .await @@ -315,8 +322,9 @@ fn resolve_filtered_node_ref( } } -fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option { +fn get_node_ref(registry: VeilidComponentRegistry) -> impl FnOnce(&str) -> Option { move |text| { + let routing_table = registry.routing_table(); let nr = if let Some(key) = get_public_key(text) { routing_table.lookup_any_node_ref(key).ok().flatten()? } else if let Some(node_id) = get_typed_key(text) { @@ -329,9 +337,11 @@ fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option impl FnOnce(&str) -> Option { move |text| { + let routing_table = registry.routing_table(); + // Safety selection let (text, seq) = if let Some((first, second)) = text.split_once('+') { let seq = get_sequencing(second)?; @@ -558,19 +568,19 @@ impl VeilidAPI { )?; } // Dump routing table bucket info - let routing_table = self.network_manager()?.routing_table(); + let routing_table = self.core_context()?.routing_table(); Ok(routing_table.debug_info_buckets(min_state)) } async fn debug_dialinfo(&self, _args: String) -> VeilidAPIResult { // Dump routing table dialinfo - let routing_table = self.network_manager()?.routing_table(); + let routing_table = self.core_context()?.routing_table(); Ok(routing_table.debug_info_dialinfo()) } async fn debug_peerinfo(&self, args: String) -> VeilidAPIResult { // Dump routing table peerinfo let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); - let routing_table = self.network_manager()?.routing_table(); + let routing_table = self.core_context()?.routing_table(); let mut ai = 0; let mut opt_routing_domain = None; @@ -601,7 +611,7 @@ impl VeilidAPI { async fn debug_txtrecord(&self, _args: String) -> VeilidAPIResult { // Dump routing table txt record - let routing_table = 
self.network_manager()?.routing_table(); + let routing_table = self.core_context()?.routing_table(); Ok(routing_table.debug_info_txtrecord().await) } @@ -614,7 +624,7 @@ impl VeilidAPI { 0, "debug_keypair", "kind", - get_crypto_system_version(crypto.clone()), + get_crypto_system_version(&crypto), ) .unwrap_or_else(|_| crypto.best()); @@ -646,7 +656,7 @@ impl VeilidAPI { } // Dump routing table entries - let routing_table = self.network_manager()?.routing_table(); + let routing_table = self.core_context()?.routing_table(); Ok(match fastest { true => routing_table.debug_info_entries_fastest(min_state, capabilities, 100000), false => routing_table.debug_info_entries(min_state, capabilities), @@ -655,31 +665,30 @@ impl VeilidAPI { async fn debug_entry(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); - let routing_table = self.network_manager()?.routing_table(); + let registry = self.core_context()?.registry(); let node_ref = get_debug_argument_at( &args, 0, "debug_entry", "node_id", - get_node_ref(routing_table), + get_node_ref(registry.clone()), )?; // Dump routing table entry - let routing_table = self.network_manager()?.routing_table(); - Ok(routing_table.debug_info_entry(node_ref)) + Ok(registry.routing_table().debug_info_entry(node_ref)) } async fn debug_relay(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); - let routing_table = self.network_manager()?.routing_table(); + let registry = self.core_context()?.registry(); let relay_node = get_debug_argument_at( &args, 0, "debug_relay", "node_id", - get_node_ref(routing_table), + get_node_ref(registry.clone()), ) .ok(); @@ -694,7 +703,7 @@ impl VeilidAPI { .unwrap_or(RoutingDomain::PublicInternet); // Dump routing table entry - let routing_table = self.network_manager()?.routing_table(); + let routing_table = registry.routing_table(); match routing_domain { RoutingDomain::LocalNetwork => 
{ let mut editor = routing_table.edit_local_network_routing_domain(); @@ -715,8 +724,8 @@ impl VeilidAPI { async fn debug_nodeinfo(&self, _args: String) -> VeilidAPIResult { // Dump routing table entry - let routing_table = self.network_manager()?.routing_table(); - let nodeinfo = routing_table.debug_info_nodeinfo(); + let registry = self.core_context()?.registry(); + let nodeinfo = registry.routing_table().debug_info_nodeinfo(); // Dump core state let state = self.get_state().await?; @@ -739,7 +748,7 @@ impl VeilidAPI { // Dump connection table let connman = - if let Some(connection_manager) = self.network_manager()?.opt_connection_manager() { + if let Some(connection_manager) = registry.network_manager().opt_connection_manager() { connection_manager.debug_print().await } else { "Connection manager unavailable when detached".to_owned() @@ -750,8 +759,8 @@ impl VeilidAPI { async fn debug_nodeid(&self, _args: String) -> VeilidAPIResult { // Dump routing table entry - let routing_table = self.network_manager()?.routing_table(); - let nodeid = routing_table.debug_info_nodeid(); + let registry = self.core_context()?.registry(); + let nodeid = registry.routing_table().debug_info_nodeid(); Ok(nodeid) } @@ -808,8 +817,8 @@ impl VeilidAPI { apibail_internal!("Must be attached to restart network"); } - let netman = self.network_manager()?; - netman.restart_network(); + let registry = self.core_context()?.registry(); + registry.network_manager().restart_network(); Ok("Network restarted".to_owned()) } else { @@ -818,6 +827,8 @@ impl VeilidAPI { } async fn debug_purge(&self, args: String) -> VeilidAPIResult { + let registry = self.core_context()?.registry(); + let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); if !args.is_empty() { if args[0] == "buckets" { @@ -828,20 +839,18 @@ impl VeilidAPI { ) { apibail_internal!("Must be detached to purge"); } - self.network_manager()?.routing_table().purge_buckets(); + registry.routing_table().purge_buckets(); 
Ok("Buckets purged".to_owned()) } else if args[0] == "connections" { // Purge connection table - let opt_connection_manager = self.network_manager()?.opt_connection_manager(); + let opt_connection_manager = registry.network_manager().opt_connection_manager(); if let Some(connection_manager) = &opt_connection_manager { connection_manager.shutdown().await; } // Eliminate last_connections from routing table entries - self.network_manager()? - .routing_table() - .purge_last_connections(); + registry.routing_table().purge_last_connections(); if let Some(connection_manager) = &opt_connection_manager { connection_manager @@ -852,9 +861,10 @@ impl VeilidAPI { Ok("Connections purged".to_owned()) } else if args[0] == "routes" { // Purge route spec store - self.inner.lock().debug_cache.imported_routes.clear(); - let rss = self.network_manager()?.routing_table().route_spec_store(); - match rss.purge().await { + self.with_debug_cache(|dc| { + dc.imported_routes.clear(); + }); + match registry.routing_table().route_spec_store().purge().await { Ok(_) => Ok("Routes purged".to_owned()), Err(e) => Ok(format!("Routes purged but failed to save: {}", e)), } @@ -902,18 +912,18 @@ impl VeilidAPI { async fn debug_contact(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); - let network_manager = self.network_manager()?; - let routing_table = network_manager.routing_table(); + let registry = self.core_context()?.registry(); let node_ref = get_debug_argument_at( &args, 0, "debug_contact", "node_ref", - get_filtered_node_ref(routing_table), + get_filtered_node_ref(registry.clone()), )?; - let cm = network_manager + let cm = registry + .network_manager() .get_node_contact_method(node_ref) .map_err(VeilidAPIError::internal)?; @@ -921,9 +931,8 @@ impl VeilidAPI { } async fn debug_resolve(&self, args: String) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); - let Some(_rpc) = 
netman.opt_rpc_processor() else { + let registry = self.core_context()?.registry(); + if !registry.attachment_manager().is_attached() { apibail_internal!("Must be attached first"); }; @@ -934,10 +943,11 @@ impl VeilidAPI { 0, "debug_resolve", "destination", - self.clone().get_destination(routing_table.clone()), + self.clone().get_destination(registry.clone()), ) .await?; + let routing_table = registry.routing_table(); match &dest { Destination::Direct { node: target, @@ -954,7 +964,7 @@ impl VeilidAPI { } => Ok(format!( "Destination: {:#?}\nTarget Entry:\n{}\nRelay Entry:\n{}\n", &dest, - routing_table.clone().debug_info_entry(target.clone()), + routing_table.debug_info_entry(target.clone()), routing_table.debug_info_entry(relay.unfiltered()) )), Destination::PrivateRoute { @@ -965,9 +975,8 @@ impl VeilidAPI { } async fn debug_ping(&self, args: String) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); - let Some(rpc) = netman.opt_rpc_processor() else { + let registry = self.core_context()?.registry(); + if !registry.attachment_manager().is_attached() { apibail_internal!("Must be attached first"); }; @@ -978,12 +987,13 @@ impl VeilidAPI { 0, "debug_ping", "destination", - self.clone().get_destination(routing_table), + self.clone().get_destination(registry.clone()), ) .await?; // Send a StatusQ - let out = match rpc + let rpc_processor = registry.rpc_processor(); + let out = match rpc_processor .rpc_call_status(dest) .await .map_err(VeilidAPIError::internal)? 
@@ -998,9 +1008,8 @@ impl VeilidAPI { } async fn debug_app_message(&self, args: String) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); - let Some(rpc) = netman.opt_rpc_processor() else { + let registry = self.core_context()?.registry(); + if !registry.attachment_manager().is_attached() { apibail_internal!("Must be attached first"); }; @@ -1011,15 +1020,17 @@ impl VeilidAPI { arg, "debug_app_message", "destination", - self.clone().get_destination(routing_table), + self.clone().get_destination(registry.clone()), ) .await?; let data = get_debug_argument(&rest, "debug_app_message", "data", get_data)?; let data_len = data.len(); - // Send a AppMessage - let out = match rpc + // Send an AppMessage + let rpc_processor = registry.rpc_processor(); + + let out = match rpc_processor .rpc_call_app_message(dest, data) .await .map_err(VeilidAPIError::internal)? @@ -1034,9 +1045,8 @@ impl VeilidAPI { } async fn debug_app_call(&self, args: String) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); - let Some(rpc) = netman.opt_rpc_processor() else { + let registry = self.core_context()?.registry(); + if !registry.attachment_manager().is_attached() { apibail_internal!("Must be attached first"); }; @@ -1047,15 +1057,17 @@ impl VeilidAPI { arg, "debug_app_call", "destination", - self.clone().get_destination(routing_table), + self.clone().get_destination(registry.clone()), ) .await?; let data = get_debug_argument(&rest, "debug_app_call", "data", get_data)?; let data_len = data.len(); - // Send a AppMessage - let out = match rpc + // Send an AppCall + let rpc_processor = registry.rpc_processor(); + + let out = match rpc_processor .rpc_call_app_call(dest, data) .await .map_err(VeilidAPIError::internal)? 
@@ -1074,8 +1086,8 @@ impl VeilidAPI { } async fn debug_app_reply(&self, args: String) -> VeilidAPIResult { - let netman = self.network_manager()?; - let Some(rpc) = netman.opt_rpc_processor() else { + let registry = self.core_context()?.registry(); + if !registry.attachment_manager().is_attached() { apibail_internal!("Must be attached first"); }; @@ -1087,7 +1099,9 @@ impl VeilidAPI { let data = get_debug_argument(&rest, "debug_app_reply", "data", get_data)?; (call_id, data) } else { - let call_id = rpc + let rpc_processor = registry.rpc_processor(); + + let call_id = rpc_processor .get_app_call_ids() .first() .cloned() @@ -1109,8 +1123,8 @@ impl VeilidAPI { async fn debug_route_allocate(&self, args: Vec) -> VeilidAPIResult { // [ord|*ord] [rel] [] [in|out] [avoid_node_id] - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let config = self.config().unwrap(); let default_route_hop_count = { @@ -1167,8 +1181,8 @@ impl VeilidAPI { } async fn debug_route_release(&self, args: Vec) -> VeilidAPIResult { // - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let route_id = get_debug_argument_at( @@ -1176,21 +1190,21 @@ impl VeilidAPI { 1, "debug_route", "route_id", - get_route_id(rss.clone(), true, true), + get_route_id(registry.clone(), true, true), )?; // Release route let out = match rss.release_route(route_id) { true => { // release imported - let mut inner = self.inner.lock(); - let dc = &mut inner.debug_cache; - for (n, ir) in dc.imported_routes.iter().enumerate() { - if *ir == route_id { - dc.imported_routes.remove(n); - break; + self.with_debug_cache(|dc| { + for (n, ir) in dc.imported_routes.iter().enumerate() { + 
if *ir == route_id { + dc.imported_routes.remove(n); + break; + } } - } + }); "Released".to_owned() } false => "Route does not exist".to_owned(), @@ -1200,8 +1214,8 @@ impl VeilidAPI { } async fn debug_route_publish(&self, args: Vec) -> VeilidAPIResult { // [full] - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let route_id = get_debug_argument_at( @@ -1209,7 +1223,7 @@ impl VeilidAPI { 1, "debug_route", "route_id", - get_route_id(rss.clone(), true, false), + get_route_id(registry.clone(), true, false), )?; let full = { if args.len() > 2 { @@ -1252,8 +1266,8 @@ impl VeilidAPI { } async fn debug_route_unpublish(&self, args: Vec) -> VeilidAPIResult { // - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let route_id = get_debug_argument_at( @@ -1261,7 +1275,7 @@ impl VeilidAPI { 1, "debug_route", "route_id", - get_route_id(rss.clone(), true, false), + get_route_id(registry.clone(), true, false), )?; // Unpublish route @@ -1274,8 +1288,8 @@ impl VeilidAPI { } async fn debug_route_print(&self, args: Vec) -> VeilidAPIResult { // - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let route_id = get_debug_argument_at( @@ -1283,7 +1297,7 @@ impl VeilidAPI { 1, "debug_route", "route_id", - get_route_id(rss.clone(), true, true), + get_route_id(registry.clone(), true, true), )?; match rss.debug_route(&route_id) { @@ -1293,8 +1307,8 @@ impl VeilidAPI { } async fn debug_route_list(&self, _args: Vec) -> VeilidAPIResult { // - let netman = 
self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let routes = rss.list_allocated_routes(|k, _| Some(*k)); @@ -1316,29 +1330,33 @@ impl VeilidAPI { } async fn debug_route_import(&self, args: Vec) -> VeilidAPIResult { // + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); + let rss = routing_table.route_spec_store(); let blob = get_debug_argument_at(&args, 1, "debug_route", "blob", get_string)?; let blob_dec = BASE64URL_NOPAD .decode(blob.as_bytes()) .map_err(VeilidAPIError::generic)?; - let rss = self.routing_table()?.route_spec_store(); + let route_id = rss .import_remote_private_route_blob(blob_dec) .map_err(VeilidAPIError::generic)?; - let mut inner = self.inner.lock(); - let dc = &mut inner.debug_cache; - let n = dc.imported_routes.len(); - let out = format!("Private route #{} imported: {}", n, route_id); - dc.imported_routes.push(route_id); + let out = self.with_debug_cache(|dc| { + let n = dc.imported_routes.len(); + let out = format!("Private route #{} imported: {}", n, route_id); + dc.imported_routes.push(route_id); + out + }); Ok(out) } async fn debug_route_test(&self, args: Vec) -> VeilidAPIResult { // - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); let route_id = get_debug_argument_at( @@ -1346,7 +1364,7 @@ impl VeilidAPI { 1, "debug_route", "route_id", - get_route_id(rss.clone(), true, true), + get_route_id(registry.clone(), true, true), )?; let success = rss @@ -1391,7 +1409,8 @@ impl VeilidAPI { async fn debug_record_list(&self, args: Vec) -> VeilidAPIResult { // - let storage_manager = self.storage_manager()?; + let registry = self.core_context()?.registry(); + let 
storage_manager = registry.storage_manager(); let scope = get_debug_argument_at(&args, 1, "debug_record_list", "scope", get_string)?; let out = match scope.as_str() { @@ -1422,7 +1441,8 @@ impl VeilidAPI { async fn debug_record_purge(&self, args: Vec) -> VeilidAPIResult { // [bytes] - let storage_manager = self.storage_manager()?; + let registry = self.core_context()?.registry(); + let storage_manager = registry.storage_manager(); let scope = get_debug_argument_at(&args, 1, "debug_record_purge", "scope", get_string)?; let bytes = get_debug_argument_at(&args, 2, "debug_record_purge", "bytes", get_number).ok(); @@ -1435,8 +1455,6 @@ impl VeilidAPI { } async fn debug_record_create(&self, args: Vec) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); let crypto = self.crypto()?; let schema = get_debug_argument_at( @@ -1453,7 +1471,7 @@ impl VeilidAPI { 2, "debug_record_create", "kind", - get_crypto_system_version(crypto.clone()), + get_crypto_system_version(&crypto), ) .unwrap_or_else(|_| crypto.best()); @@ -1462,7 +1480,7 @@ impl VeilidAPI { 3, "debug_record_create", "safety_selection", - get_safety_selection(routing_table), + get_safety_selection(self.core_context()?.registry()), ) .ok(); @@ -1484,9 +1502,9 @@ impl VeilidAPI { }; // Save routing context for record - let mut inner = self.inner.lock(); - let dc = &mut inner.debug_cache; - dc.opened_record_contexts.insert(*record.key(), rc); + self.with_debug_cache(|dc| { + dc.opened_record_contexts.insert(*record.key(), rc); + }); Ok(format!( "Created: {} {}:{}\n{:?}", @@ -1498,15 +1516,14 @@ impl VeilidAPI { } async fn debug_record_open(&self, args: Vec) -> VeilidAPIResult { - let netman = self.network_manager()?; - let routing_table = netman.routing_table(); + let registry = self.core_context()?.registry(); let (key, ss) = get_debug_argument_at( &args, 1, "debug_record_open", "key", - get_dht_key(routing_table), + get_dht_key(registry.clone()), )?; let writer = 
get_debug_argument_at(&args, 2, "debug_record_open", "writer", get_keypair).ok(); @@ -1529,9 +1546,9 @@ impl VeilidAPI { }; // Save routing context for record - let mut inner = self.inner.lock(); - let dc = &mut inner.debug_cache; - dc.opened_record_contexts.insert(*record.key(), rc); + self.with_debug_cache(|dc| { + dc.opened_record_contexts.insert(*record.key(), rc); + }); Ok(format!("Opened: {} : {:?}", key, record)) } @@ -1670,7 +1687,8 @@ impl VeilidAPI { } async fn debug_record_info(&self, args: Vec) -> VeilidAPIResult { - let storage_manager = self.storage_manager()?; + let registry = self.core_context()?.registry(); + let storage_manager = registry.storage_manager(); let key = get_debug_argument_at(&args, 1, "debug_record_info", "key", get_dht_key_no_safety)?; @@ -1971,7 +1989,8 @@ impl VeilidAPI { async fn debug_punish_list(&self, _args: Vec) -> VeilidAPIResult { // - let network_manager = self.network_manager()?; + let registry = self.core_context()?.registry(); + let network_manager = registry.network_manager(); let address_filter = network_manager.address_filter(); let out = format!("Address filter punishments:\n{:#?}", address_filter); @@ -1980,7 +1999,8 @@ impl VeilidAPI { async fn debug_punish_clear(&self, _args: Vec) -> VeilidAPIResult { // - let network_manager = self.network_manager()?; + let registry = self.core_context()?.registry(); + let network_manager = registry.network_manager(); let address_filter = network_manager.address_filter(); address_filter.clear_punishments(); @@ -2192,14 +2212,14 @@ TableDB Operations: fn get_destination( self, - routing_table: RoutingTable, + registry: VeilidComponentRegistry, ) -> impl FnOnce(&str) -> SendPinBoxFuture> { move |text| { let text = text.to_owned(); Box::pin(async move { // Safety selection let (text, ss) = if let Some((first, second)) = text.split_once('+') { - let ss = get_safety_selection(routing_table.clone())(second)?; + let ss = get_safety_selection(registry.clone())(second)?; (first, 
Some(ss)) } else { (text.as_str(), None) @@ -2208,18 +2228,20 @@ TableDB Operations: return None; } if &text[0..1] == "#" { + let routing_table = registry.routing_table(); let rss = routing_table.route_spec_store(); // Private route let text = &text[1..]; - let private_route = - if let Some(prid) = get_route_id(rss.clone(), false, true)(text) { - rss.best_remote_private_route(&prid)? - } else { - let mut inner = self.inner.lock(); - let dc = &mut inner.debug_cache; - let n = get_number(text)?; + let private_route = if let Some(prid) = + get_route_id(registry.clone(), false, true)(text) + { + rss.best_remote_private_route(&prid)? + } else { + let n = get_number(text)?; + + self.with_debug_cache(|dc| { let prid = *dc.imported_routes.get(n)?; let Some(private_route) = rss.best_remote_private_route(&prid) else { // Remove imported route @@ -2227,8 +2249,9 @@ TableDB Operations: info!("removed dead imported route {}", n); return None; }; - private_route - }; + Some(private_route) + })? + }; Some(Destination::private_route( private_route, @@ -2236,12 +2259,10 @@ TableDB Operations: )) } else if let Some((first, second)) = text.split_once('@') { // Relay - let relay_nr = resolve_filtered_node_ref( - routing_table.clone(), - ss.unwrap_or_default(), - )(second) - .await?; - let target_nr = get_node_ref(routing_table)(first)?; + let relay_nr = + resolve_filtered_node_ref(registry.clone(), ss.unwrap_or_default())(second) + .await?; + let target_nr = get_node_ref(registry.clone())(first)?; let mut d = Destination::relay(relay_nr, target_nr); if let Some(ss) = ss { @@ -2252,7 +2273,7 @@ TableDB Operations: } else { // Direct let target_nr = - resolve_filtered_node_ref(routing_table, ss.unwrap_or_default())(text) + resolve_filtered_node_ref(registry.clone(), ss.unwrap_or_default())(text) .await?; let mut d = Destination::direct(target_nr); @@ -2273,14 +2294,11 @@ TableDB Operations: key: &str, arg: usize, ) -> VeilidAPIResult<(TypedKey, RoutingContext)> { - let mut inner = 
self.inner.lock(); - let dc = &mut inner.debug_cache; - let key = match get_debug_argument_at(args, arg, context, key, get_dht_key_no_safety) .ok() .or_else(|| { // If unspecified, use the most recent key opened or created - dc.opened_record_contexts.back().map(|kv| kv.0).copied() + self.with_debug_cache(|dc| dc.opened_record_contexts.back().map(|kv| kv.0).copied()) }) { Some(k) => k, None => { @@ -2289,7 +2307,9 @@ TableDB Operations: }; // Get routing context for record - let Some(rc) = dc.opened_record_contexts.get(&key).cloned() else { + + let Some(rc) = self.with_debug_cache(|dc| dc.opened_record_contexts.get(&key).cloned()) + else { apibail_missing_argument!("key is not opened", "key"); }; diff --git a/veilid-core/src/veilid_api/error.rs b/veilid-core/src/veilid_api/error.rs index bbe4cf17..fd564dcf 100644 --- a/veilid-core/src/veilid_api/error.rs +++ b/veilid-core/src/veilid_api/error.rs @@ -107,7 +107,11 @@ macro_rules! apibail_already_initialized { #[derive( ThisError, Clone, Debug, PartialOrd, PartialEq, Eq, Ord, Serialize, Deserialize, JsonSchema, )] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(into_wasm_abi) +)] #[serde(tag = "kind")] pub enum VeilidAPIError { #[error("Not initialized")] diff --git a/veilid-core/src/veilid_api/json_api/process.rs b/veilid-core/src/veilid_api/json_api/process.rs index b35c8c4b..af84a7d7 100644 --- a/veilid-core/src/veilid_api/json_api/process.rs +++ b/veilid-core/src/veilid_api/json_api/process.rs @@ -59,7 +59,7 @@ struct JsonRequestProcessorInner { routing_contexts: BTreeMap, table_dbs: BTreeMap, table_db_transactions: BTreeMap, - crypto_systems: BTreeMap, + crypto_kinds: BTreeMap, } #[derive(Clone)] @@ -76,7 +76,7 @@ impl JsonRequestProcessor { routing_contexts: Default::default(), table_dbs: Default::default(), table_db_transactions: Default::default(), - crypto_systems: Default::default(), + 
crypto_kinds: Default::default(), })), } } @@ -179,18 +179,18 @@ impl JsonRequestProcessor { } // CryptoSystem - fn add_crypto_system(&self, csv: CryptoSystemVersion) -> u32 { + fn add_crypto_system(&self, csv: CryptoKind) -> u32 { let mut inner = self.inner.lock(); let mut next_id: u32 = 1; - while inner.crypto_systems.contains_key(&next_id) { + while inner.crypto_kinds.contains_key(&next_id) { next_id += 1; } - inner.crypto_systems.insert(next_id, csv); + inner.crypto_kinds.insert(next_id, csv); next_id } - fn lookup_crypto_system(&self, id: u32, cs_id: u32) -> Result { + fn lookup_crypto_system(&self, id: u32, cs_id: u32) -> Result { let inner = self.inner.lock(); - let Some(crypto_system) = inner.crypto_systems.get(&cs_id).cloned() else { + let Some(crypto_kind) = inner.crypto_kinds.get(&cs_id).cloned() else { return Err(Response { id, op: ResponseOp::CryptoSystem(CryptoSystemResponse { @@ -199,11 +199,11 @@ impl JsonRequestProcessor { }), }); }; - Ok(crypto_system) + Ok(crypto_kind) } fn release_crypto_system(&self, id: u32) -> i32 { let mut inner = self.inner.lock(); - if inner.crypto_systems.remove(&id).is_none() { + if inner.crypto_kinds.remove(&id).is_none() { return 0; } 1 @@ -215,7 +215,7 @@ impl JsonRequestProcessor { async fn parse_target(&self, s: String) -> VeilidAPIResult { // Is this a route id? if let Ok(rrid) = RouteId::from_str(&s) { - let routing_table = self.api.routing_table()?; + let routing_table = self.api.core_context()?.routing_table(); let rss = routing_table.route_spec_store(); // Is this a valid remote route id? 
(can't target allocated routes) @@ -467,7 +467,7 @@ impl JsonRequestProcessor { #[instrument(level = "trace", target = "json_api", skip_all)] pub async fn process_crypto_system_request( &self, - csv: CryptoSystemVersion, + csv: &CryptoSystemGuard<'_>, csr: CryptoSystemRequest, ) -> CryptoSystemResponse { let cs_op = match csr.cs_op { @@ -691,7 +691,7 @@ impl JsonRequestProcessor { Err(e) => { return Response { id, - op: ResponseOp::OpenTableDb { + op: ResponseOp::DeleteTableDb { result: to_json_api_result(Err(e)), }, } @@ -741,11 +741,31 @@ impl JsonRequestProcessor { kind, ) }) - .map(|csv| self.add_crypto_system(csv)), + .map(|csv| self.add_crypto_system(csv.kind())), ), } } RequestOp::BestCryptoSystem => { + let crypto = match self.api.crypto() { + Ok(v) => v, + Err(e) => { + return Response { + id, + op: ResponseOp::BestCryptoSystem { + result: to_json_api_result(Err(e)), + }, + } + } + }; + ResponseOp::BestCryptoSystem { + result: to_json_api_result(Ok(self.add_crypto_system(crypto.best().kind()))), + } + } + RequestOp::CryptoSystem(csr) => { + let crypto_kind = match self.lookup_crypto_system(id, csr.cs_id) { + Ok(v) => v, + Err(e) => return e, + }; let crypto = match self.api.crypto() { Ok(v) => v, Err(e) => { @@ -757,16 +777,9 @@ impl JsonRequestProcessor { } } }; - ResponseOp::BestCryptoSystem { - result: to_json_api_result(Ok(self.add_crypto_system(crypto.best()))), - } - } - RequestOp::CryptoSystem(csr) => { - let csv = match self.lookup_crypto_system(id, csr.cs_id) { - Ok(v) => v, - Err(e) => return e, - }; - ResponseOp::CryptoSystem(self.process_crypto_system_request(csv, csr).await) + let csv = crypto.get(crypto_kind).unwrap(); + + ResponseOp::CryptoSystem(self.process_crypto_system_request(&csv, csr).await) } RequestOp::VerifySignatures { node_ids, diff --git a/veilid-core/src/veilid_api/mod.rs b/veilid-core/src/veilid_api/mod.rs index 087c9e89..f6d592f3 100644 --- a/veilid-core/src/veilid_api/mod.rs +++ b/veilid-core/src/veilid_api/mod.rs @@ -23,11 
+23,8 @@ pub use types::*; use crate::*; -use attachment_manager::AttachmentManager; use core_context::{api_shutdown, VeilidCoreContext}; -use network_manager::NetworkManager; -use routing_table::{DirectionSet, RouteSpecStore, RoutingTable}; +use routing_table::{DirectionSet, RouteSpecStore}; use rpc_processor::*; -use storage_manager::StorageManager; ///////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index ec00593f..f366cfd0 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -143,7 +143,7 @@ impl RoutingContext { event!(target: "veilid_api", Level::DEBUG, "RoutingContext::get_destination(self: {:?}, target: {:?})", self, target); - let rpc_processor = self.api.rpc_processor()?; + let rpc_processor = self.api.core_context()?.rpc_processor(); rpc_processor .resolve_target_to_destination(target, self.unlocked_inner.safety_selection) .await @@ -166,7 +166,7 @@ impl RoutingContext { event!(target: "veilid_api", Level::DEBUG, "RoutingContext::app_call(self: {:?}, target: {:?}, message: {:?})", self, target, message); - let rpc_processor = self.api.rpc_processor()?; + let rpc_processor = self.api.core_context()?.rpc_processor(); // Get destination let dest = self.get_destination(target).await?; @@ -200,7 +200,7 @@ impl RoutingContext { event!(target: "veilid_api", Level::DEBUG, "RoutingContext::app_message(self: {:?}, target: {:?}, message: {:?})", self, target, message); - let rpc_processor = self.api.rpc_processor()?; + let rpc_processor = self.api.core_context()?.rpc_processor(); // Get destination let dest = self.get_destination(target).await?; @@ -227,7 +227,7 @@ impl RoutingContext { /// Builds the record key for a given schema and owner public key #[instrument(target = "veilid_api", level = "debug", ret, err)] - pub async fn get_dht_record_key( + 
pub fn get_dht_record_key( &self, schema: DHTSchema, owner_key: &PublicKey, @@ -239,10 +239,8 @@ impl RoutingContext { let kind = kind.unwrap_or(best_crypto_kind()); Crypto::validate_crypto_kind(kind)?; - let storage_manager = self.api.storage_manager()?; - storage_manager - .get_record_key(kind, schema, owner_key) - .await + let storage_manager = self.api.core_context()?.storage_manager(); + storage_manager.get_record_key(kind, schema, owner_key) } /// Creates a new DHT record @@ -267,7 +265,8 @@ impl RoutingContext { let kind = kind.unwrap_or(best_crypto_kind()); Crypto::validate_crypto_kind(kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager .create_record(kind, schema, owner, self.unlocked_inner.safety_selection) .await @@ -294,7 +293,8 @@ impl RoutingContext { "RoutingContext::open_dht_record(self: {:?}, key: {:?}, default_writer: {:?})", self, key, default_writer); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager .open_record(key, default_writer, self.unlocked_inner.safety_selection) .await @@ -309,7 +309,8 @@ impl RoutingContext { "RoutingContext::close_dht_record(self: {:?}, key: {:?})", self, key); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.close_record(key).await } @@ -324,7 +325,8 @@ impl RoutingContext { "RoutingContext::delete_dht_record(self: {:?}, key: {:?})", self, key); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.delete_record(key).await } @@ -345,7 +347,8 @@ impl RoutingContext { "RoutingContext::get_dht_value(self: {:?}, key: {:?}, subkey: {:?}, 
force_refresh: {:?})", self, key, subkey, force_refresh); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.get_value(key, subkey, force_refresh).await } @@ -368,7 +371,8 @@ impl RoutingContext { "RoutingContext::set_dht_value(self: {:?}, key: {:?}, subkey: {:?}, data: len={}, writer: {:?})", self, key, subkey, data.len(), writer); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.set_value(key, subkey, data, writer).await } @@ -404,7 +408,8 @@ impl RoutingContext { "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {}, count: {})", self, key, subkeys, expiration, count); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager .watch_values(key, subkeys, expiration, count) .await @@ -429,7 +434,8 @@ impl RoutingContext { "RoutingContext::cancel_dht_watch(self: {:?}, key: {:?}, subkeys: {:?}", self, key, subkeys); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.cancel_watch_values(key, subkeys).await } @@ -483,7 +489,8 @@ impl RoutingContext { "RoutingContext::inspect_dht_record(self: {:?}, key: {:?}, subkeys: {:?}, scope: {:?})", self, key, subkeys, scope); Crypto::validate_crypto_kind(key.kind)?; - let storage_manager = self.api.storage_manager()?; + + let storage_manager = self.api.core_context()?.storage_manager(); storage_manager.inspect_record(key, subkeys, scope).await } diff --git a/veilid-core/src/veilid_api/tests/fixtures.rs b/veilid-core/src/veilid_api/tests/fixtures.rs index 8ba61d49..a71075d1 100644 --- 
a/veilid-core/src/veilid_api/tests/fixtures.rs +++ b/veilid-core/src/veilid_api/tests/fixtures.rs @@ -241,6 +241,11 @@ pub fn fix_veilidconfiginner() -> VeilidConfigInner { privacy: VeilidConfigPrivacy { country_code_denylist: vec![CountryCode([b'N', b'Z'])], }, + #[cfg(feature = "virtual-network")] + virtual_network: VeilidConfigVirtualNetwork { + enabled: false, + server_address: "".to_owned(), + }, }, } } diff --git a/veilid-core/src/veilid_api/types/aligned_u64.rs b/veilid-core/src/veilid_api/types/aligned_u64.rs index b70a56e3..efaff0d5 100644 --- a/veilid-core/src/veilid_api/types/aligned_u64.rs +++ b/veilid-core/src/veilid_api/types/aligned_u64.rs @@ -21,13 +21,16 @@ macro_rules! aligned_u64_type { Deserialize, JsonSchema, )] - #[cfg_attr(target_arch = "wasm32", derive(Tsify))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] #[repr(C, align(8))] #[serde(transparent)] pub struct $name( #[serde(with = "as_human_string")] #[schemars(with = "String")] - #[cfg_attr(target_arch = "wasm32", tsify(type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(type = "string") + )] u64, ); diff --git a/veilid-core/src/veilid_api/types/app_message_call.rs b/veilid-core/src/veilid_api/types/app_message_call.rs index a3c09bae..1885a240 100644 --- a/veilid-core/src/veilid_api/types/app_message_call.rs +++ b/veilid-core/src/veilid_api/types/app_message_call.rs @@ -2,22 +2,31 @@ use super::*; /// Direct statement blob passed to hosting application for processing. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidAppMessage { #[serde(with = "as_human_opt_string")] #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional, type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(optional, type = "string") + )] sender: Option, #[serde(with = "as_human_opt_string")] #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional, type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(optional, type = "string") + )] route_id: Option, - #[cfg_attr(not(target_arch = "wasm32"), serde(with = "as_human_base64"))] + #[cfg_attr( + not(all(target_arch = "wasm32", target_os = "unknown")), + serde(with = "as_human_base64") + )] #[schemars(with = "String")] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), serde(with = "serde_bytes"), tsify(type = "Uint8Array") )] @@ -51,22 +60,28 @@ impl VeilidAppMessage { /// Direct question blob passed to hosting application for processing to send an eventual AppReply. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidAppCall { #[serde(with = "as_human_opt_string")] #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] sender: Option, #[serde(with = "as_human_opt_string")] #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional, type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(optional, type = "string") + )] route_id: Option, - #[cfg_attr(not(target_arch = "wasm32"), serde(with = "as_human_base64"))] + #[cfg_attr( + not(all(target_arch = "wasm32", target_os = "unknown")), + serde(with = "as_human_base64") + )] #[schemars(with = "String")] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), serde(with = "serde_bytes"), tsify(type = "Uint8Array") )] diff --git a/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs b/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs index 9d619652..d4422279 100644 --- a/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs +++ b/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs @@ -3,7 +3,7 @@ use super::*; /// DHT Record Descriptor #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi) )] @@ -17,7 +17,7 @@ pub struct DHTRecordDescriptor { /// If this key is being created: Some(the secret key of the owner) /// If this key is just being opened: None #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = 
"unknown"), tsify(optional))] owner_secret: Option, /// The schema in use associated with the key schema: DHTSchema, diff --git a/veilid-core/src/veilid_api/types/dht/dht_record_report.rs b/veilid-core/src/veilid_api/types/dht/dht_record_report.rs index f1fce660..4bf81b99 100644 --- a/veilid-core/src/veilid_api/types/dht/dht_record_report.rs +++ b/veilid-core/src/veilid_api/types/dht/dht_record_report.rs @@ -3,7 +3,7 @@ use super::*; /// DHT Record Report #[derive(Default, Clone, PartialOrd, Ord, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi) )] @@ -67,7 +67,7 @@ impl fmt::Debug for DHTRecordReport { Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi, namespace) )] diff --git a/veilid-core/src/veilid_api/types/dht/mod.rs b/veilid-core/src/veilid_api/types/dht/mod.rs index 693a1c68..8a55b0ac 100644 --- a/veilid-core/src/veilid_api/types/dht/mod.rs +++ b/veilid-core/src/veilid_api/types/dht/mod.rs @@ -13,10 +13,10 @@ pub use value_data::*; pub use value_subkey_range_set::*; /// Value subkey -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type ValueSubkey = u32; /// Value sequence number -#[cfg_attr(target_arch = "wasm32", declare)] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] pub type ValueSeqNum = u32; pub(crate) fn debug_seqs(seqs: &[ValueSeqNum]) -> String { diff --git a/veilid-core/src/veilid_api/types/dht/schema/dflt.rs b/veilid-core/src/veilid_api/types/dht/schema/dflt.rs index efb5bce2..7dfefab8 100644 --- a/veilid-core/src/veilid_api/types/dht/schema/dflt.rs +++ b/veilid-core/src/veilid_api/types/dht/schema/dflt.rs @@ -3,7 
+3,11 @@ use crate::storage_manager::{MAX_RECORD_DATA_SIZE, MAX_SUBKEY_SIZE}; /// Default DHT Schema (DFLT) #[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(from_wasm_abi) +)] pub struct DHTSchemaDFLT { /// Owner subkey count o_cnt: u16, diff --git a/veilid-core/src/veilid_api/types/dht/schema/mod.rs b/veilid-core/src/veilid_api/types/dht/schema/mod.rs index e384e49f..f60397c0 100644 --- a/veilid-core/src/veilid_api/types/dht/schema/mod.rs +++ b/veilid-core/src/veilid_api/types/dht/schema/mod.rs @@ -9,7 +9,11 @@ pub use smpl::*; /// Enum over all the supported DHT Schemas #[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, JsonSchema)] #[serde(tag = "kind")] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(from_wasm_abi) +)] pub enum DHTSchema { DFLT(DHTSchemaDFLT), SMPL(DHTSchemaSMPL), diff --git a/veilid-core/src/veilid_api/types/dht/schema/smpl.rs b/veilid-core/src/veilid_api/types/dht/schema/smpl.rs index 4bca6422..03d6b96c 100644 --- a/veilid-core/src/veilid_api/types/dht/schema/smpl.rs +++ b/veilid-core/src/veilid_api/types/dht/schema/smpl.rs @@ -3,7 +3,11 @@ use crate::storage_manager::{MAX_RECORD_DATA_SIZE, MAX_SUBKEY_SIZE}; /// Simple DHT Schema (SMPL) Member #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(from_wasm_abi) +)] pub struct DHTSchemaSMPLMember { /// Member key #[schemars(with = "String")] @@ -14,7 +18,11 @@ pub struct DHTSchemaSMPLMember { /// Simple DHT Schema (SMPL) #[derive(Debug, Clone, 
PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(from_wasm_abi) +)] pub struct DHTSchemaSMPL { /// Owner subkey count o_cnt: u16, diff --git a/veilid-core/src/veilid_api/types/dht/value_data.rs b/veilid-core/src/veilid_api/types/dht/value_data.rs index 2e4dca5a..6c7d0a82 100644 --- a/veilid-core/src/veilid_api/types/dht/value_data.rs +++ b/veilid-core/src/veilid_api/types/dht/value_data.rs @@ -2,16 +2,23 @@ use super::*; use veilid_api::VeilidAPIResult; #[derive(Clone, Default, PartialOrd, PartialEq, Eq, Ord, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(into_wasm_abi) +)] pub struct ValueData { /// An increasing sequence number to time-order the DHT record changes seq: ValueSeqNum, /// The contents of a DHT Record - #[cfg_attr(not(target_arch = "wasm32"), serde(with = "as_human_base64"))] + #[cfg_attr( + not(all(target_arch = "wasm32", target_os = "unknown")), + serde(with = "as_human_base64") + )] #[schemars(with = "String")] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), serde(with = "serde_bytes"), tsify(type = "Uint8Array") )] diff --git a/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs b/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs index 7f3b3150..23458a2c 100644 --- a/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs +++ b/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs @@ -6,7 +6,7 @@ use range_set_blaze::*; Clone, Default, Hash, PartialOrd, PartialEq, Eq, Ord, Serialize, Deserialize, JsonSchema, )] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), 
tsify(from_wasm_abi, into_wasm_abi) )] diff --git a/veilid-core/src/veilid_api/types/safety.rs b/veilid-core/src/veilid_api/types/safety.rs index 5f23f146..360f56ac 100644 --- a/veilid-core/src/veilid_api/types/safety.rs +++ b/veilid-core/src/veilid_api/types/safety.rs @@ -5,7 +5,7 @@ use super::*; Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi, namespace) )] @@ -27,7 +27,7 @@ impl Default for Sequencing { Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi, namespace) )] @@ -48,7 +48,7 @@ impl Default for Stability { Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi, namespace) )] @@ -79,11 +79,14 @@ impl Default for SafetySelection { #[derive( Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct SafetySpec { /// Preferred safety route set id if it still exists. #[schemars(with = "Option")] - #[cfg_attr(target_arch = "wasm32", tsify(optional, type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(optional, type = "string") + )] pub preferred_route: Option, /// Must be greater than 0. 
pub hop_count: usize, diff --git a/veilid-core/src/veilid_api/types/stats.rs b/veilid-core/src/veilid_api/types/stats.rs index ca856de4..5ec81501 100644 --- a/veilid-core/src/veilid_api/types/stats.rs +++ b/veilid-core/src/veilid_api/types/stats.rs @@ -2,7 +2,7 @@ use super::*; /// Measurement of communications latency to this node over all RPC questions #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct LatencyStats { /// fastest latency in the ROLLING_LATENCIES_SIZE last latencies pub fastest: TimestampDuration, @@ -25,7 +25,7 @@ impl fmt::Display for LatencyStats { /// Measurement of how much data has transferred to or from this node over a time span #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct TransferStats { /// total amount transferred ever pub total: ByteCount, @@ -50,7 +50,7 @@ impl fmt::Display for TransferStats { /// Transfer statistics from a node to our own (down) and #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct TransferStatsDownUp { pub down: TransferStats, pub up: TransferStats, @@ -66,7 +66,7 @@ impl fmt::Display for TransferStatsDownUp { /// Measurement of what states the node has been in over a time span #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct StateStats { /// total amount of time measured pub span: TimestampDuration, @@ -97,7 +97,7 @@ 
impl fmt::Display for StateStats { /// Measurement of what state reasons the node has been in over a time span #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct StateReasonStats { /// time spent dead due to being unable to send pub can_not_send: TimestampDuration, @@ -139,7 +139,7 @@ impl fmt::Display for StateReasonStats { /// Measurement of round-trip RPC question/answer performance #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct AnswerStats { /// total amount of time measured pub span: TimestampDuration, @@ -192,7 +192,7 @@ impl fmt::Display for AnswerStats { /// Statistics for RPC operations performed on a node #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct RPCStats { /// number of rpcs that have been sent in the total entry time range pub messages_sent: u32, @@ -263,7 +263,7 @@ impl fmt::Display for RPCStats { /// Statistics for a peer in the routing table #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct PeerStats { /// when the peer was added to the routing table pub time_added: Timestamp, diff --git a/veilid-core/src/veilid_api/types/veilid_log.rs b/veilid-core/src/veilid_api/types/veilid_log.rs index 8b195fc3..46d6a8c4 100644 --- a/veilid-core/src/veilid_api/types/veilid_log.rs +++ b/veilid-core/src/veilid_api/types/veilid_log.rs @@ -4,7 +4,11 @@ use 
super::*; #[derive( Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Copy, Serialize, Deserialize, JsonSchema, )] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(namespace))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(namespace) +)] pub enum VeilidLogLevel { Error = 1, Warn = 2, @@ -81,10 +85,10 @@ impl fmt::Display for VeilidLogLevel { } /// A VeilidCore log message with optional backtrace. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidLog { pub log_level: VeilidLogLevel, pub message: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub backtrace: Option, } diff --git a/veilid-core/src/veilid_api/types/veilid_state.rs b/veilid-core/src/veilid_api/types/veilid_state.rs index 5a9daad3..ff6afa01 100644 --- a/veilid-core/src/veilid_api/types/veilid_state.rs +++ b/veilid-core/src/veilid_api/types/veilid_state.rs @@ -3,7 +3,7 @@ use super::*; /// Attachment abstraction for network 'signal strength'. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, JsonSchema)] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(namespace, from_wasm_abi, into_wasm_abi) )] @@ -77,7 +77,7 @@ impl TryFrom<&str> for AttachmentState { /// Describe the attachment state of the Veilid node #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidStateAttachment { /// The overall quality of the routing table if attached, or the current state the attachment state machine. 
pub state: AttachmentState, @@ -94,11 +94,14 @@ pub struct VeilidStateAttachment { /// Describe a recently accessed peer #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct PeerTableData { /// The node ids used by this peer #[schemars(with = "Vec")] - #[cfg_attr(target_arch = "wasm32", tsify(type = "string[]"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(type = "string[]") + )] pub node_ids: Vec, /// The peer's human readable address. pub peer_address: String, @@ -108,7 +111,7 @@ pub struct PeerTableData { /// Describe the current network state of the Veilid node #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidStateNetwork { /// If the network has been started or not. pub started: bool, @@ -123,7 +126,7 @@ pub struct VeilidStateNetwork { /// Describe a private route change that has happened #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidRouteChange { /// If a private route that was allocated has died, it is listed here. #[schemars(with = "Vec")] @@ -138,7 +141,7 @@ pub struct VeilidRouteChange { /// add the ability to change the configuration or have it changed by the Veilid node /// itself during runtime. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidStateConfig { /// If the Veilid node configuration has changed the full new config will be here. 
pub config: VeilidConfigInner, @@ -146,7 +149,7 @@ pub struct VeilidStateConfig { /// Describe when DHT records have subkey values changed #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidValueChange { /// The DHT Record key that changed #[schemars(with = "String")] @@ -167,7 +170,11 @@ pub struct VeilidValueChange { /// An update from the veilid-core to the host application describing a change /// to the internal state of the Veilid node. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(into_wasm_abi) +)] #[serde(tag = "kind")] pub enum VeilidUpdate { Log(Box), @@ -184,7 +191,11 @@ from_impl_to_jsvalue!(VeilidUpdate); /// A queriable state of the internals of veilid-core. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(into_wasm_abi) +)] pub struct VeilidState { pub attachment: Box, pub network: Box, diff --git a/veilid-core/src/veilid_config.rs b/veilid-core/src/veilid_config.rs index abd7a1b4..5fc0431e 100644 --- a/veilid-core/src/veilid_config.rs +++ b/veilid-core/src/veilid_config.rs @@ -1,7 +1,7 @@ use crate::*; cfg_if::cfg_if! 
{ - if #[cfg(not(target_arch = "wasm32"))] { + if #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] { use sysinfo::System; use lazy_static::*; use directories::ProjectDirs; @@ -31,12 +31,12 @@ pub type ConfigCallback = Arc ConfigCallbackReturn + Send + Sy /// ``` /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigHTTPS { pub enabled: bool, pub listen_address: String, pub path: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub url: Option, // Fixed URL is not optional for TLS-based protocols and is dynamically validated } @@ -62,12 +62,12 @@ impl Default for VeilidConfigHTTPS { /// ``` /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigHTTP { pub enabled: bool, pub listen_address: String, pub path: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub url: Option, } @@ -89,7 +89,7 @@ impl Default for VeilidConfigHTTP { /// To be implemented... 
/// #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigApplication { pub https: VeilidConfigHTTPS, pub http: VeilidConfigHTTP, @@ -106,19 +106,19 @@ pub struct VeilidConfigApplication { /// ``` /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigUDP { pub enabled: bool, pub socket_pool_size: u32, pub listen_address: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub public_address: Option, } impl Default for VeilidConfigUDP { fn default() -> Self { cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let enabled = false; } else { let enabled = true; @@ -144,20 +144,20 @@ impl Default for VeilidConfigUDP { /// public_address: '' /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigTCP { pub connect: bool, pub listen: bool, pub max_connections: u32, pub listen_address: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub public_address: Option, } impl Default for VeilidConfigTCP { fn default() -> Self { cfg_if::cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let connect = false; let listen = false; } else { @@ -187,22 +187,21 @@ impl Default for VeilidConfigTCP { /// url: 'ws://localhost:5150/ws' /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] - +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigWS { pub connect: bool, pub listen: bool, pub max_connections: u32, pub listen_address: String, pub path: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub url: Option, } impl Default for VeilidConfigWS { fn default() -> Self { cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let connect = true; let listen = false; } else { @@ -233,15 +232,14 @@ impl Default for VeilidConfigWS { /// url: '' /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] - +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigWSS { pub connect: bool, pub listen: bool, pub max_connections: u32, pub listen_address: String, pub path: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub url: Option, // Fixed URL is not optional for TLS-based protocols and is dynamically validated } @@ -266,8 +264,7 @@ impl Default for VeilidConfigWSS { /// sort out which protocol is used for each peer connection. 
/// #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] - +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigProtocol { pub udp: VeilidConfigUDP, pub tcp: VeilidConfigTCP, @@ -297,6 +294,21 @@ impl Default for VeilidConfigPrivacy { } } +/// Virtual networking client support for testing/simulation purposes +/// +/// ```yaml +/// virtual_network: +/// enabled: false +/// server_address: "" +/// ``` +#[cfg(feature = "virtual-network")] +#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +pub struct VeilidConfigVirtualNetwork { + pub enabled: bool, + pub server_address: String, +} + /// Configure TLS. /// /// ```yaml @@ -306,7 +318,7 @@ impl Default for VeilidConfigPrivacy { /// connection_initial_timeout_ms: 2000 /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigTLS { pub certificate_path: String, pub private_key_path: String, @@ -323,7 +335,10 @@ impl Default for VeilidConfigTLS { } } -#[cfg_attr(target_arch = "wasm32", allow(unused_variables))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + allow(unused_variables) +)] pub fn get_default_ssl_directory( program_name: &str, organization: &str, @@ -331,7 +346,7 @@ pub fn get_default_ssl_directory( sub_path: &str, ) -> String { cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { "".to_owned() } else { use std::path::PathBuf; @@ -349,7 +364,7 @@ pub fn get_default_ssl_directory( /// If you change the count/fanout/timeout parameters, you may render your node inoperable /// for correct DHT operations. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigDHT { pub max_find_node_count: u32, pub resolve_node_timeout_ms: u32, @@ -378,7 +393,7 @@ pub struct VeilidConfigDHT { impl Default for VeilidConfigDHT { fn default() -> Self { cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let local_subkey_cache_size = 128; let local_max_subkey_cache_memory_mb = 256; let remote_subkey_cache_size = 64; @@ -433,13 +448,13 @@ impl Default for VeilidConfigDHT { /// Configure RPC. /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigRPC { pub concurrency: u32, pub queue_size: u32, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub max_timestamp_behind_ms: Option, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub max_timestamp_ahead_ms: Option, pub timeout_ms: u32, pub max_route_hop_count: u8, @@ -463,7 +478,7 @@ impl Default for VeilidConfigRPC { /// Configure the network routing table. /// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigRoutingTable { #[schemars(with = "Vec")] pub node_id: TypedKeyGroup, @@ -482,7 +497,7 @@ pub struct VeilidConfigRoutingTable { impl Default for VeilidConfigRoutingTable { fn default() -> Self { cfg_if::cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let bootstrap = vec!["ws://bootstrap.veilid.net:5150/ws".to_string()]; } else { let bootstrap = vec!["bootstrap.veilid.net".to_string()]; @@ -503,7 +518,7 @@ impl Default for VeilidConfigRoutingTable { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigNetwork { pub connection_initial_timeout_ms: u32, pub connection_inactivity_timeout_ms: u32, @@ -514,7 +529,7 @@ pub struct VeilidConfigNetwork { pub client_allowlist_timeout_ms: u32, pub reverse_connection_receipt_time_ms: u32, pub hole_punch_receipt_time_ms: u32, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub network_key_password: Option, pub routing_table: VeilidConfigRoutingTable, pub rpc: VeilidConfigRPC, @@ -527,6 +542,8 @@ pub struct VeilidConfigNetwork { pub protocol: VeilidConfigProtocol, #[cfg(feature = "geolocation")] pub privacy: VeilidConfigPrivacy, + #[cfg(feature = "virtual-network")] + pub virtual_network: VeilidConfigVirtualNetwork, } impl Default for VeilidConfigNetwork { @@ -553,18 +570,23 @@ impl Default for VeilidConfigNetwork { protocol: VeilidConfigProtocol::default(), #[cfg(feature = "geolocation")] privacy: VeilidConfigPrivacy::default(), + #[cfg(feature = "virtual-network")] + virtual_network: VeilidConfigVirtualNetwork::default(), } } } #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigTableStore { pub directory: String, pub delete: bool, } -#[cfg_attr(target_arch = "wasm32", allow(unused_variables))] +#[cfg_attr( + 
all(target_arch = "wasm32", target_os = "unknown"), + allow(unused_variables) +)] fn get_default_store_path( program_name: &str, organization: &str, @@ -572,7 +594,7 @@ fn get_default_store_path( store_type: &str, ) -> String { cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { "".to_owned() } else { use std::path::PathBuf; @@ -587,7 +609,7 @@ fn get_default_store_path( } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigBlockStore { pub directory: String, pub delete: bool, @@ -603,14 +625,14 @@ impl Default for VeilidConfigBlockStore { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigProtectedStore { pub allow_insecure_fallback: bool, pub always_use_insecure_storage: bool, pub directory: String, pub delete: bool, pub device_encryption_key_password: String, - #[cfg_attr(target_arch = "wasm32", tsify(optional))] + #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), tsify(optional))] pub new_device_encryption_key_password: Option, } @@ -628,14 +650,17 @@ impl Default for VeilidConfigProtectedStore { } #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigCapabilities { pub disable: Vec, } #[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] -#[cfg_attr(target_arch = "wasm32", tsify(namespace, from_wasm_abi))] +#[cfg_attr(all(target_arch = "wasm32", target_os = 
"unknown"), derive(Tsify))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(namespace, from_wasm_abi) +)] pub enum VeilidConfigLogLevel { Off, Error, @@ -724,7 +749,7 @@ impl fmt::Display for VeilidConfigLogLevel { /// Top level of the Veilid configuration tree #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidConfigInner { /// An identifier used to describe the program using veilid-core. /// Used to partition storage locations in places like the ProtectedStore. @@ -833,7 +858,7 @@ impl VeilidConfigInner { /// The configuration built for each Veilid node during API startup #[derive(Clone)] pub struct VeilidConfig { - update_cb: Option, + update_cb: UpdateCallback, inner: Arc>, } @@ -846,159 +871,144 @@ impl fmt::Debug for VeilidConfig { } } -impl Default for VeilidConfig { - fn default() -> Self { - Self::new() - } -} impl VeilidConfig { - fn new_inner() -> VeilidConfigInner { - VeilidConfigInner::default() - } - - pub(crate) fn new() -> Self { + pub(crate) fn new_from_config(config: VeilidConfigInner, update_cb: UpdateCallback) -> Self { Self { - update_cb: None, - inner: Arc::new(RwLock::new(Self::new_inner())), + update_cb, + inner: Arc::new(RwLock::new(config)), } } - pub(crate) fn setup_from_config( - &mut self, - config: VeilidConfigInner, - update_cb: UpdateCallback, - ) -> VeilidAPIResult<()> { - self.update_cb = Some(update_cb); - - self.with_mut(|inner| { - *inner = config; - Ok(()) - }) - } - - pub(crate) fn setup( - &mut self, + pub(crate) fn new_from_callback( cb: ConfigCallback, update_cb: UpdateCallback, - ) -> VeilidAPIResult<()> { - self.update_cb = Some(update_cb); - self.with_mut(|inner| { - // Simple config transformation - macro_rules! 
get_config { - ($key:expr) => { - let keyname = &stringify!($key)[6..]; - let v = cb(keyname.to_owned())?; - $key = match v.downcast() { - Ok(v) => *v, - Err(e) => { - apibail_generic!(format!( - "incorrect type for key {}: {:?}", - keyname, - type_name_of_val(&*e) - )) - } - }; - }; - } + ) -> VeilidAPIResult { + let mut inner = VeilidConfigInner::default(); - get_config!(inner.program_name); - get_config!(inner.namespace); - get_config!(inner.capabilities.disable); - get_config!(inner.table_store.directory); - get_config!(inner.table_store.delete); - get_config!(inner.block_store.directory); - get_config!(inner.block_store.delete); - get_config!(inner.protected_store.allow_insecure_fallback); - get_config!(inner.protected_store.always_use_insecure_storage); - get_config!(inner.protected_store.directory); - get_config!(inner.protected_store.delete); - get_config!(inner.protected_store.device_encryption_key_password); - get_config!(inner.protected_store.new_device_encryption_key_password); - get_config!(inner.network.connection_initial_timeout_ms); - get_config!(inner.network.connection_inactivity_timeout_ms); - get_config!(inner.network.max_connections_per_ip4); - get_config!(inner.network.max_connections_per_ip6_prefix); - get_config!(inner.network.max_connections_per_ip6_prefix_size); - get_config!(inner.network.max_connection_frequency_per_min); - get_config!(inner.network.client_allowlist_timeout_ms); - get_config!(inner.network.reverse_connection_receipt_time_ms); - get_config!(inner.network.hole_punch_receipt_time_ms); - get_config!(inner.network.network_key_password); - get_config!(inner.network.routing_table.node_id); - get_config!(inner.network.routing_table.node_id_secret); - get_config!(inner.network.routing_table.bootstrap); - get_config!(inner.network.routing_table.limit_over_attached); - get_config!(inner.network.routing_table.limit_fully_attached); - get_config!(inner.network.routing_table.limit_attached_strong); - 
get_config!(inner.network.routing_table.limit_attached_good); - get_config!(inner.network.routing_table.limit_attached_weak); - get_config!(inner.network.dht.max_find_node_count); - get_config!(inner.network.dht.resolve_node_timeout_ms); - get_config!(inner.network.dht.resolve_node_count); - get_config!(inner.network.dht.resolve_node_fanout); - get_config!(inner.network.dht.get_value_timeout_ms); - get_config!(inner.network.dht.get_value_count); - get_config!(inner.network.dht.get_value_fanout); - get_config!(inner.network.dht.set_value_timeout_ms); - get_config!(inner.network.dht.set_value_count); - get_config!(inner.network.dht.set_value_fanout); - get_config!(inner.network.dht.min_peer_count); - get_config!(inner.network.dht.min_peer_refresh_time_ms); - get_config!(inner.network.dht.validate_dial_info_receipt_time_ms); - get_config!(inner.network.dht.local_subkey_cache_size); - get_config!(inner.network.dht.local_max_subkey_cache_memory_mb); - get_config!(inner.network.dht.remote_subkey_cache_size); - get_config!(inner.network.dht.remote_max_records); - get_config!(inner.network.dht.remote_max_subkey_cache_memory_mb); - get_config!(inner.network.dht.remote_max_storage_space_mb); - get_config!(inner.network.dht.public_watch_limit); - get_config!(inner.network.dht.member_watch_limit); - get_config!(inner.network.dht.max_watch_expiration_ms); - get_config!(inner.network.rpc.concurrency); - get_config!(inner.network.rpc.queue_size); - get_config!(inner.network.rpc.max_timestamp_behind_ms); - get_config!(inner.network.rpc.max_timestamp_ahead_ms); - get_config!(inner.network.rpc.timeout_ms); - get_config!(inner.network.rpc.max_route_hop_count); - get_config!(inner.network.rpc.default_route_hop_count); - get_config!(inner.network.upnp); - get_config!(inner.network.detect_address_changes); - get_config!(inner.network.restricted_nat_retries); - get_config!(inner.network.tls.certificate_path); - get_config!(inner.network.tls.private_key_path); - 
get_config!(inner.network.tls.connection_initial_timeout_ms); - get_config!(inner.network.application.https.enabled); - get_config!(inner.network.application.https.listen_address); - get_config!(inner.network.application.https.path); - get_config!(inner.network.application.https.url); - get_config!(inner.network.application.http.enabled); - get_config!(inner.network.application.http.listen_address); - get_config!(inner.network.application.http.path); - get_config!(inner.network.application.http.url); - get_config!(inner.network.protocol.udp.enabled); - get_config!(inner.network.protocol.udp.socket_pool_size); - get_config!(inner.network.protocol.udp.listen_address); - get_config!(inner.network.protocol.udp.public_address); - get_config!(inner.network.protocol.tcp.connect); - get_config!(inner.network.protocol.tcp.listen); - get_config!(inner.network.protocol.tcp.max_connections); - get_config!(inner.network.protocol.tcp.listen_address); - get_config!(inner.network.protocol.tcp.public_address); - get_config!(inner.network.protocol.ws.connect); - get_config!(inner.network.protocol.ws.listen); - get_config!(inner.network.protocol.ws.max_connections); - get_config!(inner.network.protocol.ws.listen_address); - get_config!(inner.network.protocol.ws.path); - get_config!(inner.network.protocol.ws.url); - get_config!(inner.network.protocol.wss.connect); - get_config!(inner.network.protocol.wss.listen); - get_config!(inner.network.protocol.wss.max_connections); - get_config!(inner.network.protocol.wss.listen_address); - get_config!(inner.network.protocol.wss.path); - get_config!(inner.network.protocol.wss.url); - #[cfg(feature = "geolocation")] - get_config!(inner.network.privacy.country_code_denylist); - Ok(()) + // Simple config transformation + macro_rules! 
get_config { + ($key:expr) => { + let keyname = &stringify!($key)[6..]; + let v = cb(keyname.to_owned())?; + $key = match v.downcast() { + Ok(v) => *v, + Err(e) => { + apibail_generic!(format!( + "incorrect type for key {}: {:?}", + keyname, + type_name_of_val(&*e) + )) + } + }; + }; + } + + get_config!(inner.program_name); + get_config!(inner.namespace); + get_config!(inner.capabilities.disable); + get_config!(inner.table_store.directory); + get_config!(inner.table_store.delete); + get_config!(inner.block_store.directory); + get_config!(inner.block_store.delete); + get_config!(inner.protected_store.allow_insecure_fallback); + get_config!(inner.protected_store.always_use_insecure_storage); + get_config!(inner.protected_store.directory); + get_config!(inner.protected_store.delete); + get_config!(inner.protected_store.device_encryption_key_password); + get_config!(inner.protected_store.new_device_encryption_key_password); + get_config!(inner.network.connection_initial_timeout_ms); + get_config!(inner.network.connection_inactivity_timeout_ms); + get_config!(inner.network.max_connections_per_ip4); + get_config!(inner.network.max_connections_per_ip6_prefix); + get_config!(inner.network.max_connections_per_ip6_prefix_size); + get_config!(inner.network.max_connection_frequency_per_min); + get_config!(inner.network.client_allowlist_timeout_ms); + get_config!(inner.network.reverse_connection_receipt_time_ms); + get_config!(inner.network.hole_punch_receipt_time_ms); + get_config!(inner.network.network_key_password); + get_config!(inner.network.routing_table.node_id); + get_config!(inner.network.routing_table.node_id_secret); + get_config!(inner.network.routing_table.bootstrap); + get_config!(inner.network.routing_table.limit_over_attached); + get_config!(inner.network.routing_table.limit_fully_attached); + get_config!(inner.network.routing_table.limit_attached_strong); + get_config!(inner.network.routing_table.limit_attached_good); + 
get_config!(inner.network.routing_table.limit_attached_weak); + get_config!(inner.network.dht.max_find_node_count); + get_config!(inner.network.dht.resolve_node_timeout_ms); + get_config!(inner.network.dht.resolve_node_count); + get_config!(inner.network.dht.resolve_node_fanout); + get_config!(inner.network.dht.get_value_timeout_ms); + get_config!(inner.network.dht.get_value_count); + get_config!(inner.network.dht.get_value_fanout); + get_config!(inner.network.dht.set_value_timeout_ms); + get_config!(inner.network.dht.set_value_count); + get_config!(inner.network.dht.set_value_fanout); + get_config!(inner.network.dht.min_peer_count); + get_config!(inner.network.dht.min_peer_refresh_time_ms); + get_config!(inner.network.dht.validate_dial_info_receipt_time_ms); + get_config!(inner.network.dht.local_subkey_cache_size); + get_config!(inner.network.dht.local_max_subkey_cache_memory_mb); + get_config!(inner.network.dht.remote_subkey_cache_size); + get_config!(inner.network.dht.remote_max_records); + get_config!(inner.network.dht.remote_max_subkey_cache_memory_mb); + get_config!(inner.network.dht.remote_max_storage_space_mb); + get_config!(inner.network.dht.public_watch_limit); + get_config!(inner.network.dht.member_watch_limit); + get_config!(inner.network.dht.max_watch_expiration_ms); + get_config!(inner.network.rpc.concurrency); + get_config!(inner.network.rpc.queue_size); + get_config!(inner.network.rpc.max_timestamp_behind_ms); + get_config!(inner.network.rpc.max_timestamp_ahead_ms); + get_config!(inner.network.rpc.timeout_ms); + get_config!(inner.network.rpc.max_route_hop_count); + get_config!(inner.network.rpc.default_route_hop_count); + get_config!(inner.network.upnp); + get_config!(inner.network.detect_address_changes); + get_config!(inner.network.restricted_nat_retries); + get_config!(inner.network.tls.certificate_path); + get_config!(inner.network.tls.private_key_path); + get_config!(inner.network.tls.connection_initial_timeout_ms); + 
get_config!(inner.network.application.https.enabled); + get_config!(inner.network.application.https.listen_address); + get_config!(inner.network.application.https.path); + get_config!(inner.network.application.https.url); + get_config!(inner.network.application.http.enabled); + get_config!(inner.network.application.http.listen_address); + get_config!(inner.network.application.http.path); + get_config!(inner.network.application.http.url); + get_config!(inner.network.protocol.udp.enabled); + get_config!(inner.network.protocol.udp.socket_pool_size); + get_config!(inner.network.protocol.udp.listen_address); + get_config!(inner.network.protocol.udp.public_address); + get_config!(inner.network.protocol.tcp.connect); + get_config!(inner.network.protocol.tcp.listen); + get_config!(inner.network.protocol.tcp.max_connections); + get_config!(inner.network.protocol.tcp.listen_address); + get_config!(inner.network.protocol.tcp.public_address); + get_config!(inner.network.protocol.ws.connect); + get_config!(inner.network.protocol.ws.listen); + get_config!(inner.network.protocol.ws.max_connections); + get_config!(inner.network.protocol.ws.listen_address); + get_config!(inner.network.protocol.ws.path); + get_config!(inner.network.protocol.ws.url); + get_config!(inner.network.protocol.wss.connect); + get_config!(inner.network.protocol.wss.listen); + get_config!(inner.network.protocol.wss.max_connections); + get_config!(inner.network.protocol.wss.listen_address); + get_config!(inner.network.protocol.wss.path); + get_config!(inner.network.protocol.wss.url); + #[cfg(feature = "geolocation")] + get_config!(inner.network.privacy.country_code_denylist); + #[cfg(feature = "virtual-network")] + { + get_config!(inner.network.virtual_network.enabled); + get_config!(inner.network.virtual_network.server_address); + } + + Ok(Self { + update_cb, + inner: Arc::new(RwLock::new(inner)), }) } @@ -1009,6 +1019,10 @@ impl VeilidConfig { }) } + pub fn update_callback(&self) -> UpdateCallback { + 
self.update_cb.clone() + } + pub fn get(&self) -> RwLockReadGuard { self.inner.read() } @@ -1038,7 +1052,15 @@ impl VeilidConfig { } } - pub fn with_mut(&self, f: F) -> VeilidAPIResult + pub fn with(&self, f: F) -> R + where + F: FnOnce(&VeilidConfigInner) -> R, + { + let inner = self.inner.read(); + f(&inner) + } + + pub fn try_with_mut(&self, f: F) -> VeilidAPIResult where F: FnOnce(&mut VeilidConfigInner) -> VeilidAPIResult, { @@ -1061,12 +1083,10 @@ impl VeilidConfig { }; // Send configuration update to clients - if let Some(update_cb) = &self.update_cb { - let safe_cfg = self.safe_config_inner(); - update_cb(VeilidUpdate::Config(Box::new(VeilidStateConfig { - config: safe_cfg, - }))); - } + let safe_cfg = self.safe_config_inner(); + (self.update_cb)(VeilidUpdate::Config(Box::new(VeilidStateConfig { + config: safe_cfg, + }))); Ok(out) } @@ -1103,7 +1123,7 @@ impl VeilidConfig { } } pub fn set_key_json(&self, key: &str, value: &str) -> VeilidAPIResult<()> { - self.with_mut(|c| { + self.try_with_mut(|c| { // Split key into path parts let keypath: Vec<&str> = key.split('.').collect(); @@ -1275,124 +1295,6 @@ impl VeilidConfig { Ok(()) } - - #[cfg(not(test))] - async fn init_node_id( - &self, - vcrypto: CryptoSystemVersion, - table_store: TableStore, - ) -> VeilidAPIResult<(TypedKey, TypedSecret)> { - let ck = vcrypto.kind(); - let mut node_id = self.inner.read().network.routing_table.node_id.get(ck); - let mut node_id_secret = self - .inner - .read() - .network - .routing_table - .node_id_secret - .get(ck); - - // See if node id was previously stored in the table store - let config_table = table_store.open("__veilid_config", 1).await?; - - let table_key_node_id = format!("node_id_{}", ck); - let table_key_node_id_secret = format!("node_id_secret_{}", ck); - - if node_id.is_none() { - log_tstore!(debug "pulling {} from storage", table_key_node_id); - if let Ok(Some(stored_node_id)) = config_table - .load_json::(0, table_key_node_id.as_bytes()) - .await - { - 
log_tstore!(debug "{} found in storage", table_key_node_id); - node_id = Some(stored_node_id); - } else { - log_tstore!(debug "{} not found in storage", table_key_node_id); - } - } - - // See if node id secret was previously stored in the protected store - if node_id_secret.is_none() { - log_tstore!(debug "pulling {} from storage", table_key_node_id_secret); - if let Ok(Some(stored_node_id_secret)) = config_table - .load_json::(0, table_key_node_id_secret.as_bytes()) - .await - { - log_tstore!(debug "{} found in storage", table_key_node_id_secret); - node_id_secret = Some(stored_node_id_secret); - } else { - log_tstore!(debug "{} not found in storage", table_key_node_id_secret); - } - } - - // If we have a node id from storage, check it - let (node_id, node_id_secret) = - if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) { - // Validate node id - if !vcrypto.validate_keypair(&node_id.value, &node_id_secret.value) { - apibail_generic!(format!( - "node_id_secret_{} and node_id_key_{} don't match", - ck, ck - )); - } - (node_id, node_id_secret) - } else { - // If we still don't have a valid node id, generate one - log_tstore!(debug "generating new node_id_{}", ck); - let kp = vcrypto.generate_keypair(); - (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) - }; - info!("Node Id: {}", node_id); - - // Save the node id / secret in storage - config_table - .store_json(0, table_key_node_id.as_bytes(), &node_id) - .await?; - config_table - .store_json(0, table_key_node_id_secret.as_bytes(), &node_id_secret) - .await?; - - Ok((node_id, node_id_secret)) - } - - /// Get the node id from config if one is specified. - /// Must be done -after- protected store startup. 
- #[cfg_attr(test, allow(unused_variables))] - pub async fn init_node_ids( - &self, - crypto: Crypto, - table_store: TableStore, - ) -> VeilidAPIResult<()> { - let mut out_node_id = TypedKeyGroup::new(); - let mut out_node_id_secret = TypedSecretGroup::new(); - - for ck in VALID_CRYPTO_KINDS { - let vcrypto = crypto - .get(ck) - .expect("Valid crypto kind is not actually valid."); - - #[cfg(test)] - let (node_id, node_id_secret) = { - let kp = vcrypto.generate_keypair(); - (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) - }; - #[cfg(not(test))] - let (node_id, node_id_secret) = self.init_node_id(vcrypto, table_store.clone()).await?; - - // Save for config - out_node_id.add(node_id); - out_node_id_secret.add(node_id_secret); - } - - // Commit back to config - self.with_mut(|c| { - c.network.routing_table.node_id = out_node_id; - c.network.routing_table.node_id_secret = out_node_id_secret; - Ok(()) - })?; - - Ok(()) - } } /// Return the default veilid config as a json object. diff --git a/veilid-core/src/wasm_helpers.rs b/veilid-core/src/wasm_helpers.rs index c2f8994b..952d6c87 100644 --- a/veilid-core/src/wasm_helpers.rs +++ b/veilid-core/src/wasm_helpers.rs @@ -1,5 +1,5 @@ cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { pub use tsify::*; pub use wasm_bindgen::prelude::*; diff --git a/veilid-core/tests/web.rs b/veilid-core/tests/web.rs index eb60ea38..361421e6 100644 --- a/veilid-core/tests/web.rs +++ b/veilid-core/tests/web.rs @@ -1,5 +1,5 @@ //! Test suite for the Web and headless browsers. 
-#![cfg(target_arch = "wasm32")] +#![cfg(all(target_arch = "wasm32", target_os = "unknown"))] #![recursion_limit = "256"] use parking_lot::Once; @@ -21,7 +21,7 @@ pub fn setup() -> () { let mut builder = tracing_wasm::WASMLayerConfigBuilder::new(); builder.set_report_logs_in_timings(false); - builder.set_max_level(Level::DEBUG); + builder.set_max_level(Level::TRACE); builder.set_console_config(tracing_wasm::ConsoleConfig::ReportWithoutConsoleColor); tracing_wasm::set_as_global_default_with_config(builder.build()); }); diff --git a/veilid-flutter/android/CMakeLists.txt b/veilid-flutter/android/CMakeLists.txt index 57518a5d..9578fa92 100644 --- a/veilid-flutter/android/CMakeLists.txt +++ b/veilid-flutter/android/CMakeLists.txt @@ -1,3 +1,3 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.3) project(cpplink CXX) add_library(cpplink cpplink.cpp) \ No newline at end of file diff --git a/veilid-flutter/android/build.gradle b/veilid-flutter/android/build.gradle index a0a5930c..89c6c6e7 100644 --- a/veilid-flutter/android/build.gradle +++ b/veilid-flutter/android/build.gradle @@ -1,18 +1,18 @@ buildscript { - ext.kotlin_version = '1.6.10' + ext.kotlin_version = '1.9.25' repositories { google() mavenCentral() } dependencies { - classpath 'com.android.tools.build:gradle:7.2.0' + classpath 'com.android.tools.build:gradle:8.8.0' classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" } } plugins { - id "org.mozilla.rust-android-gradle.rust-android" version "0.9.3" + id "org.mozilla.rust-android-gradle.rust-android" version "0.9.6" } group 'com.veilid.veilid' @@ -30,10 +30,11 @@ apply plugin: 'kotlin-android' android { compileSdkVersion 31 + namespace = "com.veilid.veilid" compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { @@ -49,7 +50,7 @@ android { targetSdkVersion 31 
versionCode 1 versionName "1.0" - + ndk { abiFilters 'armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64' } @@ -63,8 +64,8 @@ android { } } - ndkVersion '26.3.11579264' - + ndkVersion '27.0.12077973' + // Required to copy libc++_shared.so externalNativeBuild { cmake { diff --git a/veilid-flutter/android/src/main/AndroidManifest.xml b/veilid-flutter/android/src/main/AndroidManifest.xml index 77cd0e39..4d4167fd 100644 --- a/veilid-flutter/android/src/main/AndroidManifest.xml +++ b/veilid-flutter/android/src/main/AndroidManifest.xml @@ -1,5 +1,4 @@ - + diff --git a/veilid-flutter/example/android/app/build.gradle b/veilid-flutter/example/android/app/build.gradle index ea37478f..a23e45e1 100644 --- a/veilid-flutter/example/android/app/build.gradle +++ b/veilid-flutter/example/android/app/build.gradle @@ -1,3 +1,9 @@ +plugins { + id "com.android.application" + id "kotlin-android" + id "dev.flutter.flutter-gradle-plugin" +} + def localProperties = new Properties() def localPropertiesFile = rootProject.file('local.properties') if (localPropertiesFile.exists()) { @@ -6,11 +12,6 @@ if (localPropertiesFile.exists()) { } } -def flutterRoot = localProperties.getProperty('flutter.sdk') -if (flutterRoot == null) { - throw new GradleException("Flutter SDK not found. 
Define location with flutter.sdk in the local.properties file.") -} - def flutterVersionCode = localProperties.getProperty('flutter.versionCode') if (flutterVersionCode == null) { flutterVersionCode = '1' @@ -21,16 +22,12 @@ if (flutterVersionName == null) { flutterVersionName = '1.0' } -apply plugin: 'com.android.application' -apply plugin: 'kotlin-android' -apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle" - android { compileSdkVersion flutter.compileSdkVersion - ndkVersion '26.3.11579264' + ndkVersion '27.0.12077973' compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { @@ -40,6 +37,7 @@ android { sourceSets { main.java.srcDirs += 'src/main/kotlin' } + namespace "com.veilid.veilid_example" defaultConfig { // TODO: Specify your own unique Application ID (https://developer.android.com/studio/build/application-id.html). @@ -68,7 +66,3 @@ android { flutter { source '../..' 
} - -dependencies { - implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version" -} \ No newline at end of file diff --git a/veilid-flutter/example/android/app/src/main/AndroidManifest.xml b/veilid-flutter/example/android/app/src/main/AndroidManifest.xml index 5817ccd8..e9f31fd3 100644 --- a/veilid-flutter/example/android/app/src/main/AndroidManifest.xml +++ b/veilid-flutter/example/android/app/src/main/AndroidManifest.xml @@ -1,5 +1,4 @@ - + properties.load(reader) } + repositories { + google() + mavenCentral() + gradlePluginPortal() + } +} -def flutterSdkPath = properties.getProperty("flutter.sdk") -assert flutterSdkPath != null, "flutter.sdk not set in local.properties" -apply from: "$flutterSdkPath/packages/flutter_tools/gradle/app_plugin_loader.gradle" +plugins { + id "dev.flutter.flutter-plugin-loader" version "1.0.0" + id "com.android.application" version "8.8.0" apply false + id "org.jetbrains.kotlin.android" version "1.9.25" apply false +} + +include ":app" \ No newline at end of file diff --git a/veilid-flutter/example/pubspec.lock b/veilid-flutter/example/pubspec.lock index db0901a0..89460f85 100644 --- a/veilid-flutter/example/pubspec.lock +++ b/veilid-flutter/example/pubspec.lock @@ -69,10 +69,10 @@ packages: dependency: transitive description: name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf url: "https://pub.dev" source: hosted - version: "1.18.0" + version: "1.19.0" convert: dependency: transitive description: @@ -195,18 +195,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + sha256: "7bb2830ebd849694d1ec25bf1f44582d6ac531a57a365a803a6034ff751d2d06" url: "https://pub.dev" source: hosted - version: "10.0.5" + version: "10.0.7" leak_tracker_flutter_testing: dependency: transitive description: name: 
leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + sha256: "9491a714cca3667b60b5c420da8217e6de0d1ba7a5ec322fab01758f6998f379" url: "https://pub.dev" source: hosted - version: "3.0.5" + version: "3.0.8" leak_tracker_testing: dependency: transitive description: @@ -355,7 +355,7 @@ packages: dependency: transitive description: flutter source: sdk - version: "0.0.99" + version: "0.0.0" source_span: dependency: transitive description: @@ -368,10 +368,10 @@ packages: dependency: transitive description: name: stack_trace - sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + sha256: "9f47fd3630d76be3ab26f0ee06d213679aa425996925ff3feffdec504931c377" url: "https://pub.dev" source: hosted - version: "1.11.1" + version: "1.12.0" stream_channel: dependency: transitive description: @@ -384,10 +384,10 @@ packages: dependency: transitive description: name: string_scanner - sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.3.0" sync_http: dependency: transitive description: @@ -408,10 +408,10 @@ packages: dependency: transitive description: name: system_info_plus - sha256: b915c811c6605b802f3988859bc2bb79c95f735762a75b5451741f7a2b949d1b + sha256: df94187e95527f9cb459e6a9f6e0b1ea20c157d8029bc233de34b3c1e17e1c48 url: "https://pub.dev" source: hosted - version: "0.0.5" + version: "0.0.6" term_glyph: dependency: transitive description: @@ -424,10 +424,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" url: "https://pub.dev" source: hosted - version: "0.7.2" + version: "0.7.3" typed_data: dependency: transitive description: @@ -450,7 +450,7 @@ packages: path: ".." 
relative: true source: path - version: "0.3.4" + version: "0.4.1" veilid_test: dependency: "direct dev" description: @@ -462,18 +462,18 @@ packages: dependency: transitive description: name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + sha256: f6be3ed8bd01289b34d679c2b62226f63c0e69f9fd2e50a6b3c1c729a961041b url: "https://pub.dev" source: hosted - version: "14.2.5" + version: "14.3.0" webdriver: dependency: transitive description: name: webdriver - sha256: "003d7da9519e1e5f329422b36c4dcdf18d7d2978d1ba099ea4e45ba490ed845e" + sha256: "3d773670966f02a646319410766d3b5e1037efb7f07cc68f844d5e06cd4d61c8" url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "3.0.4" xdg_directories: dependency: transitive description: diff --git a/veilid-flutter/example/web/index.html b/veilid-flutter/example/web/index.html index 373dae36..152feb79 100644 --- a/veilid-flutter/example/web/index.html +++ b/veilid-flutter/example/web/index.html @@ -19,31 +19,20 @@ - + - + - + - Veilid Example + ftest - - - - - - - - - + + + \ No newline at end of file diff --git a/veilid-flutter/pubspec.yaml b/veilid-flutter/pubspec.yaml index 00a5010b..8b7d4b06 100644 --- a/veilid-flutter/pubspec.yaml +++ b/veilid-flutter/pubspec.yaml @@ -25,7 +25,7 @@ dependencies: path: ^1.9.0 path_provider: ^2.1.3 system_info2: ^4.0.0 - system_info_plus: ^0.0.5 + system_info_plus: ^0.0.6 dev_dependencies: build_runner: ^2.4.10 diff --git a/veilid-flutter/rust/Cargo.toml b/veilid-flutter/rust/Cargo.toml index 5e82c89b..e8f35c11 100644 --- a/veilid-flutter/rust/Cargo.toml +++ b/veilid-flutter/rust/Cargo.toml @@ -4,12 +4,12 @@ name = "veilid-flutter" version = "0.4.1" # --- description = "Flutter/Dart bindings for Veilid" -repository = "https://gitlab.com/veilid/veilid" -authors = ["Veilid Team "] -license = "MPL-2.0" -edition = "2021" -rust-version = "1.81.0" resolver = "2" +repository.workspace = true +authors.workspace = true +license.workspace = true 
+edition.workspace = true +rust-version.workspace = true [lib] crate-type = ["cdylib", "staticlib", "rlib"] @@ -50,7 +50,7 @@ tracing-flame = "0.2.0" # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] tracing-opentelemetry = "0.21" opentelemetry = { version = "0.20" } opentelemetry-otlp = { version = "0.13" } @@ -68,7 +68,7 @@ libc-print = { version = "0.1.23", optional = true } # Dependencies for WASM builds only -[target.'cfg(target_arch = "wasm32")'.dependencies] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] # Dependencies for Android builds only [target.'cfg(target_os = "android")'.dependencies] diff --git a/veilid-python/tests/test_dht.py b/veilid-python/tests/test_dht.py index 414967fe..6af2637c 100644 --- a/veilid-python/tests/test_dht.py +++ b/veilid-python/tests/test_dht.py @@ -129,6 +129,8 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI): vdtemp = await rc.set_dht_value(key, ValueSubkey(0), vb) assert vdtemp is None + await sync(rc, [rec]) + vdtemp = await rc.get_dht_value(key, ValueSubkey(0), True) assert vdtemp.data == vb @@ -147,6 +149,7 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI): # and verified they stored correctly # Delete things locally and reopen and see if we can write # with the same writer key + await sync(rc, [rec]) await rc.close_dht_record(key) await rc.delete_dht_record(key) @@ -170,6 +173,8 @@ async def test_open_writer_dht_value(api_connection: veilid.VeilidAPI): vdtemp = await rc.set_dht_value(key, ValueSubkey(1), vc) assert vdtemp is None + await sync(rc, [rec]) + # Verify the network got the subkey update with a refresh check vdtemp = await rc.get_dht_value(key, ValueSubkey(1), True) assert vdtemp is not None @@ -575,22 +580,7 @@ async def test_dht_write_read_local(): print(f' {n}: 
{desc.key} {desc.owner}:{desc.owner_secret}') - print('syncing records to the network') - - syncrecords = records.copy() - while len(syncrecords) > 0: - donerecords = set() - subkeysleft = 0 - for desc0 in records: - rr = await rc0.inspect_dht_record(desc0.key, []) - left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] - if left == 0: - donerecords.add(desc0) - else: - subkeysleft += left - syncrecords = [x for x in syncrecords if x not in donerecords] - print(f' {len(syncrecords)} records {subkeysleft} subkeys left') - time.sleep(1) + await sync(rc0, records) for desc0 in records: await rc0.close_dht_record(desc0.key) @@ -613,3 +603,20 @@ async def test_dht_write_read_local(): print(f' {n}') n += 1 + +async def sync(rc: veilid.RoutingContext, records: list[veilid.DHTRecordDescriptor]): + print('syncing records to the network') + syncrecords = records.copy() + while len(syncrecords) > 0: + donerecords = set() + subkeysleft = 0 + for desc in records: + rr = await rc.inspect_dht_record(desc.key, []) + left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] + if left == 0: + donerecords.add(desc) + else: + subkeysleft += left + syncrecords = [x for x in syncrecords if x not in donerecords] + print(f' {len(syncrecords)} records {subkeysleft} subkeys left') + time.sleep(1) diff --git a/veilid-server/Cargo.toml b/veilid-server/Cargo.toml index 55e3e0f4..349c6cc0 100644 --- a/veilid-server/Cargo.toml +++ b/veilid-server/Cargo.toml @@ -4,12 +4,12 @@ name = "veilid-server" version = "0.4.1" # --- description = "Veilid Headless Node" -repository = "https://gitlab.com/veilid/veilid" -authors = ["Veilid Team "] -license = "MPL-2.0" -edition = "2021" resolver = "2" -rust-version = "1.81.0" +repository.workspace = true +authors.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [[bin]] name = "veilid-server" @@ -19,6 +19,11 @@ path = "src/main.rs" default = ["rt-tokio", "veilid-core/default", 
"otlp-tonic"] default-async-std = ["rt-async-std", "veilid-core/default-async-std"] +virtual-network = [ + "veilid-core/virtual-network", + "veilid-core/virtual-network-server", +] + crypto-test = ["rt-tokio", "veilid-core/crypto-test"] crypto-test-none = ["rt-tokio", "veilid-core/crypto-test-none"] @@ -70,7 +75,7 @@ config = { version = "^0.14.0", default-features = false, features = ["yaml"] } cfg-if = "^1.0.0" serde = "^1.0.204" serde_derive = "^1.0.204" -serde_yaml = "^0.9.34" +serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0" } json = "^0" futures-util = { version = "^0", default-features = false, features = [ "alloc", diff --git a/veilid-server/src/client_api.rs b/veilid-server/src/client_api.rs index 47dc0886..5009b3f4 100644 --- a/veilid-server/src/client_api.rs +++ b/veilid-server/src/client_api.rs @@ -39,7 +39,7 @@ struct RequestLine { // Request to process line: String, // Where to send the response - responses_tx: flume::Sender, + responses_tx: flume::Sender>, } struct ClientApiInner { @@ -48,7 +48,7 @@ struct ClientApiInner { settings: Settings, stop: Option, join_handle: Option, - update_channels: HashMap>, + update_channels: HashMap>>, } #[derive(Clone)] @@ -165,17 +165,12 @@ impl ClientApi { } async fn handle_tcp_incoming(self, bind_addr: SocketAddr) -> std::io::Result<()> { - let listener = TcpListener::bind(bind_addr).await?; + let listener = bind_async_tcp_listener(bind_addr)? + .ok_or(std::io::Error::from(std::io::ErrorKind::AddrInUse))?; debug!(target: "client_api", "TCPClient API listening on: {:?}", bind_addr); // Process the incoming accept stream - cfg_if! 
{ - if #[cfg(feature="rt-async-std")] { - let mut incoming_stream = listener.incoming(); - } else { - let mut incoming_stream = tokio_stream::wrappers::TcpListenerStream::new(listener); - } - } + let mut incoming_stream = async_tcp_listener_incoming(listener); // Make wait group for all incoming connections let awg = AsyncWaitGroup::new(); @@ -310,7 +305,8 @@ impl ClientApi { debug!("JSONAPI: Response: {:?}", response); // Marshal json + newline => NDJSON - let response_string = serialize_json(json_api::RecvMessage::Response(response)) + "\n"; + let response_string = + Arc::new(serialize_json(json_api::RecvMessage::Response(response)) + "\n"); if let Err(e) = responses_tx.send_async(response_string).await { eprintln!("response not sent: {}", e) } @@ -327,7 +323,7 @@ impl ClientApi { self, mut reader: R, requests_tx: flume::Sender>, - responses_tx: flume::Sender, + responses_tx: flume::Sender>, ) -> VeilidAPIResult> { let mut linebuf = String::new(); while let Ok(size) = reader.read_line(&mut linebuf).await { @@ -361,7 +357,7 @@ impl ClientApi { async fn send_responses( self, - responses_rx: flume::Receiver, + responses_rx: flume::Receiver>, mut writer: W, ) -> VeilidAPIResult> { while let Ok(resp) = responses_rx.recv_async().await { @@ -526,11 +522,16 @@ impl ClientApi { } pub fn handle_update(&self, veilid_update: veilid_core::VeilidUpdate) { - // serialize update to NDJSON - let veilid_update = serialize_json(json_api::RecvMessage::Update(veilid_update)) + "\n"; - - // Pass other updates to clients let inner = self.inner.lock(); + if inner.update_channels.is_empty() { + return; + } + + // serialize update to NDJSON + let veilid_update = + Arc::new(serialize_json(json_api::RecvMessage::Update(veilid_update)) + "\n"); + + // Pass updates to clients for ch in inner.update_channels.values() { if ch.send(veilid_update.clone()).is_err() { // eprintln!("failed to send update: {}", e); diff --git a/veilid-server/src/main.rs b/veilid-server/src/main.rs index 
be71a51b..360d52b6 100644 --- a/veilid-server/src/main.rs +++ b/veilid-server/src/main.rs @@ -33,10 +33,10 @@ use veilid_logs::*; #[derive(Args, Debug, Clone)] #[group(multiple = false)] pub struct Logging { - /// Turn on debug logging on the terminal + /// Turn on debug logging on the terminal and over the client api #[arg(long)] debug: bool, - /// Turn on trace logging on the terminal + /// Turn on trace logging on the terminal and over the client api #[arg(long)] trace: bool, } @@ -93,10 +93,24 @@ pub struct CmdlineArgs { #[arg(long, hide = true, value_name = "PATH", num_args=0..=1, require_equals=true, default_missing_value = "")] perfetto: Option, - /// Run as an extra daemon on the same machine for testing purposes, specify a number greater than zero to offset the listening ports + /// Run as an extra daemon on the same machine for testing purposes #[arg(short('n'), long)] subnode_index: Option, + /// Run several nodes in parallel on the same machine for testing purposes + /// + /// Will run subnodes N through N+(subnode_count-1), where N is 0 or set via --subnode_index + #[arg(long, value_name = "COUNT")] + subnode_count: Option, + + /// Connect to a virtual network router + /// + /// Specify either an remote tcp or ws url ('tcp://localhost:5149' or 'ws://localhost:5148') + /// or '' or 'local' to specify using an internally spawned server + #[cfg(feature = "virtual-network")] + #[arg(long, value_name = "URL", default_missing_value = "")] + virtual_router: Option, + /// Only generate a new keypair and print it /// /// Generate a new keypair for a specific crypto kind and print both the key and its secret to the terminal, then exit immediately. 
@@ -200,35 +214,45 @@ fn main() -> EyreResult<()> { if args.foreground { settingsrw.daemon.enabled = false; } - if let Some(subnode_index) = args.subnode_index { - settingsrw.testing.subnode_index = subnode_index; - }; - if args.logging.debug { settingsrw.logging.terminal.enabled = true; settingsrw.logging.terminal.level = LogLevel::Debug; + settingsrw.logging.api.enabled = true; + settingsrw.logging.api.level = LogLevel::Debug; } if args.logging.trace { settingsrw.logging.terminal.enabled = true; settingsrw.logging.terminal.level = LogLevel::Trace; + settingsrw.logging.api.enabled = true; + settingsrw.logging.api.level = LogLevel::Trace; } + + if let Some(subnode_index) = args.subnode_index { + settingsrw.testing.subnode_index = subnode_index; + }; + if let Some(subnode_count) = args.subnode_count { + if subnode_count == 0 { + bail!("subnode count must be positive"); + } + settingsrw.testing.subnode_count = subnode_count; + }; + #[cfg(feature = "opentelemetry-otlp")] - if args.otlp.is_some() { + if let Some(otlp) = args.otlp { println!("Enabling OTLP tracing"); settingsrw.logging.otlp.enabled = true; - settingsrw.logging.otlp.grpc_endpoint = NamedSocketAddrs::from_str( - args.otlp - .expect("should not be null because of default missing value") - .as_str(), - ) - .wrap_err("failed to parse OTLP address")?; + settingsrw.logging.otlp.grpc_endpoint = + NamedSocketAddrs::from_str(&otlp).wrap_err("failed to parse OTLP address")?; settingsrw.logging.otlp.level = LogLevel::Trace; } if let Some(flame) = args.flame { let flame = if flame.is_empty() { - Settings::get_default_flame_path(settingsrw.testing.subnode_index) - .to_string_lossy() - .to_string() + Settings::get_default_flame_path( + settingsrw.testing.subnode_index, + settingsrw.testing.subnode_count, + ) + .to_string_lossy() + .to_string() } else { flame.to_string_lossy().to_string() }; @@ -239,9 +263,12 @@ fn main() -> EyreResult<()> { #[cfg(unix)] if let Some(perfetto) = args.perfetto { let perfetto = if 
perfetto.is_empty() { - Settings::get_default_perfetto_path(settingsrw.testing.subnode_index) - .to_string_lossy() - .to_string() + Settings::get_default_perfetto_path( + settingsrw.testing.subnode_index, + settingsrw.testing.subnode_count, + ) + .to_string_lossy() + .to_string() } else { perfetto.to_string_lossy().to_string() }; @@ -283,6 +310,10 @@ fn main() -> EyreResult<()> { } let mut node_id_set = false; if let Some(key_set) = args.set_node_id { + if settingsrw.testing.subnode_count != 1 { + bail!("subnode count must be 1 if setting node id/secret"); + } + node_id_set = true; // Turn off terminal logging so we can be interactive settingsrw.logging.terminal.enabled = false; @@ -346,11 +377,6 @@ fn main() -> EyreResult<()> { } } - // Apply subnode index if we're testing - settings - .apply_subnode_index() - .wrap_err("failed to apply subnode index")?; - // --- Verify Config --- settings.verify()?; diff --git a/veilid-server/src/server.rs b/veilid-server/src/server.rs index 20820737..9182b557 100644 --- a/veilid-server/src/server.rs +++ b/veilid-server/src/server.rs @@ -32,8 +32,8 @@ pub fn shutdown() { } } -//#[instrument(err, skip_all)] -pub async fn run_veilid_server( +pub async fn run_veilid_server_subnode( + subnode: u16, settings: Settings, server_mode: ServerMode, veilid_logs: VeilidLogs, @@ -44,17 +44,34 @@ pub async fn run_veilid_server( settings_client_api_network_enabled, settings_client_api_ipc_directory, settings_client_api_listen_address_addrs, - subnode_index, + subnode_offset, ) = { let settingsr = settings.read(); + cfg_if! 
{ + if #[cfg(feature = "virtual-network")] { + let subnode_offset = if inner.core.network.virtual_network.enabled { + // Don't offset ports when using virtual networking + 0 + } else { + subnode + }; + } else { + let subnode_offset = subnode; + } + } + ( settingsr.auto_attach, settingsr.client_api.ipc_enabled, settingsr.client_api.network_enabled, settingsr.client_api.ipc_directory.clone(), - settingsr.client_api.listen_address.addrs.clone(), - settingsr.testing.subnode_index, + settingsr + .client_api + .listen_address + .with_offset_port(subnode_offset)? + .addrs, + subnode_offset, ) }; @@ -72,7 +89,7 @@ pub async fn run_veilid_server( eprintln!("error sending veilid update callback: {:?}", change); } }); - let config_callback = settings.get_core_config_callback(); + let config_callback = settings.get_core_config_callback(subnode, subnode_offset); // Start Veilid Core and get API let veilid_api = veilid_core::api_startup(update_callback, config_callback) @@ -86,7 +103,7 @@ pub async fn run_veilid_server( client_api::ClientApi::new(veilid_api.clone(), veilid_logs.clone(), settings.clone()); some_capi.clone().run( if settings_client_api_ipc_enabled { - Some(settings_client_api_ipc_directory.join(subnode_index.to_string())) + Some(settings_client_api_ipc_directory.join(subnode.to_string())) } else { None }, @@ -108,7 +125,7 @@ pub async fn run_veilid_server( let capi2 = capi.clone(); let update_receiver_shutdown = SingleShotEventual::new(Some(())); let mut update_receiver_shutdown_instance = update_receiver_shutdown.instance().fuse(); - let update_receiver_jh = spawn_local( + let update_receiver_jh = spawn( "update_receiver", async move { loop { @@ -116,7 +133,7 @@ pub async fn run_veilid_server( res = receiver.recv_async() => { if let Ok(change) = res { if let Some(capi) = &capi2 { - // Handle state changes on main thread for capnproto rpc + // Handle state changes for JSON API capi.clone().handle_update(change); } } else { @@ -201,9 +218,48 @@ pub async fn 
run_veilid_server( // Wait for update receiver to exit let _ = update_receiver_jh.await; + out +} + +//#[instrument(err, skip_all)] +pub async fn run_veilid_server( + settings: Settings, + server_mode: ServerMode, + veilid_logs: VeilidLogs, +) -> EyreResult<()> { + let (subnode_index, subnode_count) = { + let settingsr = settings.read(); + ( + settingsr.testing.subnode_index, + settingsr.testing.subnode_count, + ) + }; + + // Ensure we only try to spawn multiple subnodes in 'normal' execution mode + if !matches!(server_mode, ServerMode::Normal) && subnode_count != 1 { + bail!("can only have multiple subnodes in 'normal' execution mode"); + } + + // Run all subnodes + let mut all_subnodes_jh = vec![]; + for subnode in subnode_index..(subnode_index + subnode_count) { + debug!("Spawning subnode {}", subnode); + let jh = spawn( + &format!("subnode{}", subnode), + run_veilid_server_subnode(subnode, settings.clone(), server_mode, veilid_logs.clone()), + ); + all_subnodes_jh.push(jh); + } + + // Wait for all subnodes to complete + for (sn, jh) in all_subnodes_jh.into_iter().enumerate() { + jh.await?; + debug!("Subnode {} exited", sn); + } + // Finally, drop logs // this is explicit to ensure we don't accidentally drop them too soon via a move drop(veilid_logs); - out + Ok(()) } diff --git a/veilid-server/src/settings.rs b/veilid-server/src/settings.rs index 501f74e3..3c494ac5 100644 --- a/veilid-server/src/settings.rs +++ b/veilid-server/src/settings.rs @@ -40,6 +40,29 @@ pub fn load_default_config() -> EyreResult { country_code_denylist: [] "#; + #[cfg(not(feature = "virtual-network"))] + let virtual_network_section = ""; + #[cfg(feature = "virtual-network")] + let virtual_network_section = r#" + virtual_network: + enabled: false + server_address: '' + "#; + + #[cfg(not(feature = "virtual-network"))] + let virtual_network_server_section = ""; + #[cfg(feature = "virtual-network")] + let virtual_network_server_section = r#" + virtual_network_server: + enabled: false + tcp: 
+ listen: true + listen_address: 'localhost:5149' + ws: + listen: true + listen_address: 'localhost:5148' + "#; + let mut default_config = String::from( r#"--- daemon: @@ -84,6 +107,8 @@ logging: enabled: false testing: subnode_index: 0 + subnode_count: 1 +%VIRTUAL_NETWORK_SERVER_SECTION% core: capabilities: disable: [] @@ -196,6 +221,7 @@ core: listen_address: ':5150' path: 'ws' # url: '' + %VIRTUAL_NETWORK_SECTION% %PRIVACY_SECTION% "#, ) @@ -227,7 +253,12 @@ core: "%REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%", &Settings::get_default_remote_max_subkey_cache_memory_mb().to_string(), ) - .replace("%PRIVACY_SECTION%", privacy_section); + .replace("%PRIVACY_SECTION%", privacy_section) + .replace("%VIRTUAL_NETWORK_SECTION%", virtual_network_section) + .replace( + "%VIRTUAL_NETWORK_SERVER_SECTION%", + virtual_network_server_section, + ); let dek_password = if let Some(dek_password) = std::env::var_os("DEK_PASSWORD") { dek_password @@ -346,6 +377,11 @@ impl ParsedUrl { self.urlstring = self.url.to_string(); Ok(()) } + pub fn with_offset_port(&self, offset: u16) -> EyreResult { + let mut x = self.clone(); + x.offset_port(offset)?; + Ok(x) + } } impl FromStr for ParsedUrl { @@ -450,6 +486,12 @@ impl NamedSocketAddrs { Ok(true) } + + pub fn with_offset_port(&self, offset: u16) -> EyreResult { + let mut x = self.clone(); + x.offset_port(offset)?; + Ok(x) + } } #[derive(Debug, Deserialize, Serialize)] @@ -679,11 +721,43 @@ pub struct Network { pub protocol: Protocol, #[cfg(feature = "geolocation")] pub privacy: Privacy, + #[cfg(feature = "virtual-network")] + pub virtual_network: VirtualNetwork, +} + +#[cfg(feature = "virtual-network")] +#[derive(Debug, Deserialize, Serialize)] +pub struct VirtualNetwork { + pub enabled: bool, + pub server_address: String, +} + +#[cfg(feature = "virtual-network")] +#[derive(Debug, Deserialize, Serialize)] +pub struct VirtualNetworkServer { + pub enabled: bool, + pub tcp: VirtualNetworkServerTcp, + pub ws: VirtualNetworkServerWs, +} +#[cfg(feature = 
"virtual-network")] +#[derive(Debug, Deserialize, Serialize)] +pub struct VirtualNetworkServerTcp { + pub listen: bool, + pub listen_address: NamedSocketAddrs, +} +#[cfg(feature = "virtual-network")] +#[derive(Debug, Deserialize, Serialize)] +pub struct VirtualNetworkServerWs { + pub listen: bool, + pub listen_address: NamedSocketAddrs, } #[derive(Debug, Deserialize, Serialize)] pub struct Testing { pub subnode_index: u16, + pub subnode_count: u16, + #[cfg(feature = "virtual-network")] + pub virtual_network_server: VirtualNetworkServer, } #[derive(Debug, Deserialize, Serialize)] @@ -800,75 +874,6 @@ impl Settings { self.inner.write() } - pub fn apply_subnode_index(&self) -> EyreResult<()> { - let mut settingsrw = self.write(); - let idx = settingsrw.testing.subnode_index; - if idx == 0 { - return Ok(()); - } - - // bump client api port - settingsrw.client_api.listen_address.offset_port(idx)?; - - // bump protocol ports - settingsrw - .core - .network - .protocol - .udp - .listen_address - .offset_port(idx)?; - settingsrw - .core - .network - .protocol - .tcp - .listen_address - .offset_port(idx)?; - settingsrw - .core - .network - .protocol - .ws - .listen_address - .offset_port(idx)?; - if let Some(url) = &mut settingsrw.core.network.protocol.ws.url { - url.offset_port(idx)?; - } - settingsrw - .core - .network - .protocol - .wss - .listen_address - .offset_port(idx)?; - if let Some(url) = &mut settingsrw.core.network.protocol.wss.url { - url.offset_port(idx)?; - } - // bump application ports - settingsrw - .core - .network - .application - .http - .listen_address - .offset_port(idx)?; - if let Some(url) = &mut settingsrw.core.network.application.http.url { - url.offset_port(idx)?; - } - settingsrw - .core - .network - .application - .https - .listen_address - .offset_port(idx)?; - if let Some(url) = &mut settingsrw.core.network.application.https.url { - url.offset_port(idx)?; - } - Ok(()) - } - /// Determine default config path /// /// In a unix-like environment, 
veilid-server will look for its config file @@ -899,22 +904,40 @@ impl Settings { } /// Determine default flamegraph output path - pub fn get_default_flame_path(subnode_index: u16) -> PathBuf { - std::env::temp_dir().join(if subnode_index == 0 { - "veilid-server.folded".to_owned() + pub fn get_default_flame_path(subnode_index: u16, subnode_count: u16) -> PathBuf { + let name = if subnode_count == 1 { + if subnode_index == 0 { + "veilid-server.folded".to_owned() + } else { + format!("veilid-server-{}.folded", subnode_index) + } } else { - format!("veilid-server-{}.folded", subnode_index) - }) + format!( + "veilid-server-{}-{}.folded", + subnode_index, + subnode_index + subnode_count - 1 + ) + }; + std::env::temp_dir().join(name) } /// Determine default perfetto output path #[cfg(unix)] - pub fn get_default_perfetto_path(subnode_index: u16) -> PathBuf { - std::env::temp_dir().join(if subnode_index == 0 { - "veilid-server.pftrace".to_owned() + pub fn get_default_perfetto_path(subnode_index: u16, subnode_count: u16) -> PathBuf { + let name = if subnode_count == 1 { + if subnode_index == 0 { + "veilid-server.pftrace".to_owned() + } else { + format!("veilid-server-{}.pftrace", subnode_index) + } } else { - format!("veilid-server-{}.pftrace", subnode_index) - }) + format!( + "veilid-server-{}-{}.pftrace", + subnode_index, + subnode_index + subnode_count - 1 + ) + }; + std::env::temp_dir().join(name) } #[cfg_attr(windows, expect(dead_code))] @@ -1062,6 +1085,20 @@ impl Settings { set_config_value!(inner.logging.perfetto.path, value); set_config_value!(inner.logging.console.enabled, value); set_config_value!(inner.testing.subnode_index, value); + #[cfg(feature = "virtual-network")] + { + set_config_value!(inner.testing.virtual_network_server.enabled, value); + set_config_value!(inner.testing.virtual_network_server.tcp.listen, value); + set_config_value!( + inner.testing.virtual_network_server.tcp.listen_address, + value + ); + 
set_config_value!(inner.testing.virtual_network_server.ws.listen, value); + set_config_value!( + inner.testing.virtual_network_server.ws.listen_address, + value + ); + } set_config_value!(inner.core.capabilities.disable, value); set_config_value!(inner.core.protected_store.allow_insecure_fallback, value); set_config_value!( @@ -1184,20 +1221,31 @@ impl Settings { set_config_value!(inner.core.network.protocol.wss.url, value); #[cfg(feature = "geolocation")] set_config_value!(inner.core.network.privacy.country_code_denylist, value); + #[cfg(feature = "virtual-network")] + { + set_config_value!(inner.core.network.virtual_network.enabled, value); + set_config_value!(inner.core.network.virtual_network.server_address, value); + } + Err(eyre!("settings key '{key}' not found")) } - pub fn get_core_config_callback(&self) -> veilid_core::ConfigCallback { + pub fn get_core_config_callback( + &self, + subnode: u16, + subnode_offset: u16, + ) -> veilid_core::ConfigCallback { let inner = self.inner.clone(); Arc::new(move |key: String| { let inner = inner.read(); + let out: ConfigCallbackReturn = match key.as_str() { "program_name" => Ok(Box::new("veilid-server".to_owned())), - "namespace" => Ok(Box::new(if inner.testing.subnode_index == 0 { + "namespace" => Ok(Box::new(if subnode == 0 { "".to_owned() } else { - format!("subnode{}", inner.testing.subnode_index) + format!("subnode{}", subnode) })), "capabilities.disable" => { let mut caps = Vec::::new(); @@ -1409,6 +1457,8 @@ impl Settings { .application .https .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? 
.name .clone(), )), @@ -1422,16 +1472,16 @@ impl Settings { .to_string_lossy() .to_string(), )), - "network.application.https.url" => Ok(Box::new( - inner - .core - .network - .application - .https - .url - .as_ref() - .map(|a| a.urlstring.clone()), - )), + "network.application.https.url" => { + Ok(Box::new(match inner.core.network.application.https.url { + Some(ref a) => Some( + a.with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal) + .map(|x| x.urlstring.clone())?, + ), + None => None, + })) + } "network.application.http.enabled" => { Ok(Box::new(inner.core.network.application.http.enabled)) } @@ -1442,6 +1492,8 @@ impl Settings { .application .http .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? .name .clone(), )), @@ -1455,16 +1507,16 @@ impl Settings { .to_string_lossy() .to_string(), )), - "network.application.http.url" => Ok(Box::new( - inner - .core - .network - .application - .http - .url - .as_ref() - .map(|a| a.urlstring.clone()), - )), + "network.application.http.url" => { + Ok(Box::new(match inner.core.network.application.http.url { + Some(ref a) => Some( + a.with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal) + .map(|x| x.urlstring.clone())?, + ), + None => None, + })) + } "network.protocol.udp.enabled" => { Ok(Box::new(inner.core.network.protocol.udp.enabled)) } @@ -1472,7 +1524,16 @@ impl Settings { Ok(Box::new(inner.core.network.protocol.udp.socket_pool_size)) } "network.protocol.udp.listen_address" => Ok(Box::new( - inner.core.network.protocol.udp.listen_address.name.clone(), + inner + .core + .network + .protocol + .udp + .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? 
+ .name + .clone(), )), "network.protocol.udp.public_address" => Ok(Box::new( inner @@ -1494,7 +1555,16 @@ impl Settings { Ok(Box::new(inner.core.network.protocol.tcp.max_connections)) } "network.protocol.tcp.listen_address" => Ok(Box::new( - inner.core.network.protocol.tcp.listen_address.name.clone(), + inner + .core + .network + .protocol + .tcp + .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? + .name + .clone(), )), "network.protocol.tcp.public_address" => Ok(Box::new( inner @@ -1514,7 +1584,16 @@ impl Settings { Ok(Box::new(inner.core.network.protocol.ws.max_connections)) } "network.protocol.ws.listen_address" => Ok(Box::new( - inner.core.network.protocol.ws.listen_address.name.clone(), + inner + .core + .network + .protocol + .ws + .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? + .name + .clone(), )), "network.protocol.ws.path" => Ok(Box::new( inner @@ -1526,16 +1605,16 @@ impl Settings { .to_string_lossy() .to_string(), )), - "network.protocol.ws.url" => Ok(Box::new( - inner - .core - .network - .protocol - .ws - .url - .as_ref() - .map(|a| a.urlstring.clone()), - )), + "network.protocol.ws.url" => { + Ok(Box::new(match inner.core.network.protocol.ws.url { + Some(ref a) => Some( + a.with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal) + .map(|x| x.urlstring.clone())?, + ), + None => None, + })) + } "network.protocol.wss.connect" => { Ok(Box::new(inner.core.network.protocol.wss.connect)) } @@ -1546,7 +1625,16 @@ impl Settings { Ok(Box::new(inner.core.network.protocol.wss.max_connections)) } "network.protocol.wss.listen_address" => Ok(Box::new( - inner.core.network.protocol.wss.listen_address.name.clone(), + inner + .core + .network + .protocol + .wss + .listen_address + .with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal)? 
+ .name + .clone(), )), "network.protocol.wss.path" => Ok(Box::new( inner @@ -1558,20 +1646,29 @@ impl Settings { .to_string_lossy() .to_string(), )), - "network.protocol.wss.url" => Ok(Box::new( - inner - .core - .network - .protocol - .wss - .url - .as_ref() - .map(|a| a.urlstring.clone()), - )), + "network.protocol.wss.url" => { + Ok(Box::new(match inner.core.network.protocol.wss.url { + Some(ref a) => Some( + a.with_offset_port(subnode_offset) + .map_err(VeilidAPIError::internal) + .map(|x| x.urlstring.clone())?, + ), + None => None, + })) + } #[cfg(feature = "geolocation")] "network.privacy.country_code_denylist" => Ok(Box::new( inner.core.network.privacy.country_code_denylist.clone(), )), + #[cfg(feature = "virtual-network")] + "network.virtual_network.enabled" => { + Ok(Box::new(inner.core.network.virtual_network.enabled)) + } + #[cfg(feature = "virtual-network")] + "network.virtual_network.server_address" => Ok(Box::new( + inner.core.network.virtual_network.server_address.clone(), + )), + _ => Err(VeilidAPIError::generic(format!( "config key '{}' doesn't exist", key @@ -1639,7 +1736,20 @@ mod tests { assert_eq!(s.logging.perfetto.path, ""); assert!(!s.logging.console.enabled); assert_eq!(s.testing.subnode_index, 0); - + #[cfg(feature = "virtual-network")] + { + assert_eq!(s.testing.virtual_network_server.enabled, false); + assert_eq!(s.testing.virtual_network_server.tcp.listen, false); + assert_eq!( + s.testing.virtual_network_server.tcp.listen_address, + "localhost:5149" + ); + assert_eq!(s.testing.virtual_network_server.ws.listen, false); + assert_eq!( + s.testing.virtual_network_server.ws.listen_address, + "localhost:5148" + ); + } assert_eq!( s.core.table_store.directory, Settings::get_default_table_store_directory() @@ -1814,5 +1924,10 @@ mod tests { // #[cfg(feature = "geolocation")] assert_eq!(s.core.network.privacy.country_code_denylist, &[]); + #[cfg(feature = "virtual-network")] + { + assert_eq!(s.core.network.virtual_network.enabled, false); + 
assert_eq!(s.core.network.virtual_network.server_address, ""); + } } } diff --git a/veilid-server/src/tools.rs b/veilid-server/src/tools.rs index 6d5a4778..f1d073a3 100644 --- a/veilid-server/src/tools.rs +++ b/veilid-server/src/tools.rs @@ -7,8 +7,6 @@ pub use tracing::*; cfg_if! { if #[cfg(feature="rt-async-std")] { // pub use async_std::task::JoinHandle; - pub use async_std::net::TcpListener; - pub use async_std::net::TcpStream; pub use async_std::io::BufReader; //pub use async_std::future::TimeoutError; //pub fn spawn_detached + Send + 'static, T: Send + 'static>(f: F) -> JoinHandle { @@ -27,8 +25,6 @@ cfg_if! { } } else if #[cfg(feature="rt-tokio")] { //pub use tokio::task::JoinHandle; - pub use tokio::net::TcpListener; - pub use tokio::net::TcpStream; pub use tokio::io::BufReader; //pub use tokio_util::compat::*; //pub use tokio::time::error::Elapsed as TimeoutError; diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml index 62a08aa9..622ccaec 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -4,18 +4,23 @@ name = "veilid-tools" version = "0.4.1" # --- description = "A collection of baseline tools for Rust development use by Veilid and Veilid-enabled Rust applications" -repository = "https://gitlab.com/veilid/veilid" -authors = ["Veilid Team "] -license = "MPL-2.0" -edition = "2021" -rust-version = "1.81.0" resolver = "2" +repository.workspace = true +authors.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [lib] # staticlib for iOS tests, cydlib for android tests, rlib for everything else crate-type = ["cdylib", "staticlib", "rlib"] path = "src/lib.rs" +[[bin]] +name = "virtual_router" +path = "src/bin/virtual_router/main.rs" +required-features = ["virtual-router-bin"] + [features] default = ["rt-tokio"] rt-async-std = [ @@ -32,13 +37,35 @@ rt-tokio = [ "async_executors/tokio_io", "async_executors/tokio_timer", ] -rt-wasm-bindgen = ["async_executors/bindgen", 
"async_executors/timer"] +rt-wasm-bindgen = [ + "async_executors/bindgen", + "async_executors/timer", + "ws_stream_wasm", +] veilid_tools_android_tests = ["dep:paranoid-android"] veilid_tools_ios_tests = ["dep:tracing", "dep:oslog", "dep:tracing-oslog"] tracing = ["dep:tracing", "dep:tracing-subscriber", "tokio/tracing"] debug-locks = [] +virtual-network = [] +virtual-network-server = [ + "dep:async-tungstenite", + "dep:indent", + "dep:ipnet", + "dep:serde_yaml", + "dep:validator", + "dep:ws_stream_tungstenite", + "dep:rand_chacha", +] +virtual-router-bin = [ + "tracing", + "virtual-network-server", + "dep:clap", + "dep:time", + "dep:bugsalot", +] + [dependencies] tracing = { version = "0.1.40", features = [ "log", @@ -46,15 +73,19 @@ tracing = { version = "0.1.40", features = [ ], optional = true } tracing-subscriber = { version = "0.3.18", features = [ "env-filter", + "time", ], optional = true } log = { version = "0.4.22" } eyre = "0.6.12" static_assertions = "1.1.0" +serde = { version = "1.0.214", features = ["derive", "rc"] } +postcard = { version = "1.0.10", features = ["use-std"] } cfg-if = "1.0.0" thiserror = "1.0.63" futures-util = { version = "0.3.30", default-features = false, features = [ "alloc", ] } +futures_codec = "0.4.1" parking_lot = "0.12.3" async-lock = "3.4.0" once_cell = "1.19.0" @@ -65,33 +96,51 @@ backtrace = "0.3.71" fn_name = "0.1.0" range-set-blaze = "0.1.16" flume = { version = "0.11.0", features = ["async"] } +imbl = { version = "3.0.0", features = ["serde"] } + # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] +async-io = { version = "1.13.0" } async-std = { version = "1.12.0", features = ["unstable"], optional = true } -tokio = { version = "1.38.1", features = ["full"], optional = true } -tokio-util = { version = "0.7.11", features = ["compat"], optional = true } 
-tokio-stream = { version = "0.1.15", features = ["net"], optional = true } +bugsalot = { package = "veilid-bugsalot", version = "0.2.0", optional = true } +time = { version = "0.3.36", features = [ + "local-offset", + "formatting", +], optional = true } +chrono = "0.4.38" +ctrlc = "^3" futures-util = { version = "0.3.30", default-features = false, features = [ "async-await", "sink", "std", "io", ] } -chrono = "0.4.38" - +indent = { version = "0.1.1", optional = true } libc = "0.2.155" nix = { version = "0.27.1", features = ["user"] } +socket2 = { version = "0.5.7", features = ["all"] } +tokio = { version = "1.38.1", features = ["full"], optional = true } +tokio-util = { version = "0.7.11", features = ["compat"], optional = true } +tokio-stream = { version = "0.1.15", features = ["net"], optional = true } + +ws_stream_tungstenite = { version = "0.14.0", optional = true } +async-tungstenite = { version = "0.28.0", optional = true } +clap = { version = "4", features = ["derive"], optional = true } +ipnet = { version = "2", features = ["serde"], optional = true } +serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0", optional = true } +validator = { version = "0.19.0", features = ["derive"], optional = true } +rand_chacha = { version = "0.3.1", optional = true } # Dependencies for WASM builds only -[target.'cfg(target_arch = "wasm32")'.dependencies] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] wasm-bindgen = "0.2.92" js-sys = "0.3.70" wasm-bindgen-futures = "0.4.42" async_executors = { version = "0.7.0", default-features = false } getrandom = { version = "0.2", features = ["js"] } - +ws_stream_wasm = { version = "0.7.4", optional = true } send_wrapper = { version = "0.6.0", features = ["futures"] } # Dependencies for Linux or Android @@ -125,11 +174,11 @@ ifstructs = "0.1.1" ### DEV DEPENDENCIES -[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] +[target.'cfg(not(all(target_arch = "wasm32", target_os = 
"unknown")))'.dev-dependencies] simplelog = { version = "0.12.2", features = ["test"] } serial_test = "2.0.0" -[target.'cfg(target_arch = "wasm32")'.dev-dependencies] +[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies] serial_test = { version = "2.0.0", default-features = false, features = [ "async", ] } diff --git a/veilid-tools/src/bin/virtual_router/README.md b/veilid-tools/src/bin/virtual_router/README.md new file mode 100644 index 00000000..36105d2e --- /dev/null +++ b/veilid-tools/src/bin/virtual_router/README.md @@ -0,0 +1,27 @@ +# Veilid VirtualRouter + +VirtualRouter is a virtual networking router (`RouterServer`) standalone application built specifically for Veilid applications. + +## RouterServer Concepts + +`RouterServer` is a is a deterministic network simulator with an 'infrastructure as code' language for defining whole 'Internets', in terms of a few primitive components: + +* `Allocation` - sets of IPv4 and IPv6 addresses that are used for a common function. For example, '192.168.0.0/16' is an allocation for IPv4 private addresses. +* `Machine` - an instance storing a single Veilid node's state, including its connection/socket tables, IP addresses and interfaces. +* `Network` - an instance storing a single `Network`'s allocations, to which one or more machines may belong. `Network`s also specify how they are connected together, including to the 'Internet', and how translation and gateway routing is performed. +* `Template` - instructions for creating `Machine`s, along with limits on how many `Machine`s per `Network` can be created, and which `Network`s or `Blueprint`s they are connected to. +* `Blueprint` - instructions for creating `Network`s, along with limits on how many `Network`s can be created. +* `Profiles` - a set of `Machine`s and `Template`s to use when attaching a Veilid application to the RouterServer. + +Applications can connect to VirtualRouter over TCP or WebSockets, see the `--help` for more details. 
+ +Applications can also host a `RouterServer` inside their own process for fully encapsulated simulation and testing, connected via a `flume` channel. + +## Example + +To run VirtualRouter: + +``` +cargo run --bin virtual_router --features=virtual-router-bin +``` + diff --git a/veilid-tools/src/bin/virtual_router/main.rs b/veilid-tools/src/bin/virtual_router/main.rs new file mode 100644 index 00000000..29430ada --- /dev/null +++ b/veilid-tools/src/bin/virtual_router/main.rs @@ -0,0 +1,216 @@ +#![cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] + +use cfg_if::*; +use clap::{Args, Parser}; +use parking_lot::*; +use std::path::PathBuf; +use stop_token::StopSource; +use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, Layer, Registry}; +use veilid_tools::*; +use virtual_network::*; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); + +cfg_if! { + if #[cfg(feature="rt-async-std")] { + pub fn block_on, T>(f: F) -> T { + async_std::task::block_on(f) + } + } else if #[cfg(feature="rt-tokio")] { + pub fn block_on, T>(f: F) -> T { + let rt = tokio::runtime::Runtime::new().unwrap(); + let local = tokio::task::LocalSet::new(); + local.block_on(&rt, f) + } + } else { + compile_error!("needs executor implementation"); + } +} + +const DEFAULT_IGNORE_LOG_TARGETS: &[&str] = &["tokio", "runtime"]; + +#[derive(Args, Debug, Clone)] +#[group(multiple = false)] +pub struct Logging { + /// Turn on debug logging on the terminal + #[arg(long, group = "logging")] + debug: bool, + /// Turn on trace logging on the terminal + #[arg(long, group = "logging")] + trace: bool, + /// Ignore log targets + #[arg(long)] + ignore_log_targets: Vec, + /// Enable log targets + #[arg(long)] + enable_log_targets: Vec, +} + +#[derive(Parser, Debug)] +#[command(author, version, about = "Veilid VirtualRouter")] +struct CmdlineArgs { + /// TCP address to listen on + #[arg(short('t'), long)] + tcp_addr: Option, + /// Turn off TCP listener + #[arg(long)] + no_tcp: bool, + /// 
WS address to listen on + #[arg(short('w'), long)] + ws_addr: Option, + /// Turn off WS listener + #[arg(long)] + no_ws: bool, + /// Specify an initial list of configuration files to use + #[arg(short = 'c', long, value_name = "FILE")] + config_file: Vec, + /// Specify to load configuration without a predefined config first + #[arg(long)] + no_predefined_config: bool, + /// Instead of running the virtual router, print the configuration it would use to the console + #[arg(long)] + dump_config: bool, + /// Wait for debugger to attach + #[cfg(debug_assertions)] + #[arg(long)] + wait_for_debug: bool, + + #[command(flatten)] + logging: Logging, +} + +fn setup_tracing(logging: &Logging) -> Result<(), String> { + // Set up subscriber and layers + let subscriber = Registry::default(); + let mut layers = Vec::new(); + + // Get log level + let level = if logging.trace { + tracing::Level::TRACE + } else if logging.debug { + tracing::Level::DEBUG + } else { + tracing::Level::INFO + }; + + // Get ignore log targets + let mut ignore_log_targets: Vec = DEFAULT_IGNORE_LOG_TARGETS + .iter() + .map(|x| x.to_string()) + .collect(); + for x in &logging.ignore_log_targets { + if !ignore_log_targets.contains(x) { + ignore_log_targets.push(x.clone()); + } + } + ignore_log_targets.retain(|x| !logging.enable_log_targets.contains(x)); + + let timer = + time::format_description::parse("[hour]:[minute]:[second]").expect("invalid time format"); + + // Use chrono instead of time crate to get local offset + let offset_in_sec = chrono::Local::now().offset().local_minus_utc(); + let time_offset = + time::UtcOffset::from_whole_seconds(offset_in_sec).expect("invalid utc offset"); + let timer = fmt::time::OffsetTime::new(time_offset, timer); + + let mut filter = tracing_subscriber::EnvFilter::from_default_env().add_directive(level.into()); + for x in ignore_log_targets { + filter = filter.add_directive(format!("{x}=off").parse().unwrap()); + } + + let layer = fmt::Layer::new() + .pretty() + 
.with_timer(timer) + .with_ansi(true) + .with_writer(std::io::stdout) + .with_filter(filter); + + layers.push(layer.boxed()); + + let subscriber = subscriber.with(layers); + subscriber + .try_init() + .map_err(|e| format!("failed to initialize tracing: {e}"))?; + + Ok(()) +} + +fn main() { + if let Err(e) = real_main() { + eprintln!("{}", e); + std::process::exit(1); + } + std::process::exit(0); +} +fn real_main() -> Result<(), String> { + let stop_source = StopSource::new(); + let stop_token = stop_source.token(); + let stop_mutex = Mutex::new(Some(stop_source)); + + ctrlc::set_handler(move || { + println!("Exiting..."); + *(stop_mutex.lock()) = None; + }) + .expect("Error setting Ctrl-C handler"); + + block_on(async { + println!("Veilid VirtualRouter v{}", VERSION); + + let args = CmdlineArgs::parse(); + + #[cfg(debug_assertions)] + if args.wait_for_debug { + use bugsalot::debugger; + debugger::wait_until_attached(None).expect("state() not implemented on this platform"); + } + + setup_tracing(&args.logging)?; + + let initial_config = config::Config::new(&args.config_file, args.no_predefined_config) + .map_err(|e| format!("Error loading config: {}", e))?; + + if args.dump_config { + let cfg_yaml = serde_yaml::to_string(&initial_config) + .map_err(|e| format!("Error serializing config: {}", e))?; + println!("{}", cfg_yaml); + return Ok(()); + } + + let router_server = virtual_network::RouterServer::new(); + + router_server + .execute_config(initial_config) + .map_err(|e| format!("Error executing config: {}", e))?; + + let _ss_tcp = if !args.no_tcp { + Some( + router_server + .listen_tcp(args.tcp_addr) + .await + .map_err(|e| e.to_string())?, + ) + } else { + None + }; + + let _ss_ws = if !args.no_ws { + Some( + router_server + .listen_ws(args.ws_addr) + .await + .map_err(|e| e.to_string())?, + ) + } else { + None + }; + + println!("Running..."); + router_server + .run(stop_token) + .await + .map_err(|e| e.to_string())?; + println!("Done"); + Ok(()) + }) +} diff 
--git a/veilid-tools/src/bump_port.rs b/veilid-tools/src/bump_port.rs deleted file mode 100644 index ebdd2628..00000000 --- a/veilid-tools/src/bump_port.rs +++ /dev/null @@ -1,108 +0,0 @@ -use super::*; - -cfg_if! { - if #[cfg(target_arch = "wasm32")] { - - } else { - use std::net::{TcpListener, UdpSocket}; - } -} - -#[derive(ThisError, Debug, Clone, PartialEq, Eq)] -pub enum BumpPortError { - #[error("Unsupported architecture")] - Unsupported, - #[error("Failure: {0}")] - Failed(String), -} - -pub enum BumpPortType { - UDP, - TCP, -} - -pub fn tcp_port_available(addr: &SocketAddr) -> bool { - cfg_if! { - if #[cfg(target_arch = "wasm32")] { - true - } else { - match TcpListener::bind(addr) { - Ok(_) => true, - Err(_) => false, - } - } - } -} - -pub fn udp_port_available(addr: &SocketAddr) -> bool { - cfg_if! { - if #[cfg(target_arch = "wasm32")] { - true - } else { - match UdpSocket::bind(addr) { - Ok(_) => true, - Err(_) => false, - } - } - } -} - -pub fn bump_port(addr: &mut SocketAddr, bpt: BumpPortType) -> Result { - cfg_if! { - if #[cfg(target_arch = "wasm32")] { - Err(BumpPortError::Unsupported) - } - else - { - let mut bumped = false; - let mut port = addr.port(); - let mut addr_bump = addr.clone(); - loop { - - if match bpt { - BumpPortType::TCP => tcp_port_available(&addr_bump), - BumpPortType::UDP => udp_port_available(&addr_bump), - } { - *addr = addr_bump; - return Ok(bumped); - } - if port == u16::MAX { - break; - } - port += 1; - addr_bump.set_port(port); - bumped = true; - } - - Err(BumpPortError::Failure("no ports remaining".to_owned())) - } - } -} - -pub fn bump_port_string(addr: &mut String, bpt: BumpPortType) -> Result { - cfg_if! { - if #[cfg(target_arch = "wasm32")] { - return Err(BumpPortError::Unsupported); - } - else - { - let savec: Vec = addr - .to_socket_addrs() - .map_err(|x| BumpPortError::Failure(format!("failed to resolve socket address: {}", x)))? 
- .collect(); - - if savec.len() == 0 { - return Err(BumpPortError::Failure("No socket addresses resolved".to_owned())); - } - let mut sa = savec.first().unwrap().clone(); - - if !bump_port(&mut sa, bpt)? { - return Ok(false); - } - - *addr = sa.to_string(); - - Ok(true) - } - } -} diff --git a/veilid-tools/src/deferred_stream_processor.rs b/veilid-tools/src/deferred_stream_processor.rs index 5afdb252..047eab3b 100644 --- a/veilid-tools/src/deferred_stream_processor.rs +++ b/veilid-tools/src/deferred_stream_processor.rs @@ -7,43 +7,56 @@ use stop_token::future::FutureExt as _; use super::*; +#[derive(Debug)] +struct DeferredStreamProcessorInner { + opt_deferred_stream_channel: Option>>, + opt_stopper: Option, + opt_join_handle: Option>, +} + /// Background processor for streams /// Handles streams to completion, passing each item from the stream to a callback #[derive(Debug)] pub struct DeferredStreamProcessor { - pub opt_deferred_stream_channel: Option>>, - pub opt_stopper: Option, - pub opt_join_handle: Option>, + inner: Mutex, } impl DeferredStreamProcessor { /// Create a new DeferredStreamProcessor pub fn new() -> Self { Self { - opt_deferred_stream_channel: None, - opt_stopper: None, - opt_join_handle: None, + inner: Mutex::new(DeferredStreamProcessorInner { + opt_deferred_stream_channel: None, + opt_stopper: None, + opt_join_handle: None, + }), } } /// Initialize the processor before use - pub async fn init(&mut self) { + pub async fn init(&self) { let stopper = StopSource::new(); let stop_token = stopper.token(); - self.opt_stopper = Some(stopper); + + let mut inner = self.inner.lock(); + inner.opt_stopper = Some(stopper); let (dsc_tx, dsc_rx) = flume::unbounded::>(); - self.opt_deferred_stream_channel = Some(dsc_tx); - self.opt_join_handle = Some(spawn( + inner.opt_deferred_stream_channel = Some(dsc_tx); + inner.opt_join_handle = Some(spawn( "deferred stream processor", Self::processor(stop_token, dsc_rx), )); } /// Terminate the processor and ensure all 
streams are closed - pub async fn terminate(&mut self) { - drop(self.opt_deferred_stream_channel.take()); - drop(self.opt_stopper.take()); - if let Some(jh) = self.opt_join_handle.take() { + pub async fn terminate(&self) { + let opt_jh = { + let mut inner = self.inner.lock(); + drop(inner.opt_deferred_stream_channel.take()); + drop(inner.opt_stopper.take()); + inner.opt_join_handle.take() + }; + if let Some(jh) = opt_jh { jh.await; } } @@ -100,15 +113,19 @@ impl DeferredStreamProcessor { /// /// Returns 'true' if the stream was added for processing, and 'false' if the stream could not be added, possibly due to not being initialized. pub fn add + Unpin + Send + 'static>( - &mut self, + &self, mut receiver: S, mut handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, ) -> bool { - let Some(st) = self.opt_stopper.as_ref().map(|s| s.token()) else { - return false; - }; - let Some(dsc_tx) = self.opt_deferred_stream_channel.clone() else { - return false; + let (st, dsc_tx) = { + let inner = self.inner.lock(); + let Some(st) = inner.opt_stopper.as_ref().map(|s| s.token()) else { + return false; + }; + let Some(dsc_tx) = inner.opt_deferred_stream_channel.clone() else { + return false; + }; + (st, dsc_tx) }; let drp = Box::pin(async move { while let Ok(Some(res)) = receiver.next().timeout_at(st.clone()).await { diff --git a/veilid-tools/src/interval.rs b/veilid-tools/src/interval.rs index 2ea5974f..1873bb1e 100644 --- a/veilid-tools/src/interval.rs +++ b/veilid-tools/src/interval.rs @@ -1,7 +1,7 @@ use super::*; cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { pub fn interval(name: &str, freq_ms: u32, callback: F) -> SendPinBoxFuture<()> where diff --git a/veilid-tools/src/lib.rs b/veilid-tools/src/lib.rs index 856d287a..fdabf632 100644 --- a/veilid-tools/src/lib.rs +++ b/veilid-tools/src/lib.rs @@ -24,7 +24,6 @@ #![allow(clippy::comparison_chain, clippy::upper_case_acronyms)] #![deny(unused_must_use)] -// pub mod bump_port; pub mod assembly_buffer; pub mod async_peek_stream; pub mod async_tag_lock; @@ -43,21 +42,25 @@ pub mod ipc; pub mod must_join_handle; pub mod must_join_single_future; pub mod mutable_future; -#[cfg(not(target_arch = "wasm32"))] pub mod network_interfaces; pub mod network_result; pub mod random; pub mod single_shot_eventual; pub mod sleep; +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] +pub mod socket_tools; pub mod spawn; pub mod split_url; pub mod startup_lock; +pub mod static_string_table; pub mod tick_task; pub mod timeout; pub mod timeout_or; pub mod timestamp; pub mod tools; -#[cfg(target_arch = "wasm32")] +#[cfg(feature = "virtual-network")] +pub mod virtual_network; +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] pub mod wasm; pub type PinBox = Pin>; @@ -124,10 +127,14 @@ pub use async_lock::RwLock as AsyncRwLock; #[doc(no_inline)] pub use async_lock::RwLockReadGuard as AsyncRwLockReadGuard; #[doc(no_inline)] +pub use async_lock::RwLockReadGuardArc as AsyncRwLockReadGuardArc; +#[doc(no_inline)] pub use async_lock::RwLockWriteGuard as AsyncRwLockWriteGuard; +#[doc(no_inline)] +pub use async_lock::RwLockWriteGuardArc as AsyncRwLockWriteGuardArc; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { #[doc(no_inline)] pub use async_lock::Mutex as AsyncMutex; #[doc(no_inline)] @@ -148,13 +155,6 @@ cfg_if! 
{ #[doc(no_inline)] pub use async_std::sync::MutexGuardArc as AsyncMutexGuardArc; - // #[doc(no_inline)] - // pub use async_std::sync::RwLock as AsyncRwLock; - // #[doc(no_inline)] - // pub use async_std::sync::RwLockReadGuard as AsyncRwLockReadGuard; - // #[doc(no_inline)] - // pub use async_std::sync::RwLockWriteGuard as AsyncRwLockWriteGuard; - #[doc(no_inline)] pub use async_std::task::JoinHandle as LowLevelJoinHandle; @@ -166,14 +166,6 @@ cfg_if! { #[doc(no_inline)] pub use tokio::sync::OwnedMutexGuard as AsyncMutexGuardArc; - // #[doc(no_inline)] - // pub use tokio::sync::RwLock as AsyncRwLock; - // #[doc(no_inline)] - // pub use tokio::sync::RwLockReadGuard as AsyncRwLockReadGuard; - // #[doc(no_inline)] - // pub use tokio::sync::RwLockWriteGuard as AsyncRwLockWriteGuard; - - #[doc(no_inline)] pub use tokio::task::JoinHandle as LowLevelJoinHandle; } else { @@ -183,7 +175,6 @@ cfg_if! { } } -// pub use bump_port::*; #[doc(inline)] pub use assembly_buffer::*; #[doc(inline)] @@ -221,7 +212,6 @@ pub use must_join_single_future::*; #[doc(inline)] pub use mutable_future::*; #[doc(inline)] -#[cfg(not(target_arch = "wasm32"))] pub use network_interfaces::*; #[doc(inline)] pub use network_result::*; @@ -232,12 +222,17 @@ pub use single_shot_eventual::*; #[doc(inline)] pub use sleep::*; #[doc(inline)] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] +pub use socket_tools::*; +#[doc(inline)] pub use spawn::*; #[doc(inline)] pub use split_url::*; #[doc(inline)] pub use startup_lock::*; #[doc(inline)] +pub use static_string_table::*; +#[doc(inline)] pub use tick_task::*; #[doc(inline)] pub use timeout::*; @@ -247,8 +242,7 @@ pub use timeout_or::*; pub use timestamp::*; #[doc(inline)] pub use tools::*; - -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", target_os = "unknown"))] pub use wasm::*; // Tests must be public for wasm-pack tests diff --git a/veilid-tools/src/must_join_handle.rs b/veilid-tools/src/must_join_handle.rs index 
20128550..0abeb399 100644 --- a/veilid-tools/src/must_join_handle.rs +++ b/veilid-tools/src/must_join_handle.rs @@ -22,7 +22,7 @@ impl MustJoinHandle { self.join_handle = None; } else if #[cfg(feature="rt-tokio")] { self.join_handle = None; - } else if #[cfg(target_arch = "wasm32")] { + } else if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { if let Some(jh) = self.join_handle.take() { jh.detach(); } @@ -48,7 +48,7 @@ impl MustJoinHandle { let _ = jh.await; self.completed = true; } - } else if #[cfg(target_arch = "wasm32")] { + } else if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { drop(self.join_handle.take()); self.completed = true; } else { @@ -94,7 +94,7 @@ impl Future for MustJoinHandle { } } } - } else if #[cfg(target_arch = "wasm32")] { + } else if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { Poll::Ready(t) } else { compile_error!("needs executor implementation"); diff --git a/veilid-tools/src/network_interfaces/mod.rs b/veilid-tools/src/network_interfaces/mod.rs index 2fce46b7..8046006b 100644 --- a/veilid-tools/src/network_interfaces/mod.rs +++ b/veilid-tools/src/network_interfaces/mod.rs @@ -1,6 +1,7 @@ mod tools; use crate::*; +use serde::*; cfg_if::cfg_if! { if #[cfg(any(target_os = "linux", target_os = "android"))] { @@ -18,12 +19,15 @@ cfg_if::cfg_if! { mod openbsd; mod sockaddr_tools; use self::openbsd::PlatformSupportOpenBSD as PlatformSupport; + } else if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { + mod wasm; + use self::wasm::PlatformSupportWasm as PlatformSupport; } else { compile_error!("No network interfaces support for this platform!"); } } -#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone)] +#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Serialize, Deserialize)] pub enum IfAddr { V4(Ifv4Addr), V6(Ifv6Addr), @@ -51,7 +55,7 @@ impl IfAddr { } /// Details about the ipv4 address of an interface on this host. 
-#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone)] +#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Serialize, Deserialize)] pub struct Ifv4Addr { /// The IP address of the interface. pub ip: Ipv4Addr, @@ -62,7 +66,7 @@ pub struct Ifv4Addr { } /// Details about the ipv6 address of an interface on this host. -#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone)] +#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Serialize, Deserialize)] pub struct Ifv6Addr { /// The IP address of the interface. pub ip: Ipv6Addr, @@ -73,7 +77,9 @@ pub struct Ifv6Addr { } /// Some of the flags associated with an interface. -#[derive(Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Copy)] +#[derive( + Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Copy, Serialize, Deserialize, +)] pub struct InterfaceFlags { pub is_loopback: bool, pub is_running: bool, @@ -82,7 +88,9 @@ pub struct InterfaceFlags { } /// Some of the flags associated with an address. 
-#[derive(Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Copy)] +#[derive( + Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Clone, Copy, Serialize, Deserialize, +)] pub struct AddressFlags { // common flags pub is_dynamic: bool, @@ -91,10 +99,10 @@ pub struct AddressFlags { pub is_preferred: bool, } -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub struct InterfaceAddress { - if_addr: IfAddr, - flags: AddressFlags, + pub if_addr: IfAddr, + pub flags: AddressFlags, } use core::cmp::Ordering; @@ -226,7 +234,7 @@ impl InterfaceAddress { // Wired, // Wired is usually free or cheap and high speed // } -#[derive(PartialEq, Eq, Clone)] +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct NetworkInterface { pub name: String, pub flags: InterfaceFlags, diff --git a/veilid-tools/src/network_interfaces/wasm.rs b/veilid-tools/src/network_interfaces/wasm.rs new file mode 100644 index 00000000..c2a3ec43 --- /dev/null +++ b/veilid-tools/src/network_interfaces/wasm.rs @@ -0,0 +1,18 @@ +use super::*; +use std::io; + +pub struct PlatformSupportWasm {} + +impl PlatformSupportWasm { + pub fn new() -> Self { + PlatformSupportWasm {} + } + + pub async fn get_interfaces( + &mut self, + interfaces: &mut BTreeMap, + ) -> io::Result<()> { + interfaces.clear(); + Ok(()) + } +} diff --git a/veilid-tools/src/network_result.rs b/veilid-tools/src/network_result.rs index ee7cd1b7..f2a5c654 100644 --- a/veilid-tools/src/network_result.rs +++ b/veilid-tools/src/network_result.rs @@ -25,7 +25,7 @@ pub trait IoNetworkResultExt { } fn io_error_kind_from_error(e: io::Error) -> io::Result> { - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] if let Some(os_err) = e.raw_os_error() { if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH { return Ok(NetworkResult::NoConnection(e)); @@ -42,7 +42,9 @@ fn io_error_kind_from_error(e: io::Error) -> 
io::Result> { io::ErrorKind::InvalidInput | io::ErrorKind::InvalidData => { Ok(NetworkResult::InvalidMessage(e.to_string())) } - io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), + io::ErrorKind::AddrNotAvailable | io::ErrorKind::AddrInUse => { + Ok(NetworkResult::AlreadyExists(e)) + } _ => Err(e), } } @@ -51,21 +53,6 @@ impl IoNetworkResultExt for io::Result { fn into_network_result(self) -> io::Result> { match self { Ok(v) => Ok(NetworkResult::Value(v)), - // #[cfg(feature = "io_error_more")] - // Err(e) => match e.kind() { - // io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - // io::ErrorKind::UnexpectedEof - // | io::ErrorKind::NotConnected - // | io::ErrorKind::BrokenPipe - // | io::ErrorKind::ConnectionAborted - // | io::ErrorKind::ConnectionRefused - // | io::ErrorKind::ConnectionReset - // | io::ErrorKind::HostUnreachable - // | io::ErrorKind::NetworkUnreachable => Ok(NetworkResult::NoConnection(e)), - // io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - // _ => Err(e), - // }, - // #[cfg(not(feature = "io_error_more"))] Err(e) => io_error_kind_from_error(e), } } @@ -98,18 +85,6 @@ impl FoldedNetworkResultExt for io::Result> { match self { Ok(TimeoutOr::Timeout) => Ok(NetworkResult::Timeout), Ok(TimeoutOr::Value(v)) => Ok(NetworkResult::Value(v)), - // #[cfg(feature = "io_error_more")] - // Err(e) => match e.kind() { - // io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - // io::ErrorKind::ConnectionAborted - // | io::ErrorKind::ConnectionRefused - // | io::ErrorKind::ConnectionReset - // | io::ErrorKind::HostUnreachable - // | io::ErrorKind::NetworkUnreachable => Ok(NetworkResult::NoConnection(e)), - // io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - // _ => Err(e), - // }, - // #[cfg(not(feature = "io_error_more"))] Err(e) => io_error_kind_from_error(e), } } @@ -119,18 +94,6 @@ impl FoldedNetworkResultExt for io::Result> { fn folded(self) -> io::Result> { match self { 
Ok(v) => Ok(v), - // #[cfg(feature = "io_error_more")] - // Err(e) => match e.kind() { - // io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - // io::ErrorKind::ConnectionAborted - // | io::ErrorKind::ConnectionRefused - // | io::ErrorKind::ConnectionReset - // | io::ErrorKind::HostUnreachable - // | io::ErrorKind::NetworkUnreachable => Ok(NetworkResult::NoConnection(e)), - // io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - // _ => Err(e), - // }, - // #[cfg(not(feature = "io_error_more"))] Err(e) => io_error_kind_from_error(e), } } @@ -308,6 +271,42 @@ macro_rules! network_result_try { }; } +#[macro_export] +macro_rules! log_network_result { + (error $text:expr) => {error!( + target: "network_result", + "{}", + $text, + )}; + (error $fmt:literal, $($arg:expr),+) => { + error!(target: "network_result", $fmt, $($arg),+); + }; + (warn $text:expr) => {warn!( + target: "network_result", + "{}", + $text, + )}; + (warn $fmt:literal, $($arg:expr),+) => { + warn!(target:"network_result", $fmt, $($arg),+); + }; + (debug $text:expr) => {debug!( + target: "network_result", + "{}", + $text, + )}; + (debug $fmt:literal, $($arg:expr),+) => { + debug!(target:"network_result", $fmt, $($arg),+); + }; + ($text:expr) => {trace!( + target: "network_result", + "{}", + $text, + )}; + ($fmt:literal, $($arg:expr),+) => { + trace!(target:"network_result", $fmt, $($arg),+); + } +} + #[macro_export] macro_rules! network_result_value_or_log { ($r:expr => $f:expr) => { diff --git a/veilid-tools/src/sleep.rs b/veilid-tools/src/sleep.rs index 31c458c5..5527c24f 100644 --- a/veilid-tools/src/sleep.rs +++ b/veilid-tools/src/sleep.rs @@ -2,7 +2,7 @@ use super::*; use std::time::Duration; cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use async_executors::{Bindgen, Timer}; pub async fn sleep(millis: u32) { diff --git a/veilid-tools/src/socket_tools.rs b/veilid-tools/src/socket_tools.rs new file mode 100644 index 00000000..3e42969f --- /dev/null +++ b/veilid-tools/src/socket_tools.rs @@ -0,0 +1,284 @@ +use super::*; +use async_io::Async; +use std::io; + +cfg_if! { + if #[cfg(feature="rt-async-std")] { + pub use async_std::net::{TcpStream, TcpListener, UdpSocket}; + } else if #[cfg(feature="rt-tokio")] { + pub use tokio::net::{TcpStream, TcpListener, UdpSocket}; + pub use tokio_util::compat::*; + } else { + compile_error!("needs executor implementation"); + } +} + +use socket2::{Domain, Protocol, SockAddr, Socket, Type}; + +////////////////////////////////////////////////////////////////////////////////////////// + +pub fn bind_async_udp_socket(local_address: SocketAddr) -> io::Result> { + let Some(socket) = new_bound_default_socket2_udp(local_address)? else { + return Ok(None); + }; + + // Make an async UdpSocket from the socket2 socket + let std_udp_socket: std::net::UdpSocket = socket.into(); + cfg_if! { + if #[cfg(feature="rt-async-std")] { + let udp_socket = UdpSocket::from(std_udp_socket); + } else if #[cfg(feature="rt-tokio")] { + std_udp_socket.set_nonblocking(true)?; + let udp_socket = UdpSocket::from_std(std_udp_socket)?; + } else { + compile_error!("needs executor implementation"); + } + } + Ok(Some(udp_socket)) +} + +pub fn bind_async_tcp_listener(local_address: SocketAddr) -> io::Result> { + // Create a default non-shared socket and bind it + let Some(socket) = new_bound_default_socket2_tcp(local_address)? else { + return Ok(None); + }; + + // Drop the socket so we can make another shared socket in its place + drop(socket); + + // Create a shared socket and bind it now we have determined the port is free + let Some(socket) = new_bound_shared_socket2_tcp(local_address)? 
else { + return Ok(None); + }; + + // Listen on the socket + if socket.listen(128).is_err() { + return Ok(None); + } + + // Make an async tcplistener from the socket2 socket + let std_listener: std::net::TcpListener = socket.into(); + cfg_if! { + if #[cfg(feature="rt-async-std")] { + let listener = TcpListener::from(std_listener); + } else if #[cfg(feature="rt-tokio")] { + std_listener.set_nonblocking(true)?; + let listener = TcpListener::from_std(std_listener)?; + } else { + compile_error!("needs executor implementation"); + } + } + Ok(Some(listener)) +} + +pub async fn connect_async_tcp_stream( + local_address: Option, + remote_address: SocketAddr, + timeout_ms: u32, +) -> io::Result> { + let socket = match local_address { + Some(a) => { + new_bound_shared_socket2_tcp(a)?.ok_or(io::Error::from(io::ErrorKind::AddrInUse))? + } + None => new_default_socket2_tcp(domain_for_address(remote_address))?, + }; + + // Non-blocking connect to remote address + nonblocking_connect(socket, remote_address, timeout_ms).await +} + +pub fn set_tcp_stream_linger( + tcp_stream: &TcpStream, + linger: Option, +) -> io::Result<()> { + #[cfg(all(feature = "rt-async-std", unix))] + { + // async-std does not directly support linger on TcpStream yet + use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd}; + unsafe { + let s = socket2::Socket::from_raw_fd(tcp_stream.as_raw_fd()); + let res = s.set_linger(linger); + s.into_raw_fd(); + res + } + } + #[cfg(all(feature = "rt-async-std", windows))] + { + // async-std does not directly support linger on TcpStream yet + use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket}; + unsafe { + let s = socket2::Socket::from_raw_socket(tcp_stream.as_raw_socket()); + let res = s.set_linger(linger); + s.into_raw_socket(); + res + } + } + #[cfg(not(feature = "rt-async-std"))] + tcp_stream.set_linger(linger) +} + +cfg_if! 
{ + if #[cfg(feature="rt-async-std")] { + pub type ReadHalf = futures_util::io::ReadHalf; + pub type WriteHalf = futures_util::io::WriteHalf; + } else if #[cfg(feature="rt-tokio")] { + pub type ReadHalf = tokio::net::tcp::OwnedReadHalf; + pub type WriteHalf = tokio::net::tcp::OwnedWriteHalf; + } else { + compile_error!("needs executor implementation"); + } +} + +pub fn async_tcp_listener_incoming( + tcp_listener: TcpListener, +) -> Pin> + Send>> { + cfg_if! { + if #[cfg(feature="rt-async-std")] { + Box::pin(tcp_listener.into_incoming()) + } else if #[cfg(feature="rt-tokio")] { + Box::pin(tokio_stream::wrappers::TcpListenerStream::new(tcp_listener)) + } else { + compile_error!("needs executor implementation"); + } + } +} + +pub fn split_async_tcp_stream(tcp_stream: TcpStream) -> (ReadHalf, WriteHalf) { + cfg_if! { + if #[cfg(feature="rt-async-std")] { + use futures_util::AsyncReadExt; + tcp_stream.split() + } else if #[cfg(feature="rt-tokio")] { + tcp_stream.into_split() + } else { + compile_error!("needs executor implementation"); + } + } +} + +////////////////////////////////////////////////////////////////////////////////////////// + +fn new_default_udp_socket(domain: core::ffi::c_int) -> io::Result { + let domain = Domain::from(domain); + let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?; + if domain == Domain::IPV6 { + socket.set_only_v6(true)?; + } + + Ok(socket) +} + +fn new_bound_default_socket2_udp(local_address: SocketAddr) -> io::Result> { + let domain = domain_for_address(local_address); + let socket = new_default_udp_socket(domain)?; + let socket2_addr = SockAddr::from(local_address); + + if socket.bind(&socket2_addr).is_err() { + return Ok(None); + } + + Ok(Some(socket)) +} + +pub fn new_default_socket2_tcp(domain: core::ffi::c_int) -> io::Result { + let domain = Domain::from(domain); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?; + socket.set_linger(Some(core::time::Duration::from_secs(0)))?; + 
socket.set_nodelay(true)?; + if domain == Domain::IPV6 { + socket.set_only_v6(true)?; + } + Ok(socket) +} + +fn new_shared_socket2_tcp(domain: core::ffi::c_int) -> io::Result { + let domain = Domain::from(domain); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?; + socket.set_linger(Some(core::time::Duration::from_secs(0)))?; + socket.set_nodelay(true)?; + if domain == Domain::IPV6 { + socket.set_only_v6(true)?; + } + socket.set_reuse_address(true)?; + cfg_if! { + if #[cfg(unix)] { + socket.set_reuse_port(true)?; + } + } + + Ok(socket) +} + +fn new_bound_default_socket2_tcp(local_address: SocketAddr) -> io::Result> { + let domain = domain_for_address(local_address); + let socket = new_default_socket2_tcp(domain)?; + let socket2_addr = SockAddr::from(local_address); + if socket.bind(&socket2_addr).is_err() { + return Ok(None); + } + + Ok(Some(socket)) +} + +fn new_bound_shared_socket2_tcp(local_address: SocketAddr) -> io::Result> { + // Create the reuseaddr/reuseport socket now that we've asserted the port is free + let domain = domain_for_address(local_address); + let socket = new_shared_socket2_tcp(domain)?; + let socket2_addr = SockAddr::from(local_address); + if socket.bind(&socket2_addr).is_err() { + return Ok(None); + } + + Ok(Some(socket)) +} + +// Non-blocking connect is tricky when you want to start with a prepared socket +// Errors should not be logged as they are valid conditions for this function +async fn nonblocking_connect( + socket: Socket, + addr: SocketAddr, + timeout_ms: u32, +) -> io::Result> { + // Set for non blocking connect + socket.set_nonblocking(true)?; + + // Make socket2 SockAddr + let socket2_addr = socket2::SockAddr::from(addr); + + // Connect to the remote address + match socket.connect(&socket2_addr) { + Ok(()) => Ok(()), + #[cfg(unix)] + Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => Ok(()), + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => Ok(()), + Err(e) => Err(e), + }?; + let 
async_stream = Async::new(std::net::TcpStream::from(socket))?; + + // The stream becomes writable when connected + timeout_or_try!(timeout(timeout_ms, async_stream.writable()) + .await + .into_timeout_or() + .into_result()?); + + // Check low level error + let async_stream = match async_stream.get_ref().take_error()? { + None => Ok(async_stream), + Some(err) => Err(err), + }?; + + // Convert back to inner and then return async version + cfg_if! { + if #[cfg(feature="rt-async-std")] { + Ok(TimeoutOr::value(TcpStream::from(async_stream.into_inner()?))) + } else if #[cfg(feature="rt-tokio")] { + Ok(TimeoutOr::value(TcpStream::from_std(async_stream.into_inner()?)?)) + } else { + compile_error!("needs executor implementation"); + } + } +} + +pub fn domain_for_address(address: SocketAddr) -> core::ffi::c_int { + socket2::Domain::for_address(address).into() +} diff --git a/veilid-tools/src/spawn.rs b/veilid-tools/src/spawn.rs index 5cb9fdfb..7105d0b4 100644 --- a/veilid-tools/src/spawn.rs +++ b/veilid-tools/src/spawn.rs @@ -1,7 +1,7 @@ use super::*; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use async_executors::{Bindgen, LocalSpawnHandleExt, SpawnHandleExt}; pub fn spawn(_name: &str, future: impl Future + Send + 'static) -> MustJoinHandle diff --git a/veilid-tools/src/startup_lock.rs b/veilid-tools/src/startup_lock.rs index dccd37ae..086b2631 100644 --- a/veilid-tools/src/startup_lock.rs +++ b/veilid-tools/src/startup_lock.rs @@ -47,6 +47,23 @@ impl<'a> Drop for StartupLockEnterGuard<'a> { } } +/// RAII-style lock for entry operations on a started-up region of code. 
+#[derive(Debug)] +pub struct StartupLockEnterGuardArc { + _guard: AsyncRwLockReadGuardArc, + #[cfg(feature = "debug-locks")] + id: usize, + #[cfg(feature = "debug-locks")] + active_guards: Arc>>, +} + +#[cfg(feature = "debug-locks")] +impl Drop for StartupLockEnterGuardArc { + fn drop(&mut self) { + self.active_guards.lock().remove(&self.id); + } +} + #[cfg(feature = "debug-locks")] static GUARD_ID: AtomicUsize = AtomicUsize::new(0); @@ -59,7 +76,7 @@ static GUARD_ID: AtomicUsize = AtomicUsize::new(0); /// asynchronous shutdown to wait for operations to finish before proceeding. #[derive(Debug)] pub struct StartupLock { - startup_state: AsyncRwLock, + startup_state: Arc>, stop_source: Mutex>, #[cfg(feature = "debug-locks")] active_guards: Arc>>, @@ -68,7 +85,7 @@ pub struct StartupLock { impl StartupLock { pub fn new() -> Self { Self { - startup_state: AsyncRwLock::new(false), + startup_state: Arc::new(AsyncRwLock::new(false)), stop_source: Mutex::new(None), #[cfg(feature = "debug-locks")] active_guards: Arc::new(Mutex::new(HashMap::new())), @@ -168,6 +185,31 @@ impl StartupLock { Ok(out) } + + /// Enter an operation in a started-up module, using an owned lock. + /// If this module has not yet started up or is in the process of startup or shutdown + /// this will fail. 
+ pub fn enter_arc(&self) -> Result { + let guard = + asyncrwlock_try_read_arc!(self.startup_state).ok_or(StartupLockNotStartedError)?; + if !*guard { + return Err(StartupLockNotStartedError); + } + let out = StartupLockEnterGuardArc { + _guard: guard, + #[cfg(feature = "debug-locks")] + id: GUARD_ID.fetch_add(1, Ordering::AcqRel), + #[cfg(feature = "debug-locks")] + active_guards: self.active_guards.clone(), + }; + + #[cfg(feature = "debug-locks")] + self.active_guards + .lock() + .insert(out.id, backtrace::Backtrace::new()); + + Ok(out) + } } impl Default for StartupLock { diff --git a/veilid-tools/src/static_string_table.rs b/veilid-tools/src/static_string_table.rs new file mode 100644 index 00000000..16a29ff4 --- /dev/null +++ b/veilid-tools/src/static_string_table.rs @@ -0,0 +1,55 @@ +use super::*; + +static STRING_TABLE: std::sync::LazyLock>> = + std::sync::LazyLock::new(|| Mutex::new(BTreeSet::new())); + +static STRING_TRANSFORM_TABLE: std::sync::LazyLock>> = + std::sync::LazyLock::new(|| Mutex::new(HashMap::new())); + +pub trait ToStaticStr { + fn to_static_str(&self) -> &'static str; +} + +impl> ToStaticStr for T { + fn to_static_str(&self) -> &'static str { + let s = self.as_ref(); + let mut string_table = STRING_TABLE.lock(); + if let Some(v) = string_table.get(s) { + return v; + } + let ss = Box::leak(s.to_owned().into_boxed_str()); + string_table.insert(ss); + ss + } +} + +pub trait StaticStrTransform { + fn static_transform &'static str>( + self, + transform: F, + ) -> &'static str; +} + +impl StaticStrTransform for &'static str { + fn static_transform &'static str>( + self, + transform: F, + ) -> &'static str { + // multiple keys can point to the same data, but it must be bounded due to static lifetime + // a pointer to static memory plus its length must always be the same immutable slice + // this is maybe slightly faster for use in log string transformation where speed is essential at scale + // otherwise we would have used a hash here. 
+ // TODO: if performance does not suffer, consider switching to a hash at a later point, as this could cause + // the STRING_TRANSFORM_TABLE to be bigger than necessary, depending on unknowns in rustc about 'static str deduplication. + + let key = (self.as_ptr() as usize, self.len()); + + let mut transform_table = STRING_TRANSFORM_TABLE.lock(); + if let Some(v) = transform_table.get(&key) { + return v; + } + let out = transform(self); + transform_table.insert(key, out); + out + } +} diff --git a/veilid-tools/src/tests/android/veilid_tools_android_tests/app/build.gradle b/veilid-tools/src/tests/android/veilid_tools_android_tests/app/build.gradle index 8a2c040c..833ae58a 100644 --- a/veilid-tools/src/tests/android/veilid_tools_android_tests/app/build.gradle +++ b/veilid-tools/src/tests/android/veilid_tools_android_tests/app/build.gradle @@ -35,10 +35,10 @@ android { } } compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } - ndkVersion '26.3.11579264' + ndkVersion '27.0.12077973' // Required to copy libc++_shared.so externalNativeBuild { diff --git a/veilid-tools/src/tests/android/veilid_tools_android_tests/build.gradle b/veilid-tools/src/tests/android/veilid_tools_android_tests/build.gradle index e49af7c9..dbfc9509 100644 --- a/veilid-tools/src/tests/android/veilid_tools_android_tests/build.gradle +++ b/veilid-tools/src/tests/android/veilid_tools_android_tests/build.gradle @@ -13,7 +13,7 @@ buildscript { } plugins { - id "org.mozilla.rust-android-gradle.rust-android" version "0.9.3" + id "org.mozilla.rust-android-gradle.rust-android" version "0.9.6" } allprojects { diff --git a/veilid-tools/src/tests/android/veilid_tools_android_tests/gradle/wrapper/gradle-wrapper.properties b/veilid-tools/src/tests/android/veilid_tools_android_tests/gradle/wrapper/gradle-wrapper.properties index 0f25c15d..77dd618d 100644 --- 
a/veilid-tools/src/tests/android/veilid_tools_android_tests/gradle/wrapper/gradle-wrapper.properties +++ b/veilid-tools/src/tests/android/veilid_tools_android_tests/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ #Mon Nov 28 22:38:53 EST 2022 distributionBase=GRADLE_USER_HOME -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip distributionPath=wrapper/dists zipStorePath=wrapper/dists zipStoreBase=GRADLE_USER_HOME diff --git a/veilid-tools/src/tests/common/test_host_interface.rs b/veilid-tools/src/tests/common/test_host_interface.rs index 014af920..aa9d1dba 100644 --- a/veilid-tools/src/tests/common/test_host_interface.rs +++ b/veilid-tools/src/tests/common/test_host_interface.rs @@ -1,7 +1,7 @@ use crate::*; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use js_sys::*; } else { use std::time::{Duration, SystemTime}; @@ -293,7 +293,7 @@ pub async fn test_timeout() { pub async fn test_sleep() { info!("testing sleep"); cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let t1 = Date::now(); sleep(1000).await; @@ -548,7 +548,7 @@ pub async fn test_all() { test_get_random_u64().await; test_get_random_u32().await; test_sleep().await; - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] test_must_join_single_future().await; test_eventual().await; test_eventual_value().await; diff --git a/veilid-tools/src/tests/mod.rs b/veilid-tools/src/tests/mod.rs index e0b727a3..4c6244ea 100644 --- a/veilid-tools/src/tests/mod.rs +++ b/veilid-tools/src/tests/mod.rs @@ -3,7 +3,7 @@ mod android; pub mod common; #[cfg(all(target_os = "ios", feature = "veilid_tools_ios_tests"))] mod ios; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod native; #[allow(unused_imports)] diff --git a/veilid-tools/src/tests/native/mod.rs b/veilid-tools/src/tests/native/mod.rs index 28452a85..c45a67e4 100644 --- a/veilid-tools/src/tests/native/mod.rs +++ b/veilid-tools/src/tests/native/mod.rs @@ -1,5 +1,5 @@ //! Test suite for Native -#![cfg(not(target_arch = "wasm32"))] +#![cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] mod test_assembly_buffer; mod test_async_peek_stream; diff --git a/veilid-tools/src/tests/native/test_async_peek_stream.rs b/veilid-tools/src/tests/native/test_async_peek_stream.rs index 1fde13f5..64028ccf 100644 --- a/veilid-tools/src/tests/native/test_async_peek_stream.rs +++ b/veilid-tools/src/tests/native/test_async_peek_stream.rs @@ -8,7 +8,6 @@ cfg_if! 
{ } else if #[cfg(feature="rt-tokio")] { use tokio::net::{TcpListener, TcpStream}; use tokio::time::sleep; - use tokio_util::compat::*; } } diff --git a/veilid-tools/src/tests/native/test_network_interfaces.rs b/veilid-tools/src/tests/native/test_network_interfaces.rs index 4ba1844b..7cdc0ccc 100644 --- a/veilid-tools/src/tests/native/test_network_interfaces.rs +++ b/veilid-tools/src/tests/native/test_network_interfaces.rs @@ -1,7 +1,7 @@ use crate::*; cfg_if! { - if #[cfg(not(target_arch = "wasm32"))] { + if #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] { use network_interfaces::NetworkInterfaces; pub async fn test_network_interfaces() { @@ -24,6 +24,6 @@ cfg_if! { } pub async fn test_all() { - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] test_network_interfaces().await; } diff --git a/veilid-tools/src/timeout.rs b/veilid-tools/src/timeout.rs index 514bb727..89d35fb5 100644 --- a/veilid-tools/src/timeout.rs +++ b/veilid-tools/src/timeout.rs @@ -1,7 +1,7 @@ use super::*; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use futures_util::future::{select, Either}; pub async fn timeout(dur_ms: u32, f: F) -> Result diff --git a/veilid-tools/src/timestamp.rs b/veilid-tools/src/timestamp.rs index 61d978d5..0913a1db 100644 --- a/veilid-tools/src/timestamp.rs +++ b/veilid-tools/src/timestamp.rs @@ -1,7 +1,7 @@ use super::*; cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { use js_sys::Date; pub fn get_timestamp() -> u64 { diff --git a/veilid-tools/src/tools.rs b/veilid-tools/src/tools.rs index 492782b4..ae1dff04 100644 --- a/veilid-tools/src/tools.rs +++ b/veilid-tools/src/tools.rs @@ -104,6 +104,19 @@ macro_rules! asyncrwlock_try_write { }; } +#[macro_export] +macro_rules! 
asyncrwlock_try_read_arc { + ($x:expr) => { + $x.try_read_arc() + }; +} +#[macro_export] +macro_rules! asyncrwlock_try_write_arc { + ($x:expr) => { + $x.try_write_arc() + }; +} + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// pub fn system_boxed<'a, Out>( @@ -115,7 +128,7 @@ pub fn system_boxed<'a, Out>( ////////////////////////////////////////////////////////////////////////////////////////////////////////////// cfg_if! { - if #[cfg(not(target_arch = "wasm32"))] { + if #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] { pub fn get_concurrency() -> u32 { std::thread::available_parallelism() .map(|x| x.get()) @@ -259,7 +272,7 @@ pub fn compatible_unspecified_socket_addr(socket_addr: &SocketAddr) -> SocketAdd } cfg_if! { - if #[cfg(not(target_arch = "wasm32"))] { + if #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] { use std::net::UdpSocket; static IPV6_IS_SUPPORTED: Mutex> = Mutex::new(None); @@ -308,7 +321,7 @@ pub fn listen_address_to_socket_addrs(listen_address: &str) -> Result bool { cfg_if! { if #[cfg(debug_assertions)] { cfg_if! 
{ - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { let rbenv = get_wasm_global_string_value("RUST_BACKTRACE").unwrap_or_default(); } else @@ -496,6 +509,18 @@ pub fn is_debug_backtrace_enabled() -> bool { } } +#[track_caller] +pub fn debug_duration, T: FnOnce() -> F>(f: T) -> impl Future { + let location = std::panic::Location::caller(); + async move { + let t1 = get_timestamp(); + let out = f().await; + let t2 = get_timestamp(); + debug!("duration@{}: {}", location, display_duration(t2 - t1)); + out + } +} + pub fn type_name_of_val(_val: &T) -> &'static str { std::any::type_name::() } diff --git a/veilid-tools/src/virtual_network/commands.rs b/veilid-tools/src/virtual_network/commands.rs new file mode 100644 index 00000000..a318c813 --- /dev/null +++ b/veilid-tools/src/virtual_network/commands.rs @@ -0,0 +1,227 @@ +use super::*; +use serde::*; +use std::io; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +pub struct MessageId(pub u64); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +pub struct SocketId(pub u64); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[repr(transparent)] +pub struct GatewayId(pub u64); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum VirtualAddressType { + IPV6, + IPV4, +} + +impl fmt::Display for VirtualAddressType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + VirtualAddressType::IPV6 => write!(f, "IPV6"), + VirtualAddressType::IPV4 => write!(f, "IPV4"), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum VirtualProtocolType { + UDP, + TCP, +} + +impl fmt::Display for VirtualProtocolType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
+ match self { + VirtualProtocolType::UDP => write!(f, "UDP"), + VirtualProtocolType::TCP => write!(f, "TCP"), + } + } +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ServerProcessorRequest { + AllocateMachine { + profile: String, + }, + ReleaseMachine { + machine_id: MachineId, + }, + GetInterfaces { + machine_id: MachineId, + }, + TcpConnect { + machine_id: MachineId, + local_address: Option, + remote_address: SocketAddr, + timeout_ms: u32, + options: VirtualTcpOptions, + }, + TcpBind { + machine_id: MachineId, + local_address: Option, + options: VirtualTcpOptions, + }, + TcpAccept { + machine_id: MachineId, + listen_socket_id: SocketId, + }, + TcpShutdown { + machine_id: MachineId, + socket_id: SocketId, + }, + UdpBind { + machine_id: MachineId, + local_address: Option, + options: VirtualUdpOptions, + }, + Send { + machine_id: MachineId, + socket_id: SocketId, + data: Vec, + }, + SendTo { + machine_id: MachineId, + socket_id: SocketId, + remote_address: SocketAddr, + data: Vec, + }, + Recv { + machine_id: MachineId, + socket_id: SocketId, + len: u32, + }, + RecvFrom { + machine_id: MachineId, + socket_id: SocketId, + len: u32, + }, + GetRoutedLocalAddress { + machine_id: MachineId, + address_type: VirtualAddressType, + }, + FindGateway { + machine_id: MachineId, + }, + GetExternalAddress { + gateway_id: GatewayId, + }, + AddPort { + gateway_id: GatewayId, + protocol: VirtualProtocolType, + external_port: Option, + local_address: SocketAddr, + lease_duration_ms: u32, + description: String, + }, + RemovePort { + gateway_id: GatewayId, + protocol: VirtualProtocolType, + external_port: u16, + }, + TXTQuery { + name: String, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct ServerProcessorMessage { + pub message_id: MessageId, + pub request: ServerProcessorRequest, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ServerProcessorCommand { + Message(ServerProcessorMessage), 
+ CloseSocket { + machine_id: MachineId, + socket_id: SocketId, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ServerProcessorReplyValue { + AllocateMachine { + machine_id: MachineId, + }, + ReleaseMachine, + GetInterfaces { + interfaces: BTreeMap, + }, + TcpConnect { + socket_id: SocketId, + local_address: SocketAddr, + }, + TcpBind { + socket_id: SocketId, + local_address: SocketAddr, + }, + TcpAccept { + socket_id: SocketId, + address: SocketAddr, + }, + TcpShutdown, + UdpBind { + socket_id: SocketId, + local_address: SocketAddr, + }, + Send { + len: u32, + }, + SendTo { + len: u32, + }, + Recv { + data: Vec, + }, + RecvFrom { + remote_address: SocketAddr, + data: Vec, + }, + GetRoutedLocalAddress { + address: IpAddr, + }, + FindGateway { + opt_gateway_id: Option, + }, + GetExternalAddress { + address: IpAddr, + }, + AddPort { + external_port: u16, + }, + RemovePort, + TXTQuery { + result: Vec, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ServerProcessorReplyStatus { + Value(ServerProcessorReplyValue), + InvalidMachineId, + InvalidSocketId, + MissingProfile, + ProfileComplete, + IoError(#[serde(with = "serde_io_error::SerdeIoErrorKindDef")] io::ErrorKind), +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct ServerProcessorReply { + pub message_id: MessageId, + pub status: ServerProcessorReplyStatus, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ServerProcessorEvent { + Reply(ServerProcessorReply), + // DeadSocket { + // machine_id: MachineId, + // socket_id: SocketId, + // }, +} diff --git a/veilid-tools/src/virtual_network/machine.rs b/veilid-tools/src/virtual_network/machine.rs new file mode 100644 index 00000000..d8e727e4 --- /dev/null +++ b/veilid-tools/src/virtual_network/machine.rs @@ -0,0 +1,23 @@ +use super::*; + +pub type MachineId = u64; + +#[derive(Debug, Clone)] +pub struct Machine { + pub router_client: 
RouterClient, + pub id: MachineId, +} + +pub fn set_default_machine(machine: Machine) { + *DEFAULT_MACHINE.lock() = Some(machine); +} + +pub fn take_default_machine() -> Option { + DEFAULT_MACHINE.lock().take() +} + +pub fn default_machine() -> Option { + (*DEFAULT_MACHINE.lock()).clone() +} + +static DEFAULT_MACHINE: Mutex> = Mutex::new(None); diff --git a/veilid-tools/src/virtual_network/mod.rs b/veilid-tools/src/virtual_network/mod.rs new file mode 100644 index 00000000..38e46d70 --- /dev/null +++ b/veilid-tools/src/virtual_network/mod.rs @@ -0,0 +1,75 @@ +//! # Virtual Network +//! +//! ## Networking abstraction layer +//! +//! Support for mocking and virtualizing network connections, as well as passing through to supported +//! networking functionality. +//! +//! The following structs are available that allow connecting to a centralized virtual +//! router to emulate a large scale network. +//! +//! * RouterClient +//! * RouterServer +//! * Machine +//! +//! Additional traits are is implemented for all shimmed APIs that have static methods +//! like `new()`, `default()`, `connect()` and `bind()` to allow optional namespacing +//! such that the structs they produce are new network router clients with their own +//! distinct IP addresses, network segments, and network characteristics as allocated +//! by the [RouterServer]. +//! +//! A singleton RouterClient can be registered with this module that is used by default unless the +//! `*_with_machine` API are used to override it with another Machine instance. +//! +//! ## Available APIs +//! +//! [VirtualTcpStream] +//! [VirtualUdpSocket] +//! [VirtualTcpListener] +//! [VirtualTcpListenerStream] +//! [VirtualGateway] +//! [VirtualWsMeta] +//! [VirtualWsStream] +//! +//! Traits are implemented for [futures_util::AsyncRead] and [futures_util::AsyncWrite] +//! Conversion traits are available for use with Tokio +//! +//! ## Other modules leveraging this module +//! +//! 
* `veilid-core`'s network `native` and `wasm` modules +//! * This crate's `network_interfaces` module +//! * This crate's `dns_lookup` module + +mod commands; +mod machine; +mod router_client; +mod router_op_table; +#[cfg(all( + feature = "virtual-network-server", + not(all(target_arch = "wasm32", target_os = "unknown")) +))] +mod router_server; +mod serde_io_error; +mod virtual_gateway; +mod virtual_network_error; +mod virtual_tcp_listener; +mod virtual_tcp_listener_stream; +mod virtual_tcp_stream; +mod virtual_udp_socket; + +use super::*; +use commands::*; + +pub use machine::*; +pub use router_client::*; +#[cfg(all( + feature = "virtual-network-server", + not(all(target_arch = "wasm32", target_os = "unknown")) +))] +pub use router_server::*; +pub use virtual_gateway::*; +pub use virtual_network_error::*; +pub use virtual_tcp_listener::*; +pub use virtual_tcp_listener_stream::*; +pub use virtual_tcp_stream::*; +pub use virtual_udp_socket::*; diff --git a/veilid-tools/src/virtual_network/router_client.rs b/veilid-tools/src/virtual_network/router_client.rs new file mode 100644 index 00000000..71a4bc8a --- /dev/null +++ b/veilid-tools/src/virtual_network/router_client.rs @@ -0,0 +1,664 @@ +use super::*; +use core::sync::atomic::AtomicU64; +use futures_codec::{Bytes, BytesCodec, FramedRead, FramedWrite}; +use futures_util::{ + stream::FuturesUnordered, AsyncReadExt, AsyncWriteExt, StreamExt, TryStreamExt, +}; +use postcard::{from_bytes, to_stdvec}; +use router_op_table::*; +use std::io; +use stop_token::future::FutureExt as _; + +struct RouterClientInner { + jh_handler: Option>, + stop_source: Option, +} + +impl fmt::Debug for RouterClientInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouterClientInner") + .field("jh_handler", &self.jh_handler) + .field("stop_source", &self.stop_source) + .finish() + } +} + +struct RouterClientUnlockedInner { + sender: flume::Sender, + next_message_id: AtomicU64, + router_op_waiter: 
RouterOpWaiter, +} + +impl fmt::Debug for RouterClientUnlockedInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouterClientUnlockedInner") + .field("sender", &self.sender) + .field("next_message_id", &self.next_message_id) + .field("router_op_waiter", &self.router_op_waiter) + .finish() + } +} + +#[derive(Debug, Clone)] +pub struct RouterClient { + unlocked_inner: Arc, + inner: Arc>, +} + +impl RouterClient { + ////////////////////////////////////////////////////////////////////////// + // Public interface + + #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] + pub async fn router_connect_tcp(host: H) -> io::Result { + let addrs = host.to_socket_addrs()?.collect::>(); + + // Connect to RouterServer + let ts_reader; + let ts_writer; + cfg_if! { + if #[cfg(feature="rt-tokio")] { + let ts = ::tokio::net::TcpStream::connect(addrs.as_slice()).await?; + let (reader, writer) = ts.into_split(); + use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt}; + ts_reader = reader.compat(); + ts_writer = writer.compat_write(); + } else if #[cfg(feature="rt-async-std")] { + use futures_util::io::AsyncReadExt; + let ts = ::async_std::net::TcpStream::connect(addrs.as_slice()).await?; + (ts_reader, ts_writer) = ts.split(); + } else { + compile_error!("must choose an executor"); + } + } + + // Create channels + let (client_sender, server_receiver) = flume::unbounded::(); + + // Create stopper + let stop_source = StopSource::new(); + + // Create router operation waiter + let router_op_waiter = RouterOpWaiter::new(); + + // Spawn a client connection handler + let jh_handler = spawn( + "RouterClient server processor", + Self::run_server_processor( + ts_reader, + ts_writer, + server_receiver, + router_op_waiter.clone(), + stop_source.token(), + ), + ); + + Ok(Self::new( + client_sender, + router_op_waiter, + jh_handler, + stop_source, + )) + } + + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] + pub async 
fn router_connect_ws>(request: R) -> io::Result { + let request = request.as_ref(); + + // Connect to RouterServer + let wsio_reader; + let wsio_writer; + cfg_if! { + if #[cfg(feature="rt-wasm-bindgen")] { + use ws_stream_wasm::*; + let (_wsmeta, wsio) = WsMeta::connect(request, None) + .await.map_err(ws_err_to_io_error)?; + use futures_util::io::AsyncReadExt; + (wsio_reader, wsio_writer) = wsio.into_io().split(); + } else { + compile_error!("must choose an executor"); + } + } + + // Create channels + let (client_sender, server_receiver) = flume::unbounded::(); + + // Create stopper + let stop_source = StopSource::new(); + + // Create router operation waiter + let router_op_waiter = RouterOpWaiter::new(); + + // Spawn a client connection handler + let jh_handler = spawn( + "RouterClient server processor", + Self::run_server_processor( + wsio_reader, + wsio_writer, + server_receiver, + router_op_waiter.clone(), + stop_source.token(), + ), + ); + + Ok(Self::new( + client_sender, + router_op_waiter, + jh_handler, + stop_source, + )) + } + + pub(super) fn local_router_client( + client_sender: flume::Sender, + server_receiver: flume::Receiver, + ) -> RouterClient { + // Create stopper + let stop_source = StopSource::new(); + + // Create router operation waiter + let router_op_waiter = RouterOpWaiter::new(); + + // Spawn a client connection handler + let jh_handler = spawn( + "RouterClient local processor", + Self::run_local_processor( + server_receiver, + router_op_waiter.clone(), + stop_source.token(), + ), + ); + + Self::new(client_sender, router_op_waiter, jh_handler, stop_source) + } + + pub async fn disconnect(self) { + drop(self.inner.lock().stop_source.take()); + let jh_handler = self.inner.lock().jh_handler.take(); + if let Some(jh_handler) = jh_handler { + jh_handler.await; + } + } + + pub async fn allocate_machine(self, profile: String) -> VirtualNetworkResult { + let request = ServerProcessorRequest::AllocateMachine { profile }; + let 
ServerProcessorReplyValue::AllocateMachine { machine_id } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(machine_id) + } + + pub async fn release_machine(self, machine_id: MachineId) -> VirtualNetworkResult<()> { + let request = ServerProcessorRequest::ReleaseMachine { machine_id }; + let ServerProcessorReplyValue::ReleaseMachine = self.perform_request(request).await? else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(()) + } + + pub async fn get_interfaces( + self, + machine_id: MachineId, + ) -> VirtualNetworkResult> { + let request = ServerProcessorRequest::GetInterfaces { machine_id }; + let ServerProcessorReplyValue::GetInterfaces { interfaces } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(interfaces) + } + + pub async fn tcp_connect( + self, + machine_id: MachineId, + remote_address: SocketAddr, + opt_local_address: Option, + timeout_ms: u32, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult<(SocketId, SocketAddr)> { + let request = ServerProcessorRequest::TcpConnect { + machine_id, + local_address: opt_local_address, + remote_address, + timeout_ms, + options, + }; + let ServerProcessorReplyValue::TcpConnect { + socket_id, + local_address, + } = self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok((socket_id, local_address)) + } + + pub async fn tcp_bind( + self, + machine_id: MachineId, + opt_local_address: Option, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult<(SocketId, SocketAddr)> { + let request = ServerProcessorRequest::TcpBind { + machine_id, + local_address: opt_local_address, + options, + }; + let ServerProcessorReplyValue::TcpBind { + socket_id, + local_address, + } = self.perform_request(request).await? 
+ else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok((socket_id, local_address)) + } + + pub async fn tcp_accept( + self, + machine_id: MachineId, + listen_socket_id: SocketId, + ) -> VirtualNetworkResult<(SocketId, SocketAddr)> { + let request = ServerProcessorRequest::TcpAccept { + machine_id, + listen_socket_id, + }; + let ServerProcessorReplyValue::TcpAccept { socket_id, address } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok((socket_id, address)) + } + + pub async fn tcp_shutdown( + self, + machine_id: MachineId, + socket_id: SocketId, + ) -> VirtualNetworkResult<()> { + let request = ServerProcessorRequest::TcpShutdown { + machine_id, + socket_id, + }; + let ServerProcessorReplyValue::TcpShutdown = self.perform_request(request).await? else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(()) + } + + pub async fn udp_bind( + self, + machine_id: MachineId, + opt_local_address: Option, + options: VirtualUdpOptions, + ) -> VirtualNetworkResult<(SocketId, SocketAddr)> { + let request = ServerProcessorRequest::UdpBind { + machine_id, + local_address: opt_local_address, + options, + }; + let ServerProcessorReplyValue::UdpBind { + socket_id, + local_address, + } = self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok((socket_id, local_address)) + } + + pub async fn send( + self, + machine_id: MachineId, + socket_id: SocketId, + data: Vec, + ) -> VirtualNetworkResult { + let request = ServerProcessorRequest::Send { + machine_id, + socket_id, + data, + }; + let ServerProcessorReplyValue::Send { len } = self.perform_request(request).await? 
else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(len as usize) + } + + pub async fn send_to( + self, + machine_id: MachineId, + socket_id: SocketId, + remote_address: SocketAddr, + data: Vec, + ) -> VirtualNetworkResult { + let request = ServerProcessorRequest::SendTo { + machine_id, + socket_id, + data, + remote_address, + }; + let ServerProcessorReplyValue::SendTo { len } = self.perform_request(request).await? else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(len as usize) + } + + pub async fn recv( + self, + machine_id: MachineId, + socket_id: SocketId, + len: usize, + ) -> VirtualNetworkResult> { + let request = ServerProcessorRequest::Recv { + machine_id, + socket_id, + len: len as u32, + }; + let ServerProcessorReplyValue::Recv { data } = self.perform_request(request).await? else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(data) + } + + pub async fn recv_from( + self, + machine_id: MachineId, + socket_id: SocketId, + len: usize, + ) -> VirtualNetworkResult<(Vec, SocketAddr)> { + let request = ServerProcessorRequest::RecvFrom { + machine_id, + socket_id, + len: len as u32, + }; + let ServerProcessorReplyValue::RecvFrom { + data, + remote_address, + } = self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok((data, remote_address)) + } + + pub async fn get_routed_local_address( + self, + machine_id: MachineId, + address_type: VirtualAddressType, + ) -> VirtualNetworkResult { + let request = ServerProcessorRequest::GetRoutedLocalAddress { + machine_id, + address_type, + }; + let ServerProcessorReplyValue::GetRoutedLocalAddress { address } = + self.perform_request(request).await? 
+ else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(address) + } + + pub async fn find_gateway( + self, + machine_id: MachineId, + ) -> VirtualNetworkResult> { + let request = ServerProcessorRequest::FindGateway { machine_id }; + let ServerProcessorReplyValue::FindGateway { opt_gateway_id } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(opt_gateway_id) + } + + pub async fn get_external_address(self, gateway_id: GatewayId) -> VirtualNetworkResult { + let request = ServerProcessorRequest::GetExternalAddress { gateway_id }; + let ServerProcessorReplyValue::GetExternalAddress { address } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(address) + } + + pub async fn add_port( + self, + gateway_id: GatewayId, + protocol: VirtualProtocolType, + external_port: Option, + local_address: SocketAddr, + lease_duration_ms: u32, + description: String, + ) -> VirtualNetworkResult { + let request = ServerProcessorRequest::AddPort { + gateway_id, + protocol, + external_port, + local_address, + lease_duration_ms, + description, + }; + let ServerProcessorReplyValue::AddPort { external_port } = + self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(external_port) + } + + pub async fn remove_port( + self, + gateway_id: GatewayId, + protocol: VirtualProtocolType, + external_port: u16, + ) -> VirtualNetworkResult<()> { + let request = ServerProcessorRequest::RemovePort { + gateway_id, + protocol, + external_port, + }; + let ServerProcessorReplyValue::RemovePort = self.perform_request(request).await? 
else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(()) + } + + pub async fn txt_query(self, name: String) -> VirtualNetworkResult> { + let request = ServerProcessorRequest::TXTQuery { name }; + let ServerProcessorReplyValue::TXTQuery { result } = self.perform_request(request).await? + else { + return Err(VirtualNetworkError::ResponseMismatch); + }; + Ok(result) + } + + ////////////////////////////////////////////////////////////////////////// + // Private implementation + + fn new( + sender: flume::Sender, + router_op_waiter: RouterOpWaiter, + jh_handler: MustJoinHandle<()>, + stop_source: StopSource, + ) -> RouterClient { + RouterClient { + unlocked_inner: Arc::new(RouterClientUnlockedInner { + sender, + next_message_id: AtomicU64::new(0), + router_op_waiter, + }), + inner: Arc::new(Mutex::new(RouterClientInner { + jh_handler: Some(jh_handler), + stop_source: Some(stop_source), + })), + } + } + + fn report_closed_socket(&self, machine_id: MachineId, socket_id: SocketId) { + let command = ServerProcessorCommand::CloseSocket { + machine_id, + socket_id, + }; + + if let Err(e) = self + .unlocked_inner + .sender + .send(command) + .map_err(|_| VirtualNetworkError::IoError(io::ErrorKind::BrokenPipe)) + { + error!("{}", e); + } + } + + pub(super) fn drop_tcp_stream(&self, machine_id: MachineId, socket_id: SocketId) { + self.report_closed_socket(machine_id, socket_id); + } + + pub(super) fn drop_tcp_listener(&self, machine_id: MachineId, socket_id: SocketId) { + self.report_closed_socket(machine_id, socket_id); + } + + pub(super) fn drop_udp_socket(&self, machine_id: MachineId, socket_id: SocketId) { + self.report_closed_socket(machine_id, socket_id); + } + + async fn perform_request( + &self, + request: ServerProcessorRequest, + ) -> VirtualNetworkResult { + let message_id = MessageId( + self.unlocked_inner + .next_message_id + .fetch_add(1, Ordering::AcqRel), + ); + let command = ServerProcessorCommand::Message(ServerProcessorMessage { + message_id, 
+ request, + }); + + self.unlocked_inner + .sender + .send_async(command) + .await + .map_err(|_| VirtualNetworkError::IoError(io::ErrorKind::BrokenPipe))?; + let handle = self + .unlocked_inner + .router_op_waiter + .add_op_waiter(message_id.0, ()); + + let status = self + .unlocked_inner + .router_op_waiter + .wait_for_op(handle) + .await + .map_err(|_| VirtualNetworkError::WaitError)?; + + match status { + ServerProcessorReplyStatus::Value(server_processor_response) => { + Ok(server_processor_response) + } + ServerProcessorReplyStatus::InvalidMachineId => { + Err(VirtualNetworkError::InvalidMachineId) + } + ServerProcessorReplyStatus::InvalidSocketId => { + Err(VirtualNetworkError::InvalidSocketId) + } + ServerProcessorReplyStatus::MissingProfile => Err(VirtualNetworkError::MissingProfile), + ServerProcessorReplyStatus::ProfileComplete => { + Err(VirtualNetworkError::ProfileComplete) + } + ServerProcessorReplyStatus::IoError(k) => Err(VirtualNetworkError::IoError(k)), + } + } + + async fn run_server_processor( + reader: R, + writer: W, + receiver: flume::Receiver, + router_op_waiter: RouterOpWaiter, + stop_token: StopToken, + ) where + R: AsyncReadExt + Unpin + Send, + W: AsyncWriteExt + Unpin + Send, + { + let mut unord = FuturesUnordered::new(); + + let framed_reader = FramedRead::new(reader, BytesCodec); + let framed_writer = FramedWrite::new(writer, BytesCodec); + + let framed_writer_fut = system_boxed(async move { + if let Err(e) = receiver + .into_stream() + .map(|command| { + to_stdvec(&command) + .map_err(io::Error::other) + .map(Bytes::from) + }) + .forward(framed_writer) + .await + { + error!("{}", e); + } + }); + let framed_reader_fut = system_boxed(async move { + let fut = framed_reader.try_for_each(|x| async { + let x = x; + let evt = from_bytes::(&x) + .map_err(VirtualNetworkError::SerializationError)?; + + Self::process_event(evt, router_op_waiter.clone()).await + }); + if let Err(e) = fut.await { + error!("{}", e); + } + }); + + 
unord.push(framed_writer_fut); + unord.push(framed_reader_fut); + while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {} + } + + async fn run_local_processor( + receiver: flume::Receiver, + router_op_waiter: RouterOpWaiter, + stop_token: StopToken, + ) { + let mut unord = FuturesUnordered::new(); + let receiver = receiver + .into_stream() + .map(io::Result::::Ok); + + let receiver_fut = system_boxed(async move { + let fut = + receiver.try_for_each(|evt| Self::process_event(evt, router_op_waiter.clone())); + if let Err(e) = fut.await { + error!("{}", e); + } + }); + unord.push(receiver_fut); + while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {} + } + + async fn process_event( + evt: ServerProcessorEvent, + router_op_waiter: RouterOpWaiter, + ) -> io::Result<()> { + match evt { + ServerProcessorEvent::Reply(reply) => { + router_op_waiter + .complete_op_waiter(reply.message_id.0, reply.status) + .map_err(io::Error::other)?; + } // ServerProcessorEvent::DeadSocket { + // machine_id, + // socket_id, + // } => { + // // + // } + } + + Ok(()) + } +} diff --git a/veilid-tools/src/virtual_network/router_op_table.rs b/veilid-tools/src/virtual_network/router_op_table.rs new file mode 100644 index 00000000..b403700d --- /dev/null +++ b/veilid-tools/src/virtual_network/router_op_table.rs @@ -0,0 +1,163 @@ +use super::*; + +pub type RouterOpId = u64; + +#[derive(Debug, Clone, PartialEq, Eq, ThisError)] +pub enum RouterOpWaitError { + #[error("Send error: {0}")] + SendError(flume::SendError), + #[error("Recv error: {0}")] + RecvError(flume::RecvError), + #[error("Unmatched operation id: {0}")] + UnmatchedOpId(RouterOpId), + #[error("Missing operation id: {0}")] + MissingOpId(RouterOpId), +} + +#[derive(Debug)] +pub struct RouterOpWaitHandle +where + T: Unpin, + C: Unpin + Clone, +{ + waiter: RouterOpWaiter, + op_id: RouterOpId, + result_receiver: Option>, +} + +impl Drop for RouterOpWaitHandle +where + T: Unpin, + C: Unpin + Clone, 
+{ + fn drop(&mut self) { + if self.result_receiver.is_some() { + self.waiter.cancel_op_waiter(self.op_id); + } + } +} + +#[derive(Debug)] +struct RouterWaitingOp +where + T: Unpin, + C: Unpin + Clone, +{ + context: C, + result_sender: flume::Sender, +} + +#[derive(Debug)] +struct RouterOpWaiterInner +where + T: Unpin, + C: Unpin + Clone, +{ + waiting_op_table: HashMap>, +} + +#[derive(Debug)] +pub(super) struct RouterOpWaiter +where + T: Unpin, + C: Unpin + Clone, +{ + inner: Arc>>, +} + +impl Clone for RouterOpWaiter +where + T: Unpin, + C: Unpin + Clone, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl RouterOpWaiter +where + T: Unpin, + C: Unpin + Clone, +{ + pub fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(RouterOpWaiterInner { + waiting_op_table: HashMap::new(), + })), + } + } + + /// Set up wait for operation to complete + pub fn add_op_waiter(&self, op_id: RouterOpId, context: C) -> RouterOpWaitHandle { + let mut inner = self.inner.lock(); + let (result_sender, result_receiver) = flume::bounded(1); + let waiting_op = RouterWaitingOp { + context, + result_sender, + }; + if inner.waiting_op_table.insert(op_id, waiting_op).is_some() { + error!( + "add_op_waiter collision should not happen for op_id {}", + op_id + ); + } + + RouterOpWaitHandle { + waiter: self.clone(), + op_id, + result_receiver: Some(result_receiver), + } + } + + /// Get operation context + #[expect(dead_code)] + pub fn get_op_context(&self, op_id: RouterOpId) -> Result> { + let inner = self.inner.lock(); + let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else { + return Err(RouterOpWaitError::MissingOpId(op_id)); + }; + Ok(waiting_op.context.clone()) + } + + /// Remove wait for op + fn cancel_op_waiter(&self, op_id: RouterOpId) { + let mut inner = self.inner.lock(); + inner.waiting_op_table.remove(&op_id); + } + + /// Complete the waiting op + pub fn complete_op_waiter( + &self, + op_id: RouterOpId, + message: T, + ) -> Result<(), 
RouterOpWaitError> { + let waiting_op = { + let mut inner = self.inner.lock(); + inner + .waiting_op_table + .remove(&op_id) + .ok_or_else(|| RouterOpWaitError::UnmatchedOpId(op_id))? + }; + waiting_op + .result_sender + .send(message) + .map_err(RouterOpWaitError::SendError) + } + + /// Wait for operation to complete + pub async fn wait_for_op( + &self, + mut handle: RouterOpWaitHandle, + ) -> Result> { + // Take the receiver + // After this, we must manually cancel since the cancel on handle drop is disabled + let result_receiver = handle.result_receiver.take().unwrap(); + let result_fut = result_receiver.recv_async(); + + // wait for eventualvalue + result_fut.await.map_err(RouterOpWaitError::RecvError) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/config.rs b/veilid-tools/src/virtual_network/router_server/config.rs new file mode 100644 index 00000000..a234d7d3 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/config.rs @@ -0,0 +1,678 @@ +use super::*; +use serde::*; +use std::path::Path; + +use validator::{Validate, ValidationError, ValidationErrors}; + +const PREDEFINED_CONFIG: &str = include_str!("predefined_config.yml"); +const DEFAULT_CONFIG: &str = include_str!("default_config.yml"); + +#[derive(Debug, ThisError)] +pub enum ConfigError { + #[error("io error: {0}")] + IoError(std::io::Error), + #[error("parse error: {0}: {1}")] + ParseError(String, serde_yaml::Error), + #[error("validate error: {0}")] + ValidateError(String), + #[error("no configuration files specified")] + NoConfigFiles, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Profile { + #[validate(length(min = 1), nested)] + pub instances: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Instance { + Machine { machine: WeightedList }, + Template { template: WeightedList }, +} + +impl Validate for Instance { + fn validate(&self) -> Result<(), ValidationErrors> { + match self { + 
Instance::Machine { machine } => machine.validate()?, + Instance::Template { template } => template.validate()?, + } + Ok(()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Machine { + #[serde(flatten)] + #[validate(nested)] + pub location: MachineLocation, + #[serde(default)] + pub disable_capabilities: Vec, + #[serde(default)] + pub bootstrap: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum MachineLocation { + Network { + network: String, + #[serde(default)] + address4: Option, + #[serde(default)] + address6: Option, + }, +} + +impl Validate for MachineLocation { + fn validate(&self) -> Result<(), ValidationErrors> { + let mut errors = ValidationErrors::new(); + match self { + MachineLocation::Network { + network: _, + address4, + address6, + } => { + if address4.is_none() && address6.is_none() { + errors.add( + "MachineLocation", + ValidationError::new("badaddr") + .with_message("machine must have at least one address".into()), + ); + } + } + } + + if !errors.is_empty() { + Err(errors) + } else { + Ok(()) + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Template { + #[serde(flatten)] + #[validate(nested)] + pub location: TemplateLocation, + #[serde(flatten)] + #[validate(nested)] + pub limits: TemplateLimits, + #[serde(default)] + #[validate(custom(function = "validate_disable_capabilities"))] + pub disable_capabilities: Vec, +} + +fn validate_disable_capabilities(disable_capabilities: &[String]) -> Result<(), ValidationError> { + if disable_capabilities.contains(&("".to_string())) { + return Err(ValidationError::new("badcap").with_message("empty disabled capability".into())); + } + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_template_limits"))] +pub struct TemplateLimits { + /// maximum number of machines this template will generate + #[validate(nested)] + #[serde(default)] + pub 
machine_count: Option>, + #[validate(nested)] + #[serde(default)] + pub machines_per_network: Option>, +} + +fn validate_template_limits(limits: &TemplateLimits) -> Result<(), ValidationError> { + let mut has_at_least_one_limit = false; + if let Some(machine_count) = &limits.machine_count { + machine_count.try_for_each(|x| { + if *x == 0 { + return Err(ValidationError::new("badcount") + .with_message("template limits has zero machine count".into())); + } + Ok(()) + })?; + has_at_least_one_limit = true; + } + if let Some(machines_per_network) = &limits.machines_per_network { + machines_per_network.try_for_each(|x| { + if *x == 0 { + return Err(ValidationError::new("badcount") + .with_message("template limits has zero machines per network count".into())); + } + Ok(()) + })?; + has_at_least_one_limit = true; + } + + if !has_at_least_one_limit { + return Err(ValidationError::new("nolimit") + .with_message("template can not be unlimited per network".into())); + } + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum TemplateLocation { + Network { network: WeightedList }, + Blueprint { blueprint: WeightedList }, +} + +impl Validate for TemplateLocation { + fn validate(&self) -> Result<(), ValidationErrors> { + match self { + TemplateLocation::Network { network } => network.validate()?, + TemplateLocation::Blueprint { blueprint } => blueprint.validate()?, + } + Ok(()) + } +} + +//////////////////////////////////////////////////////////////// + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_network"))] +pub struct Network { + #[serde(default)] + pub model: Option, + #[serde(default)] + #[validate(nested)] + pub ipv4: Option, + #[serde(default)] + #[validate(nested)] + pub ipv6: Option, +} + +fn validate_network(network: &Network) -> Result<(), ValidationError> { + if network.ipv4.is_none() && network.ipv6.is_none() { + return Err(ValidationError::new("badaddr") + 
.with_message("network must support at least one address type".into())); + } + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct NetworkIpv4 { + #[validate(length(min = 1))] + pub allocation: String, + #[serde(default)] + #[validate(nested)] + pub gateway: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct NetworkIpv6 { + #[validate(length(min = 1))] + pub allocation: String, + #[serde(default)] + #[validate(nested)] + pub gateway: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct NetworkGateway { + pub translation: Translation, + pub upnp: bool, + #[validate(length(min = 1))] + pub network: Option, +} + +//////////////////////////////////////////////////////////////// + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_blueprint"))] +pub struct Blueprint { + #[serde(default)] + #[validate(nested)] + pub model: Option>, + #[serde(flatten)] + #[validate(nested)] + pub limits: BlueprintLimits, + #[serde(default)] + #[validate(nested)] + pub ipv4: Option, + #[serde(default)] + #[validate(nested)] + pub ipv6: Option, +} + +fn validate_blueprint(blueprint: &Blueprint) -> Result<(), ValidationError> { + if blueprint.ipv4.is_none() && blueprint.ipv6.is_none() { + return Err(ValidationError::new("badaddr") + .with_message("blueprint must support at least one address type".into())); + } + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_blueprint_limits"))] +pub struct BlueprintLimits { + /// maximum number of networks this blueprint will generate + #[validate(nested)] + #[serde(default)] + pub network_count: Option>, +} + +fn validate_blueprint_limits(limits: &BlueprintLimits) -> Result<(), ValidationError> { + if let Some(network_count) = &limits.network_count { + network_count.try_for_each(|x| { + if *x == 0 { + return Err(ValidationError::new("badcount") + 
.with_message("blueprint limits has zero network count".into())); + } + Ok(()) + })?; + } + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum BlueprintLocation { + Allocation { + allocation: WeightedList, + }, + Network { + #[serde(default)] + network: Option>, + }, +} + +impl Validate for BlueprintLocation { + fn validate(&self) -> Result<(), ValidationErrors> { + match self { + BlueprintLocation::Allocation { allocation } => allocation.validate()?, + BlueprintLocation::Network { network } => { + if let Some(network) = network { + network.validate()?; + } + } + } + + Ok(()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_blueprint_ipv4"))] +pub struct BlueprintIpv4 { + #[serde(flatten)] + #[validate(nested)] + pub location: BlueprintLocation, + #[validate(nested)] + pub prefix: WeightedList, + #[serde(default)] + #[validate(nested)] + pub gateway: Option, +} + +fn validate_blueprint_ipv4(blueprint_ipv4: &BlueprintIpv4) -> Result<(), ValidationError> { + blueprint_ipv4.prefix.try_for_each(|x| { + if *x > 32 { + return Err(ValidationError::new("badprefix") + .with_message("ipv4 blueprint prefix too long".into())); + } + Ok(()) + })?; + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_blueprint_ipv6"))] +pub struct BlueprintIpv6 { + #[serde(flatten)] + #[validate(nested)] + pub location: BlueprintLocation, + #[validate(nested)] + pub prefix: WeightedList, + #[serde(default)] + #[validate(nested)] + pub gateway: Option, +} + +fn validate_blueprint_ipv6(blueprint_ipv6: &BlueprintIpv6) -> Result<(), ValidationError> { + blueprint_ipv6.prefix.try_for_each(|x| { + if *x > 128 { + return Err(ValidationError::new("badprefix") + .with_message("ipv6 blueprint prefix too long".into())); + } + Ok(()) + })?; + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct BlueprintGateway { + 
#[validate(nested)] + pub translation: WeightedList, + #[validate(range(min = 0.0, max = 1.0))] + pub upnp: Probability, + #[serde(default, flatten)] + #[validate(nested)] + pub location: Option, +} + +//////////////////////////////////////////////////////////////// + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Scope4 { + #[validate(length(min = 1))] + pub scope4: Vec, + #[serde(default)] + pub pool4: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Scope6 { + #[validate(length(min = 1))] + pub scope6: Vec, + #[serde(default)] + pub pool6: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[validate(schema(function = "validate_distance"))] +pub struct Distance { + pub min: f32, + pub max: f32, +} + +fn validate_distance(distance: &Distance) -> Result<(), ValidationError> { + if distance.min < 0.0 { + return Err(ValidationError::new("baddist") + .with_message("distance minimum must not be negative".into())); + } + if distance.max < distance.min { + return Err(ValidationError::new("baddist") + .with_message("distance maximum must not be less than the minimum".into())); + } + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate, Default)] +#[validate(schema(function = "validate_distribution"))] +pub struct Distribution { + pub mean: f32, + pub sigma: f32, + pub skew: f32, + pub min: f32, + pub max: f32, +} + +fn validate_distribution(distribution: &Distribution) -> Result<(), ValidationError> { + if distribution.mean < 0.0 { + return Err(ValidationError::new("baddistrib") + .with_message("distribution mean must not be negative".into())); + } + if distribution.sigma < 0.0 { + return Err(ValidationError::new("baddistrib") + .with_message("distribution sigma must not be negative".into())); + } + if distribution.max < distribution.min { + return Err(ValidationError::new("baddistrib") + .with_message("distribution maximum must not be less than the minimum".into())); + } + 
Ok(()) +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum Translation { + None, + PortRestricted, + AddressRestricted, + Symmetric, +} + +impl Default for Translation { + fn default() -> Self { + Self::None + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Model { + #[validate(nested)] + pub latency: Distribution, + #[serde(default)] + #[validate(nested)] + pub distance: Option, + #[serde(default)] + #[validate(range(min = 0.0, max = 1.0))] + pub loss: Probability, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +pub struct Allocation { + #[serde(flatten)] + #[validate(nested)] + pub scope4: Option, + #[serde(flatten)] + #[validate(nested)] + pub scope6: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct Config { + #[serde(default)] + pub seed: Option, + #[serde(default)] + pub default_network: Option, + #[serde(default)] + pub default_model: Option, + #[serde(default)] + pub default_pool: Option, + #[serde(default)] + pub profiles: HashMap, + #[serde(default)] + pub machines: HashMap, + #[serde(default)] + pub templates: HashMap, + #[serde(default)] + pub networks: HashMap, + #[serde(default)] + pub blueprints: HashMap, + #[serde(default)] + pub allocations: HashMap, + #[serde(default)] + pub models: HashMap, +} + +impl Validate for Config { + fn validate(&self) -> Result<(), ValidationErrors> { + // Validate config + let mut errors = ValidationErrors::new(); + + if let Some(default_network) = self.default_network.as_ref() { + if default_network.is_empty() { + errors.add( + "default_network", + ValidationError::new("badlen").with_message( + "Config must have non-empty default network if specified".into(), + ), + ); + } + } + + if let Some(default_model) = self.default_model.as_ref() { + if default_model.is_empty() { + errors.add( + "default_model", + ValidationError::new("badlen").with_message( + "Config must 
have non-empty default model if specified".into(), + ), + ); + } + } + + if let Some(default_pool) = self.default_pool.as_ref() { + if default_pool.is_empty() { + errors.add( + "default_pool", + ValidationError::new("badlen").with_message( + "Config must have non-empty default pool if specified".into(), + ), + ); + } + } + + errors.merge_self("profiles", validate_hash_map(&self.profiles)); + errors.merge_self("machines", validate_hash_map(&self.machines)); + errors.merge_self("templates", validate_hash_map(&self.templates)); + errors.merge_self("networks", validate_hash_map(&self.networks)); + errors.merge_self("blueprints", validate_hash_map(&self.blueprints)); + errors.merge_self("allocation", validate_hash_map(&self.allocations)); + errors.merge_self("models", validate_hash_map(&self.models)); + + if !errors.is_empty() { + return Err(errors); + } + + Ok(()) + } +} + +fn expand_validation_errors(errors: ValidationErrors) -> String { + let mut out = String::new(); + let errors = errors.into_errors(); + let mut keys: Vec<&str> = errors.keys().copied().collect(); + keys.sort(); + for k in keys { + let v = errors.get(k).unwrap(); + let v_out = match v.clone() { + validator::ValidationErrorsKind::Struct(validation_errors) => { + expand_validation_errors(*validation_errors) + } + validator::ValidationErrorsKind::List(btree_map) => { + let mut l_out = String::new(); + for (_, v) in btree_map { + l_out += &expand_validation_errors(*v); + } + l_out + } + validator::ValidationErrorsKind::Field(vec) => { + let mut v_out = String::new(); + for v in vec { + v_out += &format!("{}\n", v); + } + v_out + } + }; + let v_out = indent::indent_all_by(4, v_out); + + out += &format!("{k}:\n{v_out}\n"); + } + out +} + +fn map_validation_error>( + name: S, +) -> impl FnOnce(validator::ValidationErrors) -> ConfigError { + let name = name.as_ref().to_string(); + move |errors| { + ConfigError::ValidateError(format!("{name}: {}", expand_validation_errors(errors))) + } +} + +impl Config { + 
pub fn new>( + config_files: &[P], + no_predefined_config: bool, + ) -> Result { + let mut out = Self::default(); + + if !no_predefined_config { + out = load_predefined_config()?; + out.validate() + .map_err(map_validation_error(""))?; + + // Load default config file + if config_files.is_empty() { + let cfg: Self = load_default_config()?; + cfg.validate() + .map_err(map_validation_error(""))?; + + out = out.combine(cfg)?; + } + } else { + // There must be config files specified to use this option + if config_files.is_empty() { + return Err(ConfigError::NoConfigFiles); + } + } + + // Load specified config files + for config_file in config_files { + let cfg: Self = load_config_file(config_file)?; + cfg.validate().map_err(map_validation_error(format!( + "{}", + config_file.as_ref().to_string_lossy() + )))?; + + out = out.combine(cfg)?; + } + + Ok(out) + } + + pub fn combine(self, other: Self) -> Result { + let out = Config { + seed: other.seed.or(self.seed), + default_network: other.default_network.or(self.default_network), + default_model: other.default_model.or(self.default_model), + default_pool: other.default_pool.or(self.default_pool), + profiles: self.profiles.into_iter().chain(other.profiles).collect(), + machines: self.machines.into_iter().chain(other.machines).collect(), + templates: self.templates.into_iter().chain(other.templates).collect(), + networks: self.networks.into_iter().chain(other.networks).collect(), + blueprints: self + .blueprints + .into_iter() + .chain(other.blueprints) + .collect(), + allocations: self + .allocations + .into_iter() + .chain(other.allocations) + .collect(), + models: self.models.into_iter().chain(other.models).collect(), + }; + + // Validate config (should never fail if combine inputs also validated) + out.validate().map_err(map_validation_error(""))?; + Ok(out) + } +} + +fn validate_hash_map(value: &HashMap) -> Result<(), ValidationErrors> { + let mut errors = ValidationErrors::new(); + for (n, x) in 
value.values().enumerate() { + errors.merge_self(format!("[{n}]").to_static_str(), x.validate()); + } + if !errors.is_empty() { + return Err(errors); + } + Ok(()) +} + +fn load_predefined_config() -> Result { + serde_yaml::from_str(PREDEFINED_CONFIG) + .map_err(|x| ConfigError::ParseError("".to_string(), x)) +} + +fn load_default_config() -> Result { + serde_yaml::from_str(DEFAULT_CONFIG) + .map_err(|x| ConfigError::ParseError("".to_string(), x)) +} + +fn load_config_file>(config_file: P) -> Result { + let rdr = std::fs::File::open(&config_file).map_err(ConfigError::IoError)?; + serde_yaml::from_reader(rdr) + .map_err(|x| ConfigError::ParseError(config_file.as_ref().to_string_lossy().to_string(), x)) +} diff --git a/veilid-tools/src/virtual_network/router_server/default_config.yml b/veilid-tools/src/virtual_network/router_server/default_config.yml new file mode 100644 index 00000000..56dc4e68 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/default_config.yml @@ -0,0 +1,263 @@ +--- +# Random number seed used to generate all profile configurations +# for a test. The seed can be overriden on the VirtualRouter command +# line to choose a different test scenario. The same seed will +# generate the same configuration on all machines given the same +# configuration file. 
+# seed: 0 + +# The name of the predefined network to use by default (typically +# this is '$internet') +# default_network: "$internet" + +# The name of the predefined performance model to use by default (typically +# this is '$lan') +# default_model: "$lan" + +# The name of the default allocation pool that subnets are allocated from +# default_pool: "$internet" + +################################################################# +# Profiles +# +# Profiles are ordered lists of machine instances or machine templates +# to assign to new instances of Veilid as they are allocated from +# the VirtualRouter + +profiles: + bootstrap: + instances: + # two bootstrap machines + - machine: "bootstrap-1.veilid.net" + - machine: "bootstrap-2.veilid.net" + # pool of up to 4 relay-capable servers + - template: "bootrelay" + # geographically disperse servers of various configurations + server: + instances: + - template: + - "relayserver" + - "ipv4server" + - "ipv6server" + - "nat4home" + - "nat4+6home" + +################################################################# +# Machines +# +# Machines are single, fully-defined machine specifications that +# can only be allocated one time + +machines: + bootstrap-1.veilid.net: + network: "boot" + address4: "170.64.128.16" + address6: "2a03:b0c0:2:dd::ddd:0010" + disable_capabilities: + ["ROUT", "TUNL", "SGNL", "RLAY", "DIAL", "DHTV", "DHTW", "APPM"] + bootstrap: true + bootstrap-2.veilid.net: + network: "boot" + address4: "170.64.128.17" + address6: "2a03:b0c0:2:dd::ddd:0011" + disable_capabilities: + ["ROUT", "TUNL", "SGNL", "RLAY", "DIAL", "DHTV", "DHTW", "APPM"] + bootstrap: true + +################################################################# +# Templates +# +# Templates are used to generate Machines +# * if networks are specified, then all machines are created on that +# single network. A maximum number of machines are allocated on the +# network within the limits specified. 
+# * if a blueprint is spec + +templates: + # Default servers on the boot network + # - will have ipv4 and ipv6 addresses + # - will have no capabilities disabled + # - will not use NAT, and be directly connected + # - limited to 4 machines + bootrelay: + network: "boot" + machine_count: 4 + # Servers on subnets within the 'internet' network + relayserver: + blueprint: "direct" + machines_per_network: [1, 2, 3] + ipv4server: + blueprint: "direct_ipv4_no_ipv6" + machines_per_network: [1, 2, 3] + ipv6server: + blueprint: "direct_ipv6_no_ipv4" + machines_per_network: [1, 2, 3] + nat4home: + blueprint: "nat_ipv4_no_ipv6" + machines_per_network: [1, 2, 3] + nat4+6home: + blueprint: "nat_ipv4_direct_ipv6" + machines_per_network: [1, 2, 3] + +################################################################# +# Networks +# +# Networks are a location where Machines can be allocated and represent +# a network segment with address allocations per address type +# and a gateway to another network. The performance characteristics of +# a network are defined by a performance Model + +networks: + # Custom networks + boot: + ipv4: + allocation: "boot" + ipv6: + allocation: "boot" + + # # Predefined networks + # $internet: + # ipv4: + # allocation: "$internet" + # ipv6: + # allocation: "$internet" + # model: "$internet" + +################################################################# +# Blueprints +# +# Blueprints are used to generate Networks for use with Machines + +blueprints: + # * A subnet of the internet directly attached with no translation + # with both ipv4 and ipv6 networking + direct: + ipv4: + prefix: 24 + ipv6: + prefix: 64 + # * An ipv4-only subnet of the internet directly attached with no translation + direct_ipv4_no_ipv6: + ipv4: + prefix: 24 + # * An ipv6-only subnet of the internet directly attached with no translation + direct_ipv6_no_ipv4: + ipv6: + prefix: 64 + # * An ipv4-only subnet of the internet attached via NAT to an + # an ipv4-only subnet of the 
internet directly attached with no translation + nat_ipv4_no_ipv6: + ipv4: + allocation: "$private" + prefix: 24 + gateway: + translation: "port_restricted" + upnp: 0.25 + blueprint: "direct_ipv4_no_ipv6" + # * An ipv4 subnet of the internet attached via NAT to an + # an ipv4-only subnet of the internet directly attached with no translation + # * An ipv6 subnet of the internet directly attached with no translation + nat_ipv4_direct_ipv6: + ipv4: + allocation: "$private" + prefix: 24 + gateway: + translation: "port_restricted" + upnp: 0.25 + blueprint: "direct_ipv4_no_ipv6" + ipv6: + prefix: 56 + +################################################################# +# Allocations +# +# Allocations are partitions of the address space that networks +# can be assigned to. Machines on the networks will be given +# addresses within these ranges. If an allocation +# is not specified, an address -outside- any of the allocation +# will be used (on the 'public internet'). + +allocations: + # Custom network allocations + boot: + scope4: ["170.64.128.0/24"] + scope6: ["2a03:b0c0:2::/48"] + # # Predefined allocations + # $internet: + # scope4: ["0.0.0.0/0"] + # scope6: ["::/0"] + # $private: + # scope4: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] + # scope6: ["fc00::/7"] + # $cgnat: + # scope4: ["100.64.0.0/10"] + # $linklocal: + # scope4: ["169.254.0.0/16"] + # scope6: ["fe80::/10"] + # $localhost: + # scope4: ["127.0.0.0/8"] + # scope6: ["::1/128"] + # $ietf: + # scope4: ["192.0.0.0/24"] + # $cellnat: + # scope4: ["192.0.0.0/29"] + # $documentation: + # scope4: ["192.0.2.0/24", "198.51.100.0/24", "203.0.113.0/24"] + # scope6: ["2001:db8::/32", "3fff::/20"] + # $benchmark: + # scope4: ["198.18.0.0/15"] + # $mulitcast: + # scope4: ["224.0.0.0/4"] + # $mulitcasttest: + # scope4: ["233.252.0.0/24"] + # scope6: ["ff00::/8"] + # $unspecified: + # scope4: ["0.0.0.0/8"] + # scope6: ["::/128"] + # $reserved: + # scope4: ["192.88.99.0/24", "240.0.0.0/4"] + # $broadcast: + # scope4: 
["255.255.255.255/32"] + # $mapped: + # scope6: ["::ffff:0:0/96", "::ffff:0:0:0/96"] + # $translation: + # scope6: ["64:ff9b::/96", "64:ff9b:1::/48"] + # $discard: + # scope6: ["100::/64"] + # $teredo: + # scope6: ["2001::/32"] + # $orchidv2: + # scope6: ["2001:20::/28"] + # $6to4: + # scope6: ["2002::/16"] + # $srv6: + # scope6: ["5f00::/16"] +################################################################# +# Models +# +# Performance models representing how a network behaves +# Latency models are a skewed normal distribution +# Distance is assigned over a circular probability and then +# mapped linearly as a multiplier to latency and loss + +# models: +# # Predefined models +# $lan: +# latency: +# mean: 0.0038 +# sigma: 0.001416 +# skew: 0.0009 +# min: 0.0015 +# max: 0.0075 +# loss: 0.0 +# $internet: +# distance: +# min: 0.04 +# max: 2.0 +# latency: +# mean: 0.200 +# sigma: 0.080 +# skew: 0 +# min: 0.030 +# max: 0.400 +# loss: 0.01 diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/address_pool.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/address_pool.rs new file mode 100644 index 00000000..1d429ad5 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/address_pool.rs @@ -0,0 +1,568 @@ +use super::*; + +#[derive(Debug, Clone)] +pub struct AddressPool { + scope_v4: imbl::Vector, + scope_v6: imbl::Vector, + + allocated_v4: imbl::Vector, + allocated_v6: imbl::Vector, + + owner_tags_v4: imbl::HashMap>, + owner_tags_v6: imbl::HashMap>, +} + +impl AddressPool { + pub fn new() -> Self { + Self { + scope_v4: imbl::Vector::new(), + scope_v6: imbl::Vector::new(), + allocated_v4: imbl::Vector::new(), + allocated_v6: imbl::Vector::new(), + owner_tags_v4: imbl::HashMap::new(), + owner_tags_v6: imbl::HashMap::new(), + } + } + + ///////////////////////////////////////////////////////////////////// + + pub fn scopes_v4(&self) -> Vec { + self.scope_v4.iter().cloned().collect() + } + pub 
fn allocations_v4(&self) -> Vec { + self.allocated_v4.iter().cloned().collect() + } + pub fn scopes_v6(&self) -> Vec { + self.scope_v6.iter().cloned().collect() + } + pub fn allocations_v6(&self) -> Vec { + self.allocated_v6.iter().cloned().collect() + } + + pub fn add_scope_v4(&mut self, allocation: Ipv4Net) { + let mut scopes = self.scope_v4.iter().copied().collect::>(); + scopes.push(allocation); + scopes = Ipv4Net::aggregate(&scopes); + self.scope_v4 = scopes.into(); + } + + pub fn add_scope_v6(&mut self, allocation: Ipv6Net) { + let mut scopes = self.scope_v6.iter().copied().collect::>(); + scopes.push(allocation); + scopes = Ipv6Net::aggregate(&scopes); + self.scope_v6 = scopes.into(); + } + + pub fn find_scope_v4(&self, allocation: Ipv4Net) -> Option { + for x in &self.scope_v4 { + if x.contains(&allocation) { + return Some(*x); + } + } + None + } + + pub fn find_scope_v6(&self, allocation: Ipv6Net) -> Option { + for x in &self.scope_v6 { + if x.contains(&allocation) { + return Some(*x); + } + } + None + } + + pub fn can_allocate_v6(&self, prefix: u8) -> GlobalStateManagerResult { + if prefix > 128 { + return Err(GlobalStateManagerError::InvalidPrefix(prefix)); + } + + let mut srng = StableRng::new(0); + let opt_allocation = self.find_random_allocation_v6(&mut srng, prefix); + Ok(opt_allocation.is_some()) + } + + pub fn can_allocate_v4(&self, prefix: u8) -> GlobalStateManagerResult { + if prefix > 32 { + return Err(GlobalStateManagerError::InvalidPrefix(prefix)); + } + + let mut srng = StableRng::new(0); + let opt_allocation = self.find_random_allocation_v4(&mut srng, prefix); + Ok(opt_allocation.is_some()) + } + + #[instrument(level = "debug", skip(self), err)] + pub fn reserve_allocation_v4( + &mut self, + allocation: Ipv4Net, + opt_tag: Option, + ) -> GlobalStateManagerResult { + // Ensure the allocation is in our scope + let Some(scope) = self.find_scope_v4(allocation) else { + return Err(GlobalStateManagerError::NoAllocation); + }; + + // Only reserve 
if it's not overlapping an allocation + if !self.get_overlaps_v4(allocation).is_empty() { + return Err(GlobalStateManagerError::NoAllocation); + } + + // Add to our allocated pool + self.allocated_v4.insert_ord(allocation); + self.owner_tags_v4.insert(allocation, opt_tag); + + Ok(scope) + } + + #[instrument(level = "debug", skip(self), err)] + pub fn reserve_allocation_v6( + &mut self, + allocation: Ipv6Net, + opt_tag: Option, + ) -> GlobalStateManagerResult { + // Ensure the allocation is in our scope + let Some(scope) = self.find_scope_v6(allocation) else { + return Err(GlobalStateManagerError::NoAllocation); + }; + + // Only reserve if it's not overlapping an allocation + if !self.get_overlaps_v6(allocation).is_empty() { + return Err(GlobalStateManagerError::NoAllocation); + } + + // Add to our allocated pool + self.allocated_v6.insert_ord(allocation); + self.owner_tags_v6.insert(allocation, opt_tag); + + Ok(scope) + } + + pub fn get_overlaps_v4(&self, allocation: Ipv4Net) -> Vec { + let mut overlaps = Vec::::new(); + for x in &self.allocated_v4 { + if x.contains(&allocation) || allocation.contains(x) { + overlaps.push(*x); + overlaps = Ipv4Net::aggregate(&overlaps); + } + } + overlaps + } + + pub fn get_overlaps_v6(&self, allocation: Ipv6Net) -> Vec { + let mut overlaps = Vec::::new(); + for x in &self.allocated_v6 { + if x.contains(&allocation) || allocation.contains(x) { + overlaps.push(*x); + overlaps = Ipv6Net::aggregate(&overlaps); + } + } + overlaps + } + + #[instrument(level = "debug", skip(self, srng), err)] + pub fn allocate_random_v4( + &mut self, + srng: &mut StableRng, + prefix: u8, + tag: T, + ) -> GlobalStateManagerResult> { + if prefix > 32 { + return Err(GlobalStateManagerError::InvalidPrefix(prefix)); + } + + let opt_allocation = self.find_random_allocation_v4(srng, prefix); + + // If we found a free subnet, add it to our allocations + if let Some(allocation) = opt_allocation { + // Add to our allocated pool + 
self.allocated_v4.insert_ord(allocation);
            self.owner_tags_v4.insert(allocation, Some(tag));
            return Ok(Some(allocation));
        }

        // No allocation
        Ok(None)
    }

    /// Allocate a random, non-overlapping IPv6 subnet of `prefix` bits from
    /// the configured scopes, recording `tag` as the owner.
    ///
    /// Returns `Ok(None)` when no free subnet of that size remains, and
    /// `InvalidPrefix` for prefixes longer than 128 bits.
    #[instrument(level = "debug", skip(self, srng), err)]
    pub fn allocate_random_v6(
        &mut self,
        srng: &mut StableRng,
        prefix: u8,
        tag: T,
    ) -> GlobalStateManagerResult<Option<Ipv6Net>> {
        if prefix > 128 {
            return Err(GlobalStateManagerError::InvalidPrefix(prefix));
        }

        let opt_allocation = self.find_random_allocation_v6(srng, prefix);

        // If we found a free subnet, add it to our allocations
        if let Some(allocation) = opt_allocation {
            // Add to our allocated pool
            self.allocated_v6.insert_ord(allocation);
            self.owner_tags_v6.insert(allocation, Some(tag));
            return Ok(Some(allocation));
        }

        // No allocation
        Ok(None)
    }

    /// Release a previously reserved/allocated IPv4 subnet.
    ///
    /// Returns the owner tag the subnet was registered with (`None` when it
    /// was reserved without a tag), or `NoAllocation` if the subnet is not
    /// currently allocated.
    #[instrument(level = "debug", skip(self), err)]
    pub fn release_allocation_v4(
        &mut self,
        allocation: Ipv4Net,
    ) -> GlobalStateManagerResult<Option<T>> {
        let Some(pos) = self.allocated_v4.iter().position(|x| *x == allocation) else {
            return Err(GlobalStateManagerError::NoAllocation);
        };

        let Some(opt_tag) = self.owner_tags_v4.remove(&allocation) else {
            return Err(GlobalStateManagerError::NoAllocation);
        };

        self.allocated_v4.remove(pos);

        Ok(opt_tag)
    }

    /// Release a previously reserved/allocated IPv6 subnet.
    ///
    /// Returns the owner tag the subnet was registered with (`None` when it
    /// was reserved without a tag), or `NoAllocation` if the subnet is not
    /// currently allocated.
    #[instrument(level = "debug", skip(self), err)]
    pub fn release_allocation_v6(
        &mut self,
        allocation: Ipv6Net,
    ) -> GlobalStateManagerResult<Option<T>> {
        let Some(pos) = self.allocated_v6.iter().position(|x| *x == allocation) else {
            return Err(GlobalStateManagerError::NoAllocation);
        };

        let Some(opt_tag) = self.owner_tags_v6.remove(&allocation) else {
            return Err(GlobalStateManagerError::NoAllocation);
        };

        // BUG FIX: this previously removed from `allocated_v4` (copy-paste
        // from the v4 variant). `pos` is an index into `allocated_v6`; the
        // old code both corrupted the v4 allocation list and left the
        // released v6 subnet marked allocated forever.
        self.allocated_v6.remove(pos);

        Ok(opt_tag)
    }

    /// True if this pool has any IPv4 scope configured.
    pub fn is_ipv4(&self) -> bool {
        !self.scope_v4.is_empty()
    }

    /// True if this pool has an IPv4 scope and at least one live allocation.
    pub fn is_ipv4_allocated(&self) -> bool {
        self.is_ipv4() && !self.allocated_v4.is_empty()
    }

    /// True if this pool has any IPv6 scope configured.
    pub fn is_ipv6(&self) -> bool {
        !self.scope_v6.is_empty()
    }

+ pub fn is_ipv6_allocated(&self) -> bool { + self.is_ipv6() && !self.allocated_v6.is_empty() + } + + pub fn is_in_use bool>(&self, mut check: F) -> bool { + for (netv4, opt_tag) in self.owner_tags_v4.iter() { + if let Some(tag) = opt_tag.as_ref() { + if check(IpNet::V4(*netv4), tag) { + return true; + } + } + } + for (netv6, opt_tag) in self.owner_tags_v6.iter() { + if let Some(tag) = opt_tag.as_ref() { + if check(IpNet::V6(*netv6), tag) { + return true; + } + } + } + + false + } + + #[instrument(level = "debug", skip_all, err)] + pub fn clear_ipv4 bool>( + &mut self, + mut check: F, + ) -> GlobalStateManagerResult<()> { + if !self.is_ipv4() { + return Ok(()); + } + if self.is_in_use(|n, t| match n { + IpNet::V4(ipv4_net) => check(ipv4_net, t), + IpNet::V6(_ipv6_net) => false, + }) { + return Err(GlobalStateManagerError::ResourceInUse( + "AddressPool-v4".to_owned(), + )); + } + assert!(self.owner_tags_v4.is_empty(), "tags should be empty"); + self.scope_v4.clear(); + self.allocated_v4.clear(); + self.owner_tags_v4.clear(); + Ok(()) + } + + #[instrument(level = "debug", skip_all, err)] + pub fn clear_ipv6 bool>( + &mut self, + mut check: F, + ) -> GlobalStateManagerResult<()> { + if !self.is_ipv6() { + return Ok(()); + } + if self.is_in_use(|n, t| match n { + IpNet::V4(_ipv4_net) => false, + IpNet::V6(ipv6_net) => check(ipv6_net, t), + }) { + return Err(GlobalStateManagerError::ResourceInUse( + "AddressPool-v6".to_owned(), + )); + } + assert!(self.owner_tags_v6.is_empty(), "tags should be empty"); + self.scope_v6.clear(); + self.allocated_v6.clear(); + self.owner_tags_v6.clear(); + + Ok(()) + } + + ///////////////////////////////////////////////////////////////////// + + fn range_in_prefix_32(scope_prefix: u8, iterable_prefix_bits: u8) -> u32 { + // If we're allocating addresses, exclude scope's network and broadcast address + if scope_prefix + iterable_prefix_bits == 32 { + // Subtract two from total + if scope_prefix == 0 { + // Overflow case + 0xFFFF_FFFEu32 + } 
else { + // Non-overflow case + (1u32 << iterable_prefix_bits) - 2 + } + } else { + // network only iteration + 1u32 << iterable_prefix_bits + } + } + + fn range_in_prefix_128(scope_prefix: u8, iterable_prefix_bits: u8) -> u128 { + // If we're allocating addresses, exclude scope's network and broadcast address + if scope_prefix + iterable_prefix_bits == 128 { + // Subtract two from total + if scope_prefix == 0 { + // Overflow case + 0xFFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFEu128 + } else { + // Non-overflow case + (1u128 << iterable_prefix_bits) - 2 + } + } else { + // network only iteration + 1u128 << iterable_prefix_bits + } + } + + fn find_random_allocation_v4(&self, srng: &mut StableRng, prefix: u8) -> Option { + // Scope ranges to iterate + let mut scope_ranges = Vec::<(Ipv4Net, u8, u32)>::new(); + let mut total_subnets = 0u32; + + // Build range set from scopes, minus the prefix to allocate + for scope in self.scope_v4.iter().copied() { + // If the prefix we are looking to allocate doesn't fit in this scope + // then we exclude it + if scope.prefix_len() > prefix { + continue; + } + + // Get the number of prefix bits we can iterate + let iterable_prefix_bits = prefix - scope.prefix_len(); + let iterable_range = Self::range_in_prefix_32(scope.prefix_len(), iterable_prefix_bits); + + // Scope ranges to try + scope_ranges.push((scope, iterable_prefix_bits, iterable_range)); + total_subnets += iterable_range; + } + if total_subnets == 0 { + // No range + return None; + } + + // Choose a random subnet to start with + let chosen_subnet_index = srng.next_u32(0, total_subnets - 1); + + // Find the starting scope and starting subnet index within + // the scope of the chosen subnet index + let mut scope_index = 0usize; + let mut scope_start_subnet_index = 0u32; + loop { + assert!( + scope_index < scope_ranges.len(), + "should always have chosen a starting point inside a scope" + ); + + let scope_end_subnet_index = scope_start_subnet_index + scope_ranges[scope_index].2; 
+ if chosen_subnet_index < scope_end_subnet_index { + break; + } + + // chosen starting point is in the next scope + scope_index += 1; + scope_start_subnet_index = scope_end_subnet_index; + } + let initial_subnet_index = chosen_subnet_index; + let initial_scope_index = scope_index; + + // Iterate forward until we find a free range + let mut current_subnet_index = initial_subnet_index; + let mut current_scope_index = initial_scope_index; + let mut current_scope_start_subnet_index = scope_start_subnet_index; + let mut current_scope_end_subnet_index = + scope_start_subnet_index + scope_ranges[scope_index].2; + + loop { + // Get the net at this current subnet index + let netbits = u32::from(scope_ranges[current_scope_index].0.network()); + let subnetbits = if prefix == 32 { + // Allocating addresses + ((current_subnet_index - current_scope_start_subnet_index) + 1) << (32 - prefix) + } else { + // Allocating subnets + (current_subnet_index - current_scope_start_subnet_index) << (32 - prefix) + }; + let net = Ipv4Net::new(Ipv4Addr::from(netbits | subnetbits), prefix) + .expect("prefix must be valid"); + // See if this net is available + if self.get_overlaps_v4(net).is_empty() { + break Some(net); + } + // If not, go to the next subnet + current_subnet_index += 1; + + // If we got back to the beginning we failed to allocate + if current_scope_index == initial_scope_index + && current_subnet_index == initial_subnet_index + { + break None; + } + + // If we've reached the end of this scope then go to the next scope + if current_subnet_index == current_scope_end_subnet_index { + current_scope_index += 1; + // Wrap around + if current_scope_index == scope_ranges.len() { + current_subnet_index = 0; + current_scope_index = 0; + current_scope_start_subnet_index = 0; + } else { + current_scope_start_subnet_index = current_scope_end_subnet_index; + } + current_scope_end_subnet_index = + current_scope_start_subnet_index + scope_ranges[current_scope_index].2; + } + } + } + + fn 
find_random_allocation_v6(&self, srng: &mut StableRng, prefix: u8) -> Option { + // Scope ranges to iterate + let mut scope_ranges = Vec::<(Ipv6Net, u8, u128)>::new(); + let mut total_subnets = 0u128; + + // Build range set from scopes, minus the prefix to allocate + for scope in self.scope_v6.iter().copied() { + // If the prefix we are looking to allocate doesn't fit in this scope + // then we exclude it + if scope.prefix_len() > prefix { + continue; + } + + // Get the number of prefix bits we can iterate + let iterable_prefix_bits = prefix - scope.prefix_len(); + let iterable_range = + Self::range_in_prefix_128(scope.prefix_len(), iterable_prefix_bits); + + // Scope ranges to try + scope_ranges.push((scope, iterable_prefix_bits, iterable_range)); + total_subnets += iterable_range; + } + if total_subnets == 0 { + // No range + return None; + } + + // Choose a random subnet to start with + let chosen_subnet_index = srng.next_u128(0, total_subnets - 1); + + // Find the starting scope and starting subnet index within + // the scope of the chosen subnet index + let mut scope_index = 0usize; + let mut scope_start_subnet_index = 0u128; + loop { + assert!( + scope_index < scope_ranges.len(), + "should always have chosen a starting point inside a scope" + ); + + let scope_end_subnet_index = scope_start_subnet_index + scope_ranges[scope_index].2; + if chosen_subnet_index < scope_end_subnet_index { + break; + } + + // chosen starting point is in the next scope + scope_index += 1; + scope_start_subnet_index = scope_end_subnet_index; + } + let initial_subnet_index = chosen_subnet_index; + let initial_scope_index = scope_index; + + // Iterate forward until we find a free range + let mut current_subnet_index = initial_subnet_index; + let mut current_scope_index = initial_scope_index; + let mut current_scope_start_subnet_index = scope_start_subnet_index; + let mut current_scope_end_subnet_index = + scope_start_subnet_index + scope_ranges[scope_index].2; + + loop { + // Get the 
net at this current subnet index + let netbits = u128::from(scope_ranges[current_scope_index].0.network()); + let subnetbits = if prefix == 128 { + // Allocating addresses + ((current_subnet_index - current_scope_start_subnet_index) + 1) << (128 - prefix) + } else { + // Allocating subnets + (current_subnet_index - current_scope_start_subnet_index) << (128 - prefix) + }; + let net = Ipv6Net::new(Ipv6Addr::from(netbits | subnetbits), prefix) + .expect("prefix must be valid"); + // See if this net is available + if self.get_overlaps_v6(net).is_empty() { + break Some(net); + } + // If not, go to the next subnet + current_subnet_index += 1; + + // If we got back to the beginning we failed to allocate + if current_scope_index == initial_scope_index + && current_subnet_index == initial_subnet_index + { + break None; + } + + // If we've reached the end of this scope then go to the next scope + if current_subnet_index == current_scope_end_subnet_index { + current_scope_index += 1; + // Wrap around + if current_scope_index == scope_ranges.len() { + current_subnet_index = 0; + current_scope_index = 0; + current_scope_start_subnet_index = 0; + } else { + current_scope_start_subnet_index = current_scope_end_subnet_index; + } + current_scope_end_subnet_index = + current_scope_start_subnet_index + scope_ranges[current_scope_index].2; + } + } + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/global_state_manager_inner.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/global_state_manager_inner.rs new file mode 100644 index 00000000..52d5f5a4 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/global_state_manager_inner.rs @@ -0,0 +1,1077 @@ +use super::*; + +#[derive(Debug, Clone)] +pub(super) struct Allocation { + pub config: config::Allocation, + pub address_pool: AddressPool<()>, +} + +#[derive(Debug, Clone)] +pub(super) struct GlobalStateManagerInner { + unlocked_inner: Arc, + 
srng: StableRng, + default_network: Option, + default_model: Option, + default_pool: Option, + models: imbl::HashMap, + allocations: imbl::HashMap>, + allocated_machines: imbl::HashSet, + profile_state_registry: StateRegistry, + machine_state_registry: StateRegistry, + template_state_registry: StateRegistry, + network_state_registry: StateRegistry, + blueprint_state_registry: StateRegistry, +} + +impl GlobalStateManagerInner { + /////////////////////////////////////////////////////////// + /// Public Interface + + pub fn new(unlocked_inner: Arc) -> Self { + GlobalStateManagerInner { + unlocked_inner, + srng: StableRng::new(0), + default_network: None, + default_model: None, + default_pool: None, + models: imbl::HashMap::new(), + allocations: imbl::HashMap::new(), + allocated_machines: imbl::HashSet::new(), + profile_state_registry: StateRegistry::new(), + machine_state_registry: StateRegistry::new(), + template_state_registry: StateRegistry::new(), + network_state_registry: StateRegistry::new(), + blueprint_state_registry: StateRegistry::new(), + } + } + + #[instrument(level = "debug", skip_all, err)] + pub fn execute_config(&mut self, cfg: config::Config) -> GlobalStateManagerResult<()> { + // Create random number generator + if let Some(seed) = cfg.seed { + self.srng = StableRng::new(seed); + } + + // Set default network name + if let Some(default_network) = cfg.default_network { + self.default_network = Some(default_network); + } + + // Set default model name + if let Some(default_model) = cfg.default_model { + self.default_model = Some(default_model); + } + + // Set default pool name + if let Some(default_pool) = cfg.default_pool { + self.default_pool = Some(default_pool); + } + + // Import all allocation definitions + self.execute_config_allocations(&cfg.allocations)?; + + // Import all models + for (name, model) in cfg.models { + self.execute_config_model(&name, &model)?; + } + + // Create all profile states + for (name, profile) in cfg.profiles { + 
self.execute_config_profile(&name, &profile)?; + } + + // Create all network states + // Don't process gateways yet because they will depend on networks existing + for (name, network) in &cfg.networks { + self.execute_config_network(name, network)?; + } + // Process all ipv4 and ipv6 configurations + for (name, network) in &cfg.networks { + if let Some(ipv4) = network.ipv4.as_ref() { + self.execute_config_network_ipv4(name, ipv4)?; + } + if let Some(ipv6) = network.ipv6.as_ref() { + self.execute_config_network_ipv6(name, ipv6)?; + } + } + // Process all network gateways + for (name, network) in &cfg.networks { + if let Some(ipv4) = network.ipv4.as_ref() { + if let Some(ipv4gw) = ipv4.gateway.as_ref() { + self.execute_config_network_ipv4_gateway(name, ipv4gw)?; + } + } + if let Some(ipv6) = network.ipv6.as_ref() { + if let Some(ipv6gw) = ipv6.gateway.as_ref() { + self.execute_config_network_ipv6_gateway(name, ipv6gw)?; + } + } + } + + // Create all blueprint states + // Don't process gateways yet because they will depend on blueprints existing + for (name, blueprint) in &cfg.blueprints { + self.execute_config_blueprint(name, blueprint)?; + } + // Process all ipv4 and ipv6 configurations + for (name, blueprint) in &cfg.blueprints { + if let Some(ipv4) = blueprint.ipv4.as_ref() { + self.execute_config_blueprint_ipv4(name, ipv4)?; + } + if let Some(ipv6) = blueprint.ipv6.as_ref() { + self.execute_config_blueprint_ipv6(name, ipv6)?; + } + } + // Process all blueprint gateways + for (name, blueprint) in &cfg.blueprints { + if let Some(ipv4) = blueprint.ipv4.as_ref() { + if let Some(ipv4gw) = ipv4.gateway.as_ref() { + self.execute_config_blueprint_ipv4_gateway(name, ipv4gw)?; + } + } + if let Some(ipv6) = blueprint.ipv6.as_ref() { + if let Some(ipv6gw) = ipv6.gateway.as_ref() { + self.execute_config_blueprint_ipv6_gateway(name, ipv6gw)?; + } + } + } + + // Create all template states + for (name, template) in &cfg.templates { + self.execute_config_template(name, 
template)?; + } + + // Create all machine states + for (name, machine) in &cfg.machines { + self.execute_config_machine(name, machine)?; + } + + Ok(()) + } + + pub fn allocate(&mut self, profile: String) -> GlobalStateManagerResult { + // Get current profile state + let Some(profile_state_id) = self.profile_state_registry.get_state_id_by_name(&profile) + else { + return Err(GlobalStateManagerError::ProfileNotFound(profile)); + }; + + // Get the next instance from the definition + loop { + // Move to the next profile instance + let mut profile_state = self.profile_states().get_state(profile_state_id)?; + let Some(instance_def) = profile_state.next_instance() else { + return Err(GlobalStateManagerError::ProfileComplete(profile)); + }; + self.profile_states_mut().set_state(profile_state); + + let machine_state_id = match instance_def { + config::Instance::Machine { + machine: machine_names, + } => { + // Filter out machines that are already allocated + let opt_machine_states_ids = machine_names.try_filter_map(|name| { + let Some(machine_state_id) = + self.machine_states().get_state_id_by_name(name) + else { + return Err(GlobalStateManagerError::MachineNotFound(name.clone())); + }; + if self.allocated_machines.contains(&machine_state_id) { + Ok(None) + } else { + Ok(Some(machine_state_id)) + } + })?; + let Some(machine_state_ids) = opt_machine_states_ids else { + // All machines in this instance are allocated + continue; + }; + + // Choose a machine state to activate + let machine_state_id = self.srng.weighted_choice(machine_state_ids); + + // Activate it + self.allocated_machines.insert(machine_state_id); + + machine_state_id + } + config::Instance::Template { + template: template_names, + } => { + // Filter out templates that are no longer active + let opt_template_states = template_names.try_filter_map(|name| { + let Some(template_state_id) = + self.template_states().get_state_id_by_name(name) + else { + return 
Err(GlobalStateManagerError::TemplateNotFound(name.clone())); + }; + let template_state = self + .template_states() + .get_state(template_state_id) + .expect("must exist"); + if !template_state.is_active(self) { + Ok(None) + } else { + Ok(Some(template_state)) + } + })?; + let Some(template_states) = opt_template_states else { + // No templates in this instance are still active + continue; + }; + + // Chose a template + let mut template_state = self.srng.weighted_choice(template_states); + + // Generate a machine from the template + let machine_state_id = template_state.generate(self)?; + + // Save the updated template + self.template_states_mut().set_state(template_state); + + machine_state_id + } + }; + + break Ok(machine_state_id.external_id()); + } + } + + pub fn release(&mut self, machine_id: MachineId) -> GlobalStateManagerResult<()> { + let id = StateId::::new(machine_id); + if self.allocated_machines.contains(&id) { + // Was a fixed machine, so we leave the machine state so it can + // be reallocated later + self.allocated_machines.remove(&id); + } else { + // Was a templated machine, so remove the machine state + let machine_state = self.machine_states().get_state(id)?; + machine_state.release(self); + self.machine_states_mut().release_id(id)?; + } + + Ok(()) + } + + /////////////////////////////////////////////////////////// + /// Private Implementation + + pub(super) fn srng(&mut self) -> &mut StableRng { + &mut self.srng + } + + pub(super) fn or_default_network( + &self, + network: Option, + ) -> GlobalStateManagerResult { + match network { + Some(x) => Ok(x), + None => self + .default_network + .clone() + .ok_or(GlobalStateManagerError::NoDefaultNetwork), + } + } + pub(super) fn or_default_model( + &self, + model: Option, + ) -> GlobalStateManagerResult { + match model { + Some(x) => Ok(x), + None => self + .default_model + .clone() + .ok_or(GlobalStateManagerError::NoDefaultModel), + } + } + pub(super) fn or_default_pool(&self, pool: Option) -> 
GlobalStateManagerResult { + match pool { + Some(x) => Ok(x), + None => self + .default_pool + .clone() + .ok_or(GlobalStateManagerError::NoDefaultPool), + } + } + + pub(super) fn models(&self) -> &imbl::HashMap { + &self.models + } + pub(super) fn allocations(&self) -> &imbl::HashMap> { + &self.allocations + } + + pub(super) fn profile_states(&self) -> &StateRegistry { + &self.profile_state_registry + } + pub(super) fn machine_states(&self) -> &StateRegistry { + &self.machine_state_registry + } + pub(super) fn template_states(&self) -> &StateRegistry { + &self.template_state_registry + } + pub(super) fn network_states(&self) -> &StateRegistry { + &self.network_state_registry + } + pub(super) fn blueprint_states(&self) -> &StateRegistry { + &self.blueprint_state_registry + } + + pub(super) fn profile_states_mut(&mut self) -> &mut StateRegistry { + &mut self.profile_state_registry + } + pub(super) fn machine_states_mut(&mut self) -> &mut StateRegistry { + &mut self.machine_state_registry + } + pub(super) fn template_states_mut(&mut self) -> &mut StateRegistry { + &mut self.template_state_registry + } + pub(super) fn network_states_mut(&mut self) -> &mut StateRegistry { + &mut self.network_state_registry + } + pub(super) fn blueprint_states_mut(&mut self) -> &mut StateRegistry { + &mut self.blueprint_state_registry + } + + #[instrument(level = "debug", skip_all, err)] + fn execute_config_allocations( + &mut self, + config_allocations: &HashMap, + ) -> GlobalStateManagerResult<()> { + for (name, allocation_config) in config_allocations { + if self.allocations.contains_key(name) { + return Err(GlobalStateManagerError::DuplicateName(name.clone())); + } + let address_pool = self.resolve_address_pool(name.clone(), config_allocations)?; + + let allocation = Arc::new(Allocation { + config: allocation_config.clone(), + address_pool, + }); + debug!("Added allocation: {}: {:?}", name, allocation); + self.allocations.insert(name.clone(), allocation); + } + Ok(()) + } + 
#[instrument(level = "debug", skip(self, model), err)] + fn execute_config_model( + &mut self, + name: &str, + model: &config::Model, + ) -> GlobalStateManagerResult<()> { + if self.models.contains_key(name) { + return Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + debug!("Added model: {}: {:?}", name, model); + self.models.insert(name.to_owned(), model.to_owned()); + Ok(()) + } + + #[instrument(level = "debug", skip(self, profile), err)] + fn execute_config_profile( + &mut self, + name: &str, + profile: &config::Profile, + ) -> GlobalStateManagerResult<()> { + if self + .profile_state_registry + .get_state_id_by_name(name) + .is_some() + { + return Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + + let id = self.profile_state_registry.allocate_id(); + let state = ProfileState::new(id, name.to_owned(), profile.clone()); + self.profile_state_registry + .attach_state(state) + .expect("must attach"); + + debug!("Added profile: {}: {:?}", name, profile); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, network), err)] + fn execute_config_network( + &mut self, + name: &str, + network: &config::Network, + ) -> GlobalStateManagerResult<()> { + if self + .network_state_registry + .get_state_id_by_name(name) + .is_some() + { + return Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + + let id = self.network_state_registry.allocate_id(); + let state = { + let mut network_state = + NetworkState::new(id, Some(name.to_owned()), NetworkOrigin::Direct); + + // Set model + let model_name = self.or_default_model(network.model.to_owned())?; + let model = self + .models + .get(&model_name) + .ok_or(GlobalStateManagerError::ModelNotFound(model_name))?; + network_state.set_model(NetworkStateModelParams { + latency: model.latency.clone(), + distance: model.distance.clone(), + loss: model.loss, + }); + + Ok(network_state) + } + .inspect_err(|_| { + self.network_state_registry + .release_id(id) + .expect("must release"); 
+ })?; + self.network_state_registry + .attach_state(state) + .expect("must attach"); + + debug!("Added network: {}: {:?}", name, network); + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv4), err)] + fn execute_config_network_ipv4( + &mut self, + name: &str, + ipv4: &config::NetworkIpv4, + ) -> GlobalStateManagerResult<()> { + let network_state_id = self + .network_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut network_state = self + .network_state_registry + .get_state(network_state_id) + .expect("must exist"); + + // Get IPV4 allocation + let address_pool = &self + .allocations + .get(&ipv4.allocation) + .cloned() + .ok_or_else(|| GlobalStateManagerError::AllocationNotFound(ipv4.allocation.clone()))? + .address_pool; + let scope = address_pool.scopes_v4(); + let reserve = address_pool.allocations_v4(); + + // Set IPV4 config + network_state.set_ipv4( + self, + NetworkStateIpv4Params { + scope, + reserve, + super_net: None, + }, + )?; + + // Update state + self.network_state_registry.set_state(network_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv6), err)] + fn execute_config_network_ipv6( + &mut self, + name: &str, + ipv6: &config::NetworkIpv6, + ) -> GlobalStateManagerResult<()> { + let network_state_id = self + .network_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut network_state = self + .network_state_registry + .get_state(network_state_id) + .expect("must exist"); + + // Get IPV4 allocation + let address_pool = &self + .allocations + .get(&ipv6.allocation) + .cloned() + .ok_or_else(|| GlobalStateManagerError::AllocationNotFound(ipv6.allocation.clone()))? 
+ .address_pool; + let scope = address_pool.scopes_v6(); + let reserve = address_pool.allocations_v6(); + + // Set IPV4 config + network_state.set_ipv6( + self, + NetworkStateIpv6Params { + scope, + reserve, + super_net: None, + }, + )?; + + // Update state + self.network_state_registry.set_state(network_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv4gw), err)] + fn execute_config_network_ipv4_gateway( + &mut self, + name: &str, + ipv4gw: &config::NetworkGateway, + ) -> GlobalStateManagerResult<()> { + let network_state_id = self + .network_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut network_state = self + .network_state_registry + .get_state(network_state_id) + .expect("must exist"); + + let translation = ipv4gw.translation; + let upnp = ipv4gw.upnp; + let external_network_name = self.or_default_network(ipv4gw.network.clone())?; + let external_network = self + .network_state_registry + .get_state_id_by_name(&external_network_name) + .ok_or(GlobalStateManagerError::NetworkNotFound( + external_network_name, + ))?; + + let gateway_params = NetworkStateIpv4GatewayParams { + translation, + upnp, + external_network, + internal_address: None, + external_address: None, + }; + + network_state.set_ipv4_gateway(self, gateway_params)?; + + // Update state + self.network_state_registry.set_state(network_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv6gw), err)] + fn execute_config_network_ipv6_gateway( + &mut self, + name: &str, + ipv6gw: &config::NetworkGateway, + ) -> GlobalStateManagerResult<()> { + let network_state_id = self + .network_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut network_state = self + .network_state_registry + .get_state(network_state_id) + .expect("must exist"); + + let translation = ipv6gw.translation; + let upnp = ipv6gw.upnp; + let external_network_name = self.or_default_network(ipv6gw.network.clone())?; + let external_network = 
self + .network_state_registry + .get_state_id_by_name(&external_network_name) + .ok_or(GlobalStateManagerError::NetworkNotFound( + external_network_name, + ))?; + + let gateway_params = NetworkStateIpv4GatewayParams { + translation, + upnp, + external_network, + internal_address: None, + external_address: None, + }; + + network_state.set_ipv4_gateway(self, gateway_params)?; + + // Update state + self.network_state_registry.set_state(network_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, blueprint), err)] + fn execute_config_blueprint( + &mut self, + name: &str, + blueprint: &config::Blueprint, + ) -> GlobalStateManagerResult<()> { + if self + .blueprint_state_registry + .get_state_id_by_name(name) + .is_some() + { + return Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + + let id = self.blueprint_state_registry.allocate_id(); + let state = { + let mut blueprint_state = BlueprintState::new(id, name.to_owned()); + + // Set model + let model = match blueprint.model.to_owned() { + Some(x) => x, + None => WeightedList::Single( + self.default_model + .clone() + .ok_or(GlobalStateManagerError::NoDefaultModel)?, + ), + }; + blueprint_state.set_model(model); + blueprint_state.set_limit_network_count( + blueprint + .limits + .to_owned() + .network_count + .map(|wl| self.srng().weighted_choice(wl)), + ); + + Ok(blueprint_state) + } + .inspect_err(|_| { + self.blueprint_state_registry + .release_id(id) + .expect("must release"); + })?; + self.blueprint_state_registry + .attach_state(state) + .expect("must attach"); + + debug!("Added blueprint: {}: {:?}", name, blueprint); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv4), err)] + fn execute_config_blueprint_ipv4( + &mut self, + name: &str, + ipv4: &config::BlueprintIpv4, + ) -> GlobalStateManagerResult<()> { + let blueprint_state_id = self + .blueprint_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut blueprint_state = self + 
.blueprint_state_registry + .get_state(blueprint_state_id) + .expect("must exist"); + + let locations = match ipv4.location.clone() { + config::BlueprintLocation::Allocation { allocation } => { + BlueprintLocationsList::Allocations { + allocations: allocation, + } + } + config::BlueprintLocation::Network { network } => { + if let Some(network) = network { + let networks = network.try_map(|n| { + self.network_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| GlobalStateManagerError::NetworkNotFound(n.clone())) + })?; + BlueprintLocationsList::Networks { networks } + } else { + let default_network = self.or_default_network(None)?; + let default_network_state_id = self + .network_state_registry + .get_state_id_by_name(&default_network) + .ok_or(GlobalStateManagerError::NetworkNotFound(default_network))?; + + BlueprintLocationsList::Networks { + networks: WeightedList::Single(default_network_state_id), + } + } + } + }; + + let prefix = ipv4.prefix.clone(); + + // Set IPV4 config + blueprint_state.set_ipv4( + self, + BlueprintStateIpv4Params { + locations, + prefix, + gateway: None, + }, + )?; + + // Update state + self.blueprint_state_registry.set_state(blueprint_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv4gw), err)] + fn execute_config_blueprint_ipv4_gateway( + &mut self, + name: &str, + ipv4gw: &config::BlueprintGateway, + ) -> GlobalStateManagerResult<()> { + let blueprint_state_id = self + .blueprint_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut blueprint_state = self + .blueprint_state_registry + .get_state(blueprint_state_id) + .expect("must exist"); + + let translation = ipv4gw.translation.clone(); + let upnp = ipv4gw.upnp; + let locations = match ipv4gw.location.clone() { + Some(config::TemplateLocation::Network { network }) => { + let networks = network.try_map(|n| { + self.network_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| 
GlobalStateManagerError::NetworkNotFound(n.clone())) + })?; + Some(TemplateLocationsList::Networks { networks }) + } + Some(config::TemplateLocation::Blueprint { blueprint }) => { + let blueprints = blueprint.try_map(|n| { + self.blueprint_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| GlobalStateManagerError::BlueprintNotFound(n.clone())) + })?; + Some(TemplateLocationsList::Blueprints { blueprints }) + } + None => None, + }; + + let gateway_params = BlueprintStateGatewayParams { + translation, + upnp, + locations, + }; + + blueprint_state.set_ipv4_gateway(self, Some(gateway_params))?; + + // Update state + self.blueprint_state_registry.set_state(blueprint_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv6), err)] + fn execute_config_blueprint_ipv6( + &mut self, + name: &str, + ipv6: &config::BlueprintIpv6, + ) -> GlobalStateManagerResult<()> { + let blueprint_state_id = self + .blueprint_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut blueprint_state = self + .blueprint_state_registry + .get_state(blueprint_state_id) + .expect("must exist"); + + let locations = match ipv6.location.clone() { + config::BlueprintLocation::Allocation { allocation } => { + BlueprintLocationsList::Allocations { + allocations: allocation, + } + } + config::BlueprintLocation::Network { network } => { + if let Some(network) = network { + let networks = network.try_map(|n| { + self.network_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| GlobalStateManagerError::NetworkNotFound(n.clone())) + })?; + BlueprintLocationsList::Networks { networks } + } else { + let default_network = self.or_default_network(None)?; + let default_network_state_id = self + .network_state_registry + .get_state_id_by_name(&default_network) + .ok_or(GlobalStateManagerError::NetworkNotFound(default_network))?; + + BlueprintLocationsList::Networks { + networks: WeightedList::Single(default_network_state_id), + } + } + } + }; + + let prefix = 
ipv6.prefix.clone(); + + // Set IPV6 config + blueprint_state.set_ipv6( + self, + BlueprintStateIpv6Params { + locations, + prefix, + gateway: None, + }, + )?; + + // Update state + self.blueprint_state_registry.set_state(blueprint_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, ipv6gw), err)] + fn execute_config_blueprint_ipv6_gateway( + &mut self, + name: &str, + ipv6gw: &config::BlueprintGateway, + ) -> GlobalStateManagerResult<()> { + let blueprint_state_id = self + .blueprint_state_registry + .get_state_id_by_name(name) + .expect("must exist"); + let mut blueprint_state = self + .blueprint_state_registry + .get_state(blueprint_state_id) + .expect("must exist"); + + let translation = ipv6gw.translation.clone(); + let upnp = ipv6gw.upnp; + let locations = match ipv6gw.location.clone() { + Some(config::TemplateLocation::Network { network }) => { + let networks = network.try_map(|n| { + self.network_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| GlobalStateManagerError::NetworkNotFound(n.clone())) + })?; + Some(TemplateLocationsList::Networks { networks }) + } + Some(config::TemplateLocation::Blueprint { blueprint }) => { + let blueprints = blueprint.try_map(|n| { + self.blueprint_state_registry + .get_state_id_by_name(n) + .ok_or_else(|| GlobalStateManagerError::BlueprintNotFound(n.clone())) + })?; + Some(TemplateLocationsList::Blueprints { blueprints }) + } + None => None, + }; + + let gateway_params = BlueprintStateGatewayParams { + translation, + upnp, + locations, + }; + + blueprint_state.set_ipv6_gateway(self, Some(gateway_params))?; + + // Update state + self.blueprint_state_registry.set_state(blueprint_state); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, template), err)] + fn execute_config_template( + &mut self, + name: &str, + template: &config::Template, + ) -> GlobalStateManagerResult<()> { + if self + .template_state_registry + .get_state_id_by_name(name) + .is_some() + { + return 
Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + + let id = self.template_state_registry.allocate_id(); + let state = { + let mut template_state = TemplateState::new(id, name.to_owned()); + + template_state.set_disable_capabilities(template.disable_capabilities.to_owned()); + if let Some(wl) = template.limits.to_owned().machine_count { + template_state.set_limit_machine_count(Some(self.srng().weighted_choice(wl))); + } + template_state + .set_limit_machines_per_network(template.limits.machines_per_network.clone()); + + match template.location.clone() { + config::TemplateLocation::Network { network } => { + let networks = network.try_map(|x| { + self.network_state_registry + .get_state_id_by_name(x) + .ok_or_else(|| GlobalStateManagerError::NetworkNotFound(x.clone())) + })?; + + template_state.set_networks_list(networks); + } + config::TemplateLocation::Blueprint { blueprint } => { + let blueprints = blueprint.try_map(|x| { + self.blueprint_state_registry + .get_state_id_by_name(x) + .ok_or_else(|| GlobalStateManagerError::BlueprintNotFound(x.clone())) + })?; + + template_state.set_blueprints_list(blueprints); + } + } + + Ok(template_state) + } + .inspect_err(|_| { + self.template_state_registry + .release_id(id) + .expect("must release"); + })?; + self.template_state_registry + .attach_state(state) + .expect("must attach"); + + debug!("Added template: {}: {:?}", name, template); + Ok(()) + } + + #[instrument(level = "debug", skip(self, machine), err)] + fn execute_config_machine( + &mut self, + name: &str, + machine: &config::Machine, + ) -> GlobalStateManagerResult<()> { + if self + .machine_state_registry + .get_state_id_by_name(name) + .is_some() + { + return Err(GlobalStateManagerError::DuplicateName(name.to_owned())); + } + + let id = self.machine_state_registry.allocate_id(); + let state = { + let mut machine_state = + MachineState::new(id, Some(name.to_owned()), MachineOrigin::Config); + + 
machine_state.set_disable_capabilities(machine.disable_capabilities.to_owned()); + machine_state.set_bootstrap(machine.bootstrap); + + // Create primary interface + let interface_name = machine_state.allocate_interface(None, None)?; + + match machine.location.to_owned() { + config::MachineLocation::Network { + network, + address4, + address6, + } => { + // Look up network + let network_state_id = self + .network_state_registry + .get_state_id_by_name(&network) + .ok_or(GlobalStateManagerError::NetworkNotFound(network))?; + + machine_state.attach_network(self, &interface_name, network_state_id)?; + if let Some(address4) = address4 { + machine_state.allocate_address_ipv4( + self, + &interface_name, + Some(address4), + None, + )?; + } + if let Some(address6) = address6 { + machine_state.allocate_address_ipv6( + self, + &interface_name, + Some(address6), + None, + )?; + } + } + } + + Ok(machine_state) + } + .inspect_err(|_| { + self.machine_state_registry + .release_id(id) + .expect("must release"); + })?; + self.machine_state_registry + .attach_state(state) + .expect("must attach"); + debug!("Added machine: {}: {:?}", name, machine); + Ok(()) + } + + #[instrument(level = "debug", skip(self, config_allocations), err)] + fn resolve_address_pool( + &self, + allocation_name: String, + config_allocations: &HashMap, + ) -> GlobalStateManagerResult> { + // Get the allocation config + let allocation = config_allocations + .get(&allocation_name) + .ok_or_else(|| GlobalStateManagerError::AllocationNotFound(allocation_name.clone()))?; + + // Create an address pool + let mut address_pool = AddressPool::<()>::new(); + + // Apply the scope present in the allocation + if let Some(scope4) = allocation.scope4.as_ref() { + for s in &scope4.scope4 { + address_pool.add_scope_v4(*s); + } + } + if let Some(scope6) = allocation.scope6.as_ref() { + for s in &scope6.scope6 { + address_pool.add_scope_v6(*s); + } + } + + // Reserve out any allocations that used this as their pool + let mut 
scope4_allocs: Vec = Vec::new(); + let mut scope6_allocs: Vec = Vec::new(); + + for (k, v) in config_allocations { + // Exclude our own allocation + if *k == allocation_name { + continue; + } + if let Some(scope4) = v.scope4.as_ref() { + let pool = self.or_default_pool(scope4.pool4.clone())?; + if pool == allocation_name { + for s in &scope4.scope4 { + scope4_allocs.push(*s); + scope4_allocs = Ipv4Net::aggregate(&scope4_allocs); + } + } + } + if let Some(scope6) = v.scope6.as_ref() { + let pool = self.or_default_pool(scope6.pool6.clone())?; + if pool == allocation_name { + for s in &scope6.scope6 { + scope6_allocs.push(*s); + scope6_allocs = Ipv6Net::aggregate(&scope6_allocs); + } + } + } + } + + for s in scope4_allocs { + address_pool.reserve_allocation_v4(s, None)?; + } + for s in scope6_allocs { + address_pool.reserve_allocation_v6(s, None)?; + } + + Ok(address_pool) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/mod.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/mod.rs new file mode 100644 index 00000000..ec2f42ef --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/mod.rs @@ -0,0 +1,123 @@ +mod address_pool; +mod global_state_manager_inner; +mod state; + +use super::*; + +use address_pool::*; +use global_state_manager_inner::*; +use state::*; + +#[derive(Debug)] +struct Machine {} + +#[derive(Debug)] +struct GlobalStateManagerUnlockedInner {} + +#[derive(Debug, Clone, ThisError, PartialEq, Eq)] +pub enum GlobalStateManagerError { + #[error("Invalid id: {0}")] + InvalidId(u64), + #[error("Invalid name: {0}")] + InvalidName(String), + #[error("Invalid prefix: {0}")] + InvalidPrefix(u8), + #[error("Already attached")] + AlreadyAttached, + #[error("Not attached")] + NotAttached, + #[error("Duplicate name: {0}")] + DuplicateName(String), + #[error("Profile complete: {0}")] + ProfileComplete(String), + #[error("Template complete: {0}")] + TemplateComplete(String), + 
#[error("Network complete: {0}")] + NetworkComplete(String), + #[error("Blueprint complete: {0}")] + BlueprintComplete(String), + #[error("Profile not found: {0}")] + ProfileNotFound(String), + #[error("Machine not found: {0}")] + MachineNotFound(String), + #[error("Network not found: {0}")] + NetworkNotFound(String), + #[error("Template not found: {0}")] + TemplateNotFound(String), + #[error("Blueprint not found: {0}")] + BlueprintNotFound(String), + #[error("Model not found: {0}")] + ModelNotFound(String), + #[error("Allocation not found: {0}")] + AllocationNotFound(String), + #[error("No default model")] + NoDefaultModel, + #[error("No default network")] + NoDefaultNetwork, + #[error("No default pool")] + NoDefaultPool, + #[error("No allocation available")] + NoAllocation, + #[error("Resource in use: {0}")] + ResourceInUse(String), + #[error("Invalid gateway")] + InvalidGateway, +} + +pub type GlobalStateManagerResult = Result; + +#[derive(Debug, Clone)] +pub struct GlobalStateManager { + unlocked_inner: Arc, + inner: Arc>, +} + +impl GlobalStateManager { + /////////////////////////////////////////////////////////// + /// Public Interface + pub fn new() -> Self { + let unlocked_inner = Arc::new(GlobalStateManagerUnlockedInner {}); + Self { + inner: Arc::new(Mutex::new(GlobalStateManagerInner::new( + unlocked_inner.clone(), + ))), + unlocked_inner, + } + } + + pub fn execute_config(&self, cfg: config::Config) -> GlobalStateManagerResult<()> { + let mut inner = self.inner.lock(); + let saved_state = (*inner).clone(); + match inner.execute_config(cfg) { + Ok(v) => Ok(v), + Err(e) => { + *inner = saved_state; + Err(e) + } + } + } + + pub fn allocate(&self, profile: String) -> GlobalStateManagerResult { + let mut inner = self.inner.lock(); + let saved_state = (*inner).clone(); + match inner.allocate(profile) { + Ok(v) => Ok(v), + Err(e) => { + *inner = saved_state; + Err(e) + } + } + } + + pub fn release(&self, machine_id: MachineId) -> GlobalStateManagerResult<()> { 
+ let mut inner = self.inner.lock(); + let saved_state = (*inner).clone(); + match inner.release(machine_id) { + Ok(v) => Ok(v), + Err(e) => { + *inner = saved_state; + Err(e) + } + } + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_locations_list.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_locations_list.rs new file mode 100644 index 00000000..469c8b9b --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_locations_list.rs @@ -0,0 +1,245 @@ +use super::*; + +/// Locations where a network can be instantiated when a blueprint is generated +#[derive(Debug, Clone)] +pub enum BlueprintLocationsList { + /// Network will be a new allocation + Allocations { allocations: WeightedList }, + /// Network will be allocated as a subnet of an existing network + Networks { + networks: WeightedList, + }, +} + +#[derive(Debug, Clone)] +pub struct NetworkLocation { + pub scope: Vec, + pub reserve: Vec, + pub super_net: Option, +} + +impl BlueprintLocationsList { + #[instrument(level = "debug", skip_all, err)] + pub fn pick_v4( + &self, + gsm_inner: &mut GlobalStateManagerInner, + prefix: &WeightedList, + ) -> GlobalStateManagerResult>> { + // Get maximum prefix + let max_prefix = prefix + .iter() + .max() + .copied() + .expect("must have at least one element"); + + // Get addresses for network + match self { + BlueprintLocationsList::Allocations { allocations } => { + // Get allocations which have subnets that would fit + // our maximum requested prefix + let Some(address_pools) = allocations.try_filter_map(|allocation_name| { + let allocation = gsm_inner + .allocations() + .get(allocation_name) + .expect("must exist"); + if allocation.address_pool.can_allocate_v4(max_prefix)? { + Ok(Some(allocation.address_pool.clone())) + } else { + Ok(None) + } + })? 
+ else { + return Ok(None); + }; + + // Pick an address pool + let mut address_pool = gsm_inner.srng().weighted_choice(address_pools); + + // Pick a prefix length that would fit in the subnet + let opt_subnet = prefix + .try_filter(|p| address_pool.can_allocate_v4(*p))? + .as_ref() + .map(|wl| { + let subnet_prefix = *gsm_inner.srng().weighted_choice_ref(wl); + + address_pool.allocate_random_v4(gsm_inner.srng(), subnet_prefix, ()) + }) + .transpose()? + .flatten(); + let Some(subnet) = opt_subnet else { + return Ok(None); + }; + Ok(Some(NetworkLocation { + scope: vec![subnet], + reserve: Vec::new(), + super_net: None, + })) + } + BlueprintLocationsList::Networks { networks } => { + // Get networks which have subnets that would fit + // our maximum requested prefix + let Some(available_networks) = networks.try_filter(|network_id| { + let super_network_state = gsm_inner + .network_states() + .get_state(*network_id) + .expect("must exist"); + + Ok(super_network_state.can_allocate_subnet_v4(None, max_prefix)) + })? 
+ else { + return Ok(None); + }; + + // Pick a network + let super_network_id = *gsm_inner.srng().weighted_choice_ref(&available_networks); + let mut super_network_state = gsm_inner + .network_states() + .get_state(super_network_id) + .expect("must exist"); + + // Pick a prefix that fits in this network and allocate from it + let opt_subnet = prefix + .filter(|p| super_network_state.can_allocate_subnet_v4(None, *p)) + .as_ref() + .map(|wl| { + let subnet_prefix = *gsm_inner.srng().weighted_choice_ref(wl); + + // Allocate subnet from this network + super_network_state.allocate_subnet_v4( + gsm_inner, + OwnerTag::Network(super_network_state.id()), + None, + subnet_prefix, + ) + }) + .transpose()?; + let Some(subnet) = opt_subnet else { + return Ok(None); + }; + + // Update network state + gsm_inner + .network_states_mut() + .set_state(super_network_state); + + Ok(Some(NetworkLocation { + scope: vec![subnet], + reserve: Vec::new(), + super_net: Some(super_network_id), + })) + } + } + } + + #[instrument(level = "debug", skip_all, err)] + pub fn pick_v6( + &self, + gsm_inner: &mut GlobalStateManagerInner, + prefix: &WeightedList, + ) -> GlobalStateManagerResult>> { + // Get maximum prefix + let max_prefix = prefix + .iter() + .max() + .copied() + .expect("must have at least one element"); + + // Get addresses for network + match self { + BlueprintLocationsList::Allocations { allocations } => { + // Get allocations which have subnets that would fit + // our maximum requested prefix + let Some(address_pools) = allocations.try_filter_map(|allocation_name| { + let allocation = gsm_inner + .allocations() + .get(allocation_name) + .expect("must exist"); + if allocation.address_pool.can_allocate_v6(max_prefix)? { + Ok(Some(allocation.address_pool.clone())) + } else { + Ok(None) + } + })? 
+ else { + return Ok(None); + }; + + // Pick an address pool + let mut address_pool = gsm_inner.srng().weighted_choice(address_pools); + + // Pick a prefix length that would fit in the subnet + let opt_subnet = prefix + .try_filter(|p| address_pool.can_allocate_v6(*p))? + .as_ref() + .map(|wl| { + let subnet_prefix = *gsm_inner.srng().weighted_choice_ref(wl); + + address_pool.allocate_random_v6(gsm_inner.srng(), subnet_prefix, ()) + }) + .transpose()? + .flatten(); + let Some(subnet) = opt_subnet else { + return Ok(None); + }; + Ok(Some(NetworkLocation { + scope: vec![subnet], + reserve: Vec::new(), + super_net: None, + })) + } + BlueprintLocationsList::Networks { networks } => { + // Get networks which have subnets that would fit + // our maximum requested prefix + let Some(available_networks) = networks.try_filter(|network_id| { + let super_network_state = gsm_inner + .network_states() + .get_state(*network_id) + .expect("must exist"); + + Ok(super_network_state.can_allocate_subnet_v6(None, max_prefix)) + })? 
+ else { + return Ok(None); + }; + + // Pick a network + let super_network_id = *gsm_inner.srng().weighted_choice_ref(&available_networks); + let mut super_network_state = gsm_inner + .network_states() + .get_state(super_network_id) + .expect("must exist"); + + // Pick a prefix that fits in this network and allocate from it + let opt_subnet = prefix + .filter(|p| super_network_state.can_allocate_subnet_v6(None, *p)) + .as_ref() + .map(|wl| { + let subnet_prefix = *gsm_inner.srng().weighted_choice_ref(wl); + + // Allocate subnet from this network + super_network_state.allocate_subnet_v6( + gsm_inner, + OwnerTag::Network(super_network_state.id()), + None, + subnet_prefix, + ) + }) + .transpose()?; + let Some(subnet) = opt_subnet else { + return Ok(None); + }; + + // Update network state + gsm_inner + .network_states_mut() + .set_state(super_network_state); + + Ok(Some(NetworkLocation { + scope: vec![subnet], + reserve: Vec::new(), + super_net: Some(super_network_id), + })) + } + } + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_state.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_state.rs new file mode 100644 index 00000000..ec8092b2 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/blueprint_state.rs @@ -0,0 +1,672 @@ +use super::*; + +#[derive(Debug)] +struct BlueprintStateImmutable { + /// The unique id of this blueprint + id: BlueprintStateId, + /// The name of this blueprint state + name: String, +} + +#[derive(Debug, Clone)] +pub struct BlueprintStateIpv4Params { + pub locations: BlueprintLocationsList, + pub prefix: WeightedList, + pub gateway: Option, +} + +#[derive(Debug, Clone)] +pub struct BlueprintStateIpv6Params { + pub locations: BlueprintLocationsList, + pub prefix: WeightedList, + pub gateway: Option, +} + +#[derive(Debug, Clone)] +pub struct BlueprintStateGatewayParams { + pub translation: WeightedList, + pub 
upnp: Probability, + pub locations: Option, +} + +#[derive(Debug, Clone)] +struct BlueprintStateIpv4 { + params: BlueprintStateIpv4Params, + gateway: Option, +} + +#[derive(Debug, Clone)] +struct BlueprintStateIpv6 { + params: BlueprintStateIpv6Params, + gateway: Option, +} + +#[derive(Debug, Clone)] +struct BlueprintStateIpv4Gateway { + params: BlueprintStateGatewayParams, +} + +#[derive(Debug, Clone)] +struct BlueprintStateIpv6Gateway { + params: BlueprintStateGatewayParams, +} + +#[derive(Debug, Clone)] +struct BlueprintStateFields { + limit_network_count: Option, + networks: imbl::Vector, + model: Option>>, + ipv4: Option, + ipv6: Option, +} + +#[derive(Debug, Clone)] +pub struct BlueprintState { + immutable: Arc, + fields: Arc, +} + +pub type BlueprintStateId = StateId; + +impl BlueprintState { + pub fn new(id: BlueprintStateId, name: String) -> Self { + Self { + immutable: Arc::new(BlueprintStateImmutable { id, name }), + fields: Arc::new(BlueprintStateFields { + limit_network_count: None, + networks: imbl::Vector::new(), + model: None, + ipv4: None, + ipv6: None, + }), + } + } + + #[instrument(level = "debug", skip(self))] + pub fn set_limit_network_count(&mut self, limit_network_count: Option) { + // Update fields + self.fields = Arc::new(BlueprintStateFields { + limit_network_count, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_model(&mut self, model: WeightedList) { + let model = Some(model.map(|x| Arc::new(x.clone()))); + // Update fields + self.fields = Arc::new(BlueprintStateFields { + model, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn clear_ipv4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4_gateway(gsm_inner)?; + + if self.fields.ipv4.is_none() { + return Ok(()); + }; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv4: None, + 
..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, _gsm_inner), err)] + pub fn clear_ipv4_gateway( + &mut self, + _gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + let Some(mut ipv4) = self.fields.ipv4.clone() else { + return Ok(()); + }; + let Some(_gateway) = ipv4.gateway else { + return Ok(()); + }; + + // Clear gateway + ipv4.gateway = None; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv4: Some(ipv4), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + params: BlueprintStateIpv4Params, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4(gsm_inner)?; + + let ipv4 = if let Some(ipv4) = self.fields.ipv4.clone() { + BlueprintStateIpv4 { params, ..ipv4 } + } else { + BlueprintStateIpv4 { + params, + gateway: None, + } + }; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv4: Some(ipv4), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv4_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + gateway_params: Option, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4_gateway(gsm_inner)?; + + let Some(mut ipv4) = self.fields.ipv4.clone() else { + return Err(GlobalStateManagerError::InvalidGateway); + }; + + if ipv4.gateway.is_some() { + if let Some(gateway_params) = gateway_params { + ipv4.gateway.as_mut().expect("must exist").params = gateway_params; + } else { + ipv4.gateway = None; + } + } else if let Some(gateway_params) = gateway_params { + ipv4.gateway = Some(BlueprintStateIpv4Gateway { + params: gateway_params, + }) + } + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv4: Some(ipv4), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), 
err)] + pub fn clear_ipv6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6_gateway(gsm_inner)?; + + if self.fields.ipv6.is_none() { + return Ok(()); + }; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv6: None, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, _gsm_inner), err)] + pub fn clear_ipv6_gateway( + &mut self, + _gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + let Some(mut ipv6) = self.fields.ipv6.clone() else { + return Ok(()); + }; + let Some(_gateway) = ipv6.gateway else { + return Ok(()); + }; + + // Clear gateway + ipv6.gateway = None; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv6: Some(ipv6), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + params: BlueprintStateIpv6Params, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6(gsm_inner)?; + + let ipv6 = if let Some(ipv6) = self.fields.ipv6.clone() { + BlueprintStateIpv6 { params, ..ipv6 } + } else { + BlueprintStateIpv6 { + params, + gateway: None, + } + }; + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv6: Some(ipv6), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv6_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + gateway_params: Option, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6_gateway(gsm_inner)?; + + let Some(mut ipv6) = self.fields.ipv6.clone() else { + return Err(GlobalStateManagerError::InvalidGateway); + }; + + if ipv6.gateway.is_some() { + if let Some(gateway_params) = gateway_params { + ipv6.gateway.as_mut().expect("must exist").params = gateway_params; + } else { + ipv6.gateway = None; + } + } else if let 
Some(gateway_params) = gateway_params { + ipv6.gateway = Some(BlueprintStateIpv6Gateway { + params: gateway_params, + }) + } + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + ipv6: Some(ipv6), + ..(*self.fields).clone() + }); + + Ok(()) + } + + pub fn is_active(&self, gsm_inner: &mut GlobalStateManagerInner) -> bool { + // Save a backup of the entire state + let backup = gsm_inner.clone(); + + // Make a copy of this blueprint state + let mut current_state = self.clone(); + + // See what would happen if we try to generate this blueprint + let ok = current_state.generate(gsm_inner).is_ok(); + + // Restore the backup + *gsm_inner = backup; + + // Return if this worked or not + ok + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + fn generate_model_inner( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + network_state: &mut NetworkState, + ) -> GlobalStateManagerResult<()> { + let Some(model_list) = self.fields.model.as_ref() else { + return Err(GlobalStateManagerError::NoDefaultModel); + }; + let model_name = (**gsm_inner.srng().weighted_choice_ref(model_list)).clone(); + + let Some(model) = gsm_inner.models().get(&model_name) else { + return Err(GlobalStateManagerError::ModelNotFound(model_name)); + }; + + let params = NetworkStateModelParams { + latency: model.latency.clone(), + distance: model.distance.clone(), + loss: model.loss, + }; + network_state.set_model(params); + Ok(()) + } + + /// Network filter that ensures we can allocate an ipv4 gateway address on a network + #[instrument(level = "debug", skip(self, gsm_inner), err)] + fn gateway_network_filter_v4( + &self, + gsm_inner: &GlobalStateManagerInner, + network_state_id: NetworkStateId, + ) -> GlobalStateManagerResult { + // Get the network state + let network_state = gsm_inner.network_states().get_state(network_state_id)?; + + // See if we can allocate on this network + let can_allocate = network_state.can_allocate_address_v4(None); + + Ok(can_allocate) + } + + 
/// Network filter that ensures we can allocate an ipv4 gateway address on a network + #[instrument(level = "debug", skip(self, gsm_inner), err)] + fn gateway_network_filter_v6( + &self, + gsm_inner: &GlobalStateManagerInner, + network_state_id: NetworkStateId, + ) -> GlobalStateManagerResult { + // Get the network state + let network_state = gsm_inner.network_states().get_state(network_state_id)?; + + // See if we can allocate on this network + let can_allocate = network_state.can_allocate_address_v6(None); + + Ok(can_allocate) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + fn generate_ipv4_inner( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + network_state: &mut NetworkState, + ) -> GlobalStateManagerResult<()> { + network_state.clear_ipv4(gsm_inner)?; + let Some(ipv4) = self.fields.ipv4.as_ref() else { + return Ok(()); + }; + + // Get addresses for network + let Some(NetworkLocation { + scope, + reserve, + super_net, + }) = ipv4 + .params + .locations + .pick_v4(gsm_inner, &ipv4.params.prefix)? + else { + return Err(GlobalStateManagerError::BlueprintComplete( + self.debug_name(), + )); + }; + + let params = NetworkStateIpv4Params { + scope, + reserve, + super_net, + }; + + let gateway_params = match ipv4.gateway.as_ref() { + Some(v4gw) => { + let translation = *gsm_inner + .srng() + .weighted_choice_ref(&v4gw.params.translation); + let upnp = gsm_inner.srng().probability_test(v4gw.params.upnp); + + let (external_network, external_address) = match v4gw.params.locations.as_ref() { + Some(locations_list) => { + // A external network location was specified, pick one + // Get a network to generate the machine on + let Some(mut gateway_network_state) = locations_list + .pick(gsm_inner, |gsm_inner, id| { + self.gateway_network_filter_v4(gsm_inner, id) + })? 
+ else { + return Err(GlobalStateManagerError::BlueprintComplete( + self.debug_name(), + )); + }; + + let gateway_network_state_id = gateway_network_state.id(); + + // Allocate an external address on this network + let external_interface_address = gateway_network_state + .allocate_address_v4( + gsm_inner, + OwnerTag::Gateway(network_state.id()), + None, + )?; + + // Update the network state + gsm_inner + .network_states_mut() + .set_state(gateway_network_state); + + ( + gateway_network_state_id, + Some(external_interface_address.ip), + ) + } + None => { + // No external network specified for gateway machine + // So use the same network as ourselves + (network_state.id(), None) + } + }; + + Some(NetworkStateIpv4GatewayParams { + translation, + upnp, + external_network, + internal_address: None, + external_address, + }) + } + None => None, + }; + + network_state.set_ipv4(gsm_inner, params)?; + if let Some(gateway_params) = gateway_params { + network_state.set_ipv4_gateway(gsm_inner, gateway_params)?; + } + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + fn generate_ipv6_inner( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + network_state: &mut NetworkState, + ) -> GlobalStateManagerResult<()> { + network_state.clear_ipv6(gsm_inner)?; + let Some(ipv6) = self.fields.ipv6.as_ref() else { + return Ok(()); + }; + + // Get addresses for network + let Some(NetworkLocation { + scope, + reserve, + super_net, + }) = ipv6 + .params + .locations + .pick_v6(gsm_inner, &ipv6.params.prefix)? 
+ else { + return Err(GlobalStateManagerError::BlueprintComplete( + self.debug_name(), + )); + }; + + let params = NetworkStateIpv6Params { + scope, + reserve, + super_net, + }; + + let gateway_params = match ipv6.gateway.as_ref() { + Some(v6gw) => { + let translation = *gsm_inner + .srng() + .weighted_choice_ref(&v6gw.params.translation); + let upnp = gsm_inner.srng().probability_test(v6gw.params.upnp); + + let (external_network, external_address) = match v6gw.params.locations.as_ref() { + Some(locations_list) => { + // A external network location was specified, pick one + // Get a network to generate the machine on + let Some(mut gateway_network_state) = locations_list + .pick(gsm_inner, |gsm_inner, id| { + self.gateway_network_filter_v6(gsm_inner, id) + })? + else { + return Err(GlobalStateManagerError::BlueprintComplete( + self.debug_name(), + )); + }; + + let gateway_network_state_id = gateway_network_state.id(); + + // Allocate an external address on this network + let external_interface_address = gateway_network_state + .allocate_address_v6( + gsm_inner, + OwnerTag::Gateway(network_state.id()), + None, + )?; + + // Update the network state + gsm_inner + .network_states_mut() + .set_state(gateway_network_state); + + ( + gateway_network_state_id, + Some(external_interface_address.ip), + ) + } + None => { + // No external network specified for gateway machine + // So use the same network as ourselves + (network_state.id(), None) + } + }; + + Some(NetworkStateIpv6GatewayParams { + translation, + upnp, + external_network, + internal_address: None, + external_address, + }) + } + None => None, + }; + + network_state.set_ipv6(gsm_inner, params)?; + if let Some(gateway_params) = gateway_params { + network_state.set_ipv6_gateway(gsm_inner, gateway_params)?; + } + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn generate( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult { + // See if there's room 
for another network + if let Some(limit_network_count) = self.fields.limit_network_count { + if self.fields.networks.len() >= limit_network_count { + return Err(GlobalStateManagerError::BlueprintComplete( + self.debug_name(), + )); + } + } + + // Allocate a network id + let network_state_id = gsm_inner.network_states_mut().allocate_id(); + + // Create an anonymous network state + let mut network_state = + NetworkState::new(network_state_id, None, NetworkOrigin::Blueprint(self.id())); + + if let Err(e) = (|| { + self.generate_model_inner(gsm_inner, &mut network_state)?; + self.generate_ipv4_inner(gsm_inner, &mut network_state)?; + self.generate_ipv6_inner(gsm_inner, &mut network_state)?; + Ok(()) + })() { + // Release the network state and id if things failed to allocate + network_state.release(gsm_inner); + gsm_inner + .network_states_mut() + .release_id(network_state_id) + .expect("must succeed"); + return Err(e); + } + + // Attach the state to the id + gsm_inner.network_states_mut().attach_state(network_state)?; + + // Record the newly instantiated network + let mut networks = self.fields.networks.clone(); + networks.push_back(network_state_id); + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + networks, + ..(*self.fields).clone() + }); + + Ok(network_state_id) + } + + #[instrument(level = "debug", skip(self, callback), err)] + pub fn for_each_network_id(&self, mut callback: F) -> GlobalStateManagerResult> + where + F: FnMut(NetworkStateId) -> GlobalStateManagerResult>, + { + for network_id in &self.fields.networks { + if let Some(res) = callback(*network_id)? 
{ + return Ok(Some(res)); + } + } + Ok(None) + } + + #[instrument(level = "debug", skip(self))] + pub fn on_network_released(&mut self, network_id: NetworkStateId) { + // Remove network from list + let pos = self + .fields + .networks + .iter() + .position(|id| *id == network_id) + .expect("must exist"); + let mut networks = self.fields.networks.clone(); + networks.remove(pos); + + // Update fields + self.fields = Arc::new(BlueprintStateFields { + networks, + ..(*self.fields).clone() + }); + } +} + +impl State for BlueprintState { + fn id(&self) -> StateId { + self.immutable.id + } + + fn name(&self) -> Option { + Some(self.immutable.name.clone()) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/machine_state.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/machine_state.rs new file mode 100644 index 00000000..26baa15e --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/machine_state.rs @@ -0,0 +1,570 @@ +use super::*; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum MachineOrigin { + Config, + Direct, + Template(TemplateStateId), +} + +#[derive(Debug, Clone)] +struct MachineStateFields { + /// The current network interfaces definition + interfaces: imbl::HashMap, MachineStateInterface>, + /// Capabilities to disable on this machine + disable_capabilities: imbl::Vector, + /// If this machine is a bootstrap + bootstrap: bool, +} + +#[derive(Debug, Clone)] +pub struct MachineStateInterface { + /// The network this interface belongs to + pub network_id: Option, + /// The veilid NetworkInterface state + pub network_interface: Arc, +} + +#[derive(Debug)] +struct MachineStateImmutable { + /// The id of this machine + id: MachineStateId, + /// The name of this machine if it is named + opt_name: Option, + /// Where this machine came for housekeeping purposes + origin: MachineOrigin, +} + +#[derive(Debug, Clone)] +pub struct 
MachineState { + immutable: Arc, + fields: Arc, +} + +pub type MachineStateId = StateId; + +impl MachineState { + pub fn new(id: MachineStateId, opt_name: Option, origin: MachineOrigin) -> Self { + // Create a localhost interface for this machine + Self { + immutable: Arc::new(MachineStateImmutable { + id, + opt_name, + origin, + }), + fields: Arc::new(MachineStateFields { + interfaces: imbl::HashMap::new(), + disable_capabilities: imbl::Vector::new(), + bootstrap: false, + }), + } + } + + #[instrument(level = "debug", skip(self, gsm_inner))] + pub fn release(mut self, gsm_inner: &mut GlobalStateManagerInner) { + self.release_all_interfaces(gsm_inner) + .expect("must succeed"); + + if let MachineOrigin::Template(generating_template) = self.immutable.origin { + let mut template_state = gsm_inner + .template_states() + .get_state(generating_template) + .expect("must exist"); + template_state.on_machine_released(self.id()); + gsm_inner.template_states_mut().set_state(template_state); + } + } + + #[instrument(level = "debug", skip(self))] + pub fn set_disable_capabilities(&mut self, disable_capabilities: Vec) { + self.fields = Arc::new(MachineStateFields { + disable_capabilities: disable_capabilities.into(), + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_bootstrap(&mut self, bootstrap: bool) { + self.fields = Arc::new(MachineStateFields { + bootstrap, + ..(*self.fields).clone() + }); + } + + fn next_free_interface_key(&self) -> Arc { + let mut inum = 0usize; + loop { + let name = format!("vin{}", inum); + if !self.fields.interfaces.contains_key(&name) { + return Arc::new(name); + } + inum += 1; + } + } + + #[instrument(level = "debug", skip(self), err)] + pub fn allocate_interface( + &mut self, + interface_name: Option, + opt_interface_flags: Option, + ) -> GlobalStateManagerResult> { + let interface_key = interface_name + .map(Arc::new) + .unwrap_or_else(|| self.next_free_interface_key()); + if 
self.fields.interfaces.contains_key(&interface_key) { + return Err(GlobalStateManagerError::DuplicateName( + (*interface_key).clone(), + )); + } + let flags = opt_interface_flags.unwrap_or(InterfaceFlags { + is_loopback: false, + is_running: true, + is_point_to_point: false, + has_default_route: true, + }); + let interfaces = self.fields.interfaces.update( + interface_key.clone(), + MachineStateInterface { + network_id: None, + network_interface: Arc::new(NetworkInterface { + name: (*interface_key).clone(), + flags, + addrs: Vec::new(), + }), + }, + ); + + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(interface_key) + } + + pub fn interfaces(&self) -> Vec> { + let mut intfs: Vec<_> = self.fields.interfaces.keys().cloned().collect(); + intfs.sort(); + intfs + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_address_ipv4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + opt_address: Option, + opt_address_flags: Option, + ) -> GlobalStateManagerResult { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + // Get the network state + let Some(network_id) = machine_state_interface.network_id else { + return Err(GlobalStateManagerError::NetworkNotFound( + (*interface_key).clone(), + )); + }; + let mut network_state = gsm_inner.network_states().get_state(network_id)?; + + // Allocate interface address + let is_dynamic = opt_address.is_none(); + let ifv4_addr = network_state.allocate_address_v4( + gsm_inner, + OwnerTag::Machine(self.id()), + opt_address, + )?; + + // Update the network state + gsm_inner.network_states_mut().set_state(network_state); + + // Get address flags + let flags = opt_address_flags.unwrap_or(AddressFlags { + is_dynamic, + 
is_temporary: false, + is_preferred: true, + }); + + // Update interface addresses + let mut new_intf = (*machine_state_interface.network_interface).clone(); + new_intf.addrs.push(InterfaceAddress { + if_addr: IfAddr::V4(ifv4_addr.clone()), + flags, + }); + + // Update interface + machine_state_interface.network_interface = Arc::new(new_intf); + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(ifv4_addr) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_address_ipv6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + opt_address: Option, + opt_address_flags: Option, + ) -> GlobalStateManagerResult { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + // Get the network state + let Some(network_id) = machine_state_interface.network_id else { + return Err(GlobalStateManagerError::NetworkNotFound( + (*interface_key).clone(), + )); + }; + let mut network_state = gsm_inner.network_states().get_state(network_id)?; + + // Allocate interface address + let is_dynamic = opt_address.is_none(); + let ifv6_addr = network_state.allocate_address_v6( + gsm_inner, + OwnerTag::Machine(self.id()), + opt_address, + )?; + // Update the network state + gsm_inner.network_states_mut().set_state(network_state); + + // Get address flags + let flags = opt_address_flags.unwrap_or(AddressFlags { + is_dynamic, + is_temporary: false, + is_preferred: true, + }); + + // Update interface addresses + let mut new_intf = (*machine_state_interface.network_interface).clone(); + new_intf.addrs.push(InterfaceAddress { + if_addr: 
IfAddr::V6(ifv6_addr.clone()), + flags, + }); + + // Update interface + machine_state_interface.network_interface = Arc::new(new_intf); + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(ifv6_addr) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn attach_network( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + network_id: NetworkStateId, + ) -> GlobalStateManagerResult<()> { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + if machine_state_interface.network_id.is_some() { + Self::detach_network_inner(gsm_inner, &mut machine_state_interface)?; + } + + machine_state_interface.network_id = Some(network_id); + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn detach_network( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + ) -> GlobalStateManagerResult<()> { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + Self::detach_network_inner(gsm_inner, &mut machine_state_interface)?; + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + 
// Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(()) + } + + pub fn attached_network_interfaces( + &self, + network_id: NetworkStateId, + ) -> GlobalStateManagerResult>> { + let mut out = Vec::new(); + for intf in &self.fields.interfaces { + if intf.1.network_id == Some(network_id) { + out.push(intf.0.clone()); + } + } + Ok(out) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn release_address( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + address: IpAddr, + ) -> GlobalStateManagerResult<()> { + let interface_key = Arc::new(interface_name.to_owned()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + let Some(network_id) = machine_state_interface.network_id else { + return Err(GlobalStateManagerError::NetworkNotFound( + (*interface_key).clone(), + )); + }; + + // Get the network state + let mut network_state = gsm_inner.network_states().get_state(network_id)?; + + // Release the address from the network + match address { + IpAddr::V4(ipv4_addr) => network_state.release_address_v4(ipv4_addr)?, + IpAddr::V6(ipv6_addr) => network_state.release_address_v6(ipv6_addr)?, + }; + + // Update the network state + gsm_inner.network_states_mut().set_state(network_state); + + // Remove the address from the interface + let addrs: Vec<_> = machine_state_interface + .network_interface + .addrs + .iter() + .filter(|x| x.if_addr().ip() != address) + .cloned() + .collect(); + + // Update network interface + machine_state_interface.network_interface = Arc::new(NetworkInterface { + addrs, + ..(*machine_state_interface.network_interface).clone() + }); + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + // Update fields + self.fields = 
Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn release_all_addresses( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + ) -> GlobalStateManagerResult<()> { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + Self::release_all_addresses_inner(gsm_inner, &mut machine_state_interface)?; + + // Update interfaces map + let interfaces = self + .fields + .interfaces + .update(interface_key, machine_state_interface); + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn release_all_interfaces( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + let interface_names: Vec = self + .fields + .interfaces + .keys() + .map(|x| (**x).clone()) + .collect(); + for interface_name in interface_names { + let interface_key = Arc::new(interface_name); + let Some(mut machine_state_interface) = + self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + Self::detach_network_inner(gsm_inner, &mut machine_state_interface)?; + } + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces: imbl::HashMap::new(), + ..(*self.fields).clone() + }); + + Ok(()) + } + + //////////////////////////////////////////////////////////////////////// + + fn detach_network_inner( + gsm_inner: &mut GlobalStateManagerInner, + machine_state_interface: &mut MachineStateInterface, + ) -> GlobalStateManagerResult<()> { + Self::release_all_addresses_inner(gsm_inner, 
machine_state_interface)?; + machine_state_interface.network_id = None; + Ok(()) + } + + fn release_all_addresses_inner( + gsm_inner: &mut GlobalStateManagerInner, + machine_state_interface: &mut MachineStateInterface, + ) -> GlobalStateManagerResult<()> { + let Some(network_id) = machine_state_interface.network_id else { + return Ok(()); + }; + + // Get the network state + let mut network_state = gsm_inner.network_states().get_state(network_id)?; + + // Release the addresses from the network + for addr in &machine_state_interface.network_interface.addrs { + match addr.if_addr.ip() { + IpAddr::V4(ipv4_addr) => network_state.release_address_v4(ipv4_addr)?, + IpAddr::V6(ipv6_addr) => network_state.release_address_v6(ipv6_addr)?, + }; + } + + // Update the network state + gsm_inner.network_states_mut().set_state(network_state); + + // Remove the addresses from the interface + let mut new_intf = (*machine_state_interface.network_interface).clone(); + new_intf.addrs.clear(); + + // Update interface + machine_state_interface.network_interface = Arc::new(new_intf); + + Ok(()) + } + + pub fn release_interface( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + interface_name: &str, + ) -> GlobalStateManagerResult<()> { + let interface_key = Arc::new(interface_name.to_string()); + let Some(mut machine_state_interface) = self.fields.interfaces.get(&interface_key).cloned() + else { + return Err(GlobalStateManagerError::InvalidName( + (*interface_key).clone(), + )); + }; + + Self::detach_network_inner(gsm_inner, &mut machine_state_interface)?; + + // Update interfaces map + let interfaces = self.fields.interfaces.without(&interface_key); + + // Update fields + self.fields = Arc::new(MachineStateFields { + interfaces, + ..(*self.fields).clone() + }); + + Ok(()) + } +} + +impl State for MachineState { + fn id(&self) -> StateId { + self.immutable.id + } + + fn name(&self) -> Option { + self.immutable.opt_name.clone() + } +} diff --git 
a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/mod.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/mod.rs new file mode 100644 index 00000000..e67aecb1 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/mod.rs @@ -0,0 +1,19 @@ +mod blueprint_locations_list; +mod blueprint_state; +mod machine_state; +mod network_state; +mod profile_state; +mod state_registry; +mod template_locations_list; +mod template_state; + +use super::*; + +pub use blueprint_locations_list::*; +pub use blueprint_state::*; +pub use machine_state::*; +pub use network_state::*; +pub use profile_state::*; +pub use state_registry::*; +pub use template_locations_list::*; +pub use template_state::*; diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/network_state.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/network_state.rs new file mode 100644 index 00000000..5848d602 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/network_state.rs @@ -0,0 +1,835 @@ +use super::*; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum OwnerTag { + Machine(MachineStateId), + Network(NetworkStateId), + Gateway(NetworkStateId), +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum NetworkOrigin { + Config, + Direct, + Blueprint(BlueprintStateId), +} + +#[derive(Debug)] +struct NetworkStateImmutable { + /// The unique id of this network + id: NetworkStateId, + /// The name of this network state if it was made directly + opt_name: Option, + /// Where this network came for housekeeping purposes + origin: NetworkOrigin, +} + +#[derive(Debug, Clone)] +struct NetworkStateFields { + /// Model for this network + model: NetworkStateModel, + /// The addresses allocated by this network + address_pool: AddressPool, + /// IPv4 state if it is enabled + ipv4: 
Option, + /// IPv6 state if it is enabled + ipv6: Option, +} + +#[derive(Debug, Clone)] +struct NetworkStateModel { + params: NetworkStateModelParams, +} + +#[derive(Debug, Clone)] +pub struct NetworkStateModelParams { + /// Network latency distribution + pub latency: config::Distribution, + /// Distance simulation metric + pub distance: Option, + /// Packet loss probability + pub loss: Probability, +} + +#[derive(Debug, Clone)] +pub struct NetworkStateIpv4Params { + pub scope: Vec, + pub reserve: Vec, + pub super_net: Option, +} + +#[derive(Debug, Clone)] +struct NetworkStateIpv4 { + params: NetworkStateIpv4Params, + gateway: Option, +} + +#[derive(Debug, Clone)] +pub struct NetworkStateIpv6Params { + pub scope: Vec, + pub reserve: Vec, + pub super_net: Option, +} +#[derive(Debug, Clone)] +struct NetworkStateIpv6 { + params: NetworkStateIpv6Params, + gateway: Option, +} + +#[derive(Debug, Clone)] +pub struct NetworkStateIpv4GatewayParams { + pub translation: config::Translation, + pub upnp: bool, + pub external_network: NetworkStateId, + pub internal_address: Option, + pub external_address: Option, +} + +#[derive(Debug, Clone)] +pub struct NetworkStateIpv6GatewayParams { + pub translation: config::Translation, + pub upnp: bool, + pub external_network: NetworkStateId, + pub internal_address: Option, + pub external_address: Option, +} + +#[derive(Debug, Clone)] +struct NetworkStateIpv4Gateway { + params: NetworkStateIpv4GatewayParams, + internal_interface_address: Ifv4Addr, + external_interface_address: Ifv4Addr, +} + +#[derive(Debug, Clone)] +struct NetworkStateIpv6Gateway { + params: NetworkStateIpv6GatewayParams, + internal_interface_address: Ifv6Addr, + external_interface_address: Ifv6Addr, +} + +#[derive(Debug, Clone)] +pub struct NetworkState { + immutable: Arc, + fields: Arc, +} + +pub type NetworkStateId = StateId; + +impl NetworkState { + pub fn new(id: NetworkStateId, opt_name: Option, origin: NetworkOrigin) -> Self { + Self { + immutable: 
Arc::new(NetworkStateImmutable { + id, + opt_name, + origin, + }), + fields: Arc::new(NetworkStateFields { + address_pool: AddressPool::new(), + model: NetworkStateModel { + params: NetworkStateModelParams { + latency: config::Distribution::default(), + distance: None, + loss: 0.0, + }, + }, + ipv4: None, + ipv6: None, + }), + } + } + + #[instrument(level = "debug", skip(self, gsm_inner))] + pub fn release(self, gsm_inner: &mut GlobalStateManagerInner) { + if let NetworkOrigin::Blueprint(generating_blueprint) = self.immutable.origin { + let mut blueprint_state = gsm_inner + .blueprint_states() + .get_state(generating_blueprint) + .expect("must exist"); + blueprint_state.on_network_released(self.id()); + gsm_inner.blueprint_states_mut().set_state(blueprint_state) + } + } + + #[instrument(level = "debug", skip(self))] + pub fn set_model(&mut self, params: NetworkStateModelParams) { + self.fields = Arc::new(NetworkStateFields { + model: NetworkStateModel { params }, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn clear_ipv4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4_gateway(gsm_inner)?; + + if self.fields.ipv4.is_none() { + return Ok(()); + }; + + let mut address_pool = self.fields.address_pool.clone(); + address_pool + .clear_ipv4(|_n, t| match t { + OwnerTag::Machine(_) => true, + OwnerTag::Network(nsid) => *nsid != self.id(), + OwnerTag::Gateway(nsid) => *nsid != self.id(), + }) + .map_err(|_| { + GlobalStateManagerError::ResourceInUse(format!("{}-v4", self.debug_name())) + })?; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv4: None, + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn clear_ipv4_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + let Some(mut ipv4) = 
self.fields.ipv4.clone() else { + return Ok(()); + }; + let Some(gateway) = ipv4.gateway else { + return Ok(()); + }; + if gateway.params.external_network != self.id() { + // Get the external network state + let mut external_network_state = gsm_inner + .network_states() + .get_state(gateway.params.external_network) + .expect("must succeed"); + + // Release external address + external_network_state + .release_address_v4(gateway.external_interface_address.ip) + .expect("must succeed"); + + // Update external network + gsm_inner + .network_states_mut() + .set_state(external_network_state); + } + + // Release internal address + self.release_address_v4(gateway.internal_interface_address.ip) + .expect("must succeed"); + + // Clear gateway + ipv4.gateway = None; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv4: Some(ipv4), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + params: NetworkStateIpv4Params, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4(gsm_inner)?; + + let mut address_pool = self.fields.address_pool.clone(); + for scope in ¶ms.scope { + address_pool.add_scope_v4(*scope); + } + for reserve in ¶ms.reserve { + address_pool.reserve_allocation_v4(*reserve, None)?; + } + + let ipv4 = NetworkStateIpv4 { + params, + gateway: None, + }; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv4: Some(ipv4), + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv4_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + gateway_params: NetworkStateIpv4GatewayParams, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv4_gateway(gsm_inner)?; + + let Some(mut ipv4) = self.fields.ipv4.clone() else { + return Err(GlobalStateManagerError::InvalidGateway); + }; + + let mut address_pool = 
self.fields.address_pool.clone(); + + // Allocate or reserve an internal network address for the gateway + let internal_interface_address = + if let Some(internal_address) = gateway_params.internal_address { + let scope = address_pool.reserve_allocation_v4( + Ipv4Net::new(internal_address, 32).expect("must succeed"), + Some(OwnerTag::Gateway(self.id())), + )?; + + // Make interface address + Ifv4Addr { + ip: internal_address, + netmask: scope.netmask(), + broadcast: Some(scope.broadcast()), + } + } else { + let Some(internal_address) = address_pool.allocate_random_v4( + gsm_inner.srng(), + 32, + OwnerTag::Gateway(self.id()), + )? + else { + return Err(GlobalStateManagerError::NoAllocation); + }; + + // Get the scope this allocation fits in + let scope = address_pool + .find_scope_v4(internal_address) + .expect("must succeed"); + + // Make interface address + let internal_address = internal_address.addr(); + Ifv4Addr { + ip: internal_address, + netmask: scope.netmask(), + broadcast: Some(scope.broadcast()), + } + }; + + // Get the external network state + let mut external_network_state = gsm_inner + .network_states() + .get_state(gateway_params.external_network) + .expect("must succeed"); + + // Allocate or reserve an external network address for the gateway + let external_interface_address = + if matches!(gateway_params.translation, config::Translation::None) { + // If the translation mode is 'none', then the external and internal + // addresses must be the same + external_network_state.allocate_address_v4( + gsm_inner, + OwnerTag::Gateway(self.id()), + Some(internal_interface_address.ip), + )? + } else { + // Network translation means the internal and external addresses + // will be different + external_network_state.allocate_address_v4( + gsm_inner, + OwnerTag::Gateway(self.id()), + None, + )? 
+ }; + + // Update external network + gsm_inner + .network_states_mut() + .set_state(external_network_state); + + // Set the gateway state + ipv4.gateway = Some(NetworkStateIpv4Gateway { + params: gateway_params, + internal_interface_address, + external_interface_address, + }); + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv4: Some(ipv4), + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn clear_ipv6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6_gateway(gsm_inner)?; + + if self.fields.ipv6.is_none() { + return Ok(()); + }; + + let mut address_pool = self.fields.address_pool.clone(); + address_pool + .clear_ipv6(|_n, t| match t { + OwnerTag::Machine(_) => true, + OwnerTag::Network(nsid) => *nsid != self.id(), + OwnerTag::Gateway(nsid) => *nsid != self.id(), + }) + .map_err(|_| { + GlobalStateManagerError::ResourceInUse(format!("{}-v6", self.debug_name())) + })?; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv6: None, + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn clear_ipv6_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult<()> { + let Some(mut ipv6) = self.fields.ipv6.clone() else { + return Ok(()); + }; + let Some(gateway) = ipv6.gateway else { + return Ok(()); + }; + if gateway.params.external_network != self.id() { + // Get the external network state + let mut external_network_state = gsm_inner + .network_states() + .get_state(gateway.params.external_network) + .expect("must succeed"); + + // Release external address + external_network_state + .release_address_v6(gateway.external_interface_address.ip) + .expect("must succeed"); + + // Update external network + gsm_inner + .network_states_mut() + .set_state(external_network_state); + 
} + + // Release internal address + self.release_address_v6(gateway.internal_interface_address.ip) + .expect("must succeed"); + + // Clear gateway + ipv6.gateway = None; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv6: Some(ipv6), + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + params: NetworkStateIpv6Params, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6(gsm_inner)?; + + let mut address_pool = self.fields.address_pool.clone(); + for scope in ¶ms.scope { + address_pool.add_scope_v6(*scope); + } + for reserve in ¶ms.reserve { + address_pool.reserve_allocation_v6(*reserve, None)?; + } + let ipv6 = NetworkStateIpv6 { + params, + gateway: None, + }; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv6: Some(ipv6), + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn set_ipv6_gateway( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + gateway_params: NetworkStateIpv6GatewayParams, + ) -> GlobalStateManagerResult<()> { + self.clear_ipv6_gateway(gsm_inner)?; + + let Some(mut ipv6) = self.fields.ipv6.clone() else { + return Err(GlobalStateManagerError::InvalidGateway); + }; + + let mut address_pool = self.fields.address_pool.clone(); + + // Allocate or reserve an internal network address for the gateway + let internal_interface_address = + if let Some(internal_address) = gateway_params.internal_address { + let scope = address_pool.reserve_allocation_v6( + Ipv6Net::new(internal_address, 128).expect("must succeed"), + Some(OwnerTag::Gateway(self.id())), + )?; + // Make interface address + Ifv6Addr { + ip: internal_address, + netmask: scope.netmask(), + broadcast: Some(scope.broadcast()), + } + } else { + let Some(internal_address) = address_pool.allocate_random_v6( + gsm_inner.srng(), + 128, + 
OwnerTag::Gateway(self.id()), + )? + else { + return Err(GlobalStateManagerError::NoAllocation); + }; + // Get the scope this allocation fits in + let scope = address_pool + .find_scope_v6(internal_address) + .expect("must succeed"); + + // Make interface address + let internal_address = internal_address.addr(); + Ifv6Addr { + ip: internal_address, + netmask: scope.netmask(), + broadcast: Some(scope.broadcast()), + } + }; + + // Get the external network state + let mut external_network_state = gsm_inner + .network_states() + .get_state(gateway_params.external_network) + .expect("must succeed"); + + // Allocate or reserve an external network address for the gateway + let external_interface_address = + if matches!(gateway_params.translation, config::Translation::None) { + // If the translation mode is 'none', then the external and internal + // addresses must be the same + external_network_state.allocate_address_v6( + gsm_inner, + OwnerTag::Gateway(self.id()), + Some(internal_interface_address.ip), + )? + } else { + // Network translation means the internal and external addresses + // will be different + external_network_state.allocate_address_v6( + gsm_inner, + OwnerTag::Gateway(self.id()), + None, + )? + }; + + // Update external network + gsm_inner + .network_states_mut() + .set_state(external_network_state); + + // Set the gateway state + ipv6.gateway = Some(NetworkStateIpv6Gateway { + params: gateway_params, + internal_interface_address, + external_interface_address, + }); + + // Update fields + self.fields = Arc::new(NetworkStateFields { + ipv6: Some(ipv6), + address_pool, + ..(*self.fields).clone() + }); + + Ok(()) + } + + pub fn is_ipv4(&self) -> bool { + self.fields.ipv4.is_some() + } + + pub fn is_ipv6(&self) -> bool { + self.fields.ipv6.is_some() + } + + pub fn is_active(&self) -> GlobalStateManagerResult { + let mut can_allocate = false; + + if self.fields.ipv4.is_some() { + // + if !self.fields.address_pool.can_allocate_v4(32)? 
{ + can_allocate = false; + } + } + if self.fields.ipv6.is_some() { + // + if !self.fields.address_pool.can_allocate_v6(128)? { + can_allocate = false; + } + } + Ok(can_allocate) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_address_v4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + owner_tag: OwnerTag, + opt_address: Option, + ) -> GlobalStateManagerResult { + let net = self.allocate_subnet_v4(gsm_inner, owner_tag, opt_address, 32)?; + let scope = self + .fields + .address_pool + .find_scope_v4(net) + .expect("must succeed"); + let ip = net.addr(); + let netmask = scope.netmask(); + let broadcast = scope.broadcast(); + + let ifaddr = Ifv4Addr { + ip, + netmask, + broadcast: Some(broadcast), + }; + + Ok(ifaddr) + } + + pub fn can_allocate_address_v4(&self, opt_address: Option) -> bool { + self.can_allocate_subnet_v4(opt_address, 32) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_subnet_v4( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + owner_tag: OwnerTag, + opt_address: Option, + prefix: u8, + ) -> GlobalStateManagerResult { + if self.fields.ipv4.is_none() { + return Err(GlobalStateManagerError::NoAllocation); + } + + // See if we are requesting a specific address + let mut address_pool = self.fields.address_pool.clone(); + + let net = if let Some(address) = opt_address { + // Get the net form for this address + let net = Ipv4Net::new(address, prefix).expect("must succeed"); + address_pool.reserve_allocation_v4(net, Some(owner_tag))?; + net + } else { + // Get a random address if available + let Some(allocation) = + address_pool.allocate_random_v4(gsm_inner.srng(), prefix, owner_tag)? 
+ else { + return Err(GlobalStateManagerError::NoAllocation); + }; + allocation + }; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + address_pool, + ..(*self.fields).clone() + }); + + Ok(net) + } + + pub fn can_allocate_subnet_v4(&self, opt_address: Option, prefix: u8) -> bool { + if self.fields.ipv4.is_none() { + return false; + }; + + // See if we are requesting a specific address + if let Some(address) = opt_address { + // Get the net form for this address + let net = Ipv4Net::new(address, prefix).expect("must succeed"); + self.fields.address_pool.get_overlaps_v4(net).is_empty() + } else { + // Get a random address if available + self.fields + .address_pool + .can_allocate_v4(prefix) + .unwrap_or(false) + } + } + + #[instrument(level = "debug", skip(self), err)] + pub fn release_address_v4( + &mut self, + addr: Ipv4Addr, + ) -> GlobalStateManagerResult> { + self.release_subnet_v4(Ipv4Net::new(addr, 32).expect("must succeed")) + } + + #[instrument(level = "debug", skip(self), err)] + pub fn release_subnet_v4( + &mut self, + net: Ipv4Net, + ) -> GlobalStateManagerResult> { + let mut address_pool = self.fields.address_pool.clone(); + let opt_tag = address_pool.release_allocation_v4(net)?; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + address_pool, + ..(*self.fields).clone() + }); + Ok(opt_tag) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_address_v6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + owner_tag: OwnerTag, + opt_address: Option, + ) -> GlobalStateManagerResult { + let net = self.allocate_subnet_v6(gsm_inner, owner_tag, opt_address, 128)?; + let scope = self + .fields + .address_pool + .find_scope_v6(net) + .expect("must succeed"); + + let ip = net.addr(); + let netmask = scope.netmask(); + let broadcast = scope.broadcast(); + + let ifaddr = Ifv6Addr { + ip, + netmask, + broadcast: Some(broadcast), + }; + + Ok(ifaddr) + } + + pub fn can_allocate_address_v6(&self, 
opt_address: Option) -> bool { + self.can_allocate_subnet_v6(opt_address, 128) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn allocate_subnet_v6( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + owner_tag: OwnerTag, + opt_address: Option, + prefix: u8, + ) -> GlobalStateManagerResult { + if self.fields.ipv6.is_none() { + return Err(GlobalStateManagerError::NoAllocation); + } + + // See if we are requesting a specific address + let mut address_pool = self.fields.address_pool.clone(); + + let net = if let Some(address) = opt_address { + // Get the net form for this address + let net = Ipv6Net::new(address, prefix).expect("must succeed"); + address_pool.reserve_allocation_v6(net, Some(owner_tag))?; + net + } else { + // Get a random address if available + let Some(allocation) = + address_pool.allocate_random_v6(gsm_inner.srng(), prefix, owner_tag)? + else { + return Err(GlobalStateManagerError::NoAllocation); + }; + allocation + }; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + address_pool, + ..(*self.fields).clone() + }); + + Ok(net) + } + + pub fn can_allocate_subnet_v6(&self, opt_address: Option, prefix: u8) -> bool { + if self.fields.ipv6.is_none() { + return false; + }; + + // See if we are requesting a specific address + if let Some(address) = opt_address { + // Get the net form for this address + let net = Ipv6Net::new(address, prefix).expect("must succeed"); + self.fields.address_pool.get_overlaps_v6(net).is_empty() + } else { + // Get a random address if available + self.fields + .address_pool + .can_allocate_v6(prefix) + .unwrap_or(false) + } + } + + #[instrument(level = "debug", skip(self), err)] + pub fn release_address_v6( + &mut self, + addr: Ipv6Addr, + ) -> GlobalStateManagerResult> { + self.release_subnet_v6(Ipv6Net::new(addr, 128).expect("must succeed")) + } + + #[instrument(level = "debug", skip(self), err)] + pub fn release_subnet_v6( + &mut self, + net: Ipv6Net, + ) -> 
GlobalStateManagerResult> { + let mut address_pool = self.fields.address_pool.clone(); + let opt_tag = address_pool.release_allocation_v6(net)?; + + // Update fields + self.fields = Arc::new(NetworkStateFields { + address_pool, + ..(*self.fields).clone() + }); + Ok(opt_tag) + } +} + +impl State for NetworkState { + fn id(&self) -> StateId { + self.immutable.id + } + + fn name(&self) -> Option { + self.immutable.opt_name.clone() + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/profile_state.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/profile_state.rs new file mode 100644 index 00000000..1142e05a --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/profile_state.rs @@ -0,0 +1,57 @@ +use super::*; + +#[derive(Debug)] +struct ProfileStateFields { + next_instance_index: usize, +} + +#[derive(Debug)] +struct ProfileStateImmutable { + id: ProfileStateId, + name: String, + def: config::Profile, +} + +#[derive(Debug, Clone)] +pub struct ProfileState { + immutable: Arc, + fields: Arc, +} + +pub type ProfileStateId = StateId; + +impl ProfileState { + pub fn new(id: ProfileStateId, name: String, def: config::Profile) -> Self { + Self { + immutable: Arc::new(ProfileStateImmutable { id, name, def }), + fields: Arc::new(ProfileStateFields { + next_instance_index: 0, + }), + } + } + + #[instrument(level = "debug", skip(self))] + pub fn next_instance(&mut self) -> Option { + let instance_index = { + let instance_index = self.fields.next_instance_index; + if instance_index >= self.immutable.def.instances.len() { + return None; + } + self.fields = Arc::new(ProfileStateFields { + next_instance_index: instance_index + 1, + }); + instance_index + }; + Some(self.immutable.def.instances[instance_index].clone()) + } +} + +impl State for ProfileState { + fn id(&self) -> StateId { + self.immutable.id + } + + fn name(&self) -> Option { + Some(self.immutable.name.clone()) + 
} +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/state_registry.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/state_registry.rs new file mode 100644 index 00000000..b5d4e157 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/state_registry.rs @@ -0,0 +1,186 @@ +use super::*; +use std::marker::PhantomData; + +pub trait State: fmt::Debug + Clone { + fn id(&self) -> StateId; + fn name(&self) -> Option; + fn debug_name(&self) -> String { + self.name() + .unwrap_or_else(|| format!("<{}>", self.id().external_id())) + } +} + +type StateIdInternal = u64; + +#[derive(Debug, Clone)] +pub struct StateId(pub StateIdInternal, core::marker::PhantomData); +impl StateId { + pub fn new(external_id: u64) -> Self { + Self(external_id, PhantomData {}) + } + + pub fn external_id(&self) -> u64 { + self.0 + } +} + +impl Copy for StateId {} +impl PartialEq for StateId { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} +impl Eq for StateId {} +impl PartialOrd for StateId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.0.cmp(&other.0)) + } +} +impl Ord for StateId { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} +impl core::hash::Hash for StateId { + fn hash(&self, state: &mut H) { + self.0.hash(state) + } +} + +#[derive(Debug, Clone)] +pub struct StateRegistry { + state_id_by_name: imbl::HashMap, + state_by_id: imbl::HashMap>, + next_state_id: StateIdInternal, + free_state_ids: imbl::Vector, +} + +impl StateRegistry { + pub fn new() -> Self { + Self { + state_id_by_name: imbl::HashMap::new(), + state_by_id: imbl::HashMap::new(), + next_state_id: 0, + free_state_ids: imbl::Vector::new(), + } + } + + pub fn allocate_id(&mut self) -> StateId { + // Allocate new internal id + let state_id = self.free_state_ids.pop_back().unwrap_or_else(|| { + let x = self.next_state_id; + self.next_state_id += 1; + x + }); 
+ + // Associate with an empty state slot + self.state_by_id.insert(state_id, None); + + // Return the type-safe wrapped id + StateId(state_id, PhantomData {}) + } + + pub fn release_id(&mut self, id: StateId) -> GlobalStateManagerResult<()> { + // Remove id to state mapping + let Some(old_opt_state) = self.state_by_id.remove(&id.0) else { + return Err(GlobalStateManagerError::InvalidId(id.external_id())); + }; + + // Release state if it is attached + if let Some(old_state) = old_opt_state { + // Release name of state if it is named + if let Some(name) = old_state.name() { + self.state_id_by_name + .remove(&name) + .expect("named states should be registered"); + } + } + + // Keep old id in the free list + self.free_state_ids.push_back(id.0); + + Ok(()) + } + + pub fn attach_state(&mut self, state: S) -> GlobalStateManagerResult<()> { + // Get the id from the state + let id = state.id(); + + // Get the allocator slot + let Some(opt_state) = self.state_by_id.get_mut(&id.0) else { + return Err(GlobalStateManagerError::InvalidId(id.external_id())); + }; + + // Ensure the state slot isn't attached already + if opt_state.is_some() { + return Err(GlobalStateManagerError::AlreadyAttached); + } + + // Ensure the name isn't duplicated + if let Some(name) = state.name() { + if self.state_id_by_name.contains_key(&name) { + return Err(GlobalStateManagerError::DuplicateName(name)); + } + // Register the named state + assert!( + self.state_id_by_name.insert(name, id.0).is_none(), + "should not have a duplicated name here" + ); + } + + // Attach the state to the state slot + *opt_state = Some(state); + + Ok(()) + } + + pub fn detach_state(&mut self, id: StateId) -> GlobalStateManagerResult { + // Get the allocator slot + let Some(opt_state) = self.state_by_id.get_mut(&id.0) else { + return Err(GlobalStateManagerError::InvalidId(id.external_id())); + }; + + // Take the state out of the slot and ensure the state slot isn't detached already + let Some(state) = opt_state.take() else { 
+ return Err(GlobalStateManagerError::NotAttached); + }; + + // Release the name if it exists + if let Some(name) = state.name() { + let dead_name_id = self + .state_id_by_name + .remove(&name) + .expect("name should be registered"); + assert_eq!(dead_name_id, id.0, "name id and state id should match"); + } + + Ok(state) + } + + pub fn get_state(&self, id: StateId) -> GlobalStateManagerResult { + // Get the allocator slot + let Some(opt_state) = self.state_by_id.get(&id.0) else { + return Err(GlobalStateManagerError::InvalidId(id.external_id())); + }; + let Some(state) = opt_state else { + return Err(GlobalStateManagerError::NotAttached); + }; + Ok(state.clone()) + } + + pub fn set_state(&mut self, state: S) { + self.state_by_id.insert(state.id().0, Some(state)); + } + + pub fn get_state_id_by_name(&self, name: &str) -> Option> { + // Get the id associated with this name + let id = self.state_id_by_name.get(name)?; + Some(StateId::new(*id)) + } +} + +impl Default for StateRegistry { + fn default() -> Self { + Self::new() + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_locations_list.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_locations_list.rs new file mode 100644 index 00000000..d43862c1 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_locations_list.rs @@ -0,0 +1,185 @@ +use super::*; + +#[derive(Debug, Clone)] +enum BlueprintAvailability { + Existing(NetworkState), + Generate(BlueprintState), +} + +/// Locations where a machine can be instantiated when a template is generated +#[derive(Debug, Clone)] +pub enum TemplateLocationsList { + Networks { + networks: WeightedList, + }, + Blueprints { + blueprints: WeightedList, + }, +} + +impl TemplateLocationsList { + #[instrument(level = "debug", skip_all, err)] + pub fn can_pick( + &self, + gsm_inner: &mut GlobalStateManagerInner, + mut network_filter: F, + ) -> 
GlobalStateManagerResult + where + F: FnMut(&GlobalStateManagerInner, NetworkStateId) -> GlobalStateManagerResult, + { + match self { + TemplateLocationsList::Networks { networks } => { + // Filter the weighted list of networks to those that are still active and or not yet started + if networks + .try_filter(|id| { + let network_state = gsm_inner.network_states().get_state(*id)?; + self.is_network_available(gsm_inner, network_state, &mut network_filter) + })? + .is_none() + { + return Ok(false); + }; + } + TemplateLocationsList::Blueprints { blueprints } => { + // Filter the weighted list of blueprints to those that are still active or not yet started and can allocate + if blueprints + .try_filter(|id| { + let blueprint_state = gsm_inner.blueprint_states().get_state(*id)?; + + self.is_blueprint_available(gsm_inner, blueprint_state, &mut network_filter) + .map(|x| x.is_some()) + })? + .is_none() + { + return Ok(false); + }; + } + }; + Ok(true) + } + + #[instrument(level = "debug", skip_all, err)] + pub fn pick( + &self, + gsm_inner: &mut GlobalStateManagerInner, + mut network_filter: F, + ) -> GlobalStateManagerResult> + where + F: FnMut(&GlobalStateManagerInner, NetworkStateId) -> GlobalStateManagerResult, + { + // Get a network to generate the machine on + let network_state = match self { + TemplateLocationsList::Networks { networks } => { + // Filter the weighted list of networks to those that are still active and or not yet started + let Some(available_networks) = networks.try_filter_map(|id| { + let network_state = gsm_inner.network_states().get_state(*id)?; + if self.is_network_available( + gsm_inner, + network_state.clone(), + &mut network_filter, + )? { + Ok(Some(network_state)) + } else { + Ok(None) + } + })? 
+ else { + return Ok(None); + }; + + // Weighted choice of network now that we have a candidate list + let network_state = gsm_inner.srng().weighted_choice(available_networks); + + // Return network state to use + network_state + } + TemplateLocationsList::Blueprints { blueprints } => { + // Filter the weighted list of blueprints to those that are still active or not yet started and can allocate + let Some(available_blueprints) = blueprints.try_filter_map(|id| { + let blueprint_state = gsm_inner.blueprint_states().get_state(*id)?; + + self.is_blueprint_available(gsm_inner, blueprint_state, &mut network_filter) + })? + else { + return Ok(None); + }; + + // Weighted choice of blueprint now that we have a candidate list + match gsm_inner.srng().weighted_choice(available_blueprints) { + BlueprintAvailability::Existing(network_state) => network_state, + BlueprintAvailability::Generate(mut blueprint_state) => { + // Generate network state from blueprint state + let network_state_id = blueprint_state.generate(gsm_inner)?; + + // Update blueprint state + gsm_inner.blueprint_states_mut().set_state(blueprint_state); + + // Return network state + gsm_inner.network_states().get_state(network_state_id)? + } + } + } + }; + + Ok(Some(network_state)) + } + + #[instrument(level = "debug", skip_all, err)] + fn is_network_available( + &self, + gsm_inner: &GlobalStateManagerInner, + network_state: NetworkState, + mut network_filter: F, + ) -> GlobalStateManagerResult + where + F: FnMut(&GlobalStateManagerInner, NetworkStateId) -> GlobalStateManagerResult, + { + // If the network is not active, it is not available + if !network_state.is_active()? { + return Ok(false); + } + + // Check the network filter + if !network_filter(gsm_inner, network_state.id())? 
{ + return Ok(false); + } + + Ok(true) + } + + #[instrument(level = "debug", skip_all, err)] + fn is_blueprint_available( + &self, + gsm_inner: &mut GlobalStateManagerInner, + blueprint_state: BlueprintState, + mut network_filter: F, + ) -> GlobalStateManagerResult> + where + F: FnMut(&GlobalStateManagerInner, NetworkStateId) -> GlobalStateManagerResult, + { + // See if the networks generated from this blueprint so far have availability + // in this template + if let Some(available_network_state) = blueprint_state.for_each_network_id(|id| { + // Check the network's availability + let network_state = gsm_inner.network_states().get_state(id)?; + if self.is_network_available(gsm_inner, network_state.clone(), &mut network_filter)? { + // We found one + return Ok(Some(network_state)); + } + // Try next network + Ok(None) + })? { + // We found a usable network + return Ok(Some(BlueprintAvailability::Existing( + available_network_state, + ))); + } + + // If the blueprint is active, it is available because it can make a new network + if blueprint_state.is_active(gsm_inner) { + return Ok(Some(BlueprintAvailability::Generate(blueprint_state))); + } + + Ok(None) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_state.rs b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_state.rs new file mode 100644 index 00000000..08014cca --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/global_state_manager/state/template_state.rs @@ -0,0 +1,272 @@ +use super::*; + +#[derive(Debug)] +struct TemplateStateImmutable { + /// The unique id of this template + id: TemplateStateId, + /// The name of this template state + name: String, +} + +#[derive(Debug, Clone)] +struct PerNetworkInfo { + limit_machine_count: Option, + machines: imbl::HashSet, +} + +#[derive(Debug, Clone)] +struct TemplateStateFields { + limit_machine_count: Option, + limit_machines_per_network: Option>, + locations_list: 
Option, + machines: imbl::HashSet, + machines_per_network: imbl::HashMap, + disable_capabilities: imbl::Vector>, +} + +#[derive(Debug, Clone)] +pub struct TemplateState { + immutable: Arc, + fields: Arc, +} + +pub type TemplateStateId = StateId; + +impl TemplateState { + pub fn new(id: TemplateStateId, name: String) -> Self { + Self { + immutable: Arc::new(TemplateStateImmutable { id, name }), + fields: Arc::new(TemplateStateFields { + limit_machine_count: None, + limit_machines_per_network: None, + locations_list: None, + machines: imbl::HashSet::new(), + machines_per_network: imbl::HashMap::new(), + disable_capabilities: imbl::Vector::new(), + }), + } + } + + #[instrument(level = "debug", skip(self))] + pub fn set_disable_capabilities(&mut self, disable_capabilities: Vec) { + let disable_capabilities = + imbl::Vector::from_iter(disable_capabilities.into_iter().map(Arc::new)); + // Update fields + self.fields = Arc::new(TemplateStateFields { + disable_capabilities, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_networks_list(&mut self, networks: WeightedList) { + let locations_list = Some(TemplateLocationsList::Networks { networks }); + + // Update fields + self.fields = Arc::new(TemplateStateFields { + locations_list, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_blueprints_list(&mut self, blueprints: WeightedList) { + let locations_list = Some(TemplateLocationsList::Blueprints { blueprints }); + + // Update fields + self.fields = Arc::new(TemplateStateFields { + locations_list, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn clear_locations_list(&mut self) { + let locations_list = None; + + // Update fields + self.fields = Arc::new(TemplateStateFields { + locations_list, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_limit_machine_count(&mut self, limit_machine_count: 
Option) { + // Update fields + self.fields = Arc::new(TemplateStateFields { + limit_machine_count, + ..(*self.fields).clone() + }); + } + + #[instrument(level = "debug", skip(self))] + pub fn set_limit_machines_per_network( + &mut self, + limit_machines_per_network: Option>, + ) { + // Update fields + self.fields = Arc::new(TemplateStateFields { + limit_machines_per_network, + ..(*self.fields).clone() + }); + } + + pub fn is_active(&self, gsm_inner: &mut GlobalStateManagerInner) -> bool { + // Save a backup of the entire state + let backup = gsm_inner.clone(); + + // Make a copy of this template state + let mut current_state = self.clone(); + + // See what would happen if we try to generate this template + let ok = current_state.generate(gsm_inner).is_ok(); + + // Restore the backup + *gsm_inner = backup; + + // Return if this worked or not + ok + } + + /// Network filter that keeps this template generation within per-network limits + fn network_filter(&self, network_state_id: NetworkStateId) -> GlobalStateManagerResult { + // Get the per network info + let Some(pni) = self.fields.machines_per_network.get(&network_state_id) else { + // If we haven't allocated anything in the network yet it is + // by definition available + return Ok(true); + }; + + // If this template has allocated the maximum number of machines per-network + // for this network, then it is not available + if let Some(limit_machine_count) = pni.limit_machine_count { + if pni.machines.len() >= limit_machine_count { + return Ok(false); + } + } + Ok(true) + } + + #[instrument(level = "debug", skip(self, gsm_inner), err)] + pub fn generate( + &mut self, + gsm_inner: &mut GlobalStateManagerInner, + ) -> GlobalStateManagerResult { + // See if we have reached our machine limit + if let Some(limit_machine_count) = self.fields.limit_machine_count { + if self.fields.machines.len() < limit_machine_count { + return Err(GlobalStateManagerError::TemplateComplete(self.debug_name())); + } + } + + // If existing 
networks are all full, we'd have to allocate one, see if we'd be able to do that + let Some(locations_list) = self.fields.locations_list.as_ref() else { + return Err(GlobalStateManagerError::TemplateComplete(self.debug_name())); + }; + + // Get a network to generate the machine on + let Some(network_state) = locations_list.pick(gsm_inner, |_, x| self.network_filter(x))? + else { + return Err(GlobalStateManagerError::TemplateComplete(self.debug_name())); + }; + + // Allocate a machine id + let machine_state_id = gsm_inner.machine_states_mut().allocate_id(); + + // Create an anonymous machine state + let mut machine_state = + MachineState::new(machine_state_id, None, MachineOrigin::Template(self.id())); + + // Scope to release state on error + if let Err(e) = (|| { + // Build out the machine state from the template + machine_state.set_disable_capabilities( + self.fields + .disable_capabilities + .iter() + .map(|x| (**x).clone()) + .collect(), + ); + machine_state.set_bootstrap(false); + + // Make the default route interface + let vin0 = machine_state.allocate_interface(None, None)?; + machine_state.attach_network(gsm_inner, &vin0, network_state.id())?; + if network_state.is_ipv4() { + machine_state.allocate_address_ipv4(gsm_inner, &vin0, None, None)?; + } + if network_state.is_ipv6() { + machine_state.allocate_address_ipv6(gsm_inner, &vin0, None, None)?; + } + Ok(()) + })() { + // Release the machine state and id if things failed to allocate + machine_state.release(gsm_inner); + gsm_inner + .machine_states_mut() + .release_id(machine_state_id) + .expect("must succeed"); + return Err(e); + } + + // Attach the state to the id + gsm_inner + .machine_states_mut() + .attach_state(machine_state) + .expect("must succeed"); + + // Record the newly instantiated machine + let machines = self.fields.machines.update(machine_state_id); + let mut machines_per_network = self.fields.machines_per_network.clone(); + let per_network_info = machines_per_network + 
.entry(network_state.id()) + .or_insert_with(|| { + let limit_machine_count = self + .fields + .limit_machines_per_network + .as_ref() + .map(|wl| *gsm_inner.srng().weighted_choice_ref(wl)); + PerNetworkInfo { + limit_machine_count, + machines: imbl::HashSet::new(), + } + }); + per_network_info.machines.insert(machine_state_id); + + // Update fields + self.fields = Arc::new(TemplateStateFields { + machines, + machines_per_network, + ..(*self.fields).clone() + }); + + Ok(machine_state_id) + } + + #[instrument(level = "debug", skip(self))] + pub fn on_machine_released(&mut self, machine_state_id: MachineStateId) { + let machines = self.fields.machines.without(&machine_state_id); + let mut machines_per_network = self.fields.machines_per_network.clone(); + for (_network_id, pni) in machines_per_network.iter_mut() { + pni.machines.remove(&machine_state_id); + } + + // Update fields + self.fields = Arc::new(TemplateStateFields { + machines, + machines_per_network, + ..(*self.fields).clone() + }); + } +} + +impl State for TemplateState { + fn id(&self) -> StateId { + self.immutable.id + } + + fn name(&self) -> Option { + Some(self.immutable.name.clone()) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/mod.rs b/veilid-tools/src/virtual_network/router_server/mod.rs new file mode 100644 index 00000000..eeaeba1b --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/mod.rs @@ -0,0 +1,351 @@ +pub mod config; +mod global_state_manager; +mod server_processor; +mod stable_rng; +mod weighted_list; + +use super::*; + +use global_state_manager::*; +use server_processor::*; +use stable_rng::*; +use weighted_list::*; + +use async_tungstenite::accept_async; +use futures_codec::{Bytes, BytesCodec, FramedRead, FramedWrite}; +use futures_util::{stream::FuturesUnordered, AsyncReadExt, StreamExt, TryStreamExt}; +use ipnet::*; +use postcard::{from_bytes, to_stdvec}; +use std::io; +use stop_token::future::FutureExt as _; +use ws_stream_tungstenite::*; + 
+#[derive(ThisError, Debug, Clone, PartialEq, Eq)] +pub enum RouterServerError { + #[error("Serialization Error: {0}")] + SerializationError(postcard::Error), + #[error("IO Error: {0}")] + IoError(io::ErrorKind), + #[error("State Error: {0}")] + StateError(global_state_manager::GlobalStateManagerError), +} + +pub type RouterServerResult = Result; + +pub const DEFAULT_VIRTUAL_ROUTER_PORT_TCP: u16 = 5149u16; +pub const DEFAULT_VIRTUAL_ROUTER_PORT_WS: u16 = 5148u16; + +enum RunLoopEvent { + AddClient(SendPinBoxFuture), + Done, +} + +#[derive(Debug)] +struct RouterServerUnlockedInner { + new_client_sender: flume::Sender>, + new_client_receiver: flume::Receiver>, + server_processor: ServerProcessor, + global_state_manager: GlobalStateManager, +} + +#[derive(Debug)] +struct RouterServerInner {} + +/// Router server for virtual networking +/// +/// Connect to this with a `RouterClient`. Simulates machines, allocates sockets +/// and gateways, manages a virtual simulated Internet and routes packets +/// virtually between `Machines` associated with `RouterClient`s. 
+#[derive(Debug, Clone)] +pub struct RouterServer { + unlocked_inner: Arc, + _inner: Arc>, +} + +impl Default for RouterServer { + fn default() -> Self { + Self::new() + } +} + +impl RouterServer { + //////////////////////////////////////////////////////////////////// + // Public Interface + + /// Create a router server for virtual networking + pub fn new() -> Self { + // Make a channel to receive new clients + let (new_client_sender, new_client_receiver) = flume::unbounded(); + + // Make a machine registry to manage state + let global_state_manager = GlobalStateManager::new(); + + // Make a server processor to handle messages + let server_processor = ServerProcessor::new(global_state_manager.clone()); + + Self { + unlocked_inner: Arc::new(RouterServerUnlockedInner { + new_client_sender, + new_client_receiver, + server_processor, + global_state_manager, + }), + _inner: Arc::new(Mutex::new(RouterServerInner {})), + } + } + + /// Execute a config file on the global state manager + pub fn execute_config(&self, cfg: config::Config) -> RouterServerResult<()> { + self.unlocked_inner + .global_state_manager + .execute_config(cfg) + .map_err(RouterServerError::StateError) + } + + /// Accept RouterClient connections on a TCP socket + pub async fn listen_tcp(&self, addr: Option) -> RouterServerResult { + let listener = TcpListener::bind(addr.unwrap_or(SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::UNSPECIFIED, + DEFAULT_VIRTUAL_ROUTER_PORT_TCP, + 0, + 0, + )))) + .await + .map_err(|e| RouterServerError::IoError(e.kind()))?; + + let stop_source = StopSource::new(); + let stop_token = stop_source.token(); + + let this = self.clone(); + let listener_fut = system_boxed(async move { + loop { + // Wait for a new connection + match listener.accept().timeout_at(stop_token.clone()).await { + Ok(Ok((conn, _addr))) => { + let conn = conn.compat(); + // Register a connection processing inbound receiver + let this2 = this.clone(); + let inbound_receiver_fut = system_boxed(async move { + 
let (reader, writer) = conn.split(); + + this2.process_connection(reader, writer).await + }); + if let Err(e) = this + .unlocked_inner + .new_client_sender + .send(inbound_receiver_fut) + { + // Error registering connection processor + error!("{}", e); + break; + } + } + Ok(Err(e)) => { + // Error processing an accept + error!("{}", e); + break; + } + Err(_) => { + // Stop requested + break; + } + } + } + + RunLoopEvent::Done + }); + + self.unlocked_inner + .new_client_sender + .send(listener_fut) + .expect("should be able to send client"); + + Ok(stop_source) + } + + /// Accept RouterClient connections on a WebSocket + pub async fn listen_ws(&self, addr: Option) -> RouterServerResult { + let listener = TcpListener::bind(addr.unwrap_or(SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::UNSPECIFIED, + DEFAULT_VIRTUAL_ROUTER_PORT_WS, + 0, + 0, + )))) + .await + .map_err(|e| RouterServerError::IoError(e.kind()))?; + + let stop_source = StopSource::new(); + let stop_token = stop_source.token(); + + let this = self.clone(); + let listener_fut = system_boxed(async move { + loop { + // Wait for a new connection + match listener.accept().timeout_at(stop_token.clone()).await { + Ok(Ok((conn, _addr))) => { + let conn = conn.compat(); + if let Ok(s) = accept_async(conn).await { + let ws = WsStream::new(s); + // Register a connection processing inbound receiver + let this2 = this.clone(); + let inbound_receiver_fut = system_boxed(async move { + let (reader, writer) = ws.split(); + this2.process_connection(reader, writer).await + }); + if let Err(e) = this + .unlocked_inner + .new_client_sender + .send(inbound_receiver_fut) + { + // Error registering connection processor + error!("{}", e); + break; + } + } + } + Ok(Err(e)) => { + // Error processing an accept + error!("{}", e); + break; + } + Err(_) => { + // Stop requested + break; + } + } + } + + RunLoopEvent::Done + }); + + self.unlocked_inner + .new_client_sender + .send(listener_fut) + .expect("should be able to send client"); 
+ + Ok(stop_source) + } + + /// Return a local RouterClient + pub fn router_client(&self) -> RouterClient { + // Create the inbound/outbound channels + let (local_inbound_sender, local_inbound_receiver) = flume::unbounded(); + let (local_outbound_sender, local_outbound_receiver) = flume::unbounded(); + + let this = self.clone(); + let inbound_receiver_fut = system_boxed(async move { + local_inbound_receiver + .into_stream() + .for_each(|cmd| async { + this.unlocked_inner + .server_processor + .enqueue_command(cmd, local_outbound_sender.clone()); + }) + .await; + RunLoopEvent::Done + }); + + // Send the new client to the run loop + self.unlocked_inner + .new_client_sender + .send(inbound_receiver_fut) + .expect("should be able to send client"); + + // Create a RouterClient directly connected to this RouterServer + RouterClient::local_router_client(local_inbound_sender, local_outbound_receiver) + } + + /// Run the router server until a stop is requested + pub async fn run(&self, stop_token: StopToken) -> RouterServerResult<()> { + let mut unord = FuturesUnordered::>::new(); + + let mut need_new_client_fut = true; + + // Add server processor to run loop + unord.push( + self.unlocked_inner + .server_processor + .run_loop_process_commands(), + ); + + loop { + if need_new_client_fut { + let new_client_receiver = self.unlocked_inner.new_client_receiver.clone(); + unord.push(Box::pin(async move { + if let Ok(res) = new_client_receiver.into_recv_async().await { + return RunLoopEvent::AddClient(res); + } + RunLoopEvent::Done + })); + } + + match unord.next().timeout_at(stop_token.clone()).await { + Ok(Some(RunLoopEvent::AddClient(client_fut))) => { + // Add new client + unord.push(client_fut); + + // Wait for next new client + need_new_client_fut = true; + } + Ok(Some(RunLoopEvent::Done)) => { + // Do nothing + } + Ok(None) => { + // Finished normally + break; + } + Err(_) => { + // Stop requested + break; + } + } + } + + Ok(()) + } + + 
//////////////////////////////////////////////////////////////////// + // Private Implementation + + async fn process_connection(self, reader: R, writer: W) -> RunLoopEvent + where + R: AsyncRead + Send + Unpin, + W: AsyncWrite + Send + Unpin, + { + let framed_reader = FramedRead::new(reader, BytesCodec); + let framed_writer = FramedWrite::new(writer, BytesCodec); + + let (outbound_sender, outbound_receiver) = flume::unbounded(); + let outbound_fut = system_boxed( + outbound_receiver + .into_stream() + .map(|command| { + to_stdvec(&command) + .map_err(io::Error::other) + .map(Bytes::from) + }) + .forward(framed_writer), + ); + + let inbound_fut = system_boxed(framed_reader.try_for_each(|x| async { + let x = x; + let cmd = from_bytes::(&x).map_err(io::Error::other)?; + + self.unlocked_inner + .server_processor + .enqueue_command(cmd, outbound_sender.clone()); + + Ok(()) + })); + + let mut unord = FuturesUnordered::new(); + unord.push(outbound_fut); + unord.push(inbound_fut); + + if let Some(Err(e)) = unord.next().await { + error!("{}", e); + } + + RunLoopEvent::Done + } +} diff --git a/veilid-tools/src/virtual_network/router_server/predefined_config.yml b/veilid-tools/src/virtual_network/router_server/predefined_config.yml new file mode 100644 index 00000000..b4dd02a1 --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/predefined_config.yml @@ -0,0 +1,126 @@ +--- +# Random number seed used to generate all profile configurations +# for a test. The seed can be overriden on the VirtualRouter command +# line to choose a different test scenario. The same seed will +# generate the same configuration on all machines given the same +# configuration file. 
+seed: 0 + +# The name of the predefined network to use by default (typically +# this is '$internet') +default_network: "$internet" + +# The name of the predefined performance model to use by default (typically +# this is '$lan') +default_model: "$lan" + +# The name of the default allocation pool that subnets are allocated from +default_pool: "$internet" + +################################################################# +# Networks +# +# Networks are a location where Machines can be allocated and represent +# a network segment with address allocations per address type +# and a gateway to another network. The performance characteristics of +# a network are defined by a performance Model + +networks: + # Predefined networks + $internet: + ipv4: + allocation: "$internet" + ipv6: + allocation: "$internet" + model: "$internet" + +################################################################# +# Allocations +# +# Allocations are partitions of the address space that networks +# can be assigned to. Machines on the networks will be given +# addresses within these ranges. If an allocation +# is not specified, an address -outside- any of the allocation +# will be used (on the 'public internet'). 
+ +allocations: + # Predefined allocations + $internet: + scope4: ["0.0.0.0/0"] + scope6: ["::/0"] + $private: + scope4: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] + scope6: ["fc00::/7"] + $cgnat: + scope4: ["100.64.0.0/10"] + $linklocal: + scope4: ["169.254.0.0/16"] + scope6: ["fe80::/10"] + $localhost: + scope4: ["127.0.0.0/8"] + scope6: ["::1/128"] + $ietf: + scope4: ["192.0.0.0/24"] + $cellnat: + scope4: ["192.0.0.0/29"] + $documentation: + scope4: ["192.0.2.0/24", "198.51.100.0/24", "203.0.113.0/24"] + scope6: ["2001:db8::/32", "3fff::/20"] + $benchmark: + scope4: ["198.18.0.0/15"] + $mulitcast: + scope4: ["224.0.0.0/4"] + $mulitcasttest: + scope4: ["233.252.0.0/24"] + scope6: ["ff00::/8"] + $unspecified: + scope4: ["0.0.0.0/8"] + scope6: ["::/128"] + $reserved: + scope4: ["192.88.99.0/24", "240.0.0.0/4"] + $broadcast: + scope4: ["255.255.255.255/32"] + $mapped: + scope6: ["::ffff:0:0/96", "::ffff:0:0:0/96"] + $translation: + scope6: ["64:ff9b::/96", "64:ff9b:1::/48"] + $discard: + scope6: ["100::/64"] + $teredo: + scope6: ["2001::/32"] + $orchidv2: + scope6: ["2001:20::/28"] + $6to4: + scope6: ["2002::/16"] + $srv6: + scope6: ["5f00::/16"] + +################################################################# +# Models +# +# Performance models representing how a network behaves +# Latency models are a skewed normal distribution +# Distance is assigned over a circular probability and then +# mapped linearly as a multiplier to latency and loss + +models: + # Predefined models + $lan: + latency: + mean: 0.0038 + sigma: 0.001416 + skew: 0.0009 + min: 0.0015 + max: 0.0075 + loss: 0.0 + $internet: + distance: + min: 0.04 + max: 2.0 + latency: + mean: 0.200 + sigma: 0.080 + skew: 0 + min: 0.030 + max: 0.400 + loss: 0.01 diff --git a/veilid-tools/src/virtual_network/router_server/server_processor.rs b/veilid-tools/src/virtual_network/router_server/server_processor.rs new file mode 100644 index 00000000..e2379a26 --- /dev/null +++ 
b/veilid-tools/src/virtual_network/router_server/server_processor.rs @@ -0,0 +1,193 @@ +use super::*; + +struct ServerProcessorCommandRecord { + cmd: ServerProcessorCommand, + outbound_sender: flume::Sender, +} + +#[derive(Debug)] +struct ServerProcessorInner { + // +} + +#[derive(Debug)] +struct ServerProcessorUnlockedInner { + machine_registry: GlobalStateManager, + receiver: flume::Receiver, + sender: flume::Sender, +} + +#[derive(Debug, Clone)] +pub struct ServerProcessor { + unlocked_inner: Arc, + _inner: Arc>, +} + +impl ServerProcessor { + //////////////////////////////////////////////////////////////////////// + // Public Interface + + pub fn new(machine_registry: GlobalStateManager) -> Self { + let (sender, receiver) = flume::unbounded(); + + Self { + unlocked_inner: Arc::new(ServerProcessorUnlockedInner { + sender, + receiver, + machine_registry, + }), + _inner: Arc::new(Mutex::new(ServerProcessorInner {})), + } + } + + pub fn enqueue_command( + &self, + cmd: ServerProcessorCommand, + outbound_sender: flume::Sender, + ) { + if let Err(e) = self + .unlocked_inner + .sender + .send(ServerProcessorCommandRecord { + cmd, + outbound_sender, + }) + { + eprintln!("Dropped command: {}", e); + } + } + + pub fn run_loop_process_commands(&self) -> SendPinBoxFuture { + let receiver_stream = self.unlocked_inner.receiver.clone().into_stream(); + let this = self.clone(); + Box::pin(async move { + receiver_stream + .for_each_concurrent(None, |x| { + let this = this.clone(); + async move { + if let Err(e) = this.process_command(x.cmd, x.outbound_sender).await { + eprintln!("Failed to process command: {}", e); + } + } + }) + .await; + + RunLoopEvent::Done + }) + } + + //////////////////////////////////////////////////////////////////////// + // Private Implementation + + async fn process_command( + self, + cmd: ServerProcessorCommand, + outbound_sender: flume::Sender, + ) -> RouterServerResult<()> { + match cmd { + ServerProcessorCommand::Message(server_processor_message) 
=> { + self.process_message( + server_processor_message.message_id, + server_processor_message.request, + outbound_sender, + ) + .await + } + ServerProcessorCommand::CloseSocket { + machine_id, + socket_id, + } => { + self.process_close_socket(machine_id, socket_id, outbound_sender) + .await + } + } + } + async fn process_close_socket( + self, + machine_id: MachineId, + socket_id: SocketId, + outbound_sender: flume::Sender, + ) -> RouterServerResult<()> { + // + Ok(()) + } + + async fn process_message( + self, + message_id: MessageId, + request: ServerProcessorRequest, + outbound_sender: flume::Sender, + ) -> RouterServerResult<()> { + match request { + ServerProcessorRequest::AllocateMachine { profile } => todo!(), + ServerProcessorRequest::ReleaseMachine { machine_id } => todo!(), + ServerProcessorRequest::GetInterfaces { machine_id } => todo!(), + ServerProcessorRequest::TcpConnect { + machine_id, + local_address, + remote_address, + timeout_ms, + options, + } => todo!(), + ServerProcessorRequest::TcpBind { + machine_id, + local_address, + options, + } => todo!(), + ServerProcessorRequest::TcpAccept { + machine_id, + listen_socket_id, + } => todo!(), + ServerProcessorRequest::TcpShutdown { + machine_id, + socket_id, + } => todo!(), + ServerProcessorRequest::UdpBind { + machine_id, + local_address, + options, + } => todo!(), + ServerProcessorRequest::Send { + machine_id, + socket_id, + data, + } => todo!(), + ServerProcessorRequest::SendTo { + machine_id, + socket_id, + remote_address, + data, + } => todo!(), + ServerProcessorRequest::Recv { + machine_id, + socket_id, + len, + } => todo!(), + ServerProcessorRequest::RecvFrom { + machine_id, + socket_id, + len, + } => todo!(), + ServerProcessorRequest::GetRoutedLocalAddress { + machine_id, + address_type, + } => todo!(), + ServerProcessorRequest::FindGateway { machine_id } => todo!(), + ServerProcessorRequest::GetExternalAddress { gateway_id } => todo!(), + ServerProcessorRequest::AddPort { + gateway_id, + 
protocol, + external_port, + local_address, + lease_duration_ms, + description, + } => todo!(), + ServerProcessorRequest::RemovePort { + gateway_id, + protocol, + external_port, + } => todo!(), + ServerProcessorRequest::TXTQuery { name } => todo!(), + } + } +} diff --git a/veilid-tools/src/virtual_network/router_server/stable_rng.rs b/veilid-tools/src/virtual_network/router_server/stable_rng.rs new file mode 100644 index 00000000..071d45ff --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/stable_rng.rs @@ -0,0 +1,127 @@ +use super::*; + +use rand::{seq::SliceRandom, Rng, SeedableRng}; +use rand_chacha::ChaCha20Rng; + +#[derive(Clone)] +pub struct StableRngState { + srng: ChaCha20Rng, + count: usize, +} + +impl fmt::Debug for StableRngState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("StableRngInner") + .field("count", &self.count) + .finish() + } +} + +#[derive(Clone, Debug)] +pub struct StableRng { + state: StableRngState, +} + +impl StableRng { + //////////////////////////////////////////////////////// + // Public Interface + + pub fn new(seed: u64) -> Self { + Self { + state: StableRngState { + srng: ChaCha20Rng::seed_from_u64(seed), + count: 0, + }, + } + } + + pub fn save_state(&self) -> StableRngState { + self.state.clone() + } + + pub fn restore_state(&mut self, state: StableRngState) { + self.state = state; + } + + pub fn probability_test(&mut self, probability: Probability) -> bool { + if probability == 1.0 { + return true; + } else if probability == 0.0 { + return false; + } + let num = self.next_f32(0.0, 1.0); + num < probability + } + + pub fn weighted_choice_ref<'a, T: fmt::Debug + Clone>( + &mut self, + weighted_list: &'a WeightedList, + ) -> &'a T { + match weighted_list { + WeightedList::Single(x) => x, + WeightedList::List(vec) => { + let total_weight = vec + .iter() + .map(|x| x.weight()) + .reduce(|acc, x| acc + x) + .expect("config validation broken"); + + let r = self.next_f32(0.0, 
total_weight); + let mut current_weight = 0.0f32; + for x in vec { + current_weight += x.weight(); + if r < current_weight { + return x.item(); + } + } + // Catch f32 imprecision + vec.last().expect("config validation broken").item() + } + } + } + + pub fn weighted_choice(&mut self, weighted_list: WeightedList) -> T { + match weighted_list { + WeightedList::Single(x) => x, + WeightedList::List(mut vec) => { + let total_weight = vec + .iter() + .map(|x| x.weight()) + .reduce(|acc, x| acc + x) + .expect("config validation broken"); + + let r = self.next_f32(0.0, total_weight); + let mut current_weight = 0.0f32; + let last = vec.pop().expect("config validation broken").into_item(); + for x in vec { + current_weight += x.weight(); + if r < current_weight { + return x.into_item(); + } + } + // Catch f32 imprecision + last + } + } + } + + pub fn shuffle_vec(&mut self, v: &mut Vec) { + self.state.count += 1; + v.shuffle(&mut self.state.srng); + } + + pub fn next_u32(&mut self, min: u32, max: u32) -> u32 { + self.state.count += 1; + self.state.srng.gen_range(min..=max) + } + + pub fn next_u128(&mut self, min: u128, max: u128) -> u128 { + self.state.count += 1; + self.state.srng.gen_range(min..=max) + } + + pub fn next_f32(&mut self, min: f32, max: f32) -> f32 { + self.state.count += 1; + self.state.srng.gen_range(min..=max) + } +} diff --git a/veilid-tools/src/virtual_network/router_server/weighted_list.rs b/veilid-tools/src/virtual_network/router_server/weighted_list.rs new file mode 100644 index 00000000..6e1a9dee --- /dev/null +++ b/veilid-tools/src/virtual_network/router_server/weighted_list.rs @@ -0,0 +1,329 @@ +use super::*; +use serde::*; +use validator::{Validate, ValidationError, ValidationErrors}; + +pub type Probability = f32; + +////////////////////////////////////////////////////////////////////////// +/// WeightedList + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum WeightedList { + Single(T), + List(Vec>), +} +impl Default for 
WeightedList { + fn default() -> Self { + Self::List(Vec::new()) + } +} +impl WeightedList { + pub fn len(&self) -> usize { + match self { + WeightedList::Single(_) => 1, + WeightedList::List(vec) => vec.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn try_for_each Result<(), E>>(&self, mut f: F) -> Result<(), E> { + match self { + WeightedList::Single(v) => f(v), + WeightedList::List(vec) => vec + .iter() + .map(|v| match v { + Weighted::Weighted { item, weight: _ } => item, + Weighted::Unweighted(item) => item, + }) + .try_for_each(f), + } + } + + pub fn map(&self, mut map: F) -> WeightedList + where + F: FnMut(&T) -> S, + S: fmt::Debug + Clone, + { + match self { + WeightedList::Single(v) => WeightedList::Single(map(v)), + WeightedList::List(vec) => { + let mut out = Vec::>::with_capacity(vec.len()); + for v in vec { + out.push(match v { + Weighted::Weighted { item, weight } => Weighted::Weighted { + item: map(item), + weight: *weight, + }, + Weighted::Unweighted(item) => Weighted::Unweighted(map(item)), + }); + } + WeightedList::List(out) + } + } + } + + pub fn filter(&self, mut filter: F) -> Option> + where + F: FnMut(&T) -> bool, + { + match self { + WeightedList::Single(v) => { + if filter(v) { + Some(self.clone()) + } else { + None + } + } + WeightedList::List(vec) => { + let mut out = Vec::>::with_capacity(vec.len()); + for v in vec { + if filter(v.item()) { + out.push(v.clone()); + } + } + if out.is_empty() { + None + } else { + Some(WeightedList::List(out)) + } + } + } + } + + pub fn try_filter(&self, mut filter: F) -> Result>, E> + where + F: FnMut(&T) -> Result, + { + match self { + WeightedList::Single(v) => { + if filter(v)? { + Ok(Some(self.clone())) + } else { + Ok(None) + } + } + WeightedList::List(vec) => { + let mut out = Vec::>::with_capacity(vec.len()); + for v in vec { + if filter(v.item())? 
{ + out.push(v.clone()); + } + } + if out.is_empty() { + Ok(None) + } else { + Ok(Some(WeightedList::List(out))) + } + } + } + } + pub fn try_filter_map(&self, mut filter: F) -> Result>, E> + where + F: FnMut(&T) -> Result, E>, + S: fmt::Debug + Clone, + { + match self { + WeightedList::Single(v) => { + if let Some(item) = filter(v)? { + Ok(Some(WeightedList::Single(item))) + } else { + Ok(None) + } + } + WeightedList::List(vec) => { + let mut out = Vec::>::with_capacity(vec.len()); + for v in vec { + if let Some(item) = filter(v.item())? { + out.push(match v { + Weighted::Weighted { item: _, weight } => Weighted::Weighted { + item, + weight: *weight, + }, + Weighted::Unweighted(_) => Weighted::Unweighted(item), + }); + } + } + if out.is_empty() { + Ok(None) + } else { + Ok(Some(WeightedList::List(out))) + } + } + } + } + + pub fn try_map(&self, mut filter: F) -> Result, E> + where + F: FnMut(&T) -> Result, + S: fmt::Debug + Clone, + { + match self { + WeightedList::Single(v) => { + let item = filter(v)?; + Ok(WeightedList::Single(item)) + } + WeightedList::List(vec) => { + let mut out = Vec::>::with_capacity(vec.len()); + for v in vec { + let item = filter(v.item())?; + + out.push(match v { + Weighted::Weighted { item: _, weight } => Weighted::Weighted { + item, + weight: *weight, + }, + Weighted::Unweighted(_) => Weighted::Unweighted(item), + }); + } + Ok(WeightedList::List(out)) + } + } + } + + pub fn iter(&self) -> WeightedListIter<'_, T> { + WeightedListIter { + values: self, + index: 0, + } + } +} + +////////////////////////////////////////////////////////////////////////// +/// Index + +impl core::ops::Index for WeightedList { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + match self { + WeightedList::Single(s) => s, + WeightedList::List(vec) => vec[index].item(), + } + } +} + +////////////////////////////////////////////////////////////////////////// +/// Iterator + +pub struct WeightedListIter<'a, T: fmt::Debug + Clone> { + 
values: &'a WeightedList, + index: usize, +} + +impl<'a, T: fmt::Debug + Clone> Iterator for WeightedListIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + if self.index >= self.values.len() { + return None; + } + + self.index += 1; + Some(&self.values[self.index - 1]) + } +} + +////////////////////////////////////////////////////////////////////////// +/// Validate + +impl Validate for WeightedList { + fn validate(&self) -> Result<(), ValidationErrors> { + let mut errors = ValidationErrors::new(); + + // Ensure weighted list does not have duplicates + let items = self.iter().collect::>(); + if items.len() != self.len() { + errors.add( + "List", + ValidationError::new("weightdup") + .with_message("weighted list must not have duplicate items".into()), + ); + } + + // Make sure list is not empty + match self { + Self::List(v) => { + if v.is_empty() { + errors.add( + "List", + ValidationError::new("len") + .with_message("weighted list must not be empty".into()), + ) + } + errors.merge_self("List", v.validate()); + } + Self::Single(_addr) => {} + } + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } +} + +// impl WeightedList { +// pub fn validate_once(&self) -> Result<(), ValidationError> { +// self.validate().map_err(|errs| { +// ValidationError::new("multiple") +// .with_message(format!("multiple validation errors: {}", errs).into()) +// }) +// } +// } + +////////////////////////////////////////////////////////////////////////// +/// Weighted + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum Weighted { + Weighted { item: T, weight: f32 }, + Unweighted(T), +} + +impl Validate for Weighted { + fn validate(&self) -> Result<(), ValidationErrors> { + let mut errors = ValidationErrors::new(); + if let Self::Weighted { item: _, weight } = self { + if *weight <= 0.0 { + errors.add( + "Weighted", + ValidationError::new("len") + .with_message("weight must be a positive value".into()), + ) + } + } + + if 
errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } +} + +impl Weighted { + pub fn item(&self) -> &T { + match self { + Weighted::Weighted { item, weight: _ } => item, + Weighted::Unweighted(item) => item, + } + } + pub fn into_item(self) -> T { + match self { + Weighted::Weighted { item, weight: _ } => item, + Weighted::Unweighted(item) => item, + } + } + pub fn weight(&self) -> f32 { + match self { + Weighted::Weighted { item: _, weight } => *weight, + Weighted::Unweighted(_) => 1.0f32, + } + } +} diff --git a/veilid-tools/src/virtual_network/serde_io_error.rs b/veilid-tools/src/virtual_network/serde_io_error.rs new file mode 100644 index 00000000..3798db96 --- /dev/null +++ b/veilid-tools/src/virtual_network/serde_io_error.rs @@ -0,0 +1,70 @@ +use serde::*; + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, Deserialize)] +#[allow(deprecated)] +#[non_exhaustive] +#[serde(remote = "std::io::ErrorKind")] +pub enum SerdeIoErrorKindDef { + NotFound, + PermissionDenied, + ConnectionRefused, + ConnectionReset, + // #[cfg(feature = "io_error_more")] + // HostUnreachable, + // #[cfg(feature = "io_error_more")] + // NetworkUnreachable, + ConnectionAborted, + NotConnected, + AddrInUse, + AddrNotAvailable, + // #[cfg(feature = "io_error_more")] + // NetworkDown, + BrokenPipe, + AlreadyExists, + WouldBlock, + // #[cfg(feature = "io_error_more")] + // NotADirectory, + // #[cfg(feature = "io_error_more")] + // IsADirectory, + // #[cfg(feature = "io_error_more")] + // DirectoryNotEmpty, + // #[cfg(feature = "io_error_more")] + // ReadOnlyFilesystem, + // #[cfg(feature = "io_error_more")] + // FilesystemLoop, + // #[cfg(feature = "io_error_more")] + // StaleNetworkFileHandle, + InvalidInput, + InvalidData, + TimedOut, + WriteZero, + // #[cfg(feature = "io_error_more")] + // StorageFull, + // #[cfg(feature = "io_error_more")] + // NotSeekable, + // #[cfg(feature = "io_error_more")] + // FilesystemQuotaExceeded, + // #[cfg(feature = 
"io_error_more")] + // FileTooLarge, + // #[cfg(feature = "io_error_more")] + // ResourceBusy, + // #[cfg(feature = "io_error_more")] + // ExecutableFileBusy, + // #[cfg(feature = "io_error_more")] + // Deadlock, + // #[cfg(feature = "io_error_more")] + // CrossesDevices, + // #[cfg(feature = "io_error_more")] + // TooManyLinks, + // #[cfg(feature = "io_error_more")] + // InvalidFilename, + // #[cfg(feature = "io_error_more")] + // ArgumentListTooLong, + Interrupted, + Unsupported, + UnexpectedEof, + OutOfMemory, + Other, + // #[cfg(feature = "io_error_uncategorized")] + // Uncategorized, +} diff --git a/veilid-tools/src/virtual_network/virtual_gateway.rs b/veilid-tools/src/virtual_network/virtual_gateway.rs new file mode 100644 index 00000000..00698502 --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_gateway.rs @@ -0,0 +1,89 @@ +use super::*; + +#[derive(Debug)] +pub struct VirtualGateway { + machine: Machine, + gateway_id: GatewayId, +} + +impl VirtualGateway { + ///////////////////////////////////////////////////////////// + // Public Interface + + pub async fn find() -> VirtualNetworkResult> { + let machine = default_machine().unwrap(); + Self::find_with_machine(machine).await + } + + pub async fn find_with_machine(machine: Machine) -> VirtualNetworkResult> { + machine + .router_client + .clone() + .find_gateway(machine.id) + .await + .map(|opt_gateway_id| opt_gateway_id.map(|gateway_id| Self::new(machine, gateway_id))) + } + + pub async fn get_routed_local_address( + &self, + address_type: VirtualAddressType, + ) -> VirtualNetworkResult { + self.machine + .router_client + .clone() + .get_routed_local_address(self.machine.id, address_type) + .await + } + + pub async fn get_external_address(&self) -> VirtualNetworkResult { + self.machine + .router_client + .clone() + .get_external_address(self.gateway_id) + .await + } + + pub async fn add_port( + &self, + protocol: VirtualProtocolType, + external_port: Option, + local_address: SocketAddr, + 
lease_duration_ms: u32, + description: String, + ) -> VirtualNetworkResult { + self.machine + .router_client + .clone() + .add_port( + self.gateway_id, + protocol, + external_port, + local_address, + lease_duration_ms, + description, + ) + .await + } + + pub async fn remove_port( + &self, + protocol: VirtualProtocolType, + external_port: u16, + ) -> VirtualNetworkResult<()> { + self.machine + .router_client + .clone() + .remove_port(self.gateway_id, protocol, external_port) + .await + } + + ///////////////////////////////////////////////////////////// + // Private Implementation + + fn new(machine: Machine, gateway_id: GatewayId) -> Self { + Self { + machine, + gateway_id, + } + } +} diff --git a/veilid-tools/src/virtual_network/virtual_network_error.rs b/veilid-tools/src/virtual_network/virtual_network_error.rs new file mode 100644 index 00000000..4e7b564b --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_network_error.rs @@ -0,0 +1,33 @@ +use super::*; + +use std::io; +#[derive(ThisError, Clone, Debug, PartialEq, Eq)] +pub enum VirtualNetworkError { + #[error("Serialization Error: {0}")] + SerializationError(postcard::Error), + #[error("Response Mismatch")] + ResponseMismatch, + #[error("Wait error")] + WaitError, + #[error("Invalid machine id")] + InvalidMachineId, + #[error("Invalid socket id")] + InvalidSocketId, + #[error("Missing profile")] + MissingProfile, + #[error("Profile complete")] + ProfileComplete, + #[error("Io error: {0}")] + IoError(io::ErrorKind), +} + +impl From for io::Error { + fn from(value: VirtualNetworkError) -> Self { + match value { + VirtualNetworkError::IoError(e) => io::Error::from(e), + e => io::Error::other(e), + } + } +} + +pub type VirtualNetworkResult = Result; diff --git a/veilid-tools/src/virtual_network/virtual_tcp_listener.rs b/veilid-tools/src/virtual_network/virtual_tcp_listener.rs new file mode 100644 index 00000000..19555cd5 --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_tcp_listener.rs @@ -0,0 
+1,67 @@ +use super::*; + +#[derive(Debug)] +pub struct VirtualTcpListener { + pub(super) machine: Machine, + pub(super) socket_id: SocketId, + pub(super) local_address: SocketAddr, +} + +impl VirtualTcpListener { + ///////////////////////////////////////////////////////////// + // Public Interface + + pub async fn bind( + opt_local_address: Option, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult { + let machine = default_machine().unwrap(); + Self::bind_with_machine(machine, opt_local_address, options).await + } + + pub async fn bind_with_machine( + machine: Machine, + opt_local_address: Option, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult { + machine + .router_client + .clone() + .tcp_bind(machine.id, opt_local_address, options) + .await + .map(|(socket_id, local_address)| Self::new(machine, socket_id, local_address)) + } + + pub async fn accept(&self) -> VirtualNetworkResult<(VirtualTcpStream, SocketAddr)> { + self.machine + .router_client + .clone() + .tcp_accept(self.machine.id, self.socket_id) + .await + .map(|v| { + ( + VirtualTcpStream::new(self.machine.clone(), v.0, self.local_address, v.1), + v.1, + ) + }) + } + + ///////////////////////////////////////////////////////////// + // Private Implementation + + fn new(machine: Machine, socket_id: SocketId, local_address: SocketAddr) -> Self { + Self { + machine, + socket_id, + local_address, + } + } +} + +impl Drop for VirtualTcpListener { + fn drop(&mut self) { + self.machine + .router_client + .drop_tcp_listener(self.machine.id, self.socket_id); + } +} diff --git a/veilid-tools/src/virtual_network/virtual_tcp_listener_stream.rs b/veilid-tools/src/virtual_network/virtual_tcp_listener_stream.rs new file mode 100644 index 00000000..31d79315 --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_tcp_listener_stream.rs @@ -0,0 +1,86 @@ +use super::*; + +use core::pin::Pin; +use core::task::{Context, Poll}; +use futures_util::{stream::Stream, FutureExt}; +use std::io; + +/// A wrapper 
around [`VirtualTcpListener`] that implements [`Stream`]. +/// +/// [`VirtualTcpListener`]: struct@crate::VirtualTcpListener +/// [`Stream`]: trait@futures_util::stream::Stream +pub struct VirtualTcpListenerStream { + inner: VirtualTcpListener, + current_accept_fut: Option>>, +} + +impl fmt::Debug for VirtualTcpListenerStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VirtualTcpListenerStream") + .field("inner", &self.inner) + .field( + "current_accept_fut", + if self.current_accept_fut.is_some() { + &"Some(...)" + } else { + &"None" + }, + ) + .finish() + } +} + +impl VirtualTcpListenerStream { + /// Create a new `VirtualTcpListenerStream`. + pub fn new(listener: VirtualTcpListener) -> Self { + Self { + inner: listener, + current_accept_fut: None, + } + } + + /// Get back the inner `VirtualTcpListener`. + pub fn into_inner(self) -> VirtualTcpListener { + self.inner + } +} + +impl Stream for VirtualTcpListenerStream { + type Item = io::Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + if self.current_accept_fut.is_none() { + let machine_id = self.inner.machine.id; + let router_client = self.inner.machine.router_client.clone(); + let socket_id = self.inner.socket_id; + + self.current_accept_fut = + Some(Box::pin(router_client.tcp_accept(machine_id, socket_id))); + } + let fut = self.current_accept_fut.as_mut().unwrap(); + fut.poll_unpin(cx).map(|v| match v { + Ok(v) => Some(Ok(VirtualTcpStream::new( + self.inner.machine.clone(), + v.0, + self.inner.local_address, + v.1, + ))), + Err(e) => Some(Err(e.into())), + }) + } +} + +impl AsRef for VirtualTcpListenerStream { + fn as_ref(&self) -> &VirtualTcpListener { + &self.inner + } +} + +impl AsMut for VirtualTcpListenerStream { + fn as_mut(&mut self) -> &mut VirtualTcpListener { + &mut self.inner + } +} diff --git a/veilid-tools/src/virtual_network/virtual_tcp_stream.rs b/veilid-tools/src/virtual_network/virtual_tcp_stream.rs new file 
mode 100644 index 00000000..6cb63dc8 --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_tcp_stream.rs @@ -0,0 +1,210 @@ +use super::*; +use futures_util::FutureExt; +use serde::*; + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct VirtualTcpOptions { + linger: Option, + no_delay: bool, + only_v6: bool, + reuse_address_port: bool, +} + +pub struct VirtualTcpStream { + machine: Machine, + socket_id: SocketId, + local_address: SocketAddr, + remote_address: SocketAddr, + current_recv_fut: Option, VirtualNetworkError>>>, + current_send_fut: Option>>, + current_tcp_shutdown_fut: Option>>, +} + +impl fmt::Debug for VirtualTcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VirtualTcpStream") + .field("machine", &self.machine) + .field("socket_id", &self.socket_id) + .field("local_address", &self.local_address) + .field("remote_address", &self.remote_address) + .field( + "current_recv_fut", + if self.current_recv_fut.is_some() { + &"Some(...)" + } else { + &"None" + }, + ) + .field( + "current_send_fut", + if self.current_send_fut.is_some() { + &"Some(...)" + } else { + &"None" + }, + ) + .field( + "current_close_fut", + if self.current_tcp_shutdown_fut.is_some() { + &"Some(...)" + } else { + &"None" + }, + ) + .finish() + } +} + +impl VirtualTcpStream { + ////////////////////////////////////////////////////////////////////////// + // Public Interface + + pub async fn connect( + remote_address: SocketAddr, + local_address: Option, + timeout_ms: u32, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult { + let machine = default_machine().unwrap(); + Self::connect_with_machine(machine, remote_address, local_address, timeout_ms, options) + .await + } + + pub async fn connect_with_machine( + machine: Machine, + remote_address: SocketAddr, + local_address: Option, + timeout_ms: u32, + options: VirtualTcpOptions, + ) -> VirtualNetworkResult { + machine + .router_client + .clone() + .tcp_connect( + 
machine.id, + remote_address, + local_address, + timeout_ms, + options, + ) + .await + .map(|(socket_id, local_address)| { + Self::new(machine, socket_id, local_address, remote_address) + }) + } + + pub fn local_addr(&self) -> VirtualNetworkResult { + Ok(self.local_address) + } + + pub fn peer_addr(&self) -> VirtualNetworkResult { + Ok(self.remote_address) + } + + ////////////////////////////////////////////////////////////////////////// + // Private Implementation + + pub(super) fn new( + machine: Machine, + socket_id: SocketId, + local_address: SocketAddr, + remote_address: SocketAddr, + ) -> Self { + Self { + machine, + socket_id, + local_address, + remote_address, + current_recv_fut: None, + current_send_fut: None, + current_tcp_shutdown_fut: None, + } + } +} + +impl Drop for VirtualTcpStream { + fn drop(&mut self) { + self.machine + .router_client + .drop_tcp_stream(self.machine.id, self.socket_id); + } +} + +impl futures_util::AsyncRead for VirtualTcpStream { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + buf: &mut [u8], + ) -> task::Poll> { + if self.current_recv_fut.is_none() { + self.current_recv_fut = Some(Box::pin(self.machine.router_client.clone().recv( + self.machine.id, + self.socket_id, + buf.len(), + ))); + } + let fut = self.current_recv_fut.as_mut().unwrap(); + fut.poll_unpin(cx).map(|v| match v { + Ok(v) => { + let len = usize::min(buf.len(), v.len()); + buf[0..len].copy_from_slice(&v[0..len]); + self.current_recv_fut = None; + Ok(len) + } + Err(e) => Err(e.into()), + }) + } +} + +impl futures_util::AsyncWrite for VirtualTcpStream { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + buf: &[u8], + ) -> task::Poll> { + if self.current_send_fut.is_none() { + self.current_send_fut = Some(Box::pin(self.machine.router_client.clone().send( + self.machine.id, + self.socket_id, + buf.to_vec(), + ))); + } + let fut = self.current_send_fut.as_mut().unwrap(); + fut.poll_unpin(cx).map(|v| match v { + 
Ok(v) => { + self.current_send_fut = None; + Ok(v) + } + Err(e) => Err(e.into()), + }) + } + + fn poll_flush( + self: Pin<&mut Self>, + _cx: &mut task::Context<'_>, + ) -> task::Poll> { + task::Poll::Ready(Ok(())) + } + + fn poll_close( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> task::Poll> { + if self.current_tcp_shutdown_fut.is_none() { + self.current_tcp_shutdown_fut = Some(Box::pin( + self.machine + .router_client + .clone() + .tcp_shutdown(self.machine.id, self.socket_id), + )); + } + let fut = self.current_tcp_shutdown_fut.as_mut().unwrap(); + fut.poll_unpin(cx).map(|v| match v { + Ok(v) => { + self.current_tcp_shutdown_fut = None; + Ok(v) + } + Err(e) => Err(e.into()), + }) + } +} diff --git a/veilid-tools/src/virtual_network/virtual_udp_socket.rs b/veilid-tools/src/virtual_network/virtual_udp_socket.rs new file mode 100644 index 00000000..926196f4 --- /dev/null +++ b/veilid-tools/src/virtual_network/virtual_udp_socket.rs @@ -0,0 +1,86 @@ +use super::*; +use serde::*; + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct VirtualUdpOptions { + only_v6: bool, + reuse_address_port: bool, +} + +#[derive(Debug)] +pub struct VirtualUdpSocket { + machine: Machine, + socket_id: SocketId, + local_address: SocketAddr, +} + +impl VirtualUdpSocket { + ///////////////////////////////////////////////////////////// + // Public Interface + + pub async fn bind( + opt_local_address: Option, + options: VirtualUdpOptions, + ) -> VirtualNetworkResult { + let machine = default_machine().unwrap(); + Self::bind_with_machine(machine, opt_local_address, options).await + } + + pub async fn bind_with_machine( + machine: Machine, + opt_local_address: Option, + options: VirtualUdpOptions, + ) -> VirtualNetworkResult { + machine + .router_client + .clone() + .udp_bind(machine.id, opt_local_address, options) + .await + .map(|(socket_id, local_address)| Self::new(machine, socket_id, local_address)) + } + + pub async fn send_to(&self, buf: &[u8], 
target: SocketAddr) -> VirtualNetworkResult { + self.machine + .router_client + .clone() + .send_to(self.machine.id, self.socket_id, target, buf.to_vec()) + .await + } + + pub async fn recv_from(&self, buf: &mut [u8]) -> VirtualNetworkResult<(usize, SocketAddr)> { + let (v, addr) = self + .machine + .router_client + .clone() + .recv_from(self.machine.id, self.socket_id, buf.len()) + .await?; + + let len = usize::min(buf.len(), v.len()); + buf[0..len].copy_from_slice(&v[0..len]); + + Ok((len, addr)) + } + + pub fn local_addr(&self) -> VirtualNetworkResult { + Ok(self.local_address) + } + + ///////////////////////////////////////////////////////////// + // Private Implementation + + fn new(machine: Machine, socket_id: SocketId, local_address: SocketAddr) -> Self { + Self { + machine, + socket_id, + local_address, + } + } +} + +impl Drop for VirtualUdpSocket { + fn drop(&mut self) { + self.machine + .router_client + .drop_udp_socket(self.machine.id, self.socket_id); + } +} diff --git a/veilid-tools/src/wasm.rs b/veilid-tools/src/wasm.rs index d658a609..5d854931 100644 --- a/veilid-tools/src/wasm.rs +++ b/veilid-tools/src/wasm.rs @@ -1,7 +1,9 @@ use super::*; use core::sync::atomic::{AtomicI8, AtomicU32, Ordering}; use js_sys::{global, Reflect}; +use std::io; use wasm_bindgen::prelude::*; +use ws_stream_wasm::WsErr; #[wasm_bindgen] extern "C" { @@ -66,13 +68,6 @@ pub fn is_ipv6_supported() -> bool { if let Some(supp) = *opt_supp { return supp; } - // let supp = match UdpSocket::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) { - // Ok(_) => true, - // Err(e) => !matches!( - // e.kind(), - // std::io::ErrorKind::AddrNotAvailable | std::io::ErrorKind::Unsupported - // ), - // }; // XXX: See issue #92 let supp = false; @@ -96,3 +91,26 @@ pub fn get_concurrency() -> u32 { res } + +pub fn ws_err_to_io_error(err: WsErr) -> io::Error { + match err { + WsErr::InvalidWsState { supplied: _ } => { + io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) + } + 
WsErr::ConnectionNotOpen => io::Error::new(io::ErrorKind::NotConnected, err.to_string()), + WsErr::InvalidUrl { supplied: _ } => { + io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) + } + WsErr::InvalidCloseCode { supplied: _ } => { + io::Error::new(io::ErrorKind::InvalidInput, err.to_string()) + } + WsErr::ReasonStringToLong => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), + WsErr::ConnectionFailed { event: _ } => { + io::Error::new(io::ErrorKind::ConnectionRefused, err.to_string()) + } + WsErr::InvalidEncoding => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), + WsErr::CantDecodeBlob => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), + WsErr::UnknownDataType => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()), + _ => io::Error::new(io::ErrorKind::Other, err.to_string()), + } +} diff --git a/veilid-tools/tests/web.rs b/veilid-tools/tests/web.rs index 96ce61f6..8d4a68ea 100644 --- a/veilid-tools/tests/web.rs +++ b/veilid-tools/tests/web.rs @@ -1,5 +1,5 @@ //! Test suite for the Web and headless browsers. 
-#![cfg(target_arch = "wasm32")] +#![cfg(all(target_arch = "wasm32", target_os = "unknown"))] use cfg_if::*; use parking_lot::Once; diff --git a/veilid-wasm/Cargo.toml b/veilid-wasm/Cargo.toml index 3b9e6c73..161fded6 100644 --- a/veilid-wasm/Cargo.toml +++ b/veilid-wasm/Cargo.toml @@ -4,12 +4,12 @@ name = "veilid-wasm" version = "0.4.1" # --- description = "Veilid bindings for WebAssembly" -repository = "https://gitlab.com/veilid/veilid" -authors = ["Veilid Team "] -license = "MPL-2.0" -edition = "2021" -rust-version = "1.81.0" resolver = "2" +repository.workspace = true +authors.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [lib] crate-type = ["cdylib", "rlib"] diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 5983f580..eed6e5ab 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -2,7 +2,7 @@ #![deny(clippy::all)] #![allow(clippy::comparison_chain, clippy::upper_case_acronyms)] #![deny(unused_must_use)] -#![cfg(target_arch = "wasm32")] +#![cfg(all(target_arch = "wasm32", target_os = "unknown"))] #![no_std] /// Veilid WASM Bindings for Flutter/Dart, as well as Native Javascript @@ -142,7 +142,7 @@ where // WASM-specific #[derive(Debug, Deserialize, Serialize)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidWASMConfigLoggingPerformance { pub enabled: bool, pub level: veilid_core::VeilidConfigLogLevel, @@ -152,7 +152,7 @@ pub struct VeilidWASMConfigLoggingPerformance { } #[derive(Debug, Deserialize, Serialize)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidWASMConfigLoggingAPI { pub enabled: bool, pub level: veilid_core::VeilidConfigLogLevel, @@ -160,28 +160,35 @@ pub struct VeilidWASMConfigLoggingAPI { } #[derive(Debug, Deserialize, Serialize)] -#[cfg_attr(target_arch = 
"wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] pub struct VeilidWASMConfigLogging { pub performance: VeilidWASMConfigLoggingPerformance, pub api: VeilidWASMConfigLoggingAPI, } #[derive(Debug, Deserialize, Serialize)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi))] +#[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + derive(Tsify), + tsify(from_wasm_abi) +)] pub struct VeilidWASMConfig { pub logging: VeilidWASMConfigLogging, } #[derive(Debug, Deserialize, Serialize)] #[cfg_attr( - target_arch = "wasm32", + all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify), tsify(from_wasm_abi, into_wasm_abi) )] pub struct VeilidRouteBlob { pub route_id: veilid_core::RouteId, #[serde(with = "veilid_core::as_human_base64")] - #[cfg_attr(target_arch = "wasm32", tsify(type = "string"))] + #[cfg_attr( + all(target_arch = "wasm32", target_os = "unknown"), + tsify(type = "string") + )] pub blob: Vec, } from_impl_to_jsvalue!(VeilidRouteBlob); @@ -1587,7 +1594,7 @@ pub fn veilid_version_string() -> String { } #[derive(Serialize)] -#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] #[tsify(into_wasm_abi)] pub struct VeilidVersion { pub major: u32, diff --git a/veilid-wasm/src/wasm_helpers.rs b/veilid-wasm/src/wasm_helpers.rs index 6fe034a5..df3e91fa 100644 --- a/veilid-wasm/src/wasm_helpers.rs +++ b/veilid-wasm/src/wasm_helpers.rs @@ -1,7 +1,7 @@ use super::*; cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { + if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { pub use wasm_bindgen::prelude::*; macro_rules! 
from_impl_to_jsvalue { diff --git a/veilid-wasm/tests/package-lock.json b/veilid-wasm/tests/package-lock.json index 52cb6c75..e8975760 100644 --- a/veilid-wasm/tests/package-lock.json +++ b/veilid-wasm/tests/package-lock.json @@ -21,7 +21,7 @@ }, "../pkg": { "name": "veilid-wasm", - "version": "0.3.4", + "version": "0.4.1", "dev": true, "license": "MPL-2.0" },