diff --git a/.vscode/launch.json b/.vscode/launch.json index 07ea4a8b..c3af759b 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -3,27 +3,44 @@ // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", + "inputs": [ + { + "id": "pickPid", + "type": "promptString", + "description": "Enter process id" + } + ], "configurations": [ { "type": "lldb", "request": "attach", "name": "Attach to veilid-server", "program": "${workspaceFolder}/target/debug/veilid-server", - "pid": "${command:pickMyProcess}" + "pid": "${command:pickMyProcess}", + "sourceLanguages": [ + "rust" + ] }, { "type": "lldb", "request": "attach", "name": "Attach to veilid-cli", "program": "${workspaceFolder}/target/debug/veilid-cli", - "pid": "${command:pickMyProcess}" + "pid": "${command:pickMyProcess}", + "sourceLanguages": [ + "rust" + ] }, { "type": "lldb", "request": "attach", "name": "Attach to veilid-flutter example", "program": "${workspaceFolder}/veilid-flutter/example/build/linux/x64/debug/bundle/veilid_example", - "pid": "${command:pickMyProcess}" + "pid": "${command:pickMyProcess}", + "sourceLanguages": [ + "rust", + "dart" + ] }, { "type": "lldb", @@ -42,16 +59,6 @@ ], "terminal": "console" }, - // { - // "type": "lldb", - // "request": "launch", - // "name": "Debug veilid-server", - // "cargo": { - // "args": ["run", "--manifest-path", "veilid-server/Cargo.toml"] - // }, - // "args": ["--trace"], - // "cwd": "${workspaceFolder}/veilid-server" - // } { "type": "lldb", "request": "launch", @@ -75,6 +82,7 @@ "args": [ "test", "--no-run", + "--features=rt-tokio", "--manifest-path", "veilid-core/Cargo.toml" ], diff --git a/Cargo.lock b/Cargo.lock index 8e5e591a..0d769411 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -51,9 +51,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.8", @@ -72,9 +72,9 @@ dependencies = [ [[package]] name = "allo-isolate" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb993621e6bf1b67591005b0adad126159a0ab31af379743906158aed5330d0" +checksum = "8ed55848be9f41d44c79df6045b680a74a78bc579e0813f7f196cd7928e22fb1" dependencies = [ "atomic", ] @@ -388,9 +388,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.60" +version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" +checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1" dependencies = [ "proc-macro2", "quote", @@ -438,7 +438,7 @@ dependencies = [ "futures-util", "pin-project 1.0.12", "rustc_version 0.4.0", - "tokio 1.23.0", + "tokio 1.24.2", "wasm-bindgen-futures", ] @@ -464,9 +464,9 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "attohttpc" @@ -499,9 +499,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.1" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" +checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc" dependencies = [ "async-trait", "axum-core", @@ -528,9 +528,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.0" +version = "0.3.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" +checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" dependencies = [ "async-trait", "bytes 1.3.0", @@ -647,6 +647,7 @@ dependencies = [ "cc", "cfg-if 1.0.0", "constant_time_eq", + "digest 0.10.6", ] [[package]] @@ -731,9 +732,9 @@ checksum = "cc12a55e9bd3840279c248c96ecf541d5ba98d6654e08869fe167121384a582c" [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-slice-cast" @@ -782,15 +783,15 @@ checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "capnp" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afaa14ddcf4553e700608c1c0ee3ca1f4cf673470462b99ff6dd6bedcdb6c6ce" +checksum = "35400c6acb55f1a91e6843beca189aba6bccd3c11fae5a7c0288fe5a1c3da822" [[package]] name = "capnp-futures" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addd5d5f64da51c84060b760cc1a39b5de0de8b67f254c38e3e4889d9dcf9137" +checksum = "6cbe2479d667c6d44219a07d3da971379e4321ea7343b319994695be7da97a17" dependencies = [ "capnp", "futures", @@ -798,9 +799,9 @@ dependencies = [ [[package]] name = "capnp-rpc" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b9b0311f48c3fba1cca45dcddb6f922ca544c9d9d5151f1096afd8a9d7bc31" +checksum = "62097700b1da1dd567d77e91ddd7da0fd22c790599ce56c4eea346d60dff0acd" dependencies = [ "capnp", "capnp-futures", @@ -809,9 +810,9 @@ dependencies = [ [[package]] name = "capnpc" -version = "0.15.2" +version = 
"0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476b328e8298e5454f9d72b53a15da8d8725e572bc3d43e4e4cdb77a49093ee9" +checksum = "74147d35b0920efb5d676f49c7b4c6f643eb231a3597a1ca82af3b94cc29841c" dependencies = [ "capnp", ] @@ -824,9 +825,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cesu8" @@ -849,7 +850,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.2", + "nom 7.1.3", ] [[package]] @@ -1055,9 +1056,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ "crossbeam-utils", ] @@ -1071,7 +1072,7 @@ dependencies = [ "async-trait", "json5", "lazy_static", - "nom 7.1.2", + "nom 7.1.3", "pathdiff", "ron", "rust-ini", @@ -1109,7 +1110,7 @@ dependencies = [ "serde", "serde_json", "thread_local", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-stream", "tonic", "tracing", @@ -1345,7 +1346,7 @@ version = "3.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" dependencies = [ - "nix 0.26.1", + "nix 0.26.2", "windows-sys 0.42.0", ] @@ -1359,7 +1360,7 @@ checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" name = "cursive" version = "0.20.0" dependencies = [ - "ahash 
0.8.2", + "ahash 0.8.3", "async-std", "cfg-if 1.0.0", "crossbeam-channel", @@ -1369,7 +1370,7 @@ dependencies = [ "libc", "log", "signal-hook", - "tokio 1.23.0", + "tokio 1.24.2", "unicode-segmentation", "unicode-width", ] @@ -1404,7 +1405,7 @@ dependencies = [ name = "cursive_core" version = "0.3.5" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "ansi-parser", "async-std", "crossbeam-channel", @@ -1415,7 +1416,7 @@ dependencies = [ "num", "owning_ref", "time 0.3.17", - "tokio 1.23.0", + "tokio 1.24.2", "toml", "unicode-segmentation", "unicode-width", @@ -1459,9 +1460,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" +checksum = "322296e2f2e5af4270b54df9e85a02ff037e271af20ba3e7fe1575515dc840b8" dependencies = [ "cc", "cxxbridge-flags", @@ -1471,9 +1472,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" +checksum = "017a1385b05d631e7875b1f151c9f012d37b53491e2a87f65bff5c262b2111d8" dependencies = [ "cc", "codespan-reporting", @@ -1486,15 +1487,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" +checksum = "c26bbb078acf09bc1ecda02d4223f03bdd28bd4874edcb0379138efc499ce971" [[package]] name = "cxxbridge-macro" -version = "1.0.85" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" +checksum = "357f40d1f06a24b60ae1fe122542c1fb05d28d32acb2aed064e84bc2ad1e252e" dependencies = [ "proc-macro2", "quote", @@ 
-1590,7 +1591,7 @@ dependencies = [ "hashbrown", "lock_api", "once_cell", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.6", ] [[package]] @@ -1627,6 +1628,7 @@ checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", "crypto-common", + "subtle", ] [[package]] @@ -1663,9 +1665,9 @@ checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ "signature", ] @@ -1679,15 +1681,15 @@ dependencies = [ "curve25519-dalek", "ed25519", "rand 0.7.3", - "sha2", + "sha2 0.9.9", "zeroize", ] [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "enum-as-inner" @@ -2181,21 +2183,21 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"98c4a8d6391675c6b2ee1a6c8d06e8e2d03605c44cec1270675985a4c2a5500b" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -2260,7 +2262,7 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-util", "tracing", ] @@ -2315,7 +2317,7 @@ dependencies = [ "base64 0.13.1", "byteorder", "flate2", - "nom 7.1.2", + "nom 7.1.3", "num-traits", ] @@ -2456,7 +2458,7 @@ dependencies = [ "itoa", "pin-project-lite 0.2.9", "socket2", - "tokio 1.23.0", + "tokio 1.24.2", "tower-service", "tracing", "want", @@ -2470,7 +2472,7 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-io-timeout", ] @@ -2549,7 +2551,7 @@ dependencies = [ "simplelog 0.9.0", "tokio 0.2.25", "tokio 0.3.7", - "tokio 1.23.0", + "tokio 1.24.2", "url", "xmltree", ] @@ -2661,9 +2663,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" @@ -2796,7 +2798,7 @@ dependencies = [ "keyvaluedb", "keyvaluedb-shared-tests", "parking_lot 0.12.1", - "tokio 1.23.0", + "tokio 1.24.2", "wasm-bindgen-futures", "wasm-bindgen-test", ] @@ -2827,7 +2829,7 @@ dependencies = [ "rusqlite", "sysinfo", "tempfile", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -2977,9 +2979,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" @@ 
-3183,7 +3185,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c" dependencies = [ "darling 0.13.4", - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3259,7 +3261,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -3271,7 +3273,7 @@ dependencies = [ "futures", "libc", "log", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -3289,9 +3291,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3323,14 +3325,23 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] +[[package]] +name = "nom8" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" +dependencies = [ + "memchr", +] + [[package]] name = "ntapi" version = "0.3.7" @@ -3386,9 +3397,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", ] @@ -3447,20 +3458,20 @@ dependencies = [ [[package]] name = "num_enum" -version = 
"0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" +checksum = "8d829733185c1ca374f17e52b762f24f535ec625d2cc1f070e34c8a9068f341b" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" +checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e" dependencies = [ - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3477,9 +3488,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] @@ -3528,7 +3539,7 @@ dependencies = [ "prost", "protobuf", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tonic", ] @@ -3592,7 +3603,7 @@ dependencies = [ "percent-encoding", "rand 0.8.5", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-stream", ] @@ -3660,9 +3671,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" +checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" dependencies = [ "arrayvec", "bitvec", @@ -3674,11 +3685,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +checksum = 
"86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", @@ -3708,7 +3719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.6", ] [[package]] @@ -3727,9 +3738,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" dependencies = [ "cfg-if 1.0.0", "libc", @@ -3764,9 +3775,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" +checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f" dependencies = [ "thiserror", "ucd-trie", @@ -3774,9 +3785,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96504449aa860c8dcde14f9fba5c58dc6658688ca1fe363589d6327b8662c603" +checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea" dependencies = [ "pest", "pest_generator", @@ -3784,9 +3795,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "798e0220d1111ae63d66cb66a5dcb3fc2d986d520b98e49e1852bfdb11d7c5e7" +checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f" dependencies = [ "pest", "pest_meta", @@ 
-3797,13 +3808,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "984298b75898e30a843e278a9f2452c31e349a073a0ce6fd950a12a74464e065" +checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d" dependencies = [ "once_cell", "pest", - "sha1 0.10.5", + "sha2 0.10.6", ] [[package]] @@ -3951,9 +3962,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c8992a85d8e93a28bdf76137db888d3874e3b230dee5ed8bebac4c9f7617773" +checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" dependencies = [ "proc-macro2", "syn", @@ -3983,13 +3994,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" dependencies = [ "once_cell", - "thiserror", - "toml", + "toml_edit", ] [[package]] @@ -4024,18 +4034,18 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" +checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" 
dependencies = [ "bytes 1.3.0", "prost-derive", @@ -4043,9 +4053,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" +checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" dependencies = [ "bytes 1.3.0", "heck", @@ -4065,9 +4075,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" +checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" dependencies = [ "anyhow", "itertools", @@ -4078,9 +4088,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" +checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" dependencies = [ "bytes 1.3.0", "prost", @@ -4225,9 +4235,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -4257,9 +4267,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -4391,7 +4401,7 @@ dependencies = [ "netlink-proto", "nix 
0.22.3", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -4572,7 +4582,7 @@ dependencies = [ "num", "rand 0.8.5", "serde", - "sha2", + "sha2 0.9.9", "zbus", "zbus_macros", "zvariant", @@ -4581,9 +4591,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation 0.9.3", @@ -4594,9 +4604,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys 0.8.3", "libc", @@ -4707,9 +4717,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.16" +version = "0.9.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b5b431e8907b50339b51223b97d102db8d987ced36f6e4d03621db9316c834" +checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567" dependencies = [ "indexmap", "itoa", @@ -4795,6 +4805,17 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -5073,9 +5094,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.27.2" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17351d0e9eb8841897b14e9669378f3c69fb57779cc04f8ca9a9d512edfb2563" 
+checksum = "975fe381e0ecba475d4acff52466906d95b153a40324956552e027b2a9eaa89e" dependencies = [ "cfg-if 1.0.0", "core-foundation-sys 0.8.3", @@ -5301,9 +5322,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.23.0" +version = "1.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" dependencies = [ "autocfg", "bytes 1.3.0", @@ -5327,7 +5348,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -5349,7 +5370,7 @@ checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.23.0", + "tokio 1.24.2", ] [[package]] @@ -5363,19 +5384,36 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.23.0", + "tokio 1.24.2", "tracing", ] [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" + +[[package]] +name = "toml_edit" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +dependencies = [ + "indexmap", + "nom8", + "toml_datetime", +] + [[package]] name = "tonic" version = "0.8.3" @@ -5398,7 +5436,7 @@ dependencies = [ 
"pin-project 1.0.12", "prost", "prost-derive", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-stream", "tokio-util", "tower", @@ -5434,7 +5472,7 @@ dependencies = [ "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-util", "tower-layer", "tower-service", @@ -5638,7 +5676,7 @@ dependencies = [ "smallvec", "thiserror", "tinyvec", - "tokio 1.23.0", + "tokio 1.24.2", "tracing", "url", ] @@ -5658,16 +5696,16 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tracing", "trust-dns-proto", ] [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -5733,9 +5771,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" @@ -5872,7 +5910,7 @@ dependencies = [ "serde_derive", "serial_test", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-util", "veilid-core", ] @@ -5928,7 +5966,7 @@ dependencies = [ "maplit", "ndk", "ndk-glue", - "nix 0.26.1", + "nix 0.26.2", "once_cell", "owning_ref", "owo-colors", @@ -5951,7 +5989,7 @@ dependencies = [ "static_assertions", "stop-token", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-stream", "tokio-util", "tracing", @@ -5965,6 +6003,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-bindgen-test", "wasm-logger", + "weak-table", "web-sys", "webpki 0.22.0", "webpki-roots 0.22.6", @@ -5996,7 +6035,7 @@ dependencies = [ "parking_lot 0.12.1", "serde", "serde_json", - "tokio 1.23.0", + "tokio 
1.24.2", "tokio-stream", "tokio-util", "tracing", @@ -6030,7 +6069,7 @@ dependencies = [ "hostname", "json", "lazy_static", - "nix 0.26.1", + "nix 0.26.2", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -6043,7 +6082,7 @@ dependencies = [ "signal-hook", "signal-hook-async-std", "stop-token", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-stream", "tokio-util", "tracing", @@ -6078,7 +6117,7 @@ dependencies = [ "maplit", "ndk", "ndk-glue", - "nix 0.26.1", + "nix 0.26.2", "once_cell", "oslog", "owo-colors", @@ -6092,7 +6131,7 @@ dependencies = [ "static_assertions", "stop-token", "thiserror", - "tokio 1.23.0", + "tokio 1.24.2", "tokio-util", "tracing", "tracing-oslog", @@ -6289,6 +6328,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "weak-table" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" + [[package]] name = "web-sys" version = "0.3.60" @@ -6360,9 +6405,9 @@ dependencies = [ [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -6485,19 +6530,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc 0.42.1", + "windows_i686_gnu 0.42.1", + "windows_i686_msvc 0.42.1", + "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = 
"0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_msvc" @@ -6513,9 +6558,9 @@ checksum = "b12add87e2fb192fff3f4f7e4342b3694785d79f3a64e2c20d5ceb5ccbcfc3cd" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_i686_gnu" @@ -6531,9 +6576,9 @@ checksum = "4c98f2db372c23965c5e0f43896a8f0316dc0fbe48d1aa65bea9bdd295d43c15" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" @@ -6549,9 +6594,9 @@ checksum = "cdf0569be0f2863ab6a12a6ba841fcfa7d107cbc7545a3ebd57685330db0a3ff" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" @@ -6567,15 +6612,15 @@ checksum = "905858262c8380a36f32cb8c1990d7e7c3b7a8170e58ed9a98ca6d940b7ea9f1" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = 
"c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" @@ -6591,9 +6636,9 @@ checksum = "890c3c6341d441ffb38f705f47196e3665dc6dd79f6d72fa185d937326730561" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -6759,7 +6804,7 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4ca5e22593eb4212382d60d26350065bf2a02c34b85bc850474a74b589a3de9" dependencies = [ - "proc-macro-crate 1.2.1", + "proc-macro-crate 1.3.0", "proc-macro2", "quote", "syn", diff --git a/Earthfile b/Earthfile index fae4e054..f39414de 100644 --- a/Earthfile +++ b/Earthfile @@ -54,7 +54,8 @@ deps-android: RUN mkdir /Android; mkdir /Android/Sdk RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip RUN cd /Android; unzip /Android/cmdline-tools.zip - RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33 + RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33 cmdline-tools\;latest + RUN rm -rf /Android/cmdline-tools RUN apt-get clean # Just linux build not android diff --git a/README.md b/README.md index 1dc9bd17..ee9ad764 100644 --- a/README.md +++ 
b/README.md @@ -35,6 +35,13 @@ command line without it. If you do so, you may skip to #### Setup Dependencies using the CLI + +You can automatically install the prerequisites using this script: + +```shell +./install_linux_prerequisites.sh +``` + Otherwise, you may choose to use Android `sdkmanager`. Follow the installation instructions for `sdkmanager` [here](https://developer.android.com/studio/command-line/sdkmanager), then use diff --git a/doc/config/sample.config b/doc/config/sample.config index 8c939d13..a7dc1033 100644 --- a/doc/config/sample.config +++ b/doc/config/sample.config @@ -50,7 +50,6 @@ core: node_id: '' node_id_secret: '' bootstrap: ['bootstrap.dev.veilid.net'] - bootstrap_nodes: [] routing_table: limit_over_attached: 64 limit_fully_attached: 32 diff --git a/doc/config/veilid-server-config.md b/doc/config/veilid-server-config.md index 8fbe74fe..525ce32d 100644 --- a/doc/config/veilid-server-config.md +++ b/doc/config/veilid-server-config.md @@ -14,14 +14,14 @@ and the `veilid-server.conf` file. 
## Global Directives -| Directive | Description | -|-------------------------------|-----------------------------------------| -| [daemon](#daemon) | Run `veilid-server` in the background | -| [client\_api](#client_api) || -| [auto\_attach](#auto_attach) || -| [logging](#logging) || -| [testing](#testing) || -| [core](#core) || +| Directive | Description | +| ---------------------------- | ------------------------------------- | +| [daemon](#daemon) | Run `veilid-server` in the background | +| [client\_api](#client_api) | | +| [auto\_attach](#auto_attach) | | +| [logging](#logging) | | +| [testing](#testing) | | +| [core](#core) | | ### daemon @@ -39,10 +39,10 @@ client_api: listen_address: 'localhost:5959' ``` -| Parameter | Description | -|-----------------------------------------------|-------------| -| [enabled](#client_apienabled) || -| [listen\_address](#client_apilisten_address) || +| Parameter | Description | +| -------------------------------------------- | ----------- | +| [enabled](#client_apienabled) | | +| [listen\_address](#client_apilisten_address) | | #### client\_api:enabled @@ -82,13 +82,13 @@ logging: grpc_endpoint: 'localhost:4317' ``` -| Parameter | Description | -|-------------------------------|-------------| -| [system](#loggingsystem) || -| [terminal](#loggingterminal) || -| [file](#loggingfile) || -| [api](#loggingapi) || -| [otlp](#loggingotlp) || +| Parameter | Description | +| ---------------------------- | ----------- | +| [system](#loggingsystem) | | +| [terminal](#loggingterminal) | | +| [file](#loggingfile) | | +| [api](#loggingapi) | | +| [otlp](#loggingotlp) | | #### logging:system @@ -142,12 +142,12 @@ testing: ### core -| Parameter | Description | -|-------------------------------------------|-------------| -| [protected\_store](#coreprotected_store) || -| [table\_store](#coretable_store) || -| [block\_store](#block_store) || -| [network](#corenetwork) || +| Parameter | Description | +| ---------------------------------------- | 
----------- | +| [protected\_store](#coreprotected_store) | | +| [table\_store](#coretable_store) | | +| [block\_store](#block_store) | | +| [network](#corenetwork) | | #### core:protected\_store @@ -191,7 +191,6 @@ network: node_id: '' node_id_secret: '' bootstrap: ['bootstrap.dev.veilid.net'] - bootstrap_nodes: [] upnp: true detect_address_changes: true enable_local_peer_scope: false @@ -199,13 +198,13 @@ network: ``` | Parameter | Description | -|---------------------------------------------|-------------| -| [routing\_table](#corenetworkrouting_table) || -| [rpc](#corenetworkrpc) || -| [dht](#corenetworkdht) || -| [tls](#corenetworktls) || -| [application](#corenetworkapplication) || -| [protocol](#corenetworkprotocol) || +| ------------------------------------------- | ----------- | +| [routing\_table](#corenetworkrouting_table) | | +| [rpc](#corenetworkrpc) | | +| [dht](#corenetworkdht) | | +| [tls](#corenetworktls) | | +| [application](#corenetworkapplication) | | +| [protocol](#corenetworkprotocol) | | #### core:network:routing\_table diff --git a/external/hashlink b/external/hashlink index a089b448..ddab4623 160000 --- a/external/hashlink +++ b/external/hashlink @@ -1 +1 @@ -Subproject commit a089b448071ef36633947693b90023c67dc8485f +Subproject commit ddab4623e19a8b3e9e3ee48be999908eebc54301 diff --git a/install_linux_prerequisites.sh b/install_linux_prerequisites.sh new file mode 100755 index 00000000..202ddf50 --- /dev/null +++ b/install_linux_prerequisites.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -eo pipefail + +if [ $(id -u) -eq 0 ]; then + echo "Don't run this as root" + exit +fi + +# Install APT dependencies +sudo apt update -y +sudo apt install -y openjdk-11-jdk-headless iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip llvm wabt checkinstall + +# Install Rust +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y -c 
clippy --profile default +source "$HOME/.cargo/env" + +# Install Android SDK +mkdir $HOME/Android; mkdir $HOME/Android/Sdk +curl -o $HOME/Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip +cd $HOME/Android; unzip $HOME/Android/cmdline-tools.zip +$HOME/Android/cmdline-tools/bin/sdkmanager --sdk_root=$HOME/Android/Sdk build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33 cmdline-tools\;latest emulator +cd $HOME +rm -rf $HOME/Android/cmdline-tools $HOME/Android/cmdline-tools.zip + +# Add environment variables +cat >> $HOME/.profile < self.comproc.update_shutdown(), + VeilidUpdate::ValueChange(value_change) => { + self.comproc.update_value_change(value_change); + } } Promise::ok(()) @@ -192,45 +195,55 @@ impl ClientApiConnection { let rpc_jh = spawn_local(rpc_system); - // Send the request and get the state object and the registration object - let response = request - .send() - .promise - .await - .map_err(|e| format!("failed to send register request: {}", e))?; - let response = response - .get() - .map_err(|e| format!("failed to get register response: {}", e))?; + let reg_res: Result = (async { + // Send the request and get the state object and the registration object + let response = request + .send() + .promise + .await + .map_err(|e| format!("failed to send register request: {}", e))?; + let response = response + .get() + .map_err(|e| format!("failed to get register response: {}", e))?; - // Get the registration object, which drops our connection when it is dropped - let _registration = response - .get_registration() - .map_err(|e| format!("failed to get registration object: {}", e))?; + // Get the registration object, which drops our connection when it is dropped + let registration = response + .get_registration() + .map_err(|e| format!("failed to get registration object: {}", e))?; - // Get the initial veilid state - let veilid_state = response - .get_state() - 
.map_err(|e| format!("failed to get initial veilid state: {}", e))?; + // Get the initial veilid state + let veilid_state = response + .get_state() + .map_err(|e| format!("failed to get initial veilid state: {}", e))?; - // Set up our state for the first time - let veilid_state: VeilidState = deserialize_json(veilid_state) - .map_err(|e| format!("failed to get deserialize veilid state: {}", e))?; - self.process_veilid_state(veilid_state).await?; + // Set up our state for the first time + let veilid_state: VeilidState = deserialize_json(veilid_state) + .map_err(|e| format!("failed to get deserialize veilid state: {}", e))?; + self.process_veilid_state(veilid_state).await?; - // Save server settings - let server_settings = response - .get_settings() - .map_err(|e| format!("failed to get initial veilid server settings: {}", e))? - .to_owned(); - self.inner.borrow_mut().server_settings = Some(server_settings.clone()); + // Save server settings + let server_settings = response + .get_settings() + .map_err(|e| format!("failed to get initial veilid server settings: {}", e))? 
+ .to_owned(); + self.inner.borrow_mut().server_settings = Some(server_settings.clone()); - // Don't drop the registration, doing so will remove the client - // object mapping from the server which we need for the update backchannel + // Don't drop the registration, doing so will remove the client + // object mapping from the server which we need for the update backchannel + Ok(registration) + }) + .await; + + let _registration = match reg_res { + Ok(v) => v, + Err(e) => { + rpc_jh.abort().await; + return Err(e); + } + }; // Wait until rpc system completion or disconnect was requested let res = rpc_jh.await; - // #[cfg(feature = "rt-tokio")] - // let res = res.map_err(|e| format!("join error: {}", e))?; res.map_err(|e| format!("client RPC system error: {}", e)) } diff --git a/veilid-cli/src/command_processor.rs b/veilid-cli/src/command_processor.rs index 8027eb92..2e32f584 100644 --- a/veilid-cli/src/command_processor.rs +++ b/veilid-cli/src/command_processor.rs @@ -424,6 +424,10 @@ reply - reply to an AppCall not handled directly by the server self.inner().ui.add_node_event(out); } } + pub fn update_value_change(&mut self, value_change: veilid_core::VeilidValueChange) { + let out = format!("Value change: {:?}", value_change); + self.inner().ui.add_node_event(out); + } pub fn update_log(&mut self, log: veilid_core::VeilidLog) { self.inner().ui.add_node_event(format!( diff --git a/veilid-cli/src/peers_table_view.rs b/veilid-cli/src/peers_table_view.rs index 870eaea5..82593c71 100644 --- a/veilid-cli/src/peers_table_view.rs +++ b/veilid-cli/src/peers_table_view.rs @@ -50,7 +50,11 @@ fn format_bps(bps: ByteCount) -> String { impl TableViewItem for PeerTableData { fn to_column(&self, column: PeerTableColumn) -> String { match column { - PeerTableColumn::NodeId => self.node_id.encode(), + PeerTableColumn::NodeId => self + .node_ids + .best() + .map(|n| n.value.encode()) + .unwrap_or_else(|| "???".to_owned()), PeerTableColumn::Address => format!( "{:?}:{}", 
self.peer_address.protocol_type(), @@ -74,7 +78,21 @@ impl TableViewItem for PeerTableData { Self: Sized, { match column { - PeerTableColumn::NodeId => self.node_id.cmp(&other.node_id), + PeerTableColumn::NodeId => { + let n1 = self + .node_ids + .best() + .map(|n| n.value.encode()) + .unwrap_or_else(|| "???".to_owned()); + + let n2 = other + .node_ids + .best() + .map(|n| n.value.encode()) + .unwrap_or_else(|| "???".to_owned()); + + n1.cmp(&n2) + } PeerTableColumn::Address => self.to_column(column).cmp(&other.to_column(column)), PeerTableColumn::LatencyAvg => self .peer_stats diff --git a/veilid-cli/src/ui.rs b/veilid-cli/src/ui.rs index 71b67a7f..793e71cf 100644 --- a/veilid-cli/src/ui.rs +++ b/veilid-cli/src/ui.rs @@ -881,13 +881,11 @@ impl UI { } pub fn set_config(&mut self, config: VeilidConfigInner) { let mut inner = self.inner.borrow_mut(); - inner.ui_state.node_id.set( - config - .network - .node_id - .map(|x| x.encode()) - .unwrap_or("".to_owned()), - ); + + inner + .ui_state + .node_id + .set(config.network.routing_table.node_id.to_string()); } pub fn set_connection_state(&mut self, state: ConnectionState) { let mut inner = self.inner.borrow_mut(); diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index 78fccd45..b293390d 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -10,12 +10,14 @@ license = "LGPL-2.0-or-later OR MPL-2.0 OR (MIT AND BSD-3-Clause)" crate-type = ["cdylib", "staticlib", "rlib"] [features] -default = [] -rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std" ] -rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio" ] +default = [ "enable-crypto-vld0" ] +enable-crypto-vld0 = [] +enable-crypto-none = [] +rt-async-std = ["async-std", "async-std-resolver", 
"async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std"] +rt-tokio = ["tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio"] -veilid_core_android_tests = [ "dep:paranoid-android" ] -veilid_core_ios_tests = [ "dep:tracing-oslog" ] +veilid_core_android_tests = ["dep:paranoid-android"] +veilid_core_ios_tests = ["dep:tracing-oslog"] tracking = [] [dependencies] @@ -55,8 +57,8 @@ curve25519-dalek = { package = "curve25519-dalek-ng", version = "^4", default_fe # ed25519-dalek needs rand 0.7 until it updates itself rand = "0.7" # curve25519-dalek-ng is stuck on digest 0.9.0 -blake3 = { version = "1.1.0", default_features = false } digest = "0.9.0" +blake3 = { version = "1.1.0" } rtnetlink = { version = "^0", default-features = false, optional = true } async-std-resolver = { version = "^0", optional = true } trust-dns-resolver = { version = "^0", optional = true } @@ -65,6 +67,7 @@ keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" } rkyv = { git = "https://github.com/rkyv/rkyv.git", rev = "57e2a8d", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] } bytecheck = "^0" data-encoding = { version = "^2" } +weak-table = "0.3.2" # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android diff --git a/veilid-core/proto/veilid.capnp b/veilid-core/proto/veilid.capnp index f38acab9..5474d11a 100644 --- a/veilid-core/proto/veilid.capnp +++ b/veilid-core/proto/veilid.capnp @@ -27,13 +27,24 @@ struct Nonce24 @0xb6260db25d8d7dfc { u2 @2 :UInt64; } -using NodeID = Key256; -using RoutePublicKey = Key256; -using ValueID = Key256; -using Nonce = Nonce24; -using Signature = Signature512; -using BlockID = Key256; -using TunnelID = UInt64; +using PublicKey = Key256; # Node id / DHT key / Route id, etc +using Nonce = Nonce24; # One-time encryption 
nonce +using Signature = Signature512; # Signature block +using TunnelID = UInt64; # Id for tunnels +using CryptoKind = UInt32; # FOURCC code for cryptography type +using ValueSeqNum = UInt32; # sequence numbers for values +using ValueSchema = UInt32; # FOURCC code for schema (0 = freeform, SUB0 = subkey control v0) +using Subkey = UInt32; # subkey index for dht + +struct TypedKey @0xe2d567a9f1e61b29 { + kind @0 :CryptoKind; + key @1 :PublicKey; +} + +struct TypedSignature @0x963170c7298e3884 { + kind @0 :CryptoKind; + signature @1 :Signature; +} # Node Dial Info ################################################################ @@ -123,7 +134,7 @@ struct RouteHopData @0x8ce231f9d1b7adf2 { struct RouteHop @0xf8f672d75cce0c3b { node :union { - nodeId @0 :NodeID; # node id only for established routes + nodeId @0 :PublicKey; # node id key only for established routes (kind is the same as the pr or sr it is part of) peerInfo @1 :PeerInfo; # full peer info for this hop to establish the route } nextHop @2 :RouteHopData; # optional: If this the end of a private route, this field will not exist @@ -131,7 +142,7 @@ struct RouteHop @0xf8f672d75cce0c3b { } struct PrivateRoute @0x8a83fccb0851e776 { - publicKey @0 :RoutePublicKey; # private route public key (unique per private route) + publicKey @0 :TypedKey; # private route public key (unique per private route) hopCount @1 :UInt8; # Count of hops left in the private route (for timeout calculation purposes only) hops :union { firstHop @2 :RouteHop; # first hop of a private route is unencrypted (hopcount > 0) @@ -141,7 +152,7 @@ struct PrivateRoute @0x8a83fccb0851e776 { } struct SafetyRoute @0xf554734d07cb5d59 { - publicKey @0 :RoutePublicKey; # safety route public key (unique per safety route) + publicKey @0 :TypedKey; # safety route public key (unique per safety route) hopCount @1 :UInt8; # Count of hops left in the safety route (for timeout calculation purposes only) hops :union { data @2 :RouteHopData; # safety route has more 
hops @@ -149,21 +160,6 @@ struct SafetyRoute @0xf554734d07cb5d59 { } } -# Values -############################## - -using ValueSeqNum = UInt32; # sequence numbers for values - -struct ValueKey @0xe64b0992c21a0736 { - publicKey @0 :ValueID; # the location of the value - subkey @1 :Text; # the name of the subkey (or empty for the default subkey) -} - -struct ValueData @0xb4b7416f169f2a3d { - seq @0 :ValueSeqNum; # sequence number of value - data @1 :Data; # value or subvalue contents -} - # Operations ############################## @@ -234,23 +230,23 @@ struct NodeInfo @0xe125d847e3f9f419 { networkClass @0 :NetworkClass; # network class of this node outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound addressTypes @2 :AddressTypeSet; # address types supported - minVersion @3 :UInt8; # minimum protocol version for rpc - maxVersion @4 :UInt8; # maximum protocol version for rpc + envelopeSupport @3 :List(UInt8); # supported rpc envelope/receipt versions + cryptoSupport @4 :List(CryptoKind); # cryptography systems supported dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node } struct SignedDirectNodeInfo @0xe0e7ea3e893a3dd7 { nodeInfo @0 :NodeInfo; # node info timestamp @1 :UInt64; # when signed node info was generated - signature @2 :Signature; # signature + signatures @2 :List(TypedSignature); # signatures } struct SignedRelayedNodeInfo @0xb39e8428ccd87cbb { nodeInfo @0 :NodeInfo; # node info - relayId @1 :NodeID; # node id for relay + relayIds @1 :List(TypedKey); # node ids for relay relayInfo @2 :SignedDirectNodeInfo; # signed node info for relay timestamp @3 :UInt64; # when signed node info was generated - signature @4 :Signature; # signature + signatures @4 :List(TypedSignature); # signatures } struct SignedNodeInfo @0xd2478ce5f593406a { @@ -261,16 +257,15 @@ struct SignedNodeInfo @0xd2478ce5f593406a { } struct PeerInfo @0xfe2d722d5d3c4bcb { - nodeId @0 :NodeID; # node id for 'closer peer' + nodeIds @0 
:List(TypedKey); # node ids for 'closer peer' signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer' } struct RoutedOperation @0xcbcb8535b839e9dd { - version @0 :UInt8; # crypto version in use for the data - sequencing @1 :Sequencing; # sequencing preference to use to pass the message along - signatures @2 :List(Signature); # signatures from nodes that have handled the private route - nonce @3 :Nonce; # nonce Xmsg - data @4 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr)) + sequencing @0 :Sequencing; # sequencing preference to use to pass the message along + signatures @1 :List(Signature); # signatures from nodes that have handled the private route + nonce @2 :Nonce; # nonce Xmsg + data @3 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr)) } struct OperationStatusQ @0x865d80cea70d884a { @@ -293,7 +288,7 @@ struct OperationReturnReceipt @0xeb0fb5b5a9160eeb { } struct OperationFindNodeQ @0xfdef788fe9623bcd { - nodeId @0 :NodeID; # node id to locate + nodeId @0 :TypedKey; # node id to locate } struct OperationFindNodeA @0xa84cf2fb40c77089 { @@ -301,24 +296,36 @@ struct OperationFindNodeA @0xa84cf2fb40c77089 { } struct OperationRoute @0x96741859ce6ac7dd { - safetyRoute @0 :SafetyRoute; # Where this should go - operation @1 :RoutedOperation; # The operation to be routed + safetyRoute @0 :SafetyRoute; # where this should go + operation @1 :RoutedOperation; # the operation to be routed } struct OperationAppCallQ @0xade67b9f09784507 { - message @0 :Data; # Opaque request to application + message @0 :Data; # opaque request to application } struct OperationAppCallA @0xf7c797ac85f214b8 { - message @0 :Data; # Opaque response from application + message @0 :Data; # opaque response from application } struct OperationAppMessage @0x9baf542d81b411f5 { - message @0 :Data; # Opaque message to application + message @0 :Data; # opaque message to application +} + +struct SubkeyRange { + start @0 :Subkey; # the start of a subkey range + end @1 
:Subkey; # the end of a subkey range +} + +struct ValueData @0xb4b7416f169f2a3d { + seq @0 :ValueSeqNum; # sequence number of value + schema @1 :ValueSchema; # fourcc code of schema for value + data @2 :Data; # value or subvalue contents } struct OperationGetValueQ @0xf88a5b6da5eda5d0 { - key @0 :ValueKey; # key for value to get + key @0 :TypedKey; # the location of the value + subkey @1 :Subkey; # the index of the subkey (0 for the default subkey) } struct OperationGetValueA @0xd896bb46f2e0249f { @@ -329,8 +336,9 @@ struct OperationGetValueA @0xd896bb46f2e0249f { } struct OperationSetValueQ @0xbac06191ff8bdbc5 { - key @0 :ValueKey; # key for value to update - value @1 :ValueData; # value or subvalue contents (older or equal seq number gets dropped) + key @0 :TypedKey; # the location of the value + subkey @1 :Subkey; # the index of the subkey (0 for the default subkey) + value @2 :ValueData; # value or subvalue contents (older or equal seq number gets dropped) } struct OperationSetValueA @0x9378d0732dc95be2 { @@ -341,7 +349,10 @@ struct OperationSetValueA @0x9378d0732dc95be2 { } struct OperationWatchValueQ @0xf9a5a6c547b9b228 { - key @0 :ValueKey; # key for value to watch + key @0 :TypedKey; # key for value to watch + subkeys @1 :List(SubkeyRange); # subkey range to watch, if empty, watch everything + expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (can be return less, 0 for max) + count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous) } struct OperationWatchValueA @0xa726cab7064ba893 { @@ -350,12 +361,14 @@ struct OperationWatchValueA @0xa726cab7064ba893 { } struct OperationValueChanged @0xd1c59ebdd8cc1bf6 { - key @0 :ValueKey; # key for value that changed - value @1 :ValueData; # value or subvalue contents with sequence number + key @0 :TypedKey; # key for value that changed + subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 
ranges at a time) + count @2 :UInt32; # remaining changes left (0 means watch has expired) + value @3 :ValueData; # first value that changed (the rest can be gotten with getvalue) } struct OperationSupplyBlockQ @0xadbf4c542d749971 { - blockId @0 :BlockID; # hash of the block we can supply + blockId @0 :TypedKey; # hash of the block we can supply } struct OperationSupplyBlockA @0xf003822e83b5c0d7 { @@ -366,7 +379,7 @@ struct OperationSupplyBlockA @0xf003822e83b5c0d7 { } struct OperationFindBlockQ @0xaf4353ff004c7156 { - blockId @0 :BlockID; # hash of the block to locate + blockId @0 :TypedKey; # hash of the block to locate } struct OperationFindBlockA @0xc51455bc4915465d { @@ -516,7 +529,7 @@ struct Answer @0xacacb8b6988c1058 { struct Operation @0xbf2811c435403c3b { opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks. - senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver. + senderPeerInfo @1 :PeerInfo; # (optional) PeerInfo for the sender to be cached by the receiver. 
targetNodeInfoTs @2 :UInt64; # Timestamp the sender believes the target's node info to be at or zero if not sent kind :union { question @3 :Question; diff --git a/veilid-core/src/core_context.rs b/veilid-core/src/core_context.rs index c8d7a434..49b115a7 100644 --- a/veilid-core/src/core_context.rs +++ b/veilid-core/src/core_context.rs @@ -67,12 +67,6 @@ impl ServicesContext { } self.protected_store = Some(protected_store.clone()); - // Init node id from config now that protected store is set up - if let Err(e) = self.config.init_node_id(protected_store.clone()).await { - self.shutdown().await; - return Err(e).wrap_err("init node id failed"); - } - // Set up tablestore trace!("init table store"); let table_store = TableStore::new(self.config.clone()); @@ -84,7 +78,11 @@ impl ServicesContext { // Set up crypto trace!("init crypto"); - let crypto = Crypto::new(self.config.clone(), table_store.clone()); + let crypto = Crypto::new( + self.config.clone(), + table_store.clone(), + protected_store.clone(), + ); if let Err(e) = crypto.init().await { self.shutdown().await; return Err(e); diff --git a/veilid-core/src/crypto/byte_array_types.rs b/veilid-core/src/crypto/byte_array_types.rs new file mode 100644 index 00000000..d8c7bc77 --- /dev/null +++ b/veilid-core/src/crypto/byte_array_types.rs @@ -0,0 +1,266 @@ +use super::*; + +use core::cmp::{Eq, Ord, PartialEq, PartialOrd}; +use core::convert::{TryFrom, TryInto}; +use core::fmt; +use core::hash::Hash; + +use data_encoding::BASE64URL_NOPAD; + +use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; + +////////////////////////////////////////////////////////////////////// + +/// Length of a public key in bytes +#[allow(dead_code)] +pub const PUBLIC_KEY_LENGTH: usize = 32; +/// Length of a public key in bytes after encoding to base64url +#[allow(dead_code)] +pub const PUBLIC_KEY_LENGTH_ENCODED: usize = 43; +/// Length of a secret key in bytes +#[allow(dead_code)] +pub const 
SECRET_KEY_LENGTH: usize = 32; +/// Length of a secret key in bytes after encoding to base64url +#[allow(dead_code)] +pub const SECRET_KEY_LENGTH_ENCODED: usize = 43; +/// Length of a signature in bytes +#[allow(dead_code)] +pub const SIGNATURE_LENGTH: usize = 64; +/// Length of a signature in bytes after encoding to base64url +#[allow(dead_code)] +pub const SIGNATURE_LENGTH_ENCODED: usize = 86; +/// Length of a nonce in bytes +#[allow(dead_code)] +pub const NONCE_LENGTH: usize = 24; +/// Length of a nonce in bytes after encoding to base64url +#[allow(dead_code)] +pub const NONCE_LENGTH_ENCODED: usize = 32; +/// Length of a shared secret in bytes +#[allow(dead_code)] +pub const SHARED_SECRET_LENGTH: usize = 32; +/// Length of a shared secret in bytes after encoding to base64url +#[allow(dead_code)] +pub const SHARED_SECRET_LENGTH_ENCODED: usize = 43; +/// Length of a route id in bytes +#[allow(dead_code)] +pub const ROUTE_ID_LENGTH: usize = 32; +/// Length of a route id in bytes afer encoding to base64url +#[allow(dead_code)] +pub const ROUTE_ID_LENGTH_ENCODED: usize = 43; + +////////////////////////////////////////////////////////////////////// + +pub trait Encodable +where + Self: Sized, +{ + fn encode(&self) -> String; + fn encoded_len() -> usize; + fn try_decode>(input: S) -> Result { + let b = input.as_ref().as_bytes(); + Self::try_decode_bytes(b) + } + fn try_decode_bytes(b: &[u8]) -> Result; +} + +////////////////////////////////////////////////////////////////////// + +macro_rules! 
byte_array_type { + ($name:ident, $size:expr, $encoded_size:expr) => { + #[derive( + Clone, + Copy, + Hash, + Eq, + PartialEq, + PartialOrd, + Ord, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, + )] + #[archive_attr(repr(C), derive(CheckBytes, Hash, Eq, PartialEq, PartialOrd, Ord))] + pub struct $name { + pub bytes: [u8; $size], + } + + impl Default for $name { + fn default() -> Self { + Self { + bytes: [0u8; $size], + } + } + } + + impl serde::Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let s = self.encode(); + serde::Serialize::serialize(&s, serializer) + } + } + + impl<'de> serde::Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = ::deserialize(deserializer)?; + if s == "" { + return Ok($name::default()); + } + $name::try_decode(s.as_str()).map_err(serde::de::Error::custom) + } + } + + impl $name { + pub fn new(bytes: [u8; $size]) -> Self { + Self { bytes } + } + + pub fn try_from_vec(v: Vec) -> Result { + let vl = v.len(); + Ok(Self { + bytes: v.try_into().map_err(|_| { + VeilidAPIError::generic(format!( + "Expected a Vec of length {} but it was {}", + $size, vl + )) + })?, + }) + } + + pub fn bit(&self, index: usize) -> bool { + assert!(index < ($size * 8)); + let bi = index / 8; + let ti = 7 - (index % 8); + ((self.bytes[bi] >> ti) & 1) != 0 + } + + pub fn first_nonzero_bit(&self) -> Option { + for i in 0..$size { + let b = self.bytes[i]; + if b != 0 { + for n in 0..8 { + if ((b >> (7 - n)) & 1u8) != 0u8 { + return Some((i * 8) + n); + } + } + panic!("wtf") + } + } + None + } + + pub fn nibble(&self, index: usize) -> u8 { + assert!(index < ($size * 2)); + let bi = index / 2; + if index & 1 == 0 { + (self.bytes[bi] >> 4) & 0xFu8 + } else { + self.bytes[bi] & 0xFu8 + } + } + + pub fn first_nonzero_nibble(&self) -> Option<(usize, u8)> { + for i in 0..($size * 2) { + let n = self.nibble(i); + if n != 0 { + return Some((i, 
n)); + } + } + None + } + } + + impl Encodable for $name { + fn encode(&self) -> String { + BASE64URL_NOPAD.encode(&self.bytes) + } + fn encoded_len() -> usize { + $encoded_size + } + fn try_decode_bytes(b: &[u8]) -> Result { + let mut bytes = [0u8; $size]; + let res = BASE64URL_NOPAD.decode_len(b.len()); + match res { + Ok(v) => { + if v != $size { + apibail_generic!("Incorrect length in decode"); + } + } + Err(_) => { + apibail_generic!("Failed to decode"); + } + } + + let res = BASE64URL_NOPAD.decode_mut(b, &mut bytes); + match res { + Ok(_) => Ok(Self::new(bytes)), + Err(_) => apibail_generic!("Failed to decode"), + } + } + } + impl fmt::Display for $name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.encode()) + } + } + + impl fmt::Debug for $name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, concat!(stringify!($name), "("))?; + write!(f, "{}", self.encode())?; + write!(f, ")") + } + } + + impl From<&$name> for String { + fn from(value: &$name) -> Self { + value.encode() + } + } + + impl FromStr for $name { + type Err = VeilidAPIError; + + fn from_str(s: &str) -> Result { + $name::try_from(s) + } + } + + impl TryFrom for $name { + type Error = VeilidAPIError; + fn try_from(value: String) -> Result { + $name::try_from(value.as_str()) + } + } + + impl TryFrom<&str> for $name { + type Error = VeilidAPIError; + fn try_from(value: &str) -> Result { + Self::try_decode(value) + } + } + }; +} + +///////////////////////////////////////// + +byte_array_type!(PublicKey, PUBLIC_KEY_LENGTH, PUBLIC_KEY_LENGTH_ENCODED); +byte_array_type!(SecretKey, SECRET_KEY_LENGTH, SECRET_KEY_LENGTH_ENCODED); +byte_array_type!(Signature, SIGNATURE_LENGTH, SIGNATURE_LENGTH_ENCODED); +byte_array_type!( + PublicKeyDistance, + PUBLIC_KEY_LENGTH, + PUBLIC_KEY_LENGTH_ENCODED +); +byte_array_type!(Nonce, NONCE_LENGTH, NONCE_LENGTH_ENCODED); +byte_array_type!( + SharedSecret, + SHARED_SECRET_LENGTH, + SHARED_SECRET_LENGTH_ENCODED 
+); +byte_array_type!(RouteId, ROUTE_ID_LENGTH, ROUTE_ID_LENGTH_ENCODED); diff --git a/veilid-core/src/crypto/crypto_system.rs b/veilid-core/src/crypto/crypto_system.rs new file mode 100644 index 00000000..5de8fb57 --- /dev/null +++ b/veilid-core/src/crypto/crypto_system.rs @@ -0,0 +1,113 @@ +use super::*; + +pub trait CryptoSystem { + // Accessors + fn kind(&self) -> CryptoKind; + fn crypto(&self) -> Crypto; + + // Cached Operations + fn cached_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result; + + // Generation + fn random_nonce(&self) -> Nonce; + fn random_shared_secret(&self) -> SharedSecret; + fn compute_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result; + fn generate_keypair(&self) -> KeyPair; + fn generate_hash(&self, data: &[u8]) -> PublicKey; + fn generate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + ) -> Result; + + // Validation + fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool; + fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool; + fn validate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + key: &PublicKey, + ) -> Result; + + // Distance Metric + fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance; + + // Authentication + fn sign( + &self, + key: &PublicKey, + secret: &SecretKey, + data: &[u8], + ) -> Result; + fn verify( + &self, + key: &PublicKey, + data: &[u8], + signature: &Signature, + ) -> Result<(), VeilidAPIError>; + + // AEAD Encrypt/Decrypt + fn aead_overhead(&self) -> usize; + fn decrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError>; + fn decrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError>; + fn encrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + 
associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError>; + fn encrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError>; + + // NoAuth Encrypt/Decrypt + fn crypt_in_place_no_auth( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + ); + fn crypt_b2b_no_auth( + &self, + in_buf: &[u8], + out_buf: &mut [u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ); + fn crypt_no_auth_aligned_8( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec; + fn crypt_no_auth_unaligned( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec; +} diff --git a/veilid-core/src/crypto/dh_cache.rs b/veilid-core/src/crypto/dh_cache.rs new file mode 100644 index 00000000..17c5d4ae --- /dev/null +++ b/veilid-core/src/crypto/dh_cache.rs @@ -0,0 +1,45 @@ +use super::*; +use crate::*; + +// Diffie-Hellman key exchange cache +#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct DHCacheKey { + pub key: PublicKey, + pub secret: SecretKey, +} + +#[derive(Serialize, Deserialize)] +pub struct DHCacheValue { + pub shared_secret: SharedSecret, +} + +pub type DHCache = LruCache; +pub const DH_CACHE_SIZE: usize = 4096; + +pub fn cache_to_bytes(cache: &DHCache) -> Vec { + let cnt: usize = cache.len(); + let mut out: Vec = Vec::with_capacity(cnt * (32 + 32 + 32)); + for e in cache.iter() { + out.extend(&e.0.key.bytes); + out.extend(&e.0.secret.bytes); + out.extend(&e.1.shared_secret.bytes); + } + let mut rev: Vec = Vec::with_capacity(out.len()); + for d in out.chunks(32 + 32 + 32).rev() { + rev.extend(d); + } + rev +} + +pub fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) { + for d in bytes.chunks(32 + 32 + 32) { + let k = DHCacheKey { + key: PublicKey::new(d[0..32].try_into().expect("asdf")), + secret: SecretKey::new(d[32..64].try_into().expect("asdf")), + }; + let v = DHCacheValue { + 
shared_secret: SharedSecret::new(d[64..96].try_into().expect("asdf")), + }; + cache.insert(k, v, |_k, _v| {}); + } +} diff --git a/veilid-core/src/crypto/envelope.rs b/veilid-core/src/crypto/envelope.rs index 7d0b6734..fe22f2fc 100644 --- a/veilid-core/src/crypto/envelope.rs +++ b/veilid-core/src/crypto/envelope.rs @@ -1,69 +1,64 @@ #![allow(dead_code)] #![allow(clippy::absurd_extreme_comparisons)] use super::*; -use crate::routing_table::VersionRange; use crate::*; use core::convert::TryInto; -// #[repr(C, packed)] -// struct EnvelopeHeader { -// // Size is at least 8 bytes. Depending on the version specified, the size may vary and should be case to the appropriate struct -// magic: [u8; 4], // 0x00: 0x56 0x4C 0x49 0x44 ("VLID") -// version: u8, // 0x04: 0 = EnvelopeV0 -// min_version: u8, // 0x05: 0 = EnvelopeV0 -// max_version: u8, // 0x06: 0 = EnvelopeV0 -// reserved: u8, // 0x07: Reserved for future use -// } - -// #[repr(C, packed)] -// struct EnvelopeV0 { -// // Size is 106 bytes. -// magic: [u8; 4], // 0x00: 0x56 0x4C 0x49 0x44 ("VLID") -// version: u8, // 0x04: 0 = EnvelopeV0 -// min_version: u8, // 0x05: 0 = EnvelopeV0 -// max_version: u8, // 0x06: 0 = EnvelopeV0 -// reserved: u8, // 0x07: Reserved for future use -// size: u16, // 0x08: Total size of the envelope including the encrypted operations message. Maximum size is 65,507 bytes, which is the data size limit for a single UDP message on IPv4. -// timestamp: u64, // 0x0A: Duration since UNIX_EPOCH in microseconds when this message is sent. Messages older than 10 seconds are dropped. 
-// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for x25519 -// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the Ed25519 public key of the sender (must be verified with find_node if this is a new node_id/address combination) -// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the Ed25519 public key of the recipient (must be the receiving node, or a relay lease holder) -// // 0x6A: message is appended (operations) -// // encrypted by XChaCha20Poly1305(nonce,x25519(recipient_id, sender_secret_key)) -// signature: [u8; 64], // 0x?? (end-0x40): Ed25519 signature of the entire envelope including header is appended to the packet -// // entire header needs to be included in message digest, relays are not allowed to modify the envelope without invalidating the signature. -// } +/// Envelopes are versioned +/// +/// These are the formats for the on-the-wire serialization performed by this module +/// +/// #[repr(C, packed)] +/// struct EnvelopeHeader { +/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and should be cast to the appropriate struct +/// magic: [u8; 3], // 0x00: 0x56 0x4C 0x44 ("VLD") +/// version: u8, // 0x03: 0 = EnvelopeV0 +/// } +/// +/// #[repr(C, packed)] +/// struct EnvelopeV0 { +/// // Size is 106 bytes without signature and 170 with signature +/// magic: [u8; 3], // 0x00: 0x56 0x4C 0x44 ("VLD") +/// version: u8, // 0x03: 0 = EnvelopeV0 +/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code (CryptoKind) +/// size: u16, // 0x08: Total size of the envelope including the encrypted operations message. Maximum size is 65,507 bytes, which is the data size limit for a single UDP message on IPv4. +/// timestamp: u64, // 0x0A: Duration since UNIX_EPOCH in microseconds when this message is sent. Messages older than 10 seconds are dropped. 
+/// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for dh +/// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the public key of the sender (must be verified with find_node if this is a new node_id/address combination) +/// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the public key of the recipient (must be the receiving node, or a relay lease holder) +/// // 0x6A: message is appended (operations) +/// signature: [u8; 64], // 0x?? (end-0x40): Signature of the entire envelope including header is appended to the packet +/// // entire header needs to be included in message digest, relays are not allowed to modify the envelope without invalidating the signature. +/// } pub const MAX_ENVELOPE_SIZE: usize = 65507; pub const MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + Signature -pub const ENVELOPE_MAGIC: &[u8; 4] = b"VLID"; -pub type EnvelopeNonce = [u8; 24]; +pub const ENVELOPE_MAGIC: &[u8; 3] = b"VLD"; #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct Envelope { - version: u8, - min_version: u8, - max_version: u8, + version: EnvelopeVersion, + crypto_kind: CryptoKind, timestamp: Timestamp, - nonce: EnvelopeNonce, - sender_id: DHTKey, - recipient_id: DHTKey, + nonce: Nonce, + sender_id: PublicKey, + recipient_id: PublicKey, } impl Envelope { pub fn new( - version: u8, + version: EnvelopeVersion, + crypto_kind: CryptoKind, timestamp: Timestamp, - nonce: EnvelopeNonce, - sender_id: DHTKey, - recipient_id: DHTKey, + nonce: Nonce, + sender_id: PublicKey, + recipient_id: PublicKey, ) -> Self { - assert!(version >= MIN_CRYPTO_VERSION); - assert!(version <= MAX_CRYPTO_VERSION); + assert!(VALID_ENVELOPE_VERSIONS.contains(&version)); + assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind)); Self { version, - min_version: MIN_CRYPTO_VERSION, - max_version: MAX_CRYPTO_VERSION, + crypto_kind, timestamp, nonce, sender_id, @@ -71,7 +66,7 @@ impl Envelope { } } - pub fn from_signed_data(data: &[u8]) 
-> Result { + pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result { // Ensure we are at least the length of the envelope // Silent drop here, as we use zero length packets as part of the protocol for hole punching if data.len() < MIN_ENVELOPE_SIZE { @@ -79,33 +74,28 @@ impl Envelope { } // Verify magic number - let magic: [u8; 4] = data[0x00..0x04] + let magic: [u8; 3] = data[0x00..0x03] .try_into() .map_err(VeilidAPIError::internal)?; if magic != *ENVELOPE_MAGIC { apibail_generic!("bad magic number"); } - // Check version - let version = data[0x04]; - if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION { - apibail_parse_error!("unsupported cryptography version", version); + // Check envelope version + let version = data[0x03]; + if !VALID_ENVELOPE_VERSIONS.contains(&version) { + apibail_parse_error!("unsupported envelope version", version); } - // Get min version - let min_version = data[0x05]; - if min_version > version { - apibail_parse_error!("version too low", version); - } - - // Get max version - let max_version = data[0x06]; - if version > max_version { - apibail_parse_error!("version too high", version); - } - if min_version > max_version { - apibail_generic!("version information invalid"); - } + // Check crypto kind + let crypto_kind = FourCC( + data[0x04..0x08] + .try_into() + .map_err(VeilidAPIError::internal)?, + ); + let Some(vcrypto) = crypto.get(crypto_kind) else { + apibail_parse_error!("unsupported crypto kind", crypto_kind); + }; // Get size and ensure it matches the size of the envelope and is less than the maximum message size let size: u16 = u16::from_le_bytes( @@ -136,17 +126,18 @@ impl Envelope { .into(); // Get nonce and sender node id - let nonce: EnvelopeNonce = data[0x12..0x2A] + let nonce_slice: [u8; NONCE_LENGTH] = data[0x12..0x2A] .try_into() .map_err(VeilidAPIError::internal)?; - let sender_id_slice: [u8; 32] = data[0x2A..0x4A] + let sender_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x2A..0x4A] .try_into() 
.map_err(VeilidAPIError::internal)?; - let recipient_id_slice: [u8; 32] = data[0x4A..0x6A] + let recipient_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x4A..0x6A] .try_into() .map_err(VeilidAPIError::internal)?; - let sender_id = DHTKey::new(sender_id_slice); - let recipient_id = DHTKey::new(recipient_id_slice); + let nonce: Nonce = Nonce::new(nonce_slice); + let sender_id = PublicKey::new(sender_id_slice); + let recipient_id = PublicKey::new(recipient_id_slice); // Ensure sender_id and recipient_id are not the same if sender_id == recipient_id { @@ -157,21 +148,21 @@ impl Envelope { } // Get signature - let signature = DHTSignature::new( + let signature = Signature::new( data[(data.len() - 64)..] .try_into() .map_err(VeilidAPIError::internal)?, ); // Validate signature - verify(&sender_id, &data[0..(data.len() - 64)], &signature) + vcrypto + .verify(&sender_id, &data[0..(data.len() - 64)], &signature) .map_err(VeilidAPIError::internal)?; // Return envelope Ok(Self { version, - min_version, - max_version, + crypto_kind, timestamp, nonce, sender_id, @@ -183,13 +174,17 @@ impl Envelope { &self, crypto: Crypto, data: &[u8], - node_id_secret: &DHTKeySecret, + node_id_secret: &SecretKey, ) -> Result, VeilidAPIError> { // Get DH secret - let dh_secret = crypto.cached_dh(&self.sender_id, node_id_secret)?; + let vcrypto = crypto + .get(self.crypto_kind) + .expect("need to ensure only valid crypto kinds here"); + let dh_secret = vcrypto.cached_dh(&self.sender_id, node_id_secret)?; // Decrypt message without authentication - let body = Crypto::crypt_no_auth(&data[0x6A..data.len() - 64], &self.nonce, &dh_secret); + let body = + vcrypto.crypt_no_auth_aligned_8(&data[0x6A..data.len() - 64], &self.nonce, &dh_secret); Ok(body) } @@ -198,39 +193,41 @@ impl Envelope { &self, crypto: Crypto, body: &[u8], - node_id_secret: &DHTKeySecret, + node_id_secret: &SecretKey, ) -> Result, VeilidAPIError> { // Ensure body isn't too long let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE; 
if envelope_size > MAX_ENVELOPE_SIZE { apibail_parse_error!("envelope size is too large", envelope_size); } + // Generate dh secret + let vcrypto = crypto + .get(self.crypto_kind) + .expect("need to ensure only valid crypto kinds here"); + let dh_secret = vcrypto.cached_dh(&self.recipient_id, node_id_secret)?; + + // Write envelope body let mut data = vec![0u8; envelope_size]; // Write magic - data[0x00..0x04].copy_from_slice(ENVELOPE_MAGIC); + data[0x00..0x03].copy_from_slice(ENVELOPE_MAGIC); // Write version - data[0x04] = self.version; - // Write min version - data[0x05] = self.min_version; - // Write max version - data[0x06] = self.max_version; + data[0x03] = self.version; + // Write crypto kind + data[0x04..0x08].copy_from_slice(&self.crypto_kind.0); // Write size data[0x08..0x0A].copy_from_slice(&(envelope_size as u16).to_le_bytes()); // Write timestamp data[0x0A..0x12].copy_from_slice(&self.timestamp.as_u64().to_le_bytes()); // Write nonce - data[0x12..0x2A].copy_from_slice(&self.nonce); + data[0x12..0x2A].copy_from_slice(&self.nonce.bytes); // Write sender node id data[0x2A..0x4A].copy_from_slice(&self.sender_id.bytes); // Write recipient node id data[0x4A..0x6A].copy_from_slice(&self.recipient_id.bytes); - // Generate dh secret - let dh_secret = crypto.cached_dh(&self.recipient_id, node_id_secret)?; - // Encrypt and authenticate message - let encrypted_body = Crypto::crypt_no_auth(body, &self.nonce, &dh_secret); + let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce, &dh_secret); // Write body if !encrypted_body.is_empty() { @@ -238,7 +235,7 @@ impl Envelope { } // Sign the envelope - let signature = sign( + let signature = vcrypto.sign( &self.sender_id, node_id_secret, &data[0..(envelope_size - 64)], @@ -254,25 +251,23 @@ impl Envelope { self.version } - pub fn get_min_max_version(&self) -> VersionRange { - VersionRange { - min: self.min_version, - max: self.max_version, - } + pub fn get_crypto_kind(&self) -> CryptoKind { + 
self.crypto_kind } pub fn get_timestamp(&self) -> Timestamp { self.timestamp } - pub fn get_nonce(&self) -> EnvelopeNonce { + pub fn get_nonce(&self) -> Nonce { self.nonce } - pub fn get_sender_id(&self) -> DHTKey { + pub fn get_sender_id(&self) -> PublicKey { self.sender_id } - pub fn get_recipient_id(&self) -> DHTKey { + + pub fn get_recipient_id(&self) -> PublicKey { self.recipient_id } } diff --git a/veilid-core/src/crypto/key.rs b/veilid-core/src/crypto/key.rs deleted file mode 100644 index 218ffcb8..00000000 --- a/veilid-core/src/crypto/key.rs +++ /dev/null @@ -1,389 +0,0 @@ -use crate::*; - -use core::cmp::{Eq, Ord, PartialEq, PartialOrd}; -use core::convert::{TryFrom, TryInto}; -use core::fmt; -use core::hash::Hash; - -use data_encoding::BASE64URL_NOPAD; -use digest::generic_array::typenum::U64; -use digest::{Digest, Output}; -use ed25519_dalek::{Keypair, PublicKey, Signature}; -use generic_array::GenericArray; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; - -////////////////////////////////////////////////////////////////////// - -/// Length of a DHT key in bytes -#[allow(dead_code)] -pub const DHT_KEY_LENGTH: usize = 32; -/// Length of a DHT key in bytes after encoding to base64url -#[allow(dead_code)] -pub const DHT_KEY_LENGTH_ENCODED: usize = 43; -/// Length of a DHT secret in bytes -#[allow(dead_code)] -pub const DHT_KEY_SECRET_LENGTH: usize = 32; -/// Length of a DHT secret in bytes after encoding to base64url -#[allow(dead_code)] -pub const DHT_KEY_SECRET_LENGTH_ENCODED: usize = 43; -/// Length of a DHT signature in bytes -#[allow(dead_code)] -/// Length of a DHT signature in bytes after encoding to base64url -pub const DHT_SIGNATURE_LENGTH: usize = 64; -#[allow(dead_code)] -pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86; - -////////////////////////////////////////////////////////////////////// - -macro_rules! 
byte_array_type { - ($name:ident, $size:expr) => { - #[derive( - Clone, - Copy, - Hash, - Eq, - PartialEq, - PartialOrd, - Ord, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, - )] - #[archive_attr(repr(C), derive(CheckBytes, Hash, Eq, PartialEq, PartialOrd, Ord))] - pub struct $name { - pub bytes: [u8; $size], - } - - impl Default for $name { - fn default() -> Self { - Self { - bytes: [0u8; $size], - } - } - } - - impl serde::Serialize for $name { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - let s = self.encode(); - serde::Serialize::serialize(&s, serializer) - } - } - - impl<'de> serde::Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let s = ::deserialize(deserializer)?; - if s == "" { - return Ok($name::default()); - } - $name::try_decode(s.as_str()).map_err(serde::de::Error::custom) - } - } - - impl $name { - pub fn new(bytes: [u8; $size]) -> Self { - Self { bytes } - } - - pub fn try_from_vec(v: Vec) -> Result { - let vl = v.len(); - Ok(Self { - bytes: v.try_into().map_err(|_| { - VeilidAPIError::generic(format!( - "Expected a Vec of length {} but it was {}", - $size, vl - )) - })?, - }) - } - - pub fn bit(&self, index: usize) -> bool { - assert!(index < ($size * 8)); - let bi = index / 8; - let ti = 7 - (index % 8); - ((self.bytes[bi] >> ti) & 1) != 0 - } - - pub fn first_nonzero_bit(&self) -> Option { - for i in 0..$size { - let b = self.bytes[i]; - if b != 0 { - for n in 0..8 { - if ((b >> (7 - n)) & 1u8) != 0u8 { - return Some((i * 8) + n); - } - } - panic!("wtf") - } - } - None - } - - pub fn nibble(&self, index: usize) -> u8 { - assert!(index < ($size * 2)); - let bi = index / 2; - if index & 1 == 0 { - (self.bytes[bi] >> 4) & 0xFu8 - } else { - self.bytes[bi] & 0xFu8 - } - } - - pub fn first_nonzero_nibble(&self) -> Option<(usize, u8)> { - for i in 0..($size * 2) { - let n = self.nibble(i); - if n != 0 { - return Some((i, n)); - } - } - None 
- } - - pub fn encode(&self) -> String { - BASE64URL_NOPAD.encode(&self.bytes) - } - - pub fn try_decode>(input: S) -> Result { - let mut bytes = [0u8; $size]; - - let res = BASE64URL_NOPAD.decode_len(input.as_ref().len()); - match res { - Ok(v) => { - if v != $size { - apibail_generic!("Incorrect length in decode"); - } - } - Err(_) => { - apibail_generic!("Failed to decode"); - } - } - - let res = BASE64URL_NOPAD.decode_mut(input.as_ref().as_bytes(), &mut bytes); - match res { - Ok(_) => Ok(Self::new(bytes)), - Err(_) => apibail_generic!("Failed to decode"), - } - } - } - - impl fmt::Display for $name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - //write!(f, "{}", String::from(self)) - write!(f, "{}", self.encode()) - } - } - - impl fmt::Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, concat!(stringify!($name), "("))?; - write!(f, "{}", self.encode())?; - write!(f, ")") - } - } - - impl From<&$name> for String { - fn from(value: &$name) -> Self { - // let mut s = String::new(); - // for n in 0..($size / 8) { - // let b: [u8; 8] = value.bytes[n * 8..(n + 1) * 8].try_into().unwrap(); - // s.push_str(hex::encode(b).as_str()); - // } - // s - value.encode() - } - } - - impl TryFrom for $name { - type Error = VeilidAPIError; - fn try_from(value: String) -> Result { - $name::try_from(value.as_str()) - } - } - - impl TryFrom<&str> for $name { - type Error = VeilidAPIError; - fn try_from(value: &str) -> Result { - // let mut out = $name::default(); - // if value == "" { - // return Ok(out); - // } - // if value.len() != ($size * 2) { - // apibail_generic!(concat!(stringify!($name), " is incorrect length")); - // } - // match hex::decode_to_slice(value, &mut out.bytes) { - // Ok(_) => Ok(out), - // Err(err) => Err(VeilidAPIError::generic(err)), - // } - Self::try_decode(value) - } - } - }; -} - -byte_array_type!(DHTKey, DHT_KEY_LENGTH); -byte_array_type!(DHTKeySecret, DHT_KEY_SECRET_LENGTH); 
-byte_array_type!(DHTSignature, DHT_SIGNATURE_LENGTH); -byte_array_type!(DHTKeyDistance, DHT_KEY_LENGTH); - -///////////////////////////////////////// - -struct Blake3Digest512 { - dig: blake3::Hasher, -} - -impl Digest for Blake3Digest512 { - type OutputSize = U64; - - fn new() -> Self { - Self { - dig: blake3::Hasher::new(), - } - } - - fn update(&mut self, data: impl AsRef<[u8]>) { - self.dig.update(data.as_ref()); - } - - fn chain(mut self, data: impl AsRef<[u8]>) -> Self - where - Self: Sized, - { - self.update(data); - self - } - - fn finalize(self) -> Output { - let mut b = [0u8; 64]; - self.dig.finalize_xof().fill(&mut b); - let mut out = GenericArray::::default(); - for n in 0..64 { - out[n] = b[n]; - } - out - } - - fn finalize_reset(&mut self) -> Output { - let mut b = [0u8; 64]; - self.dig.finalize_xof().fill(&mut b); - let mut out = GenericArray::::default(); - for n in 0..64 { - out[n] = b[n]; - } - self.reset(); - out - } - - fn reset(&mut self) { - self.dig.reset(); - } - - fn output_size() -> usize { - 64 - } - - fn digest(data: &[u8]) -> Output { - let mut dig = blake3::Hasher::new(); - dig.update(data); - let mut b = [0u8; 64]; - dig.finalize_xof().fill(&mut b); - let mut out = GenericArray::::default(); - for n in 0..64 { - out[n] = b[n]; - } - out - } -} - -///////////////////////////////////////// - -pub fn generate_secret() -> (DHTKey, DHTKeySecret) { - let mut csprng = VeilidRng {}; - let keypair = Keypair::generate(&mut csprng); - let dht_key = DHTKey::new(keypair.public.to_bytes()); - let dht_key_secret = DHTKeySecret::new(keypair.secret.to_bytes()); - - (dht_key, dht_key_secret) -} - -pub fn sign( - dht_key: &DHTKey, - dht_key_secret: &DHTKeySecret, - data: &[u8], -) -> Result { - let mut kpb: [u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH] = - [0u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH]; - - kpb[..DHT_KEY_SECRET_LENGTH].copy_from_slice(&dht_key_secret.bytes); - kpb[DHT_KEY_SECRET_LENGTH..].copy_from_slice(&dht_key.bytes); - let keypair = 
Keypair::from_bytes(&kpb) - .map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?; - - let mut dig = Blake3Digest512::new(); - dig.update(data); - - let sig = keypair - .sign_prehashed(dig, None) - .map_err(VeilidAPIError::internal)?; - - let dht_sig = DHTSignature::new(sig.to_bytes()); - Ok(dht_sig) -} - -pub fn verify( - dht_key: &DHTKey, - data: &[u8], - signature: &DHTSignature, -) -> Result<(), VeilidAPIError> { - let pk = PublicKey::from_bytes(&dht_key.bytes) - .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; - let sig = Signature::from_bytes(&signature.bytes) - .map_err(|e| VeilidAPIError::parse_error("Signature is invalid", e))?; - - let mut dig = Blake3Digest512::new(); - dig.update(data); - - pk.verify_prehashed(dig, None, &sig) - .map_err(|e| VeilidAPIError::parse_error("Verification failed", e))?; - Ok(()) -} - -pub fn generate_hash(data: &[u8]) -> DHTKey { - DHTKey::new(*blake3::hash(data).as_bytes()) -} - -pub fn validate_hash(data: &[u8], dht_key: &DHTKey) -> bool { - let bytes = *blake3::hash(data).as_bytes(); - - bytes == dht_key.bytes -} - -pub fn validate_key(dht_key: &DHTKey, dht_key_secret: &DHTKeySecret) -> bool { - let data = vec![0u8; 512]; - let sig = match sign(dht_key, dht_key_secret, &data) { - Ok(s) => s, - Err(_) => { - return false; - } - }; - verify(dht_key, &data, &sig).is_ok() -} - -pub fn distance(key1: &DHTKey, key2: &DHTKey) -> DHTKeyDistance { - let mut bytes = [0u8; DHT_KEY_LENGTH]; - - for (n, byte) in bytes.iter_mut().enumerate() { - *byte = key1.bytes[n] ^ key2.bytes[n]; - } - - DHTKeyDistance::new(bytes) -} - -#[allow(dead_code)] -pub fn sort_closest_fn(key: DHTKey) -> impl FnMut(&DHTKey, &DHTKey) -> std::cmp::Ordering { - move |k1, k2| distance(k1, &key).cmp(&distance(k2, &key)) -} diff --git a/veilid-core/src/crypto/mod.rs b/veilid-core/src/crypto/mod.rs index 104dd600..dbe666f8 100644 --- a/veilid-core/src/crypto/mod.rs +++ b/veilid-core/src/crypto/mod.rs @@ -1,126 +1,141 @@ +mod 
byte_array_types; +mod dh_cache; mod envelope; -mod key; mod receipt; +mod types; mod value; +pub mod crypto_system; pub mod tests; +pub mod vld0; +pub use byte_array_types::*; +pub use crypto_system::*; +pub use dh_cache::*; pub use envelope::*; -pub use key::*; pub use receipt::*; +pub use types::*; pub use value::*; - -pub const MIN_CRYPTO_VERSION: u8 = 0u8; -pub const MAX_CRYPTO_VERSION: u8 = 0u8; +pub use vld0::*; use crate::*; -use chacha20::cipher::{KeyIvInit, StreamCipher}; -use chacha20::XChaCha20; -use chacha20poly1305 as ch; -use chacha20poly1305::aead::{AeadInPlace, NewAead}; use core::convert::TryInto; -use curve25519_dalek as cd; -use ed25519_dalek as ed; use hashlink::linked_hash_map::Entry; use hashlink::LruCache; use serde::{Deserialize, Serialize}; -use x25519_dalek as xd; +// Handle to a particular cryptosystem +pub type CryptoSystemVersion = Arc; -pub type SharedSecret = [u8; 32]; -pub type Nonce = [u8; 24]; - -const DH_CACHE_SIZE: usize = 1024; -pub const AEAD_OVERHEAD: usize = 16; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)] -struct DHCacheKey { - key: DHTKey, - secret: DHTKeySecret, +/// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one +pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_VLD0]; +/// Number of cryptosystem signatures to keep on structures if many are present beyond the ones we consider valid +pub const MAX_CRYPTO_KINDS: usize = 3; +/// Return the best cryptosystem kind we support +pub fn best_crypto_kind() -> CryptoKind { + VALID_CRYPTO_KINDS[0] } -#[derive(Serialize, Deserialize)] -struct DHCacheValue { - shared_secret: SharedSecret, -} -type DHCache = LruCache; +// Version number of envelope format +pub type EnvelopeVersion = u8; -fn cache_to_bytes(cache: &DHCache) -> Vec { - let cnt: usize = cache.len(); - let mut out: Vec = Vec::with_capacity(cnt * (32 + 32 + 32)); - for e in cache.iter() { - out.extend(&e.0.key.bytes); - out.extend(&e.0.secret.bytes); - 
out.extend(&e.1.shared_secret); - } - let mut rev: Vec = Vec::with_capacity(out.len()); - for d in out.chunks(32 + 32 + 32).rev() { - rev.extend(d); - } - rev -} - -fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) { - for d in bytes.chunks(32 + 32 + 32) { - let k = DHCacheKey { - key: DHTKey::new(d[0..32].try_into().expect("asdf")), - secret: DHTKeySecret::new(d[32..64].try_into().expect("asdf")), - }; - let v = DHCacheValue { - shared_secret: d[64..96].try_into().expect("asdf"), - }; - cache.insert(k, v); - } +/// Envelope versions in order of preference, best envelope version is the first one, worst is the last one +pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [0u8]; +/// Number of envelope versions to keep on structures if many are present beyond the ones we consider valid +pub const MAX_ENVELOPE_VERSIONS: usize = 3; +/// Return the best envelope version we support +pub fn best_envelope_version() -> EnvelopeVersion { + VALID_ENVELOPE_VERSIONS[0] } struct CryptoInner { - table_store: TableStore, - node_id: DHTKey, - node_id_secret: DHTKeySecret, dh_cache: DHCache, flush_future: Option>, + crypto_vld0: Option>, } +struct CryptoUnlockedInner { + config: VeilidConfig, + table_store: TableStore, + protected_store: ProtectedStore, +} + +/// Crypto factory implementation #[derive(Clone)] pub struct Crypto { - config: VeilidConfig, + unlocked_inner: Arc, inner: Arc>, } impl Crypto { - fn new_inner(table_store: TableStore) -> CryptoInner { + fn new_inner() -> CryptoInner { CryptoInner { - table_store, - node_id: Default::default(), - node_id_secret: Default::default(), dh_cache: DHCache::new(DH_CACHE_SIZE), flush_future: None, + crypto_vld0: None, } } - pub fn new(config: VeilidConfig, table_store: TableStore) -> Self { - Self { - config, - inner: Arc::new(Mutex::new(Self::new_inner(table_store))), - } + pub fn new( + config: VeilidConfig, + table_store: TableStore, + protected_store: ProtectedStore, + ) -> Self { + let out = Self { + unlocked_inner: 
Arc::new(CryptoUnlockedInner { + config, + table_store, + protected_store, + }), + inner: Arc::new(Mutex::new(Self::new_inner())), + }; + + out.inner.lock().crypto_vld0 = Some(Arc::new(vld0::CryptoSystemVLD0::new(out.clone()))); + + out + } + + pub fn config(&self) -> VeilidConfig { + self.unlocked_inner.config.clone() } pub async fn init(&self) -> EyreResult<()> { trace!("Crypto::init"); + let table_store = self.unlocked_inner.table_store.clone(); + + // Init node id from config + if let Err(e) = self + .unlocked_inner + .config + .init_node_ids(self.clone(), self.unlocked_inner.protected_store.clone()) + .await + { + return Err(e).wrap_err("init node id failed"); + } // make local copy of node id for easy access - let (table_store, node_id) = { - let mut inner = self.inner.lock(); - let c = self.config.get(); - inner.node_id = c.network.node_id.unwrap(); - inner.node_id_secret = c.network.node_id_secret.unwrap(); - (inner.table_store.clone(), c.network.node_id) + let mut cache_validity_key: Vec = Vec::new(); + { + let c = self.unlocked_inner.config.get(); + for ck in VALID_CRYPTO_KINDS { + cache_validity_key.append( + &mut c + .network + .routing_table + .node_id + .get(ck) + .unwrap() + .value + .bytes + .to_vec(), + ); + } }; // load caches if they are valid for this node id let mut db = table_store.open("crypto_caches", 1).await?; - let caches_valid = match db.load(0, b"node_id")? { - Some(v) => v.as_slice() == node_id.unwrap().bytes, + let caches_valid = match db.load(0, b"cache_validity_key")? 
{ + Some(v) => v == cache_validity_key, None => false, }; if caches_valid { @@ -132,7 +147,8 @@ impl Crypto { drop(db); table_store.delete("crypto_caches").await?; db = table_store.open("crypto_caches", 1).await?; - db.store(0, b"node_id", &node_id.unwrap().bytes).await?; + db.store(0, b"cache_validity_key", &cache_validity_key) + .await?; } // Schedule flushing @@ -152,13 +168,16 @@ impl Crypto { pub async fn flush(&self) -> EyreResult<()> { //trace!("Crypto::flush"); - let (table_store, cache_bytes) = { + let cache_bytes = { let inner = self.inner.lock(); - let cache_bytes = cache_to_bytes(&inner.dh_cache); - (inner.table_store.clone(), cache_bytes) + cache_to_bytes(&inner.dh_cache) }; - let db = table_store.open("crypto_caches", 1).await?; + let db = self + .unlocked_inner + .table_store + .open("crypto_caches", 1) + .await?; db.store(0, b"dh_cache", &cache_bytes).await?; Ok(()) } @@ -180,139 +199,99 @@ impl Crypto { }; } - fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result { - let bytes = key.to_bytes(); - let compressed = cd::edwards::CompressedEdwardsY(bytes); - let point = compressed - .decompress() - .ok_or_else(|| VeilidAPIError::internal("ed25519_to_x25519_pk failed"))?; - let mp = point.to_montgomery(); - Ok(xd::PublicKey::from(mp.to_bytes())) - } - fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result { - let exp = ed::ExpandedSecretKey::from(key); - let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes(); - let lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?; - Ok(xd::StaticSecret::from(lowbytes)) + /// Factory method to get a specific crypto version + pub fn get(&self, kind: CryptoKind) -> Option { + let inner = self.inner.lock(); + match kind { + CRYPTO_KIND_VLD0 => Some(inner.crypto_vld0.clone().unwrap()), + _ => None, + } } - pub fn cached_dh( + // Factory method to get the best crypto version + pub fn best(&self) -> CryptoSystemVersion { + self.get(best_crypto_kind()).unwrap() + } + + /// Signature set 
verification + /// Returns the set of signature cryptokinds that validate and are supported + /// If any cryptokinds are supported and do not validate, the whole operation + /// returns an error + pub fn verify_signatures( &self, - key: &DHTKey, - secret: &DHTKeySecret, + node_ids: &[TypedKey], + data: &[u8], + typed_signatures: &[TypedSignature], + ) -> Result { + let mut out = TypedKeySet::with_capacity(node_ids.len()); + for sig in typed_signatures { + for nid in node_ids { + if nid.kind == sig.kind { + if let Some(vcrypto) = self.get(sig.kind) { + vcrypto.verify(&nid.value, data, &sig.value)?; + out.add(*nid); + } + } + } + } + Ok(out) + } + + /// Signature set generation + /// Generates the set of signatures that are supported + /// Any cryptokinds that are not supported are silently dropped + pub fn generate_signatures( + &self, + data: &[u8], + typed_key_pairs: &[TypedKeyPair], + transform: F, + ) -> Result, VeilidAPIError> + where + F: Fn(&TypedKeyPair, Signature) -> R, + { + let mut out = Vec::::with_capacity(typed_key_pairs.len()); + for kp in typed_key_pairs { + if let Some(vcrypto) = self.get(kp.kind) { + let sig = vcrypto.sign(&kp.value.key, &kp.value.secret, data)?; + out.push(transform(kp, sig)) + } + } + Ok(out) + } + + /// Generate keypair + /// Does not require startup/init + pub fn generate_keypair(crypto_kind: CryptoKind) -> Result { + if crypto_kind == CRYPTO_KIND_VLD0 { + let kp = vld0_generate_keypair(); + return Ok(TypedKeyPair::new(crypto_kind, kp)); + } + Err(VeilidAPIError::generic("invalid crypto kind")) + } + + // Internal utilities + + fn cached_dh_internal( + &self, + vcrypto: &T, + key: &PublicKey, + secret: &SecretKey, ) -> Result { Ok( - match self.inner.lock().dh_cache.entry(DHCacheKey { - key: *key, - secret: *secret, - }) { + match self.inner.lock().dh_cache.entry( + DHCacheKey { + key: *key, + secret: *secret, + }, + |_k, _v| {}, + ) { Entry::Occupied(e) => e.get().shared_secret, Entry::Vacant(e) => { - let shared_secret = 
Self::compute_dh(key, secret)?; + let shared_secret = vcrypto.compute_dh(key, secret)?; e.insert(DHCacheValue { shared_secret }); shared_secret } }, ) } - - /////////// - // These are safe to use regardless of initialization status - - pub fn compute_dh(key: &DHTKey, secret: &DHTKeySecret) -> Result { - let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?; - let pk_xd = Self::ed25519_to_x25519_pk(&pk_ed)?; - let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?; - let sk_xd = Self::ed25519_to_x25519_sk(&sk_ed)?; - Ok(sk_xd.diffie_hellman(&pk_xd).to_bytes()) - } - - pub fn get_random_nonce() -> Nonce { - let mut nonce = [0u8; 24]; - random_bytes(&mut nonce).unwrap(); - nonce - } - - pub fn get_random_secret() -> SharedSecret { - let mut s = [0u8; 32]; - random_bytes(&mut s).unwrap(); - s - } - - pub fn decrypt_in_place_aead( - body: &mut Vec, - nonce: &Nonce, - shared_secret: &SharedSecret, - associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { - let key = ch::Key::from(*shared_secret); - let xnonce = ch::XNonce::from(*nonce); - let aead = ch::XChaCha20Poly1305::new(&key); - aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body) - .map_err(map_to_string) - .map_err(VeilidAPIError::generic) - } - - pub fn decrypt_aead( - body: &[u8], - nonce: &Nonce, - shared_secret: &SharedSecret, - associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { - let mut out = body.to_vec(); - Self::decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) - .map_err(map_to_string) - .map_err(VeilidAPIError::generic)?; - Ok(out) - } - - pub fn encrypt_in_place_aead( - body: &mut Vec, - nonce: &Nonce, - shared_secret: &SharedSecret, - associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { - let key = ch::Key::from(*shared_secret); - let xnonce = ch::XNonce::from(*nonce); - let aead = ch::XChaCha20Poly1305::new(&key); - - aead.encrypt_in_place(&xnonce, 
associated_data.unwrap_or(b""), body) - .map_err(map_to_string) - .map_err(VeilidAPIError::generic) - } - - pub fn encrypt_aead( - body: &[u8], - nonce: &Nonce, - shared_secret: &SharedSecret, - associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { - let mut out = body.to_vec(); - Self::encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) - .map_err(map_to_string) - .map_err(VeilidAPIError::generic)?; - Ok(out) - } - - pub fn crypt_in_place_no_auth(body: &mut Vec, nonce: &Nonce, shared_secret: &SharedSecret) { - let mut cipher = XChaCha20::new(shared_secret.into(), nonce.into()); - cipher.apply_keystream(body); - } - - pub fn crypt_b2b_no_auth( - in_buf: &[u8], - nonce: &Nonce, - shared_secret: &SharedSecret, - ) -> Vec { - let mut cipher = XChaCha20::new(shared_secret.into(), nonce.into()); - // Allocate uninitialized memory, aligned to 8 byte boundary because capnp is faster this way - // and the Vec returned here will be used to hold decrypted rpc messages - let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; - cipher.apply_keystream_b2b(in_buf, &mut out_buf).unwrap(); - out_buf - } - - pub fn crypt_no_auth(body: &[u8], nonce: &Nonce, shared_secret: &SharedSecret) -> Vec { - Self::crypt_b2b_no_auth(body, nonce, shared_secret) - } } diff --git a/veilid-core/src/crypto/none/blake3digest512.rs b/veilid-core/src/crypto/none/blake3digest512.rs new file mode 100644 index 00000000..1db950af --- /dev/null +++ b/veilid-core/src/crypto/none/blake3digest512.rs @@ -0,0 +1,70 @@ +use digest::generic_array::typenum::U64; +use digest::{Digest, Output}; +use generic_array::GenericArray; + +pub struct Blake3Digest512 { + dig: blake3::Hasher, +} + +impl Digest for Blake3Digest512 { + type OutputSize = U64; + + fn new() -> Self { + Self { + dig: blake3::Hasher::new(), + } + } + + fn update(&mut self, data: impl AsRef<[u8]>) { + self.dig.update(data.as_ref()); + } + + fn chain(mut self, data: impl AsRef<[u8]>) -> Self + where + Self: 
Sized, + { + self.update(data); + self + } + + fn finalize(self) -> Output { + let mut b = [0u8; 64]; + self.dig.finalize_xof().fill(&mut b); + let mut out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + out + } + + fn finalize_reset(&mut self) -> Output { + let mut b = [0u8; 64]; + self.dig.finalize_xof().fill(&mut b); + let mut out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + self.reset(); + out + } + + fn reset(&mut self) { + self.dig.reset(); + } + + fn output_size() -> usize { + 64 + } + + fn digest(data: &[u8]) -> Output { + let mut dig = blake3::Hasher::new(); + dig.update(data); + let mut b = [0u8; 64]; + dig.finalize_xof().fill(&mut b); + let mut out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + out + } +} diff --git a/veilid-core/src/crypto/none/mod.rs b/veilid-core/src/crypto/none/mod.rs new file mode 100644 index 00000000..fd2b42a4 --- /dev/null +++ b/veilid-core/src/crypto/none/mod.rs @@ -0,0 +1,300 @@ +pub mod blake3digest512; +pub use blake3digest512::*; + +use super::*; + +use chacha20::cipher::{KeyIvInit, StreamCipher}; +use chacha20::XChaCha20; +use chacha20poly1305 as ch; +use chacha20poly1305::aead::{AeadInPlace, NewAead}; +use core::convert::TryInto; +use curve25519_dalek as cd; +use digest::Digest; +use ed25519_dalek as ed; +use x25519_dalek as xd; + +const AEAD_OVERHEAD: usize = 16; +pub const CRYPTO_KIND_VLD0: CryptoKind = FourCC([b'V', b'L', b'D', b'0']); + +fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result { + let bytes = key.to_bytes(); + let compressed = cd::edwards::CompressedEdwardsY(bytes); + let point = compressed + .decompress() + .ok_or_else(|| VeilidAPIError::internal("ed25519_to_x25519_pk failed"))?; + let mp = point.to_montgomery(); + Ok(xd::PublicKey::from(mp.to_bytes())) +} +fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result { + let exp = ed::ExpandedSecretKey::from(key); + let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes(); + let 
lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?; + Ok(xd::StaticSecret::from(lowbytes)) +} + +pub fn vld0_generate_keypair() -> KeyPair { + let mut csprng = VeilidRng {}; + let keypair = ed::Keypair::generate(&mut csprng); + let dht_key = PublicKey::new(keypair.public.to_bytes()); + let dht_key_secret = SecretKey::new(keypair.secret.to_bytes()); + + KeyPair::new(dht_key, dht_key_secret) +} + +/// V0 CryptoSystem +#[derive(Clone)] +pub struct CryptoSystemVLD0 { + crypto: Crypto, +} + +impl CryptoSystemVLD0 { + pub fn new(crypto: Crypto) -> Self { + Self { crypto } + } +} + +impl CryptoSystem for CryptoSystemVLD0 { + // Accessors + fn kind(&self) -> CryptoKind { + CRYPTO_KIND_VLD0 + } + + fn crypto(&self) -> Crypto { + self.crypto.clone() + } + + // Cached Operations + fn cached_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result { + self.crypto + .cached_dh_internal::(self, key, secret) + } + + // Generation + fn random_nonce(&self) -> Nonce { + let mut nonce = [0u8; 24]; + random_bytes(&mut nonce).unwrap(); + Nonce::new(nonce) + } + fn random_shared_secret(&self) -> SharedSecret { + let mut s = [0u8; 32]; + random_bytes(&mut s).unwrap(); + SharedSecret::new(s) + } + fn compute_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result { + let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?; + let pk_xd = ed25519_to_x25519_pk(&pk_ed)?; + let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?; + let sk_xd = ed25519_to_x25519_sk(&sk_ed)?; + Ok(SharedSecret::new(sk_xd.diffie_hellman(&pk_xd).to_bytes())) + } + fn generate_keypair(&self) -> KeyPair { + vld0_generate_keypair() + } + fn generate_hash(&self, data: &[u8]) -> PublicKey { + PublicKey::new(*blake3::hash(data).as_bytes()) + } + fn generate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + ) -> Result { + let mut hasher = blake3::Hasher::new(); + std::io::copy(reader, &mut 
hasher).map_err(VeilidAPIError::generic)?; + Ok(PublicKey::new(*hasher.finalize().as_bytes())) + } + + // Validation + fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool { + let data = vec![0u8; 512]; + let sig = match self.sign(dht_key, dht_key_secret, &data) { + Ok(s) => s, + Err(_) => { + return false; + } + }; + self.verify(dht_key, &data, &sig).is_ok() + } + fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool { + let bytes = *blake3::hash(data).as_bytes(); + + bytes == dht_key.bytes + } + fn validate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + dht_key: &PublicKey, + ) -> Result { + let mut hasher = blake3::Hasher::new(); + std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; + let bytes = *hasher.finalize().as_bytes(); + Ok(bytes == dht_key.bytes) + } + // Distance Metric + fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance { + let mut bytes = [0u8; PUBLIC_KEY_LENGTH]; + + for (n, byte) in bytes.iter_mut().enumerate() { + *byte = key1.bytes[n] ^ key2.bytes[n]; + } + + PublicKeyDistance::new(bytes) + } + + // Authentication + fn sign( + &self, + dht_key: &PublicKey, + dht_key_secret: &SecretKey, + data: &[u8], + ) -> Result { + let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] = + [0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH]; + + kpb[..SECRET_KEY_LENGTH].copy_from_slice(&dht_key_secret.bytes); + kpb[SECRET_KEY_LENGTH..].copy_from_slice(&dht_key.bytes); + let keypair = ed::Keypair::from_bytes(&kpb) + .map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?; + + let mut dig = Blake3Digest512::new(); + dig.update(data); + + let sig = keypair + .sign_prehashed(dig, None) + .map_err(VeilidAPIError::internal)?; + + let dht_sig = Signature::new(sig.to_bytes()); + Ok(dht_sig) + } + fn verify( + &self, + dht_key: &PublicKey, + data: &[u8], + signature: &Signature, + ) -> Result<(), VeilidAPIError> { + let pk = ed::PublicKey::from_bytes(&dht_key.bytes) 
+ .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; + let sig = ed::Signature::from_bytes(&signature.bytes) + .map_err(|e| VeilidAPIError::parse_error("Signature is invalid", e))?; + + let mut dig = Blake3Digest512::new(); + dig.update(data); + + pk.verify_prehashed(dig, None, &sig) + .map_err(|e| VeilidAPIError::parse_error("Verification failed", e))?; + Ok(()) + } + + // AEAD Encrypt/Decrypt + fn aead_overhead(&self) -> usize { + AEAD_OVERHEAD + } + fn decrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError> { + let key = ch::Key::from(shared_secret.bytes); + let xnonce = ch::XNonce::from(nonce.bytes); + let aead = ch::XChaCha20Poly1305::new(&key); + aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic) + } + + fn decrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError> { + let mut out = body.to_vec(); + self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic)?; + Ok(out) + } + + fn encrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError> { + let key = ch::Key::from(shared_secret.bytes); + let xnonce = ch::XNonce::from(nonce.bytes); + let aead = ch::XChaCha20Poly1305::new(&key); + + aead.encrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic) + } + + fn encrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError> { + let mut out = body.to_vec(); + self.encrypt_in_place_aead(&mut out, nonce, shared_secret, 
associated_data) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic)?; + Ok(out) + } + + // NoAuth Encrypt/Decrypt + fn crypt_in_place_no_auth( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + ) { + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + cipher.apply_keystream(body); + } + + fn crypt_b2b_no_auth( + &self, + in_buf: &[u8], + out_buf: &mut [u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) { + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + cipher.apply_keystream_b2b(in_buf, out_buf).unwrap(); + } + + fn crypt_no_auth_aligned_8( + &self, + in_buf: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec { + let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; + self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret); + out_buf + } + + fn crypt_no_auth_unaligned( + &self, + in_buf: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec { + let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) }; + self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret); + out_buf + } +} diff --git a/veilid-core/src/crypto/receipt.rs b/veilid-core/src/crypto/receipt.rs index d59e5f36..50496d04 100644 --- a/veilid-core/src/crypto/receipt.rs +++ b/veilid-core/src/crypto/receipt.rs @@ -3,60 +3,56 @@ use super::*; use crate::*; use core::convert::TryInto; -use data_encoding::BASE64URL_NOPAD; -// #[repr(C, packed)] -// struct ReceiptHeader { -// // Size is at least 8 bytes. 
Depending on the version specified, the size may vary and should be case to the appropriate struct -// magic: [u8; 4], // 0x00: 0x52 0x43 0x50 0x54 ("RCPT") -// version: u8, // 0x04: 0 = ReceiptV0 -// reserved: u8, // 0x05: Reserved for future use -// } +/// Out-of-band receipts are versioned along with envelope versions +/// +/// These are the formats for the on-the-wire serialization performed by this module +/// +/// #[repr(C, packed)] +/// struct ReceiptHeader { +/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and should be case to the appropriate struct +/// magic: [u8; 3], // 0x00: 0x52 0x43 0x50 ("RCP") +/// version: u8, // 0x03: 0 = ReceiptV0 +/// } +/// +/// #[repr(C, packed)] +/// struct ReceiptV0 { +/// // Size is 66 bytes without extra data and signature, 130 with signature +/// magic: [u8; 3], // 0x00: 0x52 0x43 0x50 ("RCP") +/// version: u8, // 0x03: 0 = ReceiptV0 +/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code +/// size: u16, // 0x08: Total size of the receipt including the extra data and the signature. Maximum size is 1380 bytes. +/// nonce: [u8; 24], // 0x0A: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required. +/// sender_id: [u8; 32], // 0x22: Node ID of the message source, which is the public key of the sender +/// extra_data: [u8; ??], // 0x42: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1250 bytes) +/// signature: [u8; 64], // 0x?? (end-0x40): Signature of the entire receipt including header and extra data is appended to the packet +/// } -// #[repr(C, packed)] -// struct ReceiptV0 { -// // Size is 106 bytes. -// magic: [u8; 4], // 0x00: 0x52 0x43 0x50 0x54 ("RCPT") -// version: u8, // 0x04: 0 = ReceiptV0 -// reserved: u8, // 0x05: Reserved for future use -// size: u16, // 0x06: Total size of the receipt including the extra data and the signature. 
Maximum size is 1152 bytes. -// nonce: [u8; 24], // 0x08: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required. -// sender_id: [u8; 32], // 0x20: Node ID of the message source, which is the Ed25519 public key of the sender -// extra_data: [u8; ??], // 0x40: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1024 bytes) -// signature: [u8; 64], // 0x?? (end-0x40): Ed25519 signature of the entire receipt including header and extra data is appended to the packet -// } - -pub const MAX_RECEIPT_SIZE: usize = 1152; -pub const MAX_EXTRA_DATA_SIZE: usize = 1024; -pub const MIN_RECEIPT_SIZE: usize = 128; -pub const RECEIPT_MAGIC: &[u8; 4] = b"RCPT"; -pub type ReceiptNonce = [u8; 24]; - -pub trait Encodable { - fn encode(&self) -> String; -} - -impl Encodable for ReceiptNonce { - fn encode(&self) -> String { - BASE64URL_NOPAD.encode(self) - } -} +pub const MAX_RECEIPT_SIZE: usize = 1380; +pub const MAX_EXTRA_DATA_SIZE: usize = MAX_RECEIPT_SIZE - MIN_RECEIPT_SIZE; // 1250 +pub const MIN_RECEIPT_SIZE: usize = 130; +pub const RECEIPT_MAGIC: &[u8; 3] = b"RCP"; #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct Receipt { version: u8, - nonce: ReceiptNonce, - sender_id: DHTKey, + crypto_kind: CryptoKind, + nonce: Nonce, + sender_id: PublicKey, extra_data: Vec, } impl Receipt { pub fn try_new>( version: u8, - nonce: ReceiptNonce, - sender_id: DHTKey, + crypto_kind: CryptoKind, + nonce: Nonce, + sender_id: PublicKey, extra_data: D, ) -> Result { + assert!(VALID_ENVELOPE_VERSIONS.contains(&version)); + assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind)); + if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE { apibail_parse_error!( "extra data too large for receipt", @@ -65,20 +61,21 @@ impl Receipt { } Ok(Self { version, + crypto_kind, nonce, sender_id, extra_data: Vec::from(extra_data.as_ref()), }) } - pub fn from_signed_data(data: &[u8]) -> Result { + pub fn 
from_signed_data(crypto: Crypto, data: &[u8]) -> Result { // Ensure we are at least the length of the envelope if data.len() < MIN_RECEIPT_SIZE { apibail_parse_error!("receipt too small", data.len()); } // Verify magic number - let magic: [u8; 4] = data[0x00..0x04] + let magic: [u8; 3] = data[0x00..0x03] .try_into() .map_err(VeilidAPIError::internal)?; if magic != *RECEIPT_MAGIC { @@ -86,14 +83,24 @@ impl Receipt { } // Check version - let version = data[0x04]; - if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION { - apibail_parse_error!("unsupported cryptography version", version); + let version = data[0x03]; + if !VALID_ENVELOPE_VERSIONS.contains(&version) { + apibail_parse_error!("unsupported envelope version", version); } + // Check crypto kind + let crypto_kind = FourCC( + data[0x04..0x08] + .try_into() + .map_err(VeilidAPIError::internal)?, + ); + let Some(vcrypto) = crypto.get(crypto_kind) else { + apibail_parse_error!("unsupported crypto kind", crypto_kind); + }; + // Get size and ensure it matches the size of the envelope and is less than the maximum message size let size: u16 = u16::from_le_bytes( - data[0x06..0x08] + data[0x08..0x0A] .try_into() .map_err(VeilidAPIError::internal)?, ); @@ -108,64 +115,80 @@ impl Receipt { } // Get sender id - let sender_id = DHTKey::new( - data[0x20..0x40] + let sender_id = PublicKey::new( + data[0x22..0x42] .try_into() .map_err(VeilidAPIError::internal)?, ); // Get signature - let signature = DHTSignature::new( + let signature = Signature::new( data[(data.len() - 64)..] 
.try_into() .map_err(VeilidAPIError::internal)?, ); // Validate signature - verify(&sender_id, &data[0..(data.len() - 64)], &signature) + vcrypto + .verify(&sender_id, &data[0..(data.len() - 64)], &signature) .map_err(VeilidAPIError::generic)?; // Get nonce - let nonce: ReceiptNonce = data[0x08..0x20] - .try_into() - .map_err(VeilidAPIError::internal)?; + let nonce: Nonce = Nonce::new( + data[0x0A..0x22] + .try_into() + .map_err(VeilidAPIError::internal)?, + ); // Get extra data and signature - let extra_data: Vec = Vec::from(&data[0x40..(data.len() - 64)]); + let extra_data: Vec = Vec::from(&data[0x42..(data.len() - 64)]); // Return receipt Ok(Self { version, + crypto_kind, nonce, sender_id, extra_data, }) } - pub fn to_signed_data(&self, secret: &DHTKeySecret) -> Result, VeilidAPIError> { + pub fn to_signed_data( + &self, + crypto: Crypto, + secret: &SecretKey, + ) -> Result, VeilidAPIError> { // Ensure extra data isn't too long let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE; if receipt_size > MAX_RECEIPT_SIZE { apibail_parse_error!("receipt too large", receipt_size); } + // Get crypto version + let vcrypto = crypto + .get(self.crypto_kind) + .expect("need to ensure only valid crypto kinds here"); + let mut data: Vec = vec![0u8; receipt_size]; // Write magic - data[0x00..0x04].copy_from_slice(RECEIPT_MAGIC); + data[0x00..0x03].copy_from_slice(RECEIPT_MAGIC); // Write version - data[0x04] = self.version; + data[0x03] = self.version; + // Write crypto kind + data[0x04..0x08].copy_from_slice(&self.crypto_kind.0); // Write size - data[0x06..0x08].copy_from_slice(&(receipt_size as u16).to_le_bytes()); + data[0x08..0x0A].copy_from_slice(&(receipt_size as u16).to_le_bytes()); // Write nonce - data[0x08..0x20].copy_from_slice(&self.nonce); + data[0x0A..0x22].copy_from_slice(&self.nonce.bytes); // Write sender node id - data[0x20..0x40].copy_from_slice(&self.sender_id.bytes); + data[0x22..0x42].copy_from_slice(&self.sender_id.bytes); // Write extra 
data if !self.extra_data.is_empty() { - data[0x40..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice()); + data[0x42..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice()); } // Sign the receipt - let signature = sign(&self.sender_id, secret, &data[0..(receipt_size - 64)]) + let signature = vcrypto + .sign(&self.sender_id, secret, &data[0..(receipt_size - 64)]) .map_err(VeilidAPIError::generic)?; // Append the signature data[(receipt_size - 64)..].copy_from_slice(&signature.bytes); @@ -177,11 +200,15 @@ impl Receipt { self.version } - pub fn get_nonce(&self) -> ReceiptNonce { + pub fn get_crypto_kind(&self) -> CryptoKind { + self.crypto_kind + } + + pub fn get_nonce(&self) -> Nonce { self.nonce } - pub fn get_sender_id(&self) -> DHTKey { + pub fn get_sender_id(&self) -> PublicKey { self.sender_id } pub fn get_extra_data(&self) -> &[u8] { diff --git a/veilid-core/src/crypto/tests/mod.rs b/veilid-core/src/crypto/tests/mod.rs index 3bd8e60f..04a29d9f 100644 --- a/veilid-core/src/crypto/tests/mod.rs +++ b/veilid-core/src/crypto/tests/mod.rs @@ -1,5 +1,21 @@ pub mod test_crypto; -pub mod test_dht_key; pub mod test_envelope_receipt; +pub mod test_types; use super::*; +use crate::tests::common::test_veilid_config::*; + +async fn crypto_tests_startup() -> VeilidAPI { + trace!("crypto_tests: starting"); + let (update_callback, config_callback) = setup_veilid_core(); + let api = api_startup(update_callback, config_callback) + .await + .expect("startup failed"); + api +} + +async fn crypto_tests_shutdown(api: VeilidAPI) { + trace!("crypto_tests: shutting down"); + api.shutdown().await; + trace!("crypto_tests: finished"); +} diff --git a/veilid-core/src/crypto/tests/test_crypto.rs b/veilid-core/src/crypto/tests/test_crypto.rs index 5710c8d9..3f236b2e 100644 --- a/veilid-core/src/crypto/tests/test_crypto.rs +++ b/veilid-core/src/crypto/tests/test_crypto.rs @@ -1,37 +1,21 @@ use super::*; -use crate::tests::common::test_veilid_config::*; static 
LOREM_IPSUM:&[u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. "; -async fn startup() -> VeilidAPI { - trace!("test_table_store: starting"); - let (update_callback, config_callback) = setup_veilid_core(); - let api = api_startup(update_callback, config_callback) - .await - .expect("startup failed"); - api -} - -async fn shutdown(api: VeilidAPI) { - trace!("test_table_store: shutting down"); - api.shutdown().await; - trace!("test_table_store: finished"); -} - -pub async fn test_aead() { +pub async fn test_aead(vcrypto: CryptoSystemVersion) { trace!("test_aead"); - let n1 = Crypto::get_random_nonce(); + let n1 = vcrypto.random_nonce(); let n2 = loop { - let n = Crypto::get_random_nonce(); + let n = vcrypto.random_nonce(); if n != n1 { break n; } }; - let ss1 = Crypto::get_random_secret(); + let ss1 = vcrypto.random_shared_secret(); let ss2 = loop { - let ss = Crypto::get_random_secret(); + let ss = vcrypto.random_shared_secret(); if ss != ss1 { break ss; } @@ -41,67 +25,77 @@ pub async fn test_aead() { let body2 = body.clone(); let size_before_encrypt = body.len(); assert!( - Crypto::encrypt_in_place_aead(&mut body, &n1, &ss1, None).is_ok(), + vcrypto + .encrypt_in_place_aead(&mut body, &n1, &ss1, None) + .is_ok(), "encrypt should succeed" ); let size_after_encrypt = body.len(); assert!( - size_after_encrypt - size_before_encrypt == AEAD_OVERHEAD, + size_after_encrypt - size_before_encrypt == vcrypto.aead_overhead(), "overhead should match" ); let mut body3 = body.clone(); let mut body4 = body.clone(); let mut body5 = body.clone(); assert!( - 
Crypto::decrypt_in_place_aead(&mut body, &n1, &ss1, None).is_ok(), + vcrypto + .decrypt_in_place_aead(&mut body, &n1, &ss1, None) + .is_ok(), "decrypt should succeed" ); assert_eq!(body, body2, "results should be the same"); assert!( - Crypto::decrypt_in_place_aead(&mut body3, &n2, &ss1, None).is_err(), + vcrypto + .decrypt_in_place_aead(&mut body3, &n2, &ss1, None) + .is_err(), "decrypt with wrong nonce should fail" ); assert_ne!(body3, body, "failure changes data"); assert!( - Crypto::decrypt_in_place_aead(&mut body4, &n1, &ss2, None).is_err(), + vcrypto + .decrypt_in_place_aead(&mut body4, &n1, &ss2, None) + .is_err(), "decrypt with wrong secret should fail" ); assert_ne!(body4, body, "failure changes data"); assert!( - Crypto::decrypt_in_place_aead(&mut body5, &n1, &ss2, Some(b"foobar")).is_err(), + vcrypto + .decrypt_in_place_aead(&mut body5, &n1, &ss2, Some(b"foobar")) + .is_err(), "decrypt with wrong associated data should fail" ); assert_ne!(body5, body, "failure changes data"); assert!( - Crypto::decrypt_aead(LOREM_IPSUM, &n1, &ss1, None).is_err(), + vcrypto.decrypt_aead(LOREM_IPSUM, &n1, &ss1, None).is_err(), "should fail authentication" ); - let body5 = Crypto::encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); - let body6 = Crypto::decrypt_aead(&body5, &n1, &ss1, None).unwrap(); - let body7 = Crypto::encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); + let body5 = vcrypto.encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); + let body6 = vcrypto.decrypt_aead(&body5, &n1, &ss1, None).unwrap(); + let body7 = vcrypto.encrypt_aead(LOREM_IPSUM, &n1, &ss1, None).unwrap(); assert_eq!(body6, LOREM_IPSUM); assert_eq!(body5, body7); } -pub async fn test_no_auth() { +pub async fn test_no_auth(vcrypto: CryptoSystemVersion) { trace!("test_no_auth"); - let n1 = Crypto::get_random_nonce(); + let n1 = vcrypto.random_nonce(); let n2 = loop { - let n = Crypto::get_random_nonce(); + let n = vcrypto.random_nonce(); if n != n1 { break n; } }; - let ss1 = 
Crypto::get_random_secret(); + let ss1 = vcrypto.random_shared_secret(); let ss2 = loop { - let ss = Crypto::get_random_secret(); + let ss = vcrypto.random_shared_secret(); if ss != ss1 { break ss; } @@ -110,7 +104,7 @@ pub async fn test_no_auth() { let mut body = LOREM_IPSUM.to_vec(); let body2 = body.clone(); let size_before_encrypt = body.len(); - Crypto::crypt_in_place_no_auth(&mut body, &n1, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1); let size_after_encrypt = body.len(); assert_eq!( @@ -120,41 +114,47 @@ pub async fn test_no_auth() { let mut body3 = body.clone(); let mut body4 = body.clone(); - Crypto::crypt_in_place_no_auth(&mut body, &n1, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1); assert_eq!(body, body2, "result after decrypt should be the same"); - Crypto::crypt_in_place_no_auth(&mut body3, &n2, &ss1); + vcrypto.crypt_in_place_no_auth(&mut body3, &n2, &ss1); assert_ne!(body3, body, "decrypt should not be equal with wrong nonce"); - Crypto::crypt_in_place_no_auth(&mut body4, &n1, &ss2); + vcrypto.crypt_in_place_no_auth(&mut body4, &n1, &ss2); assert_ne!(body4, body, "decrypt should not be equal with wrong secret"); - let body5 = Crypto::crypt_no_auth(LOREM_IPSUM, &n1, &ss1); - let body6 = Crypto::crypt_no_auth(&body5, &n1, &ss1); - let body7 = Crypto::crypt_no_auth(LOREM_IPSUM, &n1, &ss1); + let body5 = vcrypto.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1); + let body6 = vcrypto.crypt_no_auth_unaligned(&body5, &n1, &ss1); + let body7 = vcrypto.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1); + assert_eq!(body6, LOREM_IPSUM); + assert_eq!(body5, body7); + + let body5 = vcrypto.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1); + let body6 = vcrypto.crypt_no_auth_aligned_8(&body5, &n1, &ss1); + let body7 = vcrypto.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1); assert_eq!(body6, LOREM_IPSUM); assert_eq!(body5, body7); } -pub async fn test_dh(crypto: Crypto) { +pub async fn test_dh(vcrypto: CryptoSystemVersion) { 
trace!("test_dh"); - let (dht_key, dht_key_secret) = key::generate_secret(); - let (dht_key2, dht_key_secret2) = key::generate_secret(); + let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); - let r1 = Crypto::compute_dh(&dht_key, &dht_key_secret2).unwrap(); - let r2 = Crypto::compute_dh(&dht_key2, &dht_key_secret).unwrap(); - let r3 = Crypto::compute_dh(&dht_key, &dht_key_secret2).unwrap(); - let r4 = Crypto::compute_dh(&dht_key2, &dht_key_secret).unwrap(); + let r1 = vcrypto.compute_dh(&dht_key, &dht_key_secret2).unwrap(); + let r2 = vcrypto.compute_dh(&dht_key2, &dht_key_secret).unwrap(); + let r3 = vcrypto.compute_dh(&dht_key, &dht_key_secret2).unwrap(); + let r4 = vcrypto.compute_dh(&dht_key2, &dht_key_secret).unwrap(); assert_eq!(r1, r2); assert_eq!(r3, r4); assert_eq!(r2, r3); trace!("dh: {:?}", r1); // test cache - let r5 = crypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); - let r6 = crypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); - let r7 = crypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); - let r8 = crypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); + let r5 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); + let r6 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); + let r7 = vcrypto.cached_dh(&dht_key, &dht_key_secret2).unwrap(); + let r8 = vcrypto.cached_dh(&dht_key2, &dht_key_secret).unwrap(); assert_eq!(r1, r5); assert_eq!(r2, r6); assert_eq!(r3, r7); @@ -163,11 +163,17 @@ pub async fn test_dh(crypto: Crypto) { } pub async fn test_all() { - let api = startup().await; + let api = crypto_tests_startup().await; let crypto = api.crypto().unwrap(); - test_aead().await; - test_no_auth().await; - test_dh(crypto).await; - shutdown(api.clone()).await; + + // Test versions + for v in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(v).unwrap(); + test_aead(vcrypto.clone()).await; + test_no_auth(vcrypto.clone()).await; + 
test_dh(vcrypto).await; + } + + crypto_tests_shutdown(api.clone()).await; assert!(api.is_shutdown()); } diff --git a/veilid-core/src/crypto/tests/test_dht_key.rs b/veilid-core/src/crypto/tests/test_dht_key.rs deleted file mode 100644 index 26526d7b..00000000 --- a/veilid-core/src/crypto/tests/test_dht_key.rs +++ /dev/null @@ -1,304 +0,0 @@ -#![allow(clippy::bool_assert_comparison)] - -use super::*; -use core::convert::TryFrom; - -static LOREM_IPSUM:&str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. "; -static CHEEZBURGER: &str = "I can has cheezburger"; -static EMPTY_KEY: [u8; key::DHT_KEY_LENGTH] = [0u8; key::DHT_KEY_LENGTH]; -static EMPTY_KEY_SECRET: [u8; key::DHT_KEY_SECRET_LENGTH] = [0u8; key::DHT_KEY_SECRET_LENGTH]; - -pub async fn test_generate_secret() { - // Verify keys generate - let (dht_key, dht_key_secret) = key::generate_secret(); - let (dht_key2, dht_key_secret2) = key::generate_secret(); - - // Verify byte patterns are different between public and secret - assert_ne!(dht_key.bytes, dht_key_secret.bytes); - assert_ne!(dht_key2.bytes, dht_key_secret2.bytes); - - // Verify the keys and secrets are different across keypairs - assert_ne!(dht_key, dht_key2); - assert_ne!(dht_key_secret, dht_key_secret2); -} - -pub async fn test_sign_and_verify() { - // Make two keys - let (dht_key, dht_key_secret) = key::generate_secret(); - let (dht_key2, dht_key_secret2) = key::generate_secret(); - // Sign the same message twice - let dht_sig = key::sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()).unwrap(); - trace!("dht_sig: {:?}", dht_sig); - let dht_sig_b = 
key::sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()).unwrap(); - // Sign a second message - let dht_sig_c = key::sign(&dht_key, &dht_key_secret, CHEEZBURGER.as_bytes()).unwrap(); - trace!("dht_sig_c: {:?}", dht_sig_c); - // Verify they are the same signature - assert_eq!(dht_sig, dht_sig_b); - // Sign the same message with a different key - let dht_sig2 = key::sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()).unwrap(); - // Verify a different key gives a different signature - assert_ne!(dht_sig2, dht_sig_b); - - // Try using the wrong secret to sign - let a1 = key::sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()).unwrap(); - let a2 = key::sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()).unwrap(); - let b1 = key::sign(&dht_key, &dht_key_secret2, LOREM_IPSUM.as_bytes()).unwrap(); - let b2 = key::sign(&dht_key2, &dht_key_secret, LOREM_IPSUM.as_bytes()).unwrap(); - assert_ne!(a1, b1); - assert_ne!(a2, b2); - assert_ne!(a1, b2); - assert_ne!(a2, b1); - assert_ne!(a1, a2); - assert_ne!(b1, b2); - assert_ne!(a1, b2); - assert_ne!(b1, a2); - - assert_eq!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1), Ok(())); - assert_eq!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2), Ok(())); - assert!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &b1).is_err()); - assert!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &b2).is_err()); - - // Try verifications that should work - assert_eq!( - key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig), - Ok(()) - ); - assert_eq!( - key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig_b), - Ok(()) - ); - assert_eq!( - key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig2), - Ok(()) - ); - assert_eq!( - key::verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig_c), - Ok(()) - ); - // Try verifications that shouldn't work - assert!(key::verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig).is_err()); - assert!(key::verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2).is_err()); - 
assert!(key::verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c).is_err()); - assert!(key::verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig).is_err()); -} - -pub async fn test_key_conversions() { - // Test default key - let (dht_key, dht_key_secret) = (key::DHTKey::default(), key::DHTKeySecret::default()); - assert_eq!(dht_key.bytes, EMPTY_KEY); - assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET); - let dht_key_string = String::from(&dht_key); - trace!("dht_key_string: {:?}", dht_key_string); - let dht_key_string2 = String::from(&dht_key); - trace!("dht_key_string2: {:?}", dht_key_string2); - assert_eq!(dht_key_string, dht_key_string2); - - let dht_key_secret_string = String::from(&dht_key_secret); - trace!("dht_key_secret_string: {:?}", dht_key_secret_string); - assert_eq!(dht_key_secret_string, dht_key_string); - - // Make different keys - let (dht_key2, dht_key_secret2) = key::generate_secret(); - trace!("dht_key2: {:?}", dht_key2); - trace!("dht_key_secret2: {:?}", dht_key_secret2); - let (dht_key3, _dht_key_secret3) = key::generate_secret(); - trace!("dht_key3: {:?}", dht_key3); - trace!("_dht_key_secret3: {:?}", _dht_key_secret3); - - let dht_key2_string = String::from(&dht_key2); - let dht_key2_string2 = String::from(&dht_key2); - let dht_key3_string = String::from(&dht_key3); - assert_eq!(dht_key2_string, dht_key2_string2); - assert_ne!(dht_key3_string, dht_key2_string); - let dht_key_secret2_string = String::from(&dht_key_secret2); - assert_ne!(dht_key_secret2_string, dht_key_secret_string); - assert_ne!(dht_key_secret2_string, dht_key2_string); - - // Assert they convert back correctly - let dht_key_back = key::DHTKey::try_from(dht_key_string.as_str()).unwrap(); - let dht_key_back2 = key::DHTKey::try_from(dht_key_string2.as_str()).unwrap(); - assert_eq!(dht_key_back, dht_key_back2); - assert_eq!(dht_key_back, dht_key); - assert_eq!(dht_key_back2, dht_key); - - let dht_key_secret_back = 
key::DHTKeySecret::try_from(dht_key_secret_string.as_str()).unwrap(); - assert_eq!(dht_key_secret_back, dht_key_secret); - - let dht_key2_back = key::DHTKey::try_from(dht_key2_string.as_str()).unwrap(); - let dht_key2_back2 = key::DHTKey::try_from(dht_key2_string2.as_str()).unwrap(); - assert_eq!(dht_key2_back, dht_key2_back2); - assert_eq!(dht_key2_back, dht_key2); - assert_eq!(dht_key2_back2, dht_key2); - - let dht_key_secret2_back = - key::DHTKeySecret::try_from(dht_key_secret2_string.as_str()).unwrap(); - assert_eq!(dht_key_secret2_back, dht_key_secret2); - - // Assert string roundtrip - assert_eq!(String::from(&dht_key2_back), dht_key2_string); - // These conversions should fail - assert!(key::DHTKey::try_from("whatever").is_err()); - assert!(key::DHTKeySecret::try_from("whatever").is_err()); - assert!(key::DHTKey::try_from("").is_err()); - assert!(key::DHTKeySecret::try_from("").is_err()); - assert!(key::DHTKey::try_from(" ").is_err()); - assert!(key::DHTKeySecret::try_from(" ").is_err()); - assert!(key::DHTKey::try_from( - "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq" - ) - .is_err()); - assert!(key::DHTKeySecret::try_from( - "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq" - ) - .is_err()); -} - -pub async fn test_encode_decode() { - let dht_key = key::DHTKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); - let dht_key_secret = - key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); - let dht_key_b = key::DHTKey::new(EMPTY_KEY); - let dht_key_secret_b = key::DHTKeySecret::new(EMPTY_KEY_SECRET); - assert_eq!(dht_key, dht_key_b); - assert_eq!(dht_key_secret, dht_key_secret_b); - - let (dht_key2, dht_key_secret2) = key::generate_secret(); - - let e1 = dht_key.encode(); - trace!("e1: {:?}", e1); - assert_eq!(e1, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".to_owned()); - let e1s = dht_key_secret.encode(); - trace!("e1s: {:?}", e1s); - let e2 = dht_key2.encode(); - 
trace!("e2: {:?}", e2); - let e2s = dht_key_secret2.encode(); - trace!("e2s: {:?}", e2s); - - let d1 = key::DHTKey::try_decode(e1.as_str()).unwrap(); - trace!("d1: {:?}", d1); - assert_eq!(dht_key, d1); - - let d1s = key::DHTKeySecret::try_decode(e1s.as_str()).unwrap(); - trace!("d1s: {:?}", d1s); - assert_eq!(dht_key_secret, d1s); - - let d2 = key::DHTKey::try_decode(e2.as_str()).unwrap(); - trace!("d2: {:?}", d2); - assert_eq!(dht_key2, d2); - - let d2s = key::DHTKeySecret::try_decode(e2s.as_str()).unwrap(); - trace!("d2s: {:?}", d2s); - assert_eq!(dht_key_secret2, d2s); - - // Failures - let f1 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"); - assert!(f1.is_err()); - let f2 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&"); - assert!(f2.is_err()); -} - -async fn test_hash() { - let mut s = BTreeSet::::new(); - - let k1 = key::generate_hash("abc".as_bytes()); - let k2 = key::generate_hash("abcd".as_bytes()); - let k3 = key::generate_hash("".as_bytes()); - let k4 = key::generate_hash(" ".as_bytes()); - let k5 = key::generate_hash(LOREM_IPSUM.as_bytes()); - let k6 = key::generate_hash(CHEEZBURGER.as_bytes()); - - s.insert(k1); - s.insert(k2); - s.insert(k3); - s.insert(k4); - s.insert(k5); - s.insert(k6); - assert_eq!(s.len(), 6); - - let v1 = key::generate_hash("abc".as_bytes()); - let v2 = key::generate_hash("abcd".as_bytes()); - let v3 = key::generate_hash("".as_bytes()); - let v4 = key::generate_hash(" ".as_bytes()); - let v5 = key::generate_hash(LOREM_IPSUM.as_bytes()); - let v6 = key::generate_hash(CHEEZBURGER.as_bytes()); - - assert_eq!(k1, v1); - assert_eq!(k2, v2); - assert_eq!(k3, v3); - assert_eq!(k4, v4); - assert_eq!(k5, v5); - assert_eq!(k6, v6); - - key::validate_hash("abc".as_bytes(), &v1); - key::validate_hash("abcd".as_bytes(), &v2); - key::validate_hash("".as_bytes(), &v3); - key::validate_hash(" ".as_bytes(), &v4); - key::validate_hash(LOREM_IPSUM.as_bytes(), &v5); - 
key::validate_hash(CHEEZBURGER.as_bytes(), &v6); -} - -async fn test_operations() { - let k1 = key::generate_hash(LOREM_IPSUM.as_bytes()); - let k2 = key::generate_hash(CHEEZBURGER.as_bytes()); - let k3 = key::generate_hash("abc".as_bytes()); - - // Get distance - let d1 = key::distance(&k1, &k2); - let d2 = key::distance(&k2, &k1); - let d3 = key::distance(&k1, &k3); - let d4 = key::distance(&k2, &k3); - - trace!("d1={:?}", d1); - trace!("d2={:?}", d2); - trace!("d3={:?}", d3); - trace!("d4={:?}", d4); - - // Verify commutativity - assert_eq!(d1, d2); - assert!(d1 <= d2); - assert!(d1 >= d2); - assert!(d1 >= d2); - assert!(d1 <= d2); - assert_eq!(d2, d1); - assert!(d2 <= d1); - assert!(d2 >= d1); - assert!(d2 >= d1); - assert!(d2 <= d1); - - // Verify nibbles - assert_eq!(d1.nibble(0), 0x9u8); - assert_eq!(d1.nibble(1), 0x4u8); - assert_eq!(d1.nibble(2), 0x3u8); - assert_eq!(d1.nibble(3), 0x6u8); - assert_eq!(d1.nibble(63), 0x6u8); - - assert_eq!(d1.first_nonzero_nibble(), Some((0, 0x9u8))); - assert_eq!(d2.first_nonzero_nibble(), Some((0, 0x9u8))); - assert_eq!(d3.first_nonzero_nibble(), Some((1, 0x4u8))); - assert_eq!(d4.first_nonzero_nibble(), Some((0, 0x9u8))); - - // Verify bits - assert_eq!(d1.bit(0), true); - assert_eq!(d1.bit(1), false); - assert_eq!(d1.bit(7), false); - assert_eq!(d1.bit(8), false); - assert_eq!(d1.bit(14), true); - assert_eq!(d1.bit(15), false); - assert_eq!(d1.bit(254), true); - assert_eq!(d1.bit(255), false); - - assert_eq!(d1.first_nonzero_bit(), Some(0)); - assert_eq!(d2.first_nonzero_bit(), Some(0)); - assert_eq!(d3.first_nonzero_bit(), Some(5)); - assert_eq!(d4.first_nonzero_bit(), Some(0)); -} - -pub async fn test_all() { - test_generate_secret().await; - test_sign_and_verify().await; - test_key_conversions().await; - test_encode_decode().await; - test_hash().await; - test_operations().await; -} diff --git a/veilid-core/src/crypto/tests/test_envelope_receipt.rs b/veilid-core/src/crypto/tests/test_envelope_receipt.rs index 
a5b2e45e..1e2698fc 100644 --- a/veilid-core/src/crypto/tests/test_envelope_receipt.rs +++ b/veilid-core/src/crypto/tests/test_envelope_receipt.rs @@ -1,37 +1,39 @@ use super::*; -use crate::tests::common::test_veilid_config::*; -pub async fn test_envelope_round_trip() { +pub async fn test_envelope_round_trip( + envelope_version: EnvelopeVersion, + vcrypto: CryptoSystemVersion, +) { info!("--- test envelope round trip ---"); - let (update_callback, config_callback) = setup_veilid_core(); - let api = api_startup(update_callback, config_callback) - .await - .expect("startup failed"); - - // Get crypto - let crypto = api.crypto().unwrap(); // Create envelope let ts = Timestamp::from(0x12345678ABCDEF69u64); - let nonce = Crypto::get_random_nonce(); - let (sender_id, sender_secret) = generate_secret(); - let (recipient_id, recipient_secret) = generate_secret(); - let envelope = Envelope::new(0, ts, nonce, sender_id, recipient_id); + let nonce = vcrypto.random_nonce(); + let (sender_id, sender_secret) = vcrypto.generate_keypair().into_split(); + let (recipient_id, recipient_secret) = vcrypto.generate_keypair().into_split(); + let envelope = Envelope::new( + envelope_version, + vcrypto.kind(), + ts, + nonce, + sender_id, + recipient_id, + ); // Create arbitrary body let body = b"This is an arbitrary body"; // Serialize to bytes let enc_data = envelope - .to_encrypted_data(crypto.clone(), body, &sender_secret) + .to_encrypted_data(vcrypto.crypto(), body, &sender_secret) .expect("failed to encrypt data"); // Deserialize from bytes - let envelope2 = - Envelope::from_signed_data(&enc_data).expect("failed to deserialize envelope from data"); + let envelope2 = Envelope::from_signed_data(vcrypto.crypto(), &enc_data) + .expect("failed to deserialize envelope from data"); let body2 = envelope2 - .decrypt_body(crypto.clone(), &enc_data, &recipient_secret) + .decrypt_body(vcrypto.crypto(), &enc_data, &recipient_secret) .expect("failed to decrypt envelope body"); // Compare envelope 
and body @@ -43,41 +45,43 @@ pub async fn test_envelope_round_trip() { let mut mod_enc_data = enc_data.clone(); mod_enc_data[enc_data_len - 1] ^= 0x80u8; assert!( - Envelope::from_signed_data(&mod_enc_data).is_err(), + Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data).is_err(), "should have failed to decode envelope with modified signature" ); let mut mod_enc_data2 = enc_data.clone(); mod_enc_data2[enc_data_len - 65] ^= 0x80u8; assert!( - Envelope::from_signed_data(&mod_enc_data2).is_err(), + Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data2).is_err(), "should have failed to decode envelope with modified data" ); - - api.shutdown().await; } -pub async fn test_receipt_round_trip() { +pub async fn test_receipt_round_trip( + envelope_version: EnvelopeVersion, + vcrypto: CryptoSystemVersion, +) { info!("--- test receipt round trip ---"); // Create arbitrary body let body = b"This is an arbitrary body"; // Create receipt - let nonce = Crypto::get_random_nonce(); - let (sender_id, sender_secret) = generate_secret(); - let receipt = Receipt::try_new(0, nonce, sender_id, body).expect("should not fail"); + let nonce = vcrypto.random_nonce(); + let (sender_id, sender_secret) = vcrypto.generate_keypair().into_split(); + let receipt = Receipt::try_new(envelope_version, vcrypto.kind(), nonce, sender_id, body) + .expect("should not fail"); // Serialize to bytes let mut enc_data = receipt - .to_signed_data(&sender_secret) + .to_signed_data(vcrypto.crypto(), &sender_secret) .expect("failed to make signed data"); // Deserialize from bytes - let receipt2 = - Receipt::from_signed_data(&enc_data).expect("failed to deserialize envelope from data"); + let receipt2 = Receipt::from_signed_data(vcrypto.crypto(), &enc_data) + .expect("failed to deserialize envelope from data"); // Should not validate even when a single bit is changed enc_data[5] = 0x01; - Receipt::from_signed_data(&enc_data) + Receipt::from_signed_data(vcrypto.crypto(), &enc_data) .expect_err("should 
have failed to decrypt using wrong secret"); // Compare receipts @@ -85,6 +89,19 @@ pub async fn test_receipt_round_trip() { } pub async fn test_all() { - test_envelope_round_trip().await; - test_receipt_round_trip().await; + let api = crypto_tests_startup().await; + let crypto = api.crypto().unwrap(); + + // Test versions + for ev in VALID_ENVELOPE_VERSIONS { + for v in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(v).unwrap(); + + test_envelope_round_trip(ev, vcrypto.clone()).await; + test_receipt_round_trip(ev, vcrypto).await; + } + } + + crypto_tests_shutdown(api.clone()).await; + assert!(api.is_shutdown()); } diff --git a/veilid-core/src/crypto/tests/test_types.rs b/veilid-core/src/crypto/tests/test_types.rs new file mode 100644 index 00000000..739d0e5f --- /dev/null +++ b/veilid-core/src/crypto/tests/test_types.rs @@ -0,0 +1,348 @@ +#![allow(clippy::bool_assert_comparison)] + +use super::*; +use core::convert::TryFrom; + +static LOREM_IPSUM:&str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 
"; +static CHEEZBURGER: &str = "I can has cheezburger"; +static EMPTY_KEY: [u8; PUBLIC_KEY_LENGTH] = [0u8; PUBLIC_KEY_LENGTH]; +static EMPTY_KEY_SECRET: [u8; SECRET_KEY_LENGTH] = [0u8; SECRET_KEY_LENGTH]; + +pub async fn test_generate_secret(vcrypto: CryptoSystemVersion) { + // Verify keys generate + let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + + // Verify byte patterns are different between public and secret + assert_ne!(dht_key.bytes, dht_key_secret.bytes); + assert_ne!(dht_key2.bytes, dht_key_secret2.bytes); + + // Verify the keys and secrets are different across keypairs + assert_ne!(dht_key, dht_key2); + assert_ne!(dht_key_secret, dht_key_secret2); +} + +pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) { + // Make two keys + let (dht_key, dht_key_secret) = vcrypto.generate_keypair().into_split(); + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + // Sign the same message twice + let dht_sig = vcrypto + .sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .unwrap(); + trace!("dht_sig: {:?}", dht_sig); + let dht_sig_b = vcrypto + .sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .unwrap(); + // Sign a second message + let dht_sig_c = vcrypto + .sign(&dht_key, &dht_key_secret, CHEEZBURGER.as_bytes()) + .unwrap(); + trace!("dht_sig_c: {:?}", dht_sig_c); + // Verify they are the same signature + assert_eq!(dht_sig, dht_sig_b); + // Sign the same message with a different key + let dht_sig2 = vcrypto + .sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .unwrap(); + // Verify a different key gives a different signature + assert_ne!(dht_sig2, dht_sig_b); + + // Try using the wrong secret to sign + let a1 = vcrypto + .sign(&dht_key, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .unwrap(); + let a2 = vcrypto + .sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .unwrap(); + let b1 = vcrypto + 
.sign(&dht_key, &dht_key_secret2, LOREM_IPSUM.as_bytes()) + .unwrap(); + let b2 = vcrypto + .sign(&dht_key2, &dht_key_secret, LOREM_IPSUM.as_bytes()) + .unwrap(); + assert_ne!(a1, b1); + assert_ne!(a2, b2); + assert_ne!(a1, b2); + assert_ne!(a2, b1); + assert_ne!(a1, a2); + assert_ne!(b1, b2); + assert_ne!(a1, b2); + assert_ne!(b1, a2); + + assert_eq!( + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1), + Ok(()) + ); + assert_eq!( + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2), + Ok(()) + ); + assert!(vcrypto + .verify(&dht_key, LOREM_IPSUM.as_bytes(), &b1) + .is_err()); + assert!(vcrypto + .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &b2) + .is_err()); + + // Try verifications that should work + assert_eq!( + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig), + Ok(()) + ); + assert_eq!( + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig_b), + Ok(()) + ); + assert_eq!( + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig2), + Ok(()) + ); + assert_eq!( + vcrypto.verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig_c), + Ok(()) + ); + // Try verifications that shouldn't work + assert!(vcrypto + .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig) + .is_err()); + assert!(vcrypto + .verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2) + .is_err()); + assert!(vcrypto + .verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c) + .is_err()); + assert!(vcrypto + .verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig) + .is_err()); +} + +pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) { + // Test default key + let (dht_key, dht_key_secret) = (PublicKey::default(), SecretKey::default()); + assert_eq!(dht_key.bytes, EMPTY_KEY); + assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET); + let dht_key_string = String::from(&dht_key); + trace!("dht_key_string: {:?}", dht_key_string); + let dht_key_string2 = String::from(&dht_key); + trace!("dht_key_string2: {:?}", dht_key_string2); + assert_eq!(dht_key_string, dht_key_string2); + + let 
dht_key_secret_string = String::from(&dht_key_secret); + trace!("dht_key_secret_string: {:?}", dht_key_secret_string); + assert_eq!(dht_key_secret_string, dht_key_string); + + // Make different keys + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + trace!("dht_key2: {:?}", dht_key2); + trace!("dht_key_secret2: {:?}", dht_key_secret2); + let (dht_key3, _dht_key_secret3) = vcrypto.generate_keypair().into_split(); + trace!("dht_key3: {:?}", dht_key3); + trace!("_dht_key_secret3: {:?}", _dht_key_secret3); + + let dht_key2_string = String::from(&dht_key2); + let dht_key2_string2 = String::from(&dht_key2); + let dht_key3_string = String::from(&dht_key3); + assert_eq!(dht_key2_string, dht_key2_string2); + assert_ne!(dht_key3_string, dht_key2_string); + let dht_key_secret2_string = String::from(&dht_key_secret2); + assert_ne!(dht_key_secret2_string, dht_key_secret_string); + assert_ne!(dht_key_secret2_string, dht_key2_string); + + // Assert they convert back correctly + let dht_key_back = PublicKey::try_from(dht_key_string.as_str()).unwrap(); + let dht_key_back2 = PublicKey::try_from(dht_key_string2.as_str()).unwrap(); + assert_eq!(dht_key_back, dht_key_back2); + assert_eq!(dht_key_back, dht_key); + assert_eq!(dht_key_back2, dht_key); + + let dht_key_secret_back = SecretKey::try_from(dht_key_secret_string.as_str()).unwrap(); + assert_eq!(dht_key_secret_back, dht_key_secret); + + let dht_key2_back = PublicKey::try_from(dht_key2_string.as_str()).unwrap(); + let dht_key2_back2 = PublicKey::try_from(dht_key2_string2.as_str()).unwrap(); + assert_eq!(dht_key2_back, dht_key2_back2); + assert_eq!(dht_key2_back, dht_key2); + assert_eq!(dht_key2_back2, dht_key2); + + let dht_key_secret2_back = SecretKey::try_from(dht_key_secret2_string.as_str()).unwrap(); + assert_eq!(dht_key_secret2_back, dht_key_secret2); + + // Assert string roundtrip + assert_eq!(String::from(&dht_key2_back), dht_key2_string); + // These conversions should fail + 
assert!(PublicKey::try_from("whatever").is_err()); + assert!(SecretKey::try_from("whatever").is_err()); + assert!(PublicKey::try_from("").is_err()); + assert!(SecretKey::try_from("").is_err()); + assert!(PublicKey::try_from(" ").is_err()); + assert!(SecretKey::try_from(" ").is_err()); + assert!(PublicKey::try_from( + "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq" + ) + .is_err()); + assert!(SecretKey::try_from( + "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq" + ) + .is_err()); +} + +pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) { + let dht_key = PublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); + let dht_key_secret = + SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); + let dht_key_b = PublicKey::new(EMPTY_KEY); + let dht_key_secret_b = SecretKey::new(EMPTY_KEY_SECRET); + assert_eq!(dht_key, dht_key_b); + assert_eq!(dht_key_secret, dht_key_secret_b); + + let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().into_split(); + + let e1 = dht_key.encode(); + trace!("e1: {:?}", e1); + assert_eq!(e1, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".to_owned()); + let e1s = dht_key_secret.encode(); + trace!("e1s: {:?}", e1s); + let e2 = dht_key2.encode(); + trace!("e2: {:?}", e2); + let e2s = dht_key_secret2.encode(); + trace!("e2s: {:?}", e2s); + + let d1 = PublicKey::try_decode(e1.as_str()).unwrap(); + trace!("d1: {:?}", d1); + assert_eq!(dht_key, d1); + + let d1s = SecretKey::try_decode(e1s.as_str()).unwrap(); + trace!("d1s: {:?}", d1s); + assert_eq!(dht_key_secret, d1s); + + let d2 = PublicKey::try_decode(e2.as_str()).unwrap(); + trace!("d2: {:?}", d2); + assert_eq!(dht_key2, d2); + + let d2s = SecretKey::try_decode(e2s.as_str()).unwrap(); + trace!("d2s: {:?}", d2s); + assert_eq!(dht_key_secret2, d2s); + + // Failures + let f1 = SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"); + assert!(f1.is_err()); + let f2 = 
SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&"); + assert!(f2.is_err()); +} + +async fn test_hash(vcrypto: CryptoSystemVersion) { + let mut s = BTreeSet::::new(); + + let k1 = vcrypto.generate_hash("abc".as_bytes()); + let k2 = vcrypto.generate_hash("abcd".as_bytes()); + let k3 = vcrypto.generate_hash("".as_bytes()); + let k4 = vcrypto.generate_hash(" ".as_bytes()); + let k5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); + let k6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); + + s.insert(k1); + s.insert(k2); + s.insert(k3); + s.insert(k4); + s.insert(k5); + s.insert(k6); + assert_eq!(s.len(), 6); + + let v1 = vcrypto.generate_hash("abc".as_bytes()); + let v2 = vcrypto.generate_hash("abcd".as_bytes()); + let v3 = vcrypto.generate_hash("".as_bytes()); + let v4 = vcrypto.generate_hash(" ".as_bytes()); + let v5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); + let v6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); + + assert_eq!(k1, v1); + assert_eq!(k2, v2); + assert_eq!(k3, v3); + assert_eq!(k4, v4); + assert_eq!(k5, v5); + assert_eq!(k6, v6); + + vcrypto.validate_hash("abc".as_bytes(), &v1); + vcrypto.validate_hash("abcd".as_bytes(), &v2); + vcrypto.validate_hash("".as_bytes(), &v3); + vcrypto.validate_hash(" ".as_bytes(), &v4); + vcrypto.validate_hash(LOREM_IPSUM.as_bytes(), &v5); + vcrypto.validate_hash(CHEEZBURGER.as_bytes(), &v6); +} + +async fn test_operations(vcrypto: CryptoSystemVersion) { + let k1 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()); + let k2 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()); + let k3 = vcrypto.generate_hash("abc".as_bytes()); + + // Get distance + let d1 = vcrypto.distance(&k1, &k2); + let d2 = vcrypto.distance(&k2, &k1); + let d3 = vcrypto.distance(&k1, &k3); + let d4 = vcrypto.distance(&k2, &k3); + + trace!("d1={:?}", d1); + trace!("d2={:?}", d2); + trace!("d3={:?}", d3); + trace!("d4={:?}", d4); + + // Verify commutativity + assert_eq!(d1, d2); + assert!(d1 <= d2); + assert!(d1 >= d2); + assert!(d1 
>= d2); + assert!(d1 <= d2); + assert_eq!(d2, d1); + assert!(d2 <= d1); + assert!(d2 >= d1); + assert!(d2 >= d1); + assert!(d2 <= d1); + + // Verify nibbles + assert_eq!(d1.nibble(0), 0x9u8); + assert_eq!(d1.nibble(1), 0x4u8); + assert_eq!(d1.nibble(2), 0x3u8); + assert_eq!(d1.nibble(3), 0x6u8); + assert_eq!(d1.nibble(63), 0x6u8); + + assert_eq!(d1.first_nonzero_nibble(), Some((0, 0x9u8))); + assert_eq!(d2.first_nonzero_nibble(), Some((0, 0x9u8))); + assert_eq!(d3.first_nonzero_nibble(), Some((1, 0x4u8))); + assert_eq!(d4.first_nonzero_nibble(), Some((0, 0x9u8))); + + // Verify bits + assert_eq!(d1.bit(0), true); + assert_eq!(d1.bit(1), false); + assert_eq!(d1.bit(7), false); + assert_eq!(d1.bit(8), false); + assert_eq!(d1.bit(14), true); + assert_eq!(d1.bit(15), false); + assert_eq!(d1.bit(254), true); + assert_eq!(d1.bit(255), false); + + assert_eq!(d1.first_nonzero_bit(), Some(0)); + assert_eq!(d2.first_nonzero_bit(), Some(0)); + assert_eq!(d3.first_nonzero_bit(), Some(5)); + assert_eq!(d4.first_nonzero_bit(), Some(0)); +} + +pub async fn test_all() { + let api = crypto_tests_startup().await; + let crypto = api.crypto().unwrap(); + + // Test versions + for v in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(v).unwrap(); + + test_generate_secret(vcrypto.clone()).await; + test_sign_and_verify(vcrypto.clone()).await; + test_key_conversions(vcrypto.clone()).await; + test_encode_decode(vcrypto.clone()).await; + test_hash(vcrypto.clone()).await; + test_operations(vcrypto).await; + } + + crypto_tests_shutdown(api.clone()).await; + assert!(api.is_shutdown()); +} diff --git a/veilid-core/src/crypto/types/crypto_typed.rs b/veilid-core/src/crypto/types/crypto_typed.rs new file mode 100644 index 00000000..b6575769 --- /dev/null +++ b/veilid-core/src/crypto/types/crypto_typed.rs @@ -0,0 +1,184 @@ +use super::*; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C), derive(CheckBytes, Hash, PartialEq, Eq))] 
+pub struct CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + pub kind: CryptoKind, + pub value: K, +} + +impl CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + pub fn new(kind: CryptoKind, value: K) -> Self { + Self { kind, value } + } +} +impl PartialOrd for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + let x = compare_crypto_kind(&self.kind, &other.kind); + if x != cmp::Ordering::Equal { + return x; + } + self.value.cmp(&other.value) + } +} + +impl fmt::Display for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}:{}", self.kind, self.value) + } +} +impl FromStr for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + type Err = VeilidAPIError; + fn from_str(s: &str) -> Result { + let b = s.as_bytes(); + if b.len() != (5 + K::encoded_len()) || b[4..5] 
!= b":"[..] { + apibail_parse_error!("invalid typed key", s); + } + let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert"); + let value = K::try_decode_bytes(&b[5..])?; + Ok(Self { kind, value }) + } +} +impl<'de, K> Deserialize<'de> for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let s = ::deserialize(deserializer)?; + FromStr::from_str(&s).map_err(serde::de::Error::custom) + } +} +impl Serialize for CryptoTyped +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + Ord + + PartialOrd + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_str(self) + } +} diff --git a/veilid-core/src/crypto/types/crypto_typed_set.rs b/veilid-core/src/crypto/types/crypto_typed_set.rs new file mode 100644 index 00000000..e1edbe01 --- /dev/null +++ b/veilid-core/src/crypto/types/crypto_typed_set.rs @@ -0,0 +1,303 @@ +use super::*; + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + PartialOrd, + Ord, + PartialEq, + Eq, + Hash, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, + Default, +)] +#[archive_attr(repr(C), derive(CheckBytes, Hash, PartialEq, Eq))] +#[serde(from = "Vec>", into = "Vec>")] +pub struct CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, + > as RkyvArchive>::Archived: Hash + PartialEq + Eq, +{ + items: Vec>, +} + +impl CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + 
RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + pub fn new() -> Self { + Self { items: Vec::new() } + } + pub fn with_capacity(cap: usize) -> Self { + Self { + items: Vec::with_capacity(cap), + } + } + pub fn kinds(&self) -> Vec { + let mut out = Vec::new(); + for tk in &self.items { + out.push(tk.kind); + } + out.sort_by(compare_crypto_kind); + out + } + pub fn keys(&self) -> Vec { + let mut out = Vec::new(); + for tk in &self.items { + out.push(tk.value); + } + out + } + pub fn get(&self, kind: CryptoKind) -> Option> { + self.items.iter().find(|x| x.kind == kind).copied() + } + pub fn add(&mut self, typed_key: CryptoTyped) { + for x in &mut self.items { + if x.kind == typed_key.kind { + *x = typed_key; + return; + } + } + self.items.push(typed_key); + self.items.sort() + } + pub fn add_all(&mut self, typed_keys: &[CryptoTyped]) { + 'outer: for typed_key in typed_keys { + for x in &mut self.items { + if x.kind == typed_key.kind { + *x = *typed_key; + continue 'outer; + } + } + self.items.push(*typed_key); + } + self.items.sort() + } + pub fn remove(&mut self, kind: CryptoKind) { + if let Some(idx) = self.items.iter().position(|x| x.kind == kind) { + self.items.remove(idx); + } + } + pub fn remove_all(&mut self, kinds: &[CryptoKind]) { + for k in kinds { + self.remove(*k); + } + } + /// Return preferred typed key of our supported crypto kinds + pub fn best(&self) -> Option> { + match self.items.first().copied() { + None => None, + Some(k) => { + if !VALID_CRYPTO_KINDS.contains(&k.kind) { + None + } else { + Some(k) + } + } + } + } + pub fn len(&self) -> usize { + self.items.len() + } + pub fn iter(&self) -> core::slice::Iter<'_, CryptoTyped> { + self.items.iter() + } + pub fn contains(&self, typed_key: &CryptoTyped) -> bool { + self.items.contains(typed_key) + } + pub fn contains_any(&self, typed_keys: &[CryptoTyped]) -> bool { + for typed_key in typed_keys { + if self.items.contains(typed_key) { + return true; + } + } + false + } + pub fn 
contains_key(&self, key: &K) -> bool { + for tk in &self.items { + if tk.value == *key { + return true; + } + } + false + } +} + +impl core::ops::Deref for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + type Target = [CryptoTyped]; + + #[inline] + fn deref(&self) -> &[CryptoTyped] { + &self.items + } +} + +impl fmt::Display for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "[")?; + let mut first = true; + for x in &self.items { + if !first { + write!(f, ",")?; + first = false; + } + write!(f, "{}", x)?; + } + write!(f, "]") + } +} +impl FromStr for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + type Err = VeilidAPIError; + fn from_str(s: &str) -> Result { + let mut items = Vec::new(); + if s.len() < 2 { + apibail_parse_error!("invalid length", s); + } + if &s[0..1] != "[" || &s[(s.len() - 1)..] 
!= "]" { + apibail_parse_error!("invalid format", s); + } + for x in s[1..s.len() - 1].split(",") { + let tk = CryptoTyped::::from_str(x.trim())?; + items.push(tk); + } + + Ok(Self { items }) + } +} +impl From> for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn from(x: CryptoTyped) -> Self { + let mut tks = CryptoTypedSet::::with_capacity(1); + tks.add(x); + tks + } +} +impl From>> for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn from(x: Vec>) -> Self { + let mut tks = CryptoTypedSet::::with_capacity(x.len()); + tks.add_all(&x); + tks + } +} +impl Into>> for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn into(self) -> Vec> { + self.items + } +} diff --git a/veilid-core/src/crypto/types/keypair.rs b/veilid-core/src/crypto/types/keypair.rs new file mode 100644 index 00000000..253f84ea --- /dev/null +++ b/veilid-core/src/crypto/types/keypair.rs @@ -0,0 +1,91 @@ +use super::*; + +#[derive( + Clone, + Copy, + Serialize, + Deserialize, + PartialOrd, + Ord, + PartialEq, + Eq, + Hash, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes, Hash, PartialEq, Eq))] +pub struct KeyPair { + pub key: PublicKey, + pub secret: SecretKey, +} + +impl KeyPair { + pub fn new(key: PublicKey, secret: SecretKey) -> Self { + Self { key, secret } + } + pub fn split(&self) -> (PublicKey, SecretKey) { + (self.key, self.secret) + } + pub fn into_split(self) -> (PublicKey, SecretKey) { + (self.key, self.secret) + } +} + +impl Encodable for KeyPair { + fn 
encode(&self) -> String { + format!("{}:{}", self.key.encode(), self.secret.encode()) + } + fn encoded_len() -> usize { + PublicKey::encoded_len() + 1 + SecretKey::encoded_len() + } + fn try_decode_bytes(b: &[u8]) -> Result { + if b.len() != Self::encoded_len() { + apibail_parse_error!("input has wrong encoded length", format!("len={}", b.len())); + } + let key = PublicKey::try_decode_bytes(&b[0..PublicKey::encoded_len()])?; + let secret = SecretKey::try_decode_bytes(&b[(PublicKey::encoded_len() + 1)..])?; + Ok(KeyPair { key, secret }) + } +} +impl fmt::Display for KeyPair { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.encode()) + } +} + +impl fmt::Debug for KeyPair { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, concat!(stringify!($name), "("))?; + write!(f, "{}", self.encode())?; + write!(f, ")") + } +} + +impl From<&KeyPair> for String { + fn from(value: &KeyPair) -> Self { + value.encode() + } +} + +impl FromStr for KeyPair { + type Err = VeilidAPIError; + + fn from_str(s: &str) -> Result { + KeyPair::try_from(s) + } +} + +impl TryFrom for KeyPair { + type Error = VeilidAPIError; + fn try_from(value: String) -> Result { + KeyPair::try_from(value.as_str()) + } +} + +impl TryFrom<&str> for KeyPair { + type Error = VeilidAPIError; + fn try_from(value: &str) -> Result { + Self::try_decode(value) + } +} diff --git a/veilid-core/src/crypto/types/mod.rs b/veilid-core/src/crypto/types/mod.rs new file mode 100644 index 00000000..355c34b0 --- /dev/null +++ b/veilid-core/src/crypto/types/mod.rs @@ -0,0 +1,59 @@ +use super::*; + +use core::cmp::{Eq, Ord, PartialEq, PartialOrd}; +use core::convert::TryInto; +use core::fmt; +use core::hash::Hash; + +use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; + +/// Cryptography version fourcc code +pub type CryptoKind = FourCC; + +/// Sort best crypto kinds first +/// Better crypto kinds are 'less', ordered toward the front 
of a list +pub fn compare_crypto_kind(a: &CryptoKind, b: &CryptoKind) -> cmp::Ordering { + let a_idx = VALID_CRYPTO_KINDS.iter().position(|k| k == a); + let b_idx = VALID_CRYPTO_KINDS.iter().position(|k| k == b); + if let Some(a_idx) = a_idx { + if let Some(b_idx) = b_idx { + // Both are valid, prefer better crypto kind + a_idx.cmp(&b_idx) + } else { + // A is valid, B is not + cmp::Ordering::Less + } + } else if b_idx.is_some() { + // B is valid, A is not + cmp::Ordering::Greater + } else { + // Both are invalid, so use lex comparison + a.cmp(b) + } +} + +/// Intersection of crypto kind vectors +pub fn common_crypto_kinds(a: &[CryptoKind], b: &[CryptoKind]) -> Vec { + let mut out = Vec::new(); + for ack in a { + if b.contains(ack) { + out.push(*ack); + } + } + out +} + +mod crypto_typed; +mod crypto_typed_set; +mod keypair; + +pub use crypto_typed::*; +pub use crypto_typed_set::*; +pub use keypair::*; + +pub type TypedKey = CryptoTyped; +pub type TypedSecret = CryptoTyped; +pub type TypedKeyPair = CryptoTyped; +pub type TypedSignature = CryptoTyped; +pub type TypedKeySet = CryptoTypedSet; +pub type TypedSecretSet = CryptoTypedSet; diff --git a/veilid-core/src/crypto/vld0/blake3digest512.rs b/veilid-core/src/crypto/vld0/blake3digest512.rs new file mode 100644 index 00000000..1db950af --- /dev/null +++ b/veilid-core/src/crypto/vld0/blake3digest512.rs @@ -0,0 +1,70 @@ +use digest::generic_array::typenum::U64; +use digest::{Digest, Output}; +use generic_array::GenericArray; + +pub struct Blake3Digest512 { + dig: blake3::Hasher, +} + +impl Digest for Blake3Digest512 { + type OutputSize = U64; + + fn new() -> Self { + Self { + dig: blake3::Hasher::new(), + } + } + + fn update(&mut self, data: impl AsRef<[u8]>) { + self.dig.update(data.as_ref()); + } + + fn chain(mut self, data: impl AsRef<[u8]>) -> Self + where + Self: Sized, + { + self.update(data); + self + } + + fn finalize(self) -> Output { + let mut b = [0u8; 64]; + self.dig.finalize_xof().fill(&mut b); + let mut 
out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + out + } + + fn finalize_reset(&mut self) -> Output { + let mut b = [0u8; 64]; + self.dig.finalize_xof().fill(&mut b); + let mut out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + self.reset(); + out + } + + fn reset(&mut self) { + self.dig.reset(); + } + + fn output_size() -> usize { + 64 + } + + fn digest(data: &[u8]) -> Output { + let mut dig = blake3::Hasher::new(); + dig.update(data); + let mut b = [0u8; 64]; + dig.finalize_xof().fill(&mut b); + let mut out = GenericArray::::default(); + for n in 0..64 { + out[n] = b[n]; + } + out + } +} diff --git a/veilid-core/src/crypto/vld0/mod.rs b/veilid-core/src/crypto/vld0/mod.rs new file mode 100644 index 00000000..fd2b42a4 --- /dev/null +++ b/veilid-core/src/crypto/vld0/mod.rs @@ -0,0 +1,300 @@ +pub mod blake3digest512; +pub use blake3digest512::*; + +use super::*; + +use chacha20::cipher::{KeyIvInit, StreamCipher}; +use chacha20::XChaCha20; +use chacha20poly1305 as ch; +use chacha20poly1305::aead::{AeadInPlace, NewAead}; +use core::convert::TryInto; +use curve25519_dalek as cd; +use digest::Digest; +use ed25519_dalek as ed; +use x25519_dalek as xd; + +const AEAD_OVERHEAD: usize = 16; +pub const CRYPTO_KIND_VLD0: CryptoKind = FourCC([b'V', b'L', b'D', b'0']); + +fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result { + let bytes = key.to_bytes(); + let compressed = cd::edwards::CompressedEdwardsY(bytes); + let point = compressed + .decompress() + .ok_or_else(|| VeilidAPIError::internal("ed25519_to_x25519_pk failed"))?; + let mp = point.to_montgomery(); + Ok(xd::PublicKey::from(mp.to_bytes())) +} +fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result { + let exp = ed::ExpandedSecretKey::from(key); + let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes(); + let lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?; + Ok(xd::StaticSecret::from(lowbytes)) +} + +pub fn 
vld0_generate_keypair() -> KeyPair { + let mut csprng = VeilidRng {}; + let keypair = ed::Keypair::generate(&mut csprng); + let dht_key = PublicKey::new(keypair.public.to_bytes()); + let dht_key_secret = SecretKey::new(keypair.secret.to_bytes()); + + KeyPair::new(dht_key, dht_key_secret) +} + +/// V0 CryptoSystem +#[derive(Clone)] +pub struct CryptoSystemVLD0 { + crypto: Crypto, +} + +impl CryptoSystemVLD0 { + pub fn new(crypto: Crypto) -> Self { + Self { crypto } + } +} + +impl CryptoSystem for CryptoSystemVLD0 { + // Accessors + fn kind(&self) -> CryptoKind { + CRYPTO_KIND_VLD0 + } + + fn crypto(&self) -> Crypto { + self.crypto.clone() + } + + // Cached Operations + fn cached_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result { + self.crypto + .cached_dh_internal::(self, key, secret) + } + + // Generation + fn random_nonce(&self) -> Nonce { + let mut nonce = [0u8; 24]; + random_bytes(&mut nonce).unwrap(); + Nonce::new(nonce) + } + fn random_shared_secret(&self) -> SharedSecret { + let mut s = [0u8; 32]; + random_bytes(&mut s).unwrap(); + SharedSecret::new(s) + } + fn compute_dh( + &self, + key: &PublicKey, + secret: &SecretKey, + ) -> Result { + let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?; + let pk_xd = ed25519_to_x25519_pk(&pk_ed)?; + let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?; + let sk_xd = ed25519_to_x25519_sk(&sk_ed)?; + Ok(SharedSecret::new(sk_xd.diffie_hellman(&pk_xd).to_bytes())) + } + fn generate_keypair(&self) -> KeyPair { + vld0_generate_keypair() + } + fn generate_hash(&self, data: &[u8]) -> PublicKey { + PublicKey::new(*blake3::hash(data).as_bytes()) + } + fn generate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + ) -> Result { + let mut hasher = blake3::Hasher::new(); + std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; + Ok(PublicKey::new(*hasher.finalize().as_bytes())) + } + + // Validation + fn 
validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool { + let data = vec![0u8; 512]; + let sig = match self.sign(dht_key, dht_key_secret, &data) { + Ok(s) => s, + Err(_) => { + return false; + } + }; + self.verify(dht_key, &data, &sig).is_ok() + } + fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool { + let bytes = *blake3::hash(data).as_bytes(); + + bytes == dht_key.bytes + } + fn validate_hash_reader( + &self, + reader: &mut dyn std::io::Read, + dht_key: &PublicKey, + ) -> Result { + let mut hasher = blake3::Hasher::new(); + std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; + let bytes = *hasher.finalize().as_bytes(); + Ok(bytes == dht_key.bytes) + } + // Distance Metric + fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance { + let mut bytes = [0u8; PUBLIC_KEY_LENGTH]; + + for (n, byte) in bytes.iter_mut().enumerate() { + *byte = key1.bytes[n] ^ key2.bytes[n]; + } + + PublicKeyDistance::new(bytes) + } + + // Authentication + fn sign( + &self, + dht_key: &PublicKey, + dht_key_secret: &SecretKey, + data: &[u8], + ) -> Result { + let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] = + [0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH]; + + kpb[..SECRET_KEY_LENGTH].copy_from_slice(&dht_key_secret.bytes); + kpb[SECRET_KEY_LENGTH..].copy_from_slice(&dht_key.bytes); + let keypair = ed::Keypair::from_bytes(&kpb) + .map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?; + + let mut dig = Blake3Digest512::new(); + dig.update(data); + + let sig = keypair + .sign_prehashed(dig, None) + .map_err(VeilidAPIError::internal)?; + + let dht_sig = Signature::new(sig.to_bytes()); + Ok(dht_sig) + } + fn verify( + &self, + dht_key: &PublicKey, + data: &[u8], + signature: &Signature, + ) -> Result<(), VeilidAPIError> { + let pk = ed::PublicKey::from_bytes(&dht_key.bytes) + .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; + let sig = 
ed::Signature::from_bytes(&signature.bytes) + .map_err(|e| VeilidAPIError::parse_error("Signature is invalid", e))?; + + let mut dig = Blake3Digest512::new(); + dig.update(data); + + pk.verify_prehashed(dig, None, &sig) + .map_err(|e| VeilidAPIError::parse_error("Verification failed", e))?; + Ok(()) + } + + // AEAD Encrypt/Decrypt + fn aead_overhead(&self) -> usize { + AEAD_OVERHEAD + } + fn decrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError> { + let key = ch::Key::from(shared_secret.bytes); + let xnonce = ch::XNonce::from(nonce.bytes); + let aead = ch::XChaCha20Poly1305::new(&key); + aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic) + } + + fn decrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError> { + let mut out = body.to_vec(); + self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic)?; + Ok(out) + } + + fn encrypt_in_place_aead( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result<(), VeilidAPIError> { + let key = ch::Key::from(shared_secret.bytes); + let xnonce = ch::XNonce::from(nonce.bytes); + let aead = ch::XChaCha20Poly1305::new(&key); + + aead.encrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic) + } + + fn encrypt_aead( + &self, + body: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + associated_data: Option<&[u8]>, + ) -> Result, VeilidAPIError> { + let mut out = body.to_vec(); + self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) + .map_err(map_to_string) + .map_err(VeilidAPIError::generic)?; + Ok(out) + } + + 
// NoAuth Encrypt/Decrypt + fn crypt_in_place_no_auth( + &self, + body: &mut Vec, + nonce: &Nonce, + shared_secret: &SharedSecret, + ) { + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + cipher.apply_keystream(body); + } + + fn crypt_b2b_no_auth( + &self, + in_buf: &[u8], + out_buf: &mut [u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) { + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + cipher.apply_keystream_b2b(in_buf, out_buf).unwrap(); + } + + fn crypt_no_auth_aligned_8( + &self, + in_buf: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec { + let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; + self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret); + out_buf + } + + fn crypt_no_auth_unaligned( + &self, + in_buf: &[u8], + nonce: &Nonce, + shared_secret: &SharedSecret, + ) -> Vec { + let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) }; + self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret); + out_buf + } +} diff --git a/veilid-core/src/intf/native/protected_store.rs b/veilid-core/src/intf/native/protected_store.rs index 7ad89551..022a3477 100644 --- a/veilid-core/src/intf/native/protected_store.rs +++ b/veilid-core/src/intf/native/protected_store.rs @@ -113,28 +113,35 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self, value), ret, err)] - pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult { + pub async fn save_user_secret_string + fmt::Debug, V: AsRef + fmt::Debug>( + &self, + key: K, + value: V, + ) -> EyreResult { let inner = self.inner.lock(); inner .keyring_manager .as_ref() .ok_or_else(|| eyre!("Protected store not initialized"))? 
- .with_keyring(&self.service_name(), key, |kr| { + .with_keyring(&self.service_name(), key.as_ref(), |kr| { let existed = kr.get_value().is_ok(); - kr.set_value(value)?; + kr.set_value(value.as_ref())?; Ok(existed) }) .wrap_err("failed to save user secret") } #[instrument(level = "trace", skip(self), err)] - pub async fn load_user_secret_string(&self, key: &str) -> EyreResult> { + pub async fn load_user_secret_string + fmt::Debug>( + &self, + key: K, + ) -> EyreResult> { let inner = self.inner.lock(); match inner .keyring_manager .as_ref() .ok_or_else(|| eyre!("Protected store not initialized"))? - .with_keyring(&self.service_name(), key, |kr| kr.get_value()) + .with_keyring(&self.service_name(), key.as_ref(), |kr| kr.get_value()) { Ok(v) => Ok(Some(v)), Err(KeyringError::NoPasswordFound) => Ok(None), @@ -143,17 +150,19 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self, value))] - pub async fn save_user_secret_rkyv(&self, key: &str, value: &T) -> EyreResult + pub async fn save_user_secret_rkyv(&self, key: K, value: &T) -> EyreResult where + K: AsRef + fmt::Debug, T: RkyvSerialize>, { let v = to_rkyv(value)?; - self.save_user_secret(&key, &v).await + self.save_user_secret(key, &v).await } #[instrument(level = "trace", skip(self, value))] - pub async fn save_user_secret_json(&self, key: &str, value: &T) -> EyreResult + pub async fn save_user_secret_json(&self, key: K, value: &T) -> EyreResult where + K: AsRef + fmt::Debug, T: serde::Serialize, { let v = serde_json::to_vec(value)?; @@ -161,8 +170,9 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self))] - pub async fn load_user_secret_rkyv(&self, key: &str) -> EyreResult> + pub async fn load_user_secret_rkyv(&self, key: K) -> EyreResult> where + K: AsRef + fmt::Debug, T: RkyvArchive, ::Archived: for<'t> bytecheck::CheckBytes>, @@ -182,8 +192,9 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self))] - pub async fn load_user_secret_json(&self, key: &str) -> EyreResult> + pub 
async fn load_user_secret_json(&self, key: K) -> EyreResult> where + K: AsRef + fmt::Debug, T: for<'de> serde::de::Deserialize<'de>, { let out = self.load_user_secret(key).await?; @@ -199,7 +210,11 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self, value), ret, err)] - pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult { + pub async fn save_user_secret + fmt::Debug>( + &self, + key: K, + value: &[u8], + ) -> EyreResult { let mut s = BASE64URL_NOPAD.encode(value); s.push('!'); @@ -207,7 +222,10 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self), err)] - pub async fn load_user_secret(&self, key: &str) -> EyreResult>> { + pub async fn load_user_secret + fmt::Debug>( + &self, + key: K, + ) -> EyreResult>> { let mut s = match self.load_user_secret_string(key).await? { Some(s) => s, None => { @@ -238,13 +256,13 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self), ret, err)] - pub async fn remove_user_secret(&self, key: &str) -> EyreResult { + pub async fn remove_user_secret + fmt::Debug>(&self, key: K) -> EyreResult { let inner = self.inner.lock(); match inner .keyring_manager .as_ref() .ok_or_else(|| eyre!("Protected store not initialized"))? 
- .with_keyring(&self.service_name(), key, |kr| kr.delete_value()) + .with_keyring(&self.service_name(), key.as_ref(), |kr| kr.delete_value()) { Ok(_) => Ok(true), Err(KeyringError::NoPasswordFound) => Ok(false), diff --git a/veilid-core/src/intf/wasm/protected_store.rs b/veilid-core/src/intf/wasm/protected_store.rs index 9a2c1646..23288fbc 100644 --- a/veilid-core/src/intf/wasm/protected_store.rs +++ b/veilid-core/src/intf/wasm/protected_store.rs @@ -50,7 +50,11 @@ impl ProtectedStore { } //#[instrument(level = "trace", skip(self, value), ret, err)] - pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult { + pub async fn save_user_secret_string + fmt::Debug, V: AsRef + fmt::Debug>( + &self, + key: K, + value: V, + ) -> EyreResult { if is_browser() { let win = match window() { Some(w) => w, @@ -70,7 +74,7 @@ impl ProtectedStore { } }; - let vkey = self.browser_key_name(key); + let vkey = self.browser_key_name(key.as_ref()); let prev = match ls .get_item(&vkey) @@ -81,7 +85,7 @@ impl ProtectedStore { None => false, }; - ls.set_item(&vkey, value) + ls.set_item(&vkey, value.as_ref()) .map_err(map_jsvalue_error) .wrap_err("exception_thrown")?; @@ -92,7 +96,10 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self), err)] - pub async fn load_user_secret_string(&self, key: &str) -> EyreResult> { + pub async fn load_user_secret_string + fmt::Debug>( + &self, + key: K, + ) -> EyreResult> { if is_browser() { let win = match window() { Some(w) => w, @@ -112,7 +119,7 @@ impl ProtectedStore { } }; - let vkey = self.browser_key_name(key); + let vkey = self.browser_key_name(key.as_ref()); ls.get_item(&vkey) .map_err(map_jsvalue_error) @@ -123,26 +130,29 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self, value))] - pub async fn save_user_secret_rkyv(&self, key: &str, value: &T) -> EyreResult + pub async fn save_user_secret_rkyv(&self, key: K, value: &T) -> EyreResult where + K: AsRef + fmt::Debug, T: RkyvSerialize>, { let v 
= to_rkyv(value)?; - self.save_user_secret(&key, &v).await + self.save_user_secret(key, &v).await } #[instrument(level = "trace", skip(self, value))] - pub async fn save_user_secret_json(&self, key: &str, value: &T) -> EyreResult + pub async fn save_user_secret_json(&self, key: K, value: &T) -> EyreResult where + K: AsRef + fmt::Debug, T: serde::Serialize, { let v = serde_json::to_vec(value)?; - self.save_user_secret(&key, &v).await + self.save_user_secret(key, &v).await } #[instrument(level = "trace", skip(self))] - pub async fn load_user_secret_rkyv(&self, key: &str) -> EyreResult> + pub async fn load_user_secret_rkyv(&self, key: K) -> EyreResult> where + K: AsRef + fmt::Debug, T: RkyvArchive, ::Archived: for<'t> bytecheck::CheckBytes>, @@ -162,8 +172,9 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self))] - pub async fn load_user_secret_json(&self, key: &str) -> EyreResult> + pub async fn load_user_secret_json(&self, key: K) -> EyreResult> where + K: AsRef + fmt::Debug, T: for<'de> serde::de::Deserialize<'de>, { let out = self.load_user_secret(key).await?; @@ -179,7 +190,11 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self, value), ret, err)] - pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult { + pub async fn save_user_secret + fmt::Debug>( + &self, + key: K, + value: &[u8], + ) -> EyreResult { let mut s = BASE64URL_NOPAD.encode(value); s.push('!'); @@ -187,7 +202,10 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self), err)] - pub async fn load_user_secret(&self, key: &str) -> EyreResult>> { + pub async fn load_user_secret + fmt::Debug>( + &self, + key: K, + ) -> EyreResult>> { let mut s = match self.load_user_secret_string(key).await? 
{ Some(s) => s, None => { @@ -218,7 +236,7 @@ impl ProtectedStore { } #[instrument(level = "trace", skip(self), ret, err)] - pub async fn remove_user_secret(&self, key: &str) -> EyreResult { + pub async fn remove_user_secret + fmt::Debug>(&self, key: K) -> EyreResult { if is_browser() { let win = match window() { Some(w) => w, @@ -238,7 +256,7 @@ impl ProtectedStore { } }; - let vkey = self.browser_key_name(key); + let vkey = self.browser_key_name(key.as_ref()); match ls .get_item(&vkey) diff --git a/veilid-core/src/lib.rs b/veilid-core/src/lib.rs index f6cc2f76..39f6bbae 100644 --- a/veilid-core/src/lib.rs +++ b/veilid-core/src/lib.rs @@ -35,6 +35,7 @@ mod veilid_layer_filter; pub use self::api_tracing_layer::ApiTracingLayer; pub use self::core_context::{api_startup, api_startup_json, UpdateCallback}; +pub use self::crypto::vld0_generate_keypair; pub use self::veilid_api::*; pub use self::veilid_config::*; pub use self::veilid_layer_filter::*; diff --git a/veilid-core/src/network_manager/connection_table.rs b/veilid-core/src/network_manager/connection_table.rs index 82d13ae3..0a09eb21 100644 --- a/veilid-core/src/network_manager/connection_table.rs +++ b/veilid-core/src/network_manager/connection_table.rs @@ -83,6 +83,7 @@ impl ConnectionTable { unord.push(v); } } + inner.protocol_index_by_id.clear(); inner.id_by_descriptor.clear(); inner.ids_by_remote.clear(); unord @@ -136,7 +137,9 @@ impl ConnectionTable { }; // Add the connection to the table - let res = inner.conn_by_id[protocol_index].insert(id, network_connection); + let res = inner.conn_by_id[protocol_index].insert(id, network_connection, |_k, _v| { + // never lrus, unbounded + }); assert!(res.is_none()); // if we have reached the maximum number of connections per protocol type diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 923c6ea3..5884c69a 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -120,7 +120,7 
@@ pub(crate) enum NodeContactMethod { Direct(DialInfo), /// Request via signal the node connect back directly (relay, target) SignalReverse(NodeRef, NodeRef), - /// Request via signal the node negotiate a hole punch (relay, target_node) + /// Request via signal the node negotiate a hole punch (relay, target) SignalHolePunch(NodeRef, NodeRef), /// Must use an inbound relay to reach the node InboundRelay(NodeRef), @@ -134,7 +134,7 @@ struct PublicAddressCheckCacheKey(ProtocolType, AddressType); // The mutable state of the network manager struct NetworkManagerInner { stats: NetworkManagerStats, - client_whitelist: LruCache, + client_whitelist: LruCache, public_address_check_cache: BTreeMap>, public_address_inconsistencies_table: @@ -396,9 +396,11 @@ impl NetworkManager { debug!("finished network manager shutdown"); } - pub fn update_client_whitelist(&self, client: DHTKey) { + pub fn update_client_whitelist(&self, client: TypedKey) { let mut inner = self.inner.lock(); - match inner.client_whitelist.entry(client) { + match inner.client_whitelist.entry(client, |_k,_v| { + // do nothing on LRU evict + }) { hashlink::lru_cache::Entry::Occupied(mut entry) => { entry.get_mut().last_seen_ts = get_aligned_timestamp() } @@ -411,10 +413,12 @@ impl NetworkManager { } #[instrument(level = "trace", skip(self), ret)] - pub fn check_client_whitelist(&self, client: DHTKey) -> bool { + pub fn check_client_whitelist(&self, client: TypedKey) -> bool { let mut inner = self.inner.lock(); - match inner.client_whitelist.entry(client) { + match inner.client_whitelist.entry(client, |_k,_v| { + // do nothing on LRU evict + }) { hashlink::lru_cache::Entry::Occupied(mut entry) => { entry.get_mut().last_seen_ts = get_aligned_timestamp(); true @@ -519,10 +523,15 @@ impl NetworkManager { let routing_table = self.routing_table(); // Generate receipt and serialized form to return - let nonce = Crypto::get_random_nonce(); - let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), 
extra_data)?; + let vcrypto = self.crypto().best(); + + let nonce = vcrypto.random_nonce(); + let node_id = routing_table.node_id(vcrypto.kind()); + let node_id_secret = routing_table.node_id_secret_key(vcrypto.kind()); + + let receipt = Receipt::try_new(best_envelope_version(), node_id.kind, nonce, node_id.value, extra_data)?; let out = receipt - .to_signed_data(&routing_table.node_id_secret()) + .to_signed_data(self.crypto(), &node_id_secret) .wrap_err("failed to generate signed receipt")?; // Record the receipt for later @@ -543,10 +552,15 @@ impl NetworkManager { let routing_table = self.routing_table(); // Generate receipt and serialized form to return - let nonce = Crypto::get_random_nonce(); - let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?; + let vcrypto = self.crypto().best(); + + let nonce = vcrypto.random_nonce(); + let node_id = routing_table.node_id(vcrypto.kind()); + let node_id_secret = routing_table.node_id_secret_key(vcrypto.kind()); + + let receipt = Receipt::try_new(best_envelope_version(), node_id.kind, nonce, node_id.value, extra_data)?; let out = receipt - .to_signed_data(&routing_table.node_id_secret()) + .to_signed_data(self.crypto(), &node_id_secret) .wrap_err("failed to generate signed receipt")?; // Record the receipt for later @@ -566,7 +580,7 @@ impl NetworkManager { ) -> NetworkResult<()> { let receipt_manager = self.receipt_manager(); - let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -587,7 +601,7 @@ impl NetworkManager { ) -> NetworkResult<()> { let receipt_manager = self.receipt_manager(); - let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -607,7 
+621,7 @@ impl NetworkManager { ) -> NetworkResult<()> { let receipt_manager = self.receipt_manager(); - let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -624,11 +638,11 @@ impl NetworkManager { pub async fn handle_private_receipt>( &self, receipt_data: R, - private_route: DHTKey, + private_route: PublicKey, ) -> NetworkResult<()> { let receipt_manager = self.receipt_manager(); - let receipt = match Receipt::from_signed_data(receipt_data.as_ref()) { + let receipt = match Receipt::from_signed_data(self.crypto(), receipt_data.as_ref()) { Err(e) => { return NetworkResult::invalid_message(e.to_string()); } @@ -649,10 +663,9 @@ impl NetworkManager { let rpc = self.rpc_processor(); // Add the peer info to our routing table - let peer_nr = match routing_table.register_node_with_signed_node_info( + let peer_nr = match routing_table.register_node_with_peer_info( RoutingDomain::PublicInternet, - peer_info.node_id.key, - peer_info.signed_node_info, + peer_info, false, ) { None => { @@ -673,10 +686,9 @@ impl NetworkManager { let rpc = self.rpc_processor(); // Add the peer info to our routing table - let mut peer_nr = match routing_table.register_node_with_signed_node_info( + let mut peer_nr = match routing_table.register_node_with_peer_info( RoutingDomain::PublicInternet, - peer_info.node_id.key, - peer_info.signed_node_info, + peer_info, false, ) { None => { @@ -731,21 +743,25 @@ impl NetworkManager { #[instrument(level = "trace", skip(self, body), err)] fn build_envelope>( &self, - dest_node_id: DHTKey, + dest_node_id: TypedKey, version: u8, body: B, ) -> EyreResult> { // DH to get encryption key let routing_table = self.routing_table(); - let node_id = routing_table.node_id(); - let node_id_secret = routing_table.node_id_secret(); + let Some(vcrypto) = self.crypto().get(dest_node_id.kind) else { 
+ bail!("should not have a destination with incompatible crypto here"); + }; + + let node_id = routing_table.node_id(vcrypto.kind()); + let node_id_secret = routing_table.node_id_secret_key(vcrypto.kind()); // Get timestamp, nonce let ts = get_aligned_timestamp(); - let nonce = Crypto::get_random_nonce(); + let nonce = vcrypto.random_nonce(); // Encode envelope - let envelope = Envelope::new(version, ts, nonce, node_id, dest_node_id); + let envelope = Envelope::new(version, node_id.kind, ts, nonce, node_id.value, dest_node_id.value); envelope .to_encrypted_data(self.crypto(), body.as_ref(), &node_id_secret) .wrap_err("envelope failed to encode") @@ -753,50 +769,44 @@ impl NetworkManager { /// Called by the RPC handler when we want to issue an RPC request or response /// node_ref is the direct destination to which the envelope will be sent - /// If 'node_id' is specified, it can be different than node_ref.node_id() + /// If 'destination_node_ref' is specified, it can be different than the node_ref being sent to /// which will cause the envelope to be relayed #[instrument(level = "trace", skip(self, body), ret, err)] pub async fn send_envelope>( &self, node_ref: NodeRef, - envelope_node_id: Option, + destination_node_ref: Option, body: B, ) -> EyreResult> { - let via_node_id = node_ref.node_id(); - let envelope_node_id = envelope_node_id.unwrap_or(via_node_id); - if envelope_node_id != via_node_id { + let destination_node_ref = destination_node_ref.as_ref().unwrap_or(&node_ref).clone(); + + if !node_ref.same_entry(&destination_node_ref) { log_net!( "sending envelope to {:?} via {:?}", - envelope_node_id, + destination_node_ref, node_ref ); } else { log_net!("sending envelope to {:?}", node_ref); } - // Get node's min/max version and see if we can send to it + + let best_node_id = destination_node_ref.best_node_id(); + + // Get node's envelope versions and see if we can send to it // and if so, get the max version we can use - let version = if let 
Some(min_max_version) = node_ref.min_max_version() { - #[allow(clippy::absurd_extreme_comparisons)] - if min_max_version.min > MAX_CRYPTO_VERSION || min_max_version.max < MIN_CRYPTO_VERSION - { - bail!( - "can't talk to this node {} because version is unsupported: ({},{})", - via_node_id, - min_max_version.min, - min_max_version.max - ); - } - cmp::min(min_max_version.max, MAX_CRYPTO_VERSION) - } else { - MAX_CRYPTO_VERSION + let Some(envelope_version) = destination_node_ref.best_envelope_version() else { + bail!( + "can't talk to this node {} because we dont support its envelope versions", + node_ref + ); }; // Build the envelope to send - let out = self.build_envelope(envelope_node_id, version, body)?; + let out = self.build_envelope(best_node_id, envelope_version, body)?; // Send the envelope via whatever means necessary - self.send_data(node_ref.clone(), out).await + self.send_data(node_ref, out).await } /// Called by the RPC handler when we want to issue an direct receipt @@ -860,7 +870,7 @@ impl NetworkManager { let rpc = self.rpc_processor(); network_result_try!(rpc .rpc_call_signal( - Destination::relay(relay_nr, target_nr.node_id()), + Destination::relay(relay_nr, target_nr.clone()), SignalInfo::ReverseConnect { receipt, peer_info }, ) .await @@ -886,7 +896,7 @@ impl NetworkManager { // We expect the inbound noderef to be the same as the target noderef // if they aren't the same, we should error on this and figure out what then hell is up - if target_nr.node_id() != inbound_nr.node_id() { + if !target_nr.same_entry(&inbound_nr) { bail!("unexpected noderef mismatch on reverse connect"); } @@ -965,7 +975,7 @@ impl NetworkManager { let rpc = self.rpc_processor(); network_result_try!(rpc .rpc_call_signal( - Destination::relay(relay_nr, target_nr.node_id()), + Destination::relay(relay_nr, target_nr.clone()), SignalInfo::HolePunch { receipt, peer_info }, ) .await @@ -991,7 +1001,7 @@ impl NetworkManager { // We expect the inbound noderef to be the same as the 
target noderef // if they aren't the same, we should error on this and figure out what then hell is up - if target_nr.node_id() != inbound_nr.node_id() { + if !target_nr.same_entry(&inbound_nr) { bail!( "unexpected noderef mismatch on hole punch {}, expected {}", inbound_nr, @@ -1069,7 +1079,7 @@ impl NetworkManager { let relay_nr = routing_table .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter) .ok_or_else(|| eyre!("couldn't look up relay"))?; - if target_node_ref.node_id() != target_key { + if !target_node_ref.node_ids().contains(&target_key) { bail!("target noderef didn't match target key"); } NodeContactMethod::SignalReverse(relay_nr, target_node_ref) @@ -1078,7 +1088,7 @@ impl NetworkManager { let relay_nr = routing_table .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter) .ok_or_else(|| eyre!("couldn't look up relay"))?; - if target_node_ref.node_id() != target_key { + if !target_node_ref.node_ids().contains(&target_key) { bail!("target noderef didn't match target key"); } NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref) @@ -1320,13 +1330,13 @@ impl NetworkManager { } // Is this an out-of-band receipt instead of an envelope?
- if data[0..4] == *RECEIPT_MAGIC { + if data[0..3] == *RECEIPT_MAGIC { network_result_value_or_log!(self.handle_out_of_band_receipt(data).await => {}); return Ok(true); } // Decode envelope header (may fail signature validation) - let envelope = match Envelope::from_signed_data(data) { + let envelope = match Envelope::from_signed_data(self.crypto(), data) { Ok(v) => v, Err(e) => { log_net!(debug "envelope failed to decode: {}", e); @@ -1370,16 +1380,16 @@ impl NetworkManager { // Peek at header and see if we need to relay this // If the recipient id is not our node id, then it needs relaying - let sender_id = envelope.get_sender_id(); - let recipient_id = envelope.get_recipient_id(); - if recipient_id != routing_table.node_id() { + let sender_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_sender_id()); + let recipient_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_recipient_id()); + if !routing_table.matches_own_node_id(&[recipient_id]) { // See if the source node is allowed to resolve nodes // This is a costly operation, so only outbound-relay permitted // nodes are allowed to do this, for example PWA users let some_relay_nr = if self.check_client_whitelist(sender_id) { // Full relay allowed, do a full resolve_node - match rpc.resolve_node(recipient_id).await { + match rpc.resolve_node(recipient_id.value).await { Ok(v) => v, Err(e) => { log_net!(debug "failed to resolve recipient node for relay, dropping outbound relayed packet: {}" ,e); @@ -1417,7 +1427,7 @@ impl NetworkManager { } // DH to get decryption key (cached) - let node_id_secret = routing_table.node_id_secret(); + let node_id_secret = routing_table.node_id_secret_key(envelope.get_crypto_kind()); // Decrypt the envelope body let body = match envelope @@ -1432,7 +1442,7 @@ impl NetworkManager { // Cache the envelope information in the routing table let source_noderef = match routing_table.register_node_with_existing_connection( - envelope.get_sender_id(), + 
TypedKey::new(envelope.get_crypto_kind(), envelope.get_sender_id()), connection_descriptor, ts, ) { @@ -1443,7 +1453,7 @@ impl NetworkManager { } Some(v) => v, }; - source_noderef.set_min_max_version(envelope.get_min_max_version()); + source_noderef.add_envelope_version(envelope.get_version()); // xxx: deal with spoofing and flooding here? @@ -1471,7 +1481,9 @@ impl NetworkManager { inner .stats .per_address_stats - .entry(PerAddressStatsKey(addr)) + .entry(PerAddressStatsKey(addr), |_k,_v| { + // do nothing on LRU evict + }) .or_insert(PerAddressStats::default()) .transfer_stats_accounting .add_up(bytes); @@ -1487,7 +1499,9 @@ impl NetworkManager { inner .stats .per_address_stats - .entry(PerAddressStatsKey(addr)) + .entry(PerAddressStatsKey(addr), |_k,_v| { + // do nothing on LRU evict + }) .or_insert(PerAddressStats::default()) .transfer_stats_accounting .add_down(bytes); @@ -1537,7 +1551,7 @@ impl NetworkManager { if let Some(nr) = routing_table.lookup_node_ref(k) { let peer_stats = nr.peer_stats(); let peer = PeerTableData { - node_id: k, + node_ids: nr.node_ids(), peer_address: v.last_connection.remote(), peer_stats, }; @@ -1622,7 +1636,9 @@ impl NetworkManager { if pait.contains_key(&ipblock) { return; } - pacc.insert(ipblock, socket_address); + pacc.insert(ipblock, socket_address, |_k,_v| { + // do nothing on LRU evict + }); // Determine if our external address has likely changed let mut bad_public_address_detection_punishment: Option< diff --git a/veilid-core/src/network_manager/native/network_class_discovery.rs b/veilid-core/src/network_manager/native/network_class_discovery.rs index 86acc197..0d7de718 100644 --- a/veilid-core/src/network_manager/native/network_class_discovery.rs +++ b/veilid-core/src/network_manager/native/network_class_discovery.rs @@ -112,7 +112,7 @@ impl DiscoveryContext { &self, protocol_type: ProtocolType, address_type: AddressType, - ignore_node: Option, + ignore_node_ids: Option, ) -> Option<(SocketAddress, NodeRef)> { let 
node_count = { let config = self.routing_table.network_manager().config(); @@ -121,7 +121,7 @@ impl DiscoveryContext { }; // Build an filter that matches our protocol and address type - // and excludes relays so we can get an accurate external address + // and excludes relayed nodes so we can get an accurate external address let dial_info_filter = DialInfoFilter::all() .with_protocol_type(protocol_type) .with_address_type(address_type); @@ -130,11 +130,11 @@ impl DiscoveryContext { dial_info_filter.clone(), ); let disallow_relays_filter = Box::new( - move |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { + move |rti: &RoutingTableInner, v: Option>| { let v = v.unwrap(); v.with(rti, |_rti, e| { if let Some(n) = e.signed_node_info(RoutingDomain::PublicInternet) { - n.relay_id().is_none() + n.relay_ids().is_empty() } else { false } @@ -158,8 +158,8 @@ impl DiscoveryContext { // For each peer, if it's not our ignore-node, ask them for our public address, filtering on desired dial info for mut peer in peers { - if let Some(ignore_node) = ignore_node { - if peer.node_id() == ignore_node { + if let Some(ignore_node_ids) = &ignore_node_ids { + if peer.node_ids().contains_any(ignore_node_ids) { continue; } } @@ -478,12 +478,12 @@ impl DiscoveryContext { // Get our external address from some fast node, that is not node 1, call it node 2 let (external_2_address, node_2) = match self - .discover_external_address(protocol_type, address_type, Some(node_1.node_id())) + .discover_external_address(protocol_type, address_type, Some(node_1.node_ids())) .await { None => { // If we can't get an external address, allow retry - log_net!(debug "failed to discover external address 2 for {:?}:{:?}, skipping node {:?}", protocol_type, address_type, node_1.node_id()); + log_net!(debug "failed to discover external address 2 for {:?}:{:?}, skipping node {:?}", protocol_type, address_type, node_1); return Ok(false); } Some(v) => v, diff --git a/veilid-core/src/receipt_manager.rs 
b/veilid-core/src/receipt_manager.rs index 17fcf000..0df15aa1 100644 --- a/veilid-core/src/receipt_manager.rs +++ b/veilid-core/src/receipt_manager.rs @@ -11,7 +11,7 @@ pub enum ReceiptEvent { ReturnedOutOfBand, ReturnedInBand { inbound_noderef: NodeRef }, ReturnedSafety, - ReturnedPrivate { private_route: DHTKey }, + ReturnedPrivate { private_route: PublicKey }, Expired, Cancelled, } @@ -21,7 +21,7 @@ pub enum ReceiptReturned { OutOfBand, InBand { inbound_noderef: NodeRef }, Safety, - Private { private_route: DHTKey }, + Private { private_route: PublicKey }, } pub trait ReceiptCallback: Send + 'static { @@ -149,7 +149,7 @@ impl PartialOrd for ReceiptRecordTimestampSort { pub struct ReceiptManagerInner { network_manager: NetworkManager, - records_by_nonce: BTreeMap>>, + records_by_nonce: BTreeMap>>, next_oldest_ts: Option, stop_source: Option, timeout_task: MustJoinSingleFuture<()>, @@ -370,7 +370,7 @@ impl ReceiptManager { inner.next_oldest_ts = new_next_oldest_ts; } - pub async fn cancel_receipt(&self, nonce: &ReceiptNonce) -> EyreResult<()> { + pub async fn cancel_receipt(&self, nonce: &Nonce) -> EyreResult<()> { log_rpc!(debug "== Cancel Receipt {}", nonce.encode()); // Remove the record diff --git a/veilid-core/src/routing_table/bucket.rs b/veilid-core/src/routing_table/bucket.rs index ce0794bf..93c1a65d 100644 --- a/veilid-core/src/routing_table/bucket.rs +++ b/veilid-core/src/routing_table/bucket.rs @@ -2,25 +2,31 @@ use super::*; use core::sync::atomic::Ordering; use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; +/// Routing Table Bucket +/// Stores map of public keys to entries, which may be in multiple routing tables per crypto kind +/// Keeps entries at a particular 'dht distance' from this cryptokind's node id +/// Helps to keep managed lists at particular distances so we can evict nodes by priority +/// where the priority comes from liveness and age of the entry (older is better) pub struct Bucket { - 
routing_table: RoutingTable, - entries: BTreeMap>, - newest_entry: Option, + /// Map of keys to entries for this bucket + entries: BTreeMap>, + /// The crypto kind in use for the public keys in this bucket + kind: CryptoKind, } -pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc>; +pub(super) type EntriesIter<'a> = + alloc::collections::btree_map::Iter<'a, PublicKey, Arc>; #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(C), derive(CheckBytes))] -struct BucketEntryData { - key: DHTKey, - value: Vec, +struct SerializedBucketEntryData { + key: PublicKey, + value: u32, // index into serialized entries list } #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(C), derive(CheckBytes))] -struct BucketData { - entries: Vec, - newest_entry: Option, +struct SerializedBucketData { + entries: Vec, } fn state_ordering(state: BucketEntryState) -> usize { @@ -32,68 +38,79 @@ fn state_ordering(state: BucketEntryState) -> usize { } impl Bucket { - pub fn new(routing_table: RoutingTable) -> Self { + pub fn new(kind: CryptoKind) -> Self { Self { - routing_table, entries: BTreeMap::new(), - newest_entry: None, + kind, } } - pub(super) fn load_bucket(&mut self, data: Vec) -> EyreResult<()> { - let bucket_data: BucketData = from_rkyv(data)?; + pub(super) fn load_bucket( + &mut self, + data: Vec, + all_entries: &[Arc], + ) -> EyreResult<()> { + let bucket_data: SerializedBucketData = from_rkyv(data)?; for e in bucket_data.entries { - let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?; self.entries - .insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner))); + .insert(e.key, all_entries[e.value as usize].clone()); } - self.newest_entry = bucket_data.newest_entry; - Ok(()) } - pub(super) fn save_bucket(&self) -> EyreResult> { + + pub(super) fn save_bucket( + &self, + all_entries: &mut Vec>, + entry_map: &mut HashMap<*const BucketEntry, u32>, + ) -> 
EyreResult> { let mut entries = Vec::new(); for (k, v) in &self.entries { - let entry_bytes = v.with_inner(|e| to_rkyv(e))?; - entries.push(BucketEntryData { + let entry_index = entry_map.entry(Arc::as_ptr(v)).or_insert_with(|| { + let entry_index = all_entries.len(); + all_entries.push(v.clone()); + entry_index as u32 + }); + entries.push(SerializedBucketEntryData { key: *k, - value: entry_bytes, + value: *entry_index, }); } - let bucket_data = BucketData { - entries, - newest_entry: self.newest_entry.clone(), - }; + let bucket_data = SerializedBucketData { entries }; let out = to_rkyv(&bucket_data)?; Ok(out) } - pub(super) fn add_entry(&mut self, node_id: DHTKey) -> NodeRef { - log_rtab!("Node added: {}", node_id.encode()); + /// Create a new entry with a node_id of this crypto kind and return it + pub(super) fn add_new_entry(&mut self, node_id_key: PublicKey) -> Arc { + log_rtab!("Node added: {}:{}", self.kind, node_id_key); // Add new entry - self.entries.insert(node_id, Arc::new(BucketEntry::new())); + let entry = Arc::new(BucketEntry::new(TypedKey::new(self.kind, node_id_key))); + self.entries.insert(node_id_key, entry.clone()); - // This is now the newest bucket entry - self.newest_entry = Some(node_id); - - // Get a node ref to return - let entry = self.entries.get(&node_id).unwrap().clone(); - NodeRef::new(self.routing_table.clone(), node_id, entry, None) + // Return the new entry + entry } - pub(super) fn remove_entry(&mut self, node_id: &DHTKey) { - log_rtab!("Node removed: {}", node_id); + /// Add an existing entry with a new node_id for this crypto kind + pub(super) fn add_existing_entry(&mut self, node_id_key: PublicKey, entry: Arc) { + log_rtab!("Existing node added: {}:{}", self.kind, node_id_key); + + // Add existing entry + self.entries.insert(node_id_key, entry); + } + + /// Remove an entry with a node_id for this crypto kind from the bucket + pub(super) fn remove_entry(&mut self, node_id_key: &PublicKey) { + log_rtab!("Node removed: {}:{}", 
self.kind, node_id_key); // Remove the entry - self.entries.remove(node_id); - - // newest_entry is updated by kick_bucket() + self.entries.remove(node_id_key); } - pub(super) fn entry(&self, key: &DHTKey) -> Option> { + pub(super) fn entry(&self, key: &PublicKey) -> Option> { self.entries.get(key).cloned() } @@ -101,7 +118,7 @@ impl Bucket { self.entries.iter() } - pub(super) fn kick(&mut self, bucket_depth: usize) -> Option> { + pub(super) fn kick(&mut self, bucket_depth: usize) -> Option> { // Get number of entries to attempt to purge from bucket let bucket_len = self.entries.len(); @@ -111,11 +128,11 @@ impl Bucket { } // Try to purge the newest entries that overflow the bucket - let mut dead_node_ids: BTreeSet = BTreeSet::new(); + let mut dead_node_ids: BTreeSet = BTreeSet::new(); let mut extra_entries = bucket_len - bucket_depth; // Get the sorted list of entries by their kick order - let mut sorted_entries: Vec<(DHTKey, Arc)> = self + let mut sorted_entries: Vec<(PublicKey, Arc)> = self .entries .iter() .map(|(k, v)| (k.clone(), v.clone())) @@ -144,24 +161,15 @@ impl Bucket { }) }); - self.newest_entry = None; for entry in sorted_entries { // If we're not evicting more entries, exit, noting this may be the newest entry if extra_entries == 0 { - // The first 'live' entry we find is our newest entry - if self.newest_entry.is_none() { - self.newest_entry = Some(entry.0); - } break; } extra_entries -= 1; // if this entry has references we can't drop it yet if entry.1.ref_count.load(Ordering::Acquire) > 0 { - // The first 'live' entry we fine is our newest entry - if self.newest_entry.is_none() { - self.newest_entry = Some(entry.0); - } continue; } diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index 3ca93132..1c7d46df 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -68,23 +68,14 @@ pub struct BucketEntryLocalNetwork { node_status: Option, 
} -/// A range of cryptography versions supported by this entry -#[derive(Copy, Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VersionRange { - /// The minimum cryptography version supported by this entry - pub min: u8, - /// The maximum cryptography version supported by this entry - pub max: u8, -} - /// The data associated with each bucket entry #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(C), derive(CheckBytes))] pub struct BucketEntryInner { - /// The minimum and maximum range of cryptography versions supported by the node, - /// inclusive of the requirements of any relay the node may be using - min_max_version: Option, + /// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field + node_ids: TypedKeySet, + /// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using + envelope_support: Vec, /// If this node has updated it's SignedNodeInfo since our network /// and dial info has last changed, for example when our IP address changes /// Used to determine if we should make this entry 'live' again when we receive a signednodeinfo update that @@ -131,6 +122,38 @@ impl BucketEntryInner { self.node_ref_tracks.remove(&track_id); } + /// Get node ids + pub fn node_ids(&self) -> TypedKeySet { + self.node_ids.clone() + } + /// Add a node id for a particular crypto kind. 
+ /// Returns any previous existing node id associated with that crypto kind + pub fn add_node_id(&mut self, node_id: TypedKey) -> Option { + if let Some(old_node_id) = self.node_ids.get(node_id.kind) { + // If this was already there we do nothing + if old_node_id == node_id { + return None; + } + self.node_ids.add(node_id); + return Some(old_node_id); + } + self.node_ids.add(node_id); + None + } + pub fn best_node_id(&self) -> TypedKey { + self.node_ids.best().unwrap() + } + + /// Get crypto kinds + pub fn crypto_kinds(&self) -> Vec { + self.node_ids.kinds() + } + /// Compare sets of crypto kinds + pub fn common_crypto_kinds(&self, other: &[CryptoKind]) -> Vec { + common_crypto_kinds(&self.node_ids.kinds(), other) + } + + // Less is faster pub fn cmp_fastest(e1: &Self, e2: &Self) -> std::cmp::Ordering { // Lower latency to the front @@ -219,7 +242,7 @@ impl BucketEntryInner { // See if we have an existing signed_node_info to update or not if let Some(current_sni) = opt_current_sni { // Always allow overwriting invalid/unsigned node - if current_sni.has_valid_signature() { + if current_sni.has_any_signature() { // If the timestamp hasn't changed or is less, ignore this update if signed_node_info.timestamp() <= current_sni.timestamp() { // If we received a node update with the same timestamp @@ -238,25 +261,12 @@ impl BucketEntryInner { } } - // Update the protocol min/max version we have to use, to include relay requirements if needed - let mut version_range = VersionRange { - min: signed_node_info.node_info().min_version, - max: signed_node_info.node_info().max_version, - }; - if let Some(relay_info) = signed_node_info.relay_info() { - version_range.min.max_assign(relay_info.min_version); - version_range.max.min_assign(relay_info.max_version); - } - if version_range.min <= version_range.max { - // Can be reached with at least one crypto version - self.min_max_version = Some(version_range); - } else { - // No valid crypto version in range - self.min_max_version = 
None; - } - + // Update the envelope version support we have to use + let envelope_support = signed_node_info.node_info().envelope_support.clone(); + // Update the signed node info *opt_current_sni = Some(Box::new(signed_node_info)); + self.set_envelope_support(envelope_support); self.updated_since_last_network_change = true; self.touch_last_seen(get_aligned_timestamp()); } @@ -310,13 +320,13 @@ impl BucketEntryInner { opt_current_sni.as_ref().map(|s| s.as_ref()) } - pub fn make_peer_info(&self, key: DHTKey, routing_domain: RoutingDomain) -> Option { let opt_current_sni = match routing_domain { RoutingDomain::LocalNetwork => &self.local_network.signed_node_info, RoutingDomain::PublicInternet => &self.public_internet.signed_node_info, }; opt_current_sni.as_ref().map(|s| PeerInfo { - node_id: NodeId::new(key), + node_ids: self.node_ids.clone(), signed_node_info: *s.clone(), }) } @@ -447,12 +457,27 @@ impl BucketEntryInner { out } - pub fn set_min_max_version(&mut self, min_max_version: VersionRange) { - self.min_max_version = Some(min_max_version); + pub fn add_envelope_version(&mut self, envelope_version: u8) { + if self.envelope_support.contains(&envelope_version) { + return; + } + self.envelope_support.push(envelope_version); + self.envelope_support.sort(); + self.envelope_support.dedup(); } - pub fn min_max_version(&self) -> Option { - self.min_max_version + pub fn set_envelope_support(&mut self, mut envelope_support: Vec) { + envelope_support.sort(); + envelope_support.dedup(); + self.envelope_support = envelope_support; + } + + pub fn envelope_support(&self) -> Vec { + self.envelope_support.clone() + } + + pub fn best_envelope_version(&self) -> Option { + self.envelope_support.iter().rev().find(|x| VALID_ENVELOPE_VERSIONS.contains(x)).copied() } pub fn state(&self, cur_ts: Timestamp) -> BucketEntryState { @@ -746,38 +771,41 @@ pub struct BucketEntry { } impl BucketEntry { - pub(super) fn
new() -> Self { + pub(super) fn new(first_node_id: TypedKey) -> Self { let now = get_aligned_timestamp(); - Self { - ref_count: AtomicU32::new(0), - inner: RwLock::new(BucketEntryInner { - min_max_version: None, - updated_since_last_network_change: false, - last_connections: BTreeMap::new(), - local_network: BucketEntryLocalNetwork { - last_seen_our_node_info_ts: Timestamp::new(0u64), - signed_node_info: None, - node_status: None, - }, - public_internet: BucketEntryPublicInternet { - last_seen_our_node_info_ts: Timestamp::new(0u64), - signed_node_info: None, - node_status: None, - }, - peer_stats: PeerStats { - time_added: now, - rpc_stats: RPCStats::default(), - latency: None, - transfer: TransferStatsDownUp::default(), - }, - latency_stats_accounting: LatencyStatsAccounting::new(), - transfer_stats_accounting: TransferStatsAccounting::new(), - #[cfg(feature = "tracking")] - next_track_id: 0, - #[cfg(feature = "tracking")] - node_ref_tracks: HashMap::new(), - }), - } + let mut node_ids = TypedKeySet::new(); + node_ids.add(first_node_id); + + let inner = BucketEntryInner { + node_ids, + envelope_support: Vec::new(), + updated_since_last_network_change: false, + last_connections: BTreeMap::new(), + local_network: BucketEntryLocalNetwork { + last_seen_our_node_info_ts: Timestamp::new(0u64), + signed_node_info: None, + node_status: None, + }, + public_internet: BucketEntryPublicInternet { + last_seen_our_node_info_ts: Timestamp::new(0u64), + signed_node_info: None, + node_status: None, + }, + peer_stats: PeerStats { + time_added: now, + rpc_stats: RPCStats::default(), + latency: None, + transfer: TransferStatsDownUp::default(), + }, + latency_stats_accounting: LatencyStatsAccounting::new(), + transfer_stats_accounting: TransferStatsAccounting::new(), + #[cfg(feature = "tracking")] + next_track_id: 0, + #[cfg(feature = "tracking")] + node_ref_tracks: HashMap::new(), + }; + + Self::new_with_inner(inner) } pub(super) fn new_with_inner(inner: BucketEntryInner) -> Self { 
diff --git a/veilid-core/src/routing_table/debug.rs b/veilid-core/src/routing_table/debug.rs index 64299c78..a3409bb3 100644 --- a/veilid-core/src/routing_table/debug.rs +++ b/veilid-core/src/routing_table/debug.rs @@ -1,5 +1,5 @@ use super::*; -use routing_table::tasks::bootstrap::BOOTSTRAP_TXT_VERSION; +use routing_table::tasks::bootstrap::BOOTSTRAP_TXT_VERSION_0; impl RoutingTable { pub(crate) fn debug_info_nodeinfo(&self) -> String { @@ -7,7 +7,7 @@ impl RoutingTable { let inner = self.inner.read(); out += "Routing Table Info:\n"; - out += &format!(" Node Id: {}\n", self.unlocked_inner.node_id.encode()); + out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids()); out += &format!( " Self Latency Stats Accounting: {:#?}\n\n", inner.self_latency_stats_accounting @@ -55,13 +55,20 @@ impl RoutingTable { short_urls.sort(); short_urls.dedup(); + let valid_envelope_versions = VALID_ENVELOPE_VERSIONS.map(|x| x.to_string()).join(","); + let node_ids = self + .unlocked_inner + .node_ids() + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(","); out += "TXT Record:\n"; out += &format!( - "{},{},{},{},{}", - BOOTSTRAP_TXT_VERSION, - MIN_CRYPTO_VERSION, - MAX_CRYPTO_VERSION, - self.node_id().encode(), + "{}|{}|{}|{}|", + BOOTSTRAP_TXT_VERSION_0, + valid_envelope_versions, + node_ids, some_hostname.unwrap() ); for short_url in short_urls { @@ -108,56 +115,53 @@ impl RoutingTable { let mut out = String::new(); - let blen = inner.buckets.len(); let mut b = 0; let mut cnt = 0; - out += &format!("Entries: {}\n", inner.bucket_entry_count); - while b < blen { - let filtered_entries: Vec<(&DHTKey, &Arc)> = inner.buckets[b] - .entries() - .filter(|e| { - let state = e.1.with(inner, |_rti, e| e.state(cur_ts)); - state >= min_state - }) - .collect(); - if !filtered_entries.is_empty() { - out += &format!(" Bucket #{}:\n", b); - for e in filtered_entries { - let state = e.1.with(inner, |_rti, e| e.state(cur_ts)); - out += &format!( - " {} [{}]\n", - e.0.encode(), - 
match state { - BucketEntryState::Reliable => "R", - BucketEntryState::Unreliable => "U", - BucketEntryState::Dead => "D", - } - ); + out += &format!("Entries: {}\n", inner.bucket_entry_count()); - cnt += 1; + for ck in &VALID_CRYPTO_KINDS { + let blen = inner.buckets[ck].len(); + while b < blen { + let filtered_entries: Vec<(&PublicKey, &Arc)> = inner.buckets[ck][b] + .entries() + .filter(|e| { + let state = e.1.with(inner, |_rti, e| e.state(cur_ts)); + state >= min_state + }) + .collect(); + if !filtered_entries.is_empty() { + out += &format!("{} Bucket #{}:\n", ck, b); + for e in filtered_entries { + let state = e.1.with(inner, |_rti, e| e.state(cur_ts)); + out += &format!( + " {} [{}]\n", + e.0.encode(), + match state { + BucketEntryState::Reliable => "R", + BucketEntryState::Unreliable => "U", + BucketEntryState::Dead => "D", + } + ); + + cnt += 1; + if cnt >= limit { + break; + } + } if cnt >= limit { break; } } - if cnt >= limit { - break; - } + b += 1; } - b += 1; } out } - pub(crate) fn debug_info_entry(&self, node_id: DHTKey) -> String { + pub(crate) fn debug_info_entry(&self, node_ref: NodeRef) -> String { let mut out = String::new(); - out += &format!("Entry {:?}:\n", node_id); - if let Some(nr) = self.lookup_node_ref(node_id) { - out += &nr.operate(|_rt, e| format!("{:#?}\n", e)); - } else { - out += "Entry not found\n"; - } - + out += &node_ref.operate(|_rt, e| format!("{:#?}\n", e)); out } @@ -168,26 +172,28 @@ impl RoutingTable { let mut out = String::new(); const COLS: usize = 16; - let rows = inner.buckets.len() / COLS; - let mut r = 0; - let mut b = 0; out += "Buckets:\n"; - while r < rows { - let mut c = 0; - out += format!(" {:>3}: ", b).as_str(); - while c < COLS { - let mut cnt = 0; - for e in inner.buckets[b].entries() { - if e.1.with(inner, |_rti, e| e.state(cur_ts) >= min_state) { - cnt += 1; + for ck in &VALID_CRYPTO_KINDS { + let rows = inner.buckets[ck].len() / COLS; + let mut r = 0; + let mut b = 0; + while r < rows { + let mut c = 0; 
+ out += format!(" {:>3}: ", b).as_str(); + while c < COLS { + let mut cnt = 0; + for e in inner.buckets[ck][b].entries() { + if e.1.with(inner, |_rti, e| e.state(cur_ts) >= min_state) { + cnt += 1; + } } + out += format!("{:>3} ", cnt).as_str(); + b += 1; + c += 1; } - out += format!("{:>3} ", cnt).as_str(); - b += 1; - c += 1; + out += "\n"; + r += 1; } - out += "\n"; - r += 1; } out diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index 8ddd5c5a..60e7dfc4 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -49,7 +49,7 @@ pub struct LowLevelPortInfo { pub protocol_to_port: ProtocolToPortMapping, } pub type RoutingTableEntryFilter<'t> = - Box>) -> bool + Send + 't>; + Box>) -> bool + Send + 't>; #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct RoutingTableHealth { @@ -65,17 +65,19 @@ pub struct RoutingTableHealth { pub local_network_ready: bool, } -pub(super) struct RoutingTableUnlockedInner { +pub type BucketIndex = (CryptoKind, usize); + +pub struct RoutingTableUnlockedInner { // Accessors config: VeilidConfig, network_manager: NetworkManager, - /// The current node's public DHT key - node_id: DHTKey, - /// The current node's DHT key secret - node_id_secret: DHTKeySecret, + /// The current node's public DHT keys + node_id: TypedKeySet, + /// The current node's public DHT secrets + node_id_secret: TypedSecretSet, /// Buckets to kick on our next kick task - kick_queue: Mutex>, + kick_queue: Mutex>, /// Background process for computing statistics rolling_transfers_task: TickTask, /// Background process to purge dead routing table entries when necessary @@ -92,6 +94,83 @@ pub(super) struct RoutingTableUnlockedInner { private_route_management_task: TickTask, } +impl RoutingTableUnlockedInner { + pub fn network_manager(&self) -> NetworkManager { + self.network_manager.clone() + } + pub fn crypto(&self) -> Crypto { + self.network_manager().crypto() + } + pub fn 
rpc_processor(&self) -> RPCProcessor { + self.network_manager().rpc_processor() + } + pub fn update_callback(&self) -> UpdateCallback { + self.network_manager().update_callback() + } + pub fn with_config(&self, f: F) -> R + where + F: FnOnce(&VeilidConfigInner) -> R, + { + f(&*self.config.get()) + } + + pub fn node_id(&self, kind: CryptoKind) -> TypedKey { + self.node_id.get(kind).unwrap() + } + + pub fn node_id_secret_key(&self, kind: CryptoKind) -> SecretKey { + self.node_id_secret.get(kind).unwrap().value + } + + pub fn node_ids(&self) -> TypedKeySet { + self.node_id.clone() + } + + pub fn node_id_typed_key_pairs(&self) -> Vec { + let mut tkps = Vec::new(); + for ck in VALID_CRYPTO_KINDS { + tkps.push(TypedKeyPair::new( + ck, + KeyPair::new(self.node_id(ck).value, self.node_id_secret_key(ck)), + )); + } + tkps + } + + pub fn matches_own_node_id(&self, node_ids: &[TypedKey]) -> bool { + for ni in node_ids { + if let Some(v) = self.node_id.get(ni.kind) { + if v.value == ni.value { + return true; + } + } + } + false + } + + pub fn matches_own_node_id_key(&self, node_id_key: &PublicKey) -> bool { + for tk in self.node_id.iter() { + if tk.value == *node_id_key { + return true; + } + } + false + } + + pub fn calculate_bucket_index(&self, node_id: &TypedKey) -> BucketIndex { + let crypto = self.crypto(); + let self_node_id_key = self.node_id(node_id.kind).value; + let vcrypto = crypto.get(node_id.kind).unwrap(); + ( + node_id.kind, + vcrypto + .distance(&node_id.value, &self_node_id_key) + .first_nonzero_bit() + .unwrap(), + ) + } +} + #[derive(Clone)] pub struct RoutingTable { inner: Arc>, @@ -104,11 +183,12 @@ impl RoutingTable { network_manager: NetworkManager, ) -> RoutingTableUnlockedInner { let c = config.get(); + RoutingTableUnlockedInner { config: config.clone(), network_manager, - node_id: c.network.node_id.unwrap(), - node_id_secret: c.network.node_id_secret.unwrap(), + node_id: c.network.routing_table.node_id.clone(), + node_id_secret: 
c.network.routing_table.node_id_secret.clone(), kick_queue: Mutex::new(BTreeSet::default()), rolling_transfers_task: TickTask::new(ROLLING_TRANSFERS_INTERVAL_SECS), kick_buckets_task: TickTask::new(1), @@ -133,30 +213,6 @@ impl RoutingTable { this } - pub fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() - } - pub fn rpc_processor(&self) -> RPCProcessor { - self.network_manager().rpc_processor() - } - pub fn update_callback(&self) -> UpdateCallback { - self.network_manager().update_callback() - } - pub fn with_config(&self, f: F) -> R - where - F: FnOnce(&VeilidConfigInner) -> R, - { - f(&*self.unlocked_inner.config.get()) - } - - pub fn node_id(&self) -> DHTKey { - self.unlocked_inner.node_id - } - - pub fn node_id_secret(&self) -> DHTKeySecret { - self.unlocked_inner.node_id_secret - } - ///////////////////////////////////// /// Initialization @@ -167,7 +223,7 @@ impl RoutingTable { // Set up routing buckets { let mut inner = self.inner.write(); - inner.init_buckets(self.clone()); + inner.init_buckets(); } // Load bucket entries from table db if possible @@ -175,7 +231,7 @@ impl RoutingTable { if let Err(e) = self.load_buckets().await { log_rtab!(debug "Error loading buckets from storage: {:#?}. 
Resetting.", e); let mut inner = self.inner.write(); - inner.init_buckets(self.clone()); + inner.init_buckets(); } // Set up routespecstore @@ -229,56 +285,99 @@ impl RoutingTable { debug!("finished routing table terminate"); } + /// Serialize routing table to table store async fn save_buckets(&self) -> EyreResult<()> { - // Serialize all entries - let mut bucketvec: Vec> = Vec::new(); + // Since entries are shared by multiple buckets per cryptokind + // we need to get the list of all unique entries when serializing + let mut all_entries: Vec> = Vec::new(); + + // Serialize all buckets and get map of entries + let mut serialized_bucket_map: BTreeMap>> = BTreeMap::new(); { + let mut entry_map: HashMap<*const BucketEntry, u32> = HashMap::new(); let inner = &*self.inner.read(); - for bucket in &inner.buckets { - bucketvec.push(bucket.save_bucket()?) + for ck in VALID_CRYPTO_KINDS { + let buckets = inner.buckets.get(&ck).unwrap(); + let mut serialized_buckets = Vec::new(); + for bucket in buckets.iter() { + serialized_buckets.push(bucket.save_bucket(&mut all_entries, &mut entry_map)?) 
+ } + serialized_bucket_map.insert(ck, serialized_buckets); } } - let table_store = self.network_manager().table_store(); + + // Serialize all the entries + let mut all_entry_bytes = Vec::with_capacity(all_entries.len()); + for entry in all_entries { + // Serialize entry + let entry_bytes = entry.with_inner(|e| to_rkyv(e))?; + all_entry_bytes.push(entry_bytes); + } + + let table_store = self.unlocked_inner.network_manager().table_store(); let tdb = table_store.open("routing_table", 1).await?; - let bucket_count = bucketvec.len(); let dbx = tdb.transact(); - if let Err(e) = dbx.store_rkyv(0, b"bucket_count", &bucket_count) { + if let Err(e) = dbx.store_rkyv(0, b"serialized_bucket_map", &serialized_bucket_map) { dbx.rollback(); return Err(e); } - - for (n, b) in bucketvec.iter().enumerate() { - dbx.store(0, format!("bucket_{}", n).as_bytes(), b) + if let Err(e) = dbx.store_rkyv(0, b"all_entry_bytes", &all_entry_bytes) { + dbx.rollback(); + return Err(e); } dbx.commit().await?; Ok(()) } + /// Deserialize routing table from table store async fn load_buckets(&self) -> EyreResult<()> { - // Deserialize all entries - let tstore = self.network_manager().table_store(); + // Deserialize bucket map and all entries from the table store + let tstore = self.unlocked_inner.network_manager().table_store(); let tdb = tstore.open("routing_table", 1).await?; - let Some(bucket_count): Option = tdb.load_rkyv(0, b"bucket_count")? else { - log_rtab!(debug "no bucket count in saved routing table"); + let Some(serialized_bucket_map): Option>>> = tdb.load_rkyv(0, b"serialized_bucket_map")? else { + log_rtab!(debug "no bucket map in saved routing table"); return Ok(()); }; - let inner = &mut *self.inner.write(); - if bucket_count != inner.buckets.len() { - // Must have the same number of buckets - warn!("bucket count is different, not loading routing table"); + let Some(all_entry_bytes): Option>> = tdb.load_rkyv(0, b"all_entry_bytes")? 
else { + log_rtab!(debug "no all_entry_bytes in saved routing table"); return Ok(()); + }; + + // Reconstruct all entries + let inner = &mut *self.inner.write(); + + let mut all_entries: Vec> = Vec::with_capacity(all_entry_bytes.len()); + for entry_bytes in all_entry_bytes { + let entryinner = + from_rkyv(entry_bytes).wrap_err("failed to deserialize bucket entry")?; + let entry = Arc::new(BucketEntry::new_with_inner(entryinner)); + + // Keep strong reference in table + all_entries.push(entry.clone()); + + // Keep all entries in weak table too + inner.all_entries.insert(entry); } - let mut bucketdata_vec: Vec> = Vec::new(); - for n in 0..bucket_count { - let Some(bucketdata): Option> = - tdb.load(0, format!("bucket_{}", n).as_bytes())? else { - warn!("bucket data not loading, skipping loading routing table"); - return Ok(()); - }; - bucketdata_vec.push(bucketdata); + + // Validate serialized bucket map + for (k, v) in &serialized_bucket_map { + if !VALID_CRYPTO_KINDS.contains(k) { + warn!("crypto kind is not valid, not loading routing table"); + return Ok(()); + } + if v.len() != PUBLIC_KEY_LENGTH * 8 { + warn!("bucket count is different, not loading routing table"); + return Ok(()); + } } - for (n, bucketdata) in bucketdata_vec.into_iter().enumerate() { - inner.buckets[n].load_bucket(bucketdata)?; + + // Recreate buckets + for (k, v) in serialized_bucket_map { + let buckets = inner.buckets.get_mut(&k).unwrap(); + + for n in 0..v.len() { + buckets[n].load_bucket(v[n].clone(), &all_entries)?; + } } Ok(()) @@ -443,7 +542,7 @@ impl RoutingTable { } /// Attempt to empty the routing table - /// should only be performed when there are no node_refs (detached) + /// May not empty buckets completely if there are existing node_refs pub fn purge_buckets(&self) { self.inner.write().purge_buckets(); } @@ -453,20 +552,25 @@ impl RoutingTable { self.inner.write().purge_last_connections(); } - fn find_bucket_index(&self, node_id: DHTKey) -> usize { - distance(&node_id, 
&self.unlocked_inner.node_id) - .first_nonzero_bit() - .unwrap() - } - pub fn get_entry_count( &self, routing_domain_set: RoutingDomainSet, min_state: BucketEntryState, + crypto_kinds: &[CryptoKind], ) -> usize { self.inner .read() - .get_entry_count(routing_domain_set, min_state) + .get_entry_count(routing_domain_set, min_state, crypto_kinds) + } + + pub fn get_entry_count_per_crypto_kind( + &self, + routing_domain_set: RoutingDomainSet, + min_state: BucketEntryState, + ) -> BTreeMap { + self.inner + .read() + .get_entry_count_per_crypto_kind(routing_domain_set, min_state) } pub fn get_nodes_needing_ping( @@ -484,32 +588,29 @@ impl RoutingTable { inner.get_all_nodes(self.clone(), cur_ts) } - fn queue_bucket_kick(&self, node_id: DHTKey) { - let idx = self.find_bucket_index(node_id); - self.unlocked_inner.kick_queue.lock().insert(idx); + fn queue_bucket_kicks(&self, node_ids: TypedKeySet) { + for node_id in node_ids.iter() { + let x = self.unlocked_inner.calculate_bucket_index(node_id); + self.unlocked_inner.kick_queue.lock().insert(x); + } } - /// Create a node reference, possibly creating a bucket entry - /// the 'update_func' closure is called on the node, and, if created, - /// in a locked fashion as to ensure the bucket entry state is always valid - pub fn create_node_ref(&self, node_id: DHTKey, update_func: F) -> Option - where - F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner), - { + /// Resolve an existing routing table entry using any crypto kind and return a reference to it + pub fn lookup_any_node_ref(&self, node_id_key: PublicKey) -> Option { self.inner - .write() - .create_node_ref(self.clone(), node_id, update_func) + .read() + .lookup_any_node_ref(self.clone(), node_id_key) } /// Resolve an existing routing table entry and return a reference to it - pub fn lookup_node_ref(&self, node_id: DHTKey) -> Option { + pub fn lookup_node_ref(&self, node_id: TypedKey) -> Option { self.inner.read().lookup_node_ref(self.clone(), node_id) } /// Resolve an 
existing routing table entry and return a filtered reference to it pub fn lookup_and_filter_noderef( &self, - node_id: DHTKey, + node_id: TypedKey, routing_domain_set: RoutingDomainSet, dial_info_filter: DialInfoFilter, ) -> Option { @@ -524,18 +625,16 @@ impl RoutingTable { /// Shortcut function to add a node to our routing table if it doesn't exist /// and add the dial info we have for it. Returns a noderef filtered to /// the routing domain in which this node was registered for convenience. - pub fn register_node_with_signed_node_info( + pub fn register_node_with_peer_info( &self, routing_domain: RoutingDomain, - node_id: DHTKey, - signed_node_info: SignedNodeInfo, + peer_info: PeerInfo, allow_invalid: bool, ) -> Option { - self.inner.write().register_node_with_signed_node_info( + self.inner.write().register_node_with_peer_info( self.clone(), routing_domain, - node_id, - signed_node_info, + peer_info, allow_invalid, ) } @@ -544,7 +643,7 @@ impl RoutingTable { /// and add the last peer address we have for it, since that's pretty common pub fn register_node_with_existing_connection( &self, - node_id: DHTKey, + node_id: TypedKey, descriptor: ConnectionDescriptor, timestamp: Timestamp, ) -> Option { @@ -563,7 +662,7 @@ impl RoutingTable { self.inner.read().get_routing_table_health() } - pub fn get_recent_peers(&self) -> Vec<(DHTKey, RecentPeersEntry)> { + pub fn get_recent_peers(&self) -> Vec<(TypedKey, RecentPeersEntry)> { let mut recent_peers = Vec::new(); let mut dead_peers = Vec::new(); let mut out = Vec::new(); @@ -602,7 +701,7 @@ impl RoutingTable { out } - pub fn touch_recent_peer(&self, node_id: DHTKey, last_connection: ConnectionDescriptor) { + pub fn touch_recent_peer(&self, node_id: TypedKey, last_connection: ConnectionDescriptor) { self.inner .write() .touch_recent_peer(node_id, last_connection) @@ -651,7 +750,7 @@ impl RoutingTable { dial_info_filter: DialInfoFilter, ) -> RoutingTableEntryFilter<'a> { // does it have matching public dial info? 
- Box::new(move |rti, _k, e| { + Box::new(move |rti, e| { if let Some(e) = e { e.with(rti, |_rti, e| { if let Some(ni) = e.node_info(routing_domain) { @@ -679,7 +778,7 @@ impl RoutingTable { dial_info: DialInfo, ) -> RoutingTableEntryFilter<'a> { // does the node's outbound capabilities match the dialinfo? - Box::new(move |rti, _k, e| { + Box::new(move |rti, e| { if let Some(e) = e { e.with(rti, |_rti, e| { if let Some(ni) = e.node_info(routing_domain) { @@ -709,27 +808,37 @@ impl RoutingTable { .find_fast_public_nodes_filtered(self.clone(), node_count, filters) } - /// Retrieve up to N of each type of protocol capable nodes - pub fn find_bootstrap_nodes_filtered(&self, max_per_type: usize) -> Vec { + /// Retrieve up to N of each type of protocol capable nodes for a single crypto kind + fn find_bootstrap_nodes_filtered_per_crypto_kind( + &self, + crypto_kind: CryptoKind, + max_per_type: usize, + ) -> Vec { let protocol_types = vec![ ProtocolType::UDP, ProtocolType::TCP, ProtocolType::WS, ProtocolType::WSS, ]; + let protocol_types_len = protocol_types.len(); let mut nodes_proto_v4 = vec![0usize, 0usize, 0usize, 0usize]; let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize]; let filter = Box::new( - move |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { - let entry = v.unwrap(); + move |rti: &RoutingTableInner, entry: Option>| { + let entry = entry.unwrap(); entry.with(rti, |_rti, e| { // skip nodes on our local network here if e.has_node_info(RoutingDomain::LocalNetwork.into()) { return false; } + // Ensure crypto kind is supported + if !e.crypto_kinds().contains(&crypto_kind) { + return false; + } + // does it have some dial info we need? 
let filter = |n: &NodeInfo| { let mut keep = false; @@ -769,12 +878,33 @@ impl RoutingTable { self.find_fastest_nodes( protocol_types_len * 2 * max_per_type, filters, - |_rti, k: DHTKey, v: Option>| { - NodeRef::new(self.clone(), k, v.unwrap().clone(), None) + |_rti, entry: Option>| { + NodeRef::new(self.clone(), entry.unwrap().clone(), None) }, ) } + /// Retrieve up to N of each type of protocol capable nodes for all crypto kinds + pub fn find_bootstrap_nodes_filtered(&self, max_per_type: usize) -> Vec { + let mut out = + self.find_bootstrap_nodes_filtered_per_crypto_kind(VALID_CRYPTO_KINDS[0], max_per_type); + + // Merge list of nodes so we don't have duplicates + for crypto_kind in &VALID_CRYPTO_KINDS[1..] { + let nrs = + self.find_bootstrap_nodes_filtered_per_crypto_kind(*crypto_kind, max_per_type); + 'nrloop: for nr in nrs { + for nro in &out { + if nro.same_entry(&nr) { + continue 'nrloop; + } + } + out.push(nr); + } + } + out + } + pub fn find_peers_with_sort_and_filter( &self, node_count: usize, @@ -786,10 +916,10 @@ impl RoutingTable { where C: for<'a, 'b> FnMut( &'a RoutingTableInner, - &'b (DHTKey, Option>), - &'b (DHTKey, Option>), + &'b Option>, + &'b Option>, ) -> core::cmp::Ordering, - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O + Send, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O + Send, { self.inner .read() @@ -803,7 +933,7 @@ impl RoutingTable { transform: T, ) -> Vec where - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O + Send, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O + Send, { self.inner .read() @@ -812,44 +942,42 @@ impl RoutingTable { pub fn find_closest_nodes<'a, T, O>( &self, - node_id: DHTKey, + node_count: usize, + node_id: TypedKey, filters: VecDeque, transform: T, ) -> Vec where - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O + Send, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O + Send, { self.inner .read() - .find_closest_nodes(node_id, filters, 
transform) + .find_closest_nodes(node_count, node_id, filters, transform) } #[instrument(level = "trace", skip(self), ret)] - pub fn register_find_node_answer(&self, peers: Vec) -> Vec { - let node_id = self.node_id(); - - // register nodes we'd found + pub fn register_find_node_answer( + &self, + crypto_kind: CryptoKind, + peers: Vec, + ) -> Vec { + // Register nodes we'd found let mut out = Vec::::with_capacity(peers.len()); for p in peers { - // if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table - if p.node_id.key == node_id { + // Ensure we're getting back nodes we asked for + if !p.node_ids.kinds().contains(&crypto_kind) { continue; } - // node can not be its own relay - if let Some(rid) = &p.signed_node_info.relay_id() { - if rid.key == p.node_id.key { - continue; - } + // Don't register our own node + if self.matches_own_node_id(&p.node_ids) { + continue; } - // register the node if it's new - if let Some(nr) = self.register_node_with_signed_node_info( - RoutingDomain::PublicInternet, - p.node_id.key, - p.signed_node_info.clone(), - false, - ) { + // Register the node if it's new + if let Some(nr) = + self.register_node_with_peer_info(RoutingDomain::PublicInternet, p, false) + { out.push(nr); } } @@ -860,7 +988,7 @@ impl RoutingTable { pub async fn find_node( &self, node_ref: NodeRef, - node_id: DHTKey, + node_id: TypedKey, ) -> EyreResult>> { let rpc_processor = self.rpc_processor(); @@ -873,29 +1001,41 @@ impl RoutingTable { // register nodes we'd found Ok(NetworkResult::value( - self.register_find_node_answer(res.answer), + self.register_find_node_answer(node_id.kind, res.answer), )) } + /// Ask a remote node to list the nodes it has around the current node #[instrument(level = "trace", skip(self), ret, err)] - pub async fn find_self(&self, node_ref: NodeRef) -> EyreResult>> { - let node_id = self.node_id(); - self.find_node(node_ref, node_id).await + pub async fn find_self( + &self, + crypto_kind: 
CryptoKind, + node_ref: NodeRef, + ) -> EyreResult>> { + let self_node_id = self.node_id(crypto_kind); + self.find_node(node_ref, self_node_id).await } + /// Ask a remote node to list the nodes it has around itself #[instrument(level = "trace", skip(self), ret, err)] - pub async fn find_target(&self, node_ref: NodeRef) -> EyreResult>> { - let node_id = node_ref.node_id(); - self.find_node(node_ref, node_id).await + pub async fn find_target( + &self, + crypto_kind: CryptoKind, + node_ref: NodeRef, + ) -> EyreResult>> { + let Some(target_node_id) = node_ref.node_ids().get(crypto_kind) else { + bail!("no target node ids for this crypto kind"); + }; + self.find_node(node_ref, target_node_id).await } #[instrument(level = "trace", skip(self))] - pub async fn reverse_find_node(&self, node_ref: NodeRef, wide: bool) { - // Ask bootstrap node to 'find' our own node so we can get some more nodes near ourselves + pub async fn reverse_find_node(&self, crypto_kind: CryptoKind, node_ref: NodeRef, wide: bool) { + // Ask node to 'find node' on own node so we can get some more nodes near ourselves // and then contact those nodes to inform -them- that we exist - // Ask bootstrap server for nodes closest to our own node - let closest_nodes = network_result_value_or_log!(match self.find_self(node_ref.clone()).await { + // Ask node for nodes closest to our own node + let closest_nodes = network_result_value_or_log!(match self.find_self(crypto_kind, node_ref.clone()).await { Err(e) => { log_rtab!(error "find_self failed for {:?}: {:?}", @@ -911,7 +1051,7 @@ impl RoutingTable { // Ask each node near us to find us as well if wide { for closest_nr in closest_nodes { - network_result_value_or_log!(match self.find_self(closest_nr.clone()).await { + network_result_value_or_log!(match self.find_self(crypto_kind, closest_nr.clone()).await { Err(e) => { log_rtab!(error "find_self failed for {:?}: {:?}", @@ -986,12 +1126,12 @@ impl RoutingTable { // Go through all entries and find fastest entry 
that matches filter function let inner = self.inner.read(); let inner = &*inner; - let mut best_inbound_relay: Option<(DHTKey, Arc)> = None; + let mut best_inbound_relay: Option> = None; // Iterate all known nodes for candidates - inner.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| { - let v2 = v.clone(); - v.with(rti, |rti, e| { + inner.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| { + let entry2 = entry.clone(); + entry.with(rti, |rti, e| { // Ensure we have the node's status if let Some(node_status) = e.node_status(routing_domain) { // Ensure the node will relay @@ -999,18 +1139,18 @@ impl RoutingTable { // Compare against previous candidate if let Some(best_inbound_relay) = best_inbound_relay.as_mut() { // Less is faster - let better = best_inbound_relay.1.with(rti, |_rti, best| { + let better = best_inbound_relay.with(rti, |_rti, best| { // choose low latency stability for relays BucketEntryInner::cmp_fastest_reliable(cur_ts, e, best) == std::cmp::Ordering::Less }); // Now apply filter function and see if this node should be included if better && relay_node_filter(e) { - *best_inbound_relay = (k, v2); + *best_inbound_relay = entry2; } } else if relay_node_filter(e) { // Always store the first candidate - best_inbound_relay = Some((k, v2)); + best_inbound_relay = Some(entry2); } } } @@ -1019,6 +1159,14 @@ impl RoutingTable { Option::<()>::None }); // Return the best inbound relay noderef - best_inbound_relay.map(|(k, e)| NodeRef::new(self.clone(), k, e, None)) + best_inbound_relay.map(|e| NodeRef::new(self.clone(), e, None)) + } +} + +impl core::ops::Deref for RoutingTable { + type Target = RoutingTableUnlockedInner; + + fn deref(&self) -> &Self::Target { + &self.unlocked_inner } } diff --git a/veilid-core/src/routing_table/node_ref.rs b/veilid-core/src/routing_table/node_ref.rs index 7e53effe..bf44ab99 100644 --- a/veilid-core/src/routing_table/node_ref.rs +++ b/veilid-core/src/routing_table/node_ref.rs @@ -6,7 +6,6 @@ use 
alloc::fmt; pub struct NodeRefBaseCommon { routing_table: RoutingTable, - node_id: DHTKey, entry: Arc, filter: Option, sequencing: Sequencing, @@ -21,6 +20,14 @@ pub trait NodeRefBase: Sized { fn common(&self) -> &NodeRefBaseCommon; fn common_mut(&mut self) -> &mut NodeRefBaseCommon; + // Comparators + fn same_entry(&self, other: &T) -> bool { + Arc::ptr_eq(&self.common().entry, &other.common().entry) + } + fn same_bucket_entry(&self, entry: &Arc) -> bool { + Arc::ptr_eq(&self.common().entry, entry) + } + // Implementation-specific operators fn operate(&self, f: F) -> T where @@ -99,8 +106,11 @@ pub trait NodeRefBase: Sized { fn routing_table(&self) -> RoutingTable { self.common().routing_table.clone() } - fn node_id(&self) -> DHTKey { - self.common().node_id + fn node_ids(&self) -> TypedKeySet { + self.operate(|_rti, e| e.node_ids()) + } + fn best_node_id(&self) -> TypedKey { + self.operate(|_rti, e| e.best_node_id()) } fn has_updated_since_last_network_change(&self) -> bool { self.operate(|_rti, e| e.has_updated_since_last_network_change()) @@ -113,11 +123,17 @@ pub trait NodeRefBase: Sized { e.update_node_status(node_status); }); } - fn min_max_version(&self) -> Option { - self.operate(|_rti, e| e.min_max_version()) + fn envelope_support(&self) -> Vec { + self.operate(|_rti, e| e.envelope_support()) } - fn set_min_max_version(&self, min_max_version: VersionRange) { - self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version)) + fn add_envelope_version(&self, envelope_version: u8) { + self.operate_mut(|_rti, e| e.add_envelope_version(envelope_version)) + } + fn set_envelope_support(&self, envelope_support: Vec) { + self.operate_mut(|_rti, e| e.set_envelope_support(envelope_support)) + } + fn best_envelope_version(&self) -> Option { + self.operate(|_rti, e| e.best_envelope_version()) } fn state(&self, cur_ts: Timestamp) -> BucketEntryState { self.operate(|_rti, e| e.state(cur_ts)) @@ -128,7 +144,7 @@ pub trait NodeRefBase: Sized { // Per-RoutingDomain 
accessors fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain)) + self.operate(|_rti, e| e.make_peer_info(routing_domain)) } fn node_info(&self, routing_domain: RoutingDomain) -> Option { self.operate(|_rti, e| e.node_info(routing_domain).cloned()) @@ -136,7 +152,7 @@ pub trait NodeRefBase: Sized { fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool { self.operate(|_rti, e| { e.signed_node_info(routing_domain) - .map(|sni| sni.has_valid_signature()) + .map(|sni| sni.has_any_signature()) .unwrap_or(false) }) } @@ -180,19 +196,18 @@ pub trait NodeRefBase: Sized { self.operate_mut(|rti, e| { e.signed_node_info(routing_domain) .and_then(|n| n.relay_peer_info()) - .and_then(|t| { + .and_then(|rpi| { // If relay is ourselves, then return None, because we can't relay through ourselves // and to contact this node we should have had an existing inbound connection - if t.node_id.key == rti.unlocked_inner.node_id { + if rti.unlocked_inner.matches_own_node_id(&rpi.node_ids) { return None; } // Register relay node and return noderef - rti.register_node_with_signed_node_info( + rti.register_node_with_peer_info( self.routing_table(), routing_domain, - t.node_id.key, - t.signed_node_info, + rpi, false, ) }) @@ -280,7 +295,7 @@ pub trait NodeRefBase: Sized { fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: Timestamp) { self.operate_mut(|rti, e| { e.set_last_connection(connection_descriptor, ts); - rti.touch_recent_peer(self.common().node_id, connection_descriptor); + rti.touch_recent_peer(e.best_node_id(), connection_descriptor); }) } @@ -346,7 +361,6 @@ pub struct NodeRef { impl NodeRef { pub fn new( routing_table: RoutingTable, - node_id: DHTKey, entry: Arc, filter: Option, ) -> Self { @@ -355,7 +369,6 @@ impl NodeRef { Self { common: NodeRefBaseCommon { routing_table, - node_id, entry, filter, sequencing: 
Sequencing::NoPreference, @@ -415,7 +428,6 @@ impl Clone for NodeRef { Self { common: NodeRefBaseCommon { routing_table: self.common.routing_table.clone(), - node_id: self.common.node_id, entry: self.common.entry.clone(), filter: self.common.filter.clone(), sequencing: self.common.sequencing, @@ -428,14 +440,14 @@ impl Clone for NodeRef { impl fmt::Display for NodeRef { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.common.node_id.encode()) + write!(f, "{}", self.common.entry.with_inner(|e| e.best_node_id())) } } impl fmt::Debug for NodeRef { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NodeRef") - .field("node_id", &self.common.node_id) + .field("node_ids", &self.common.entry.with_inner(|e| e.node_ids())) .field("filter", &self.common.filter) .field("sequencing", &self.common.sequencing) .finish() @@ -455,9 +467,10 @@ impl Drop for NodeRef { .fetch_sub(1u32, Ordering::Relaxed) - 1; if new_ref_count == 0 { - self.common - .routing_table - .queue_bucket_kick(self.common.node_id); + // get node ids with inner unlocked because nothing could be referencing this entry now + // and we don't know when it will get dropped, possibly inside a lock + let node_ids = self.common().entry.with_inner(|e| e.node_ids()); + self.common.routing_table.queue_bucket_kicks(node_ids); } } } @@ -480,6 +493,10 @@ impl<'a> NodeRefLocked<'a> { nr, } } + + pub fn unlocked(&self) -> NodeRef { + self.nr.clone() + } } impl<'a> NodeRefBase for NodeRefLocked<'a> { @@ -539,6 +556,10 @@ impl<'a> NodeRefLockedMut<'a> { nr, } } + + pub fn unlocked(&self) -> NodeRef { + self.nr.clone() + } } impl<'a> NodeRefBase for NodeRefLockedMut<'a> { diff --git a/veilid-core/src/routing_table/privacy.rs b/veilid-core/src/routing_table/privacy.rs index e902622f..e4aa4c34 100644 --- a/veilid-core/src/routing_table/privacy.rs +++ b/veilid-core/src/routing_table/privacy.rs @@ -16,20 +16,45 @@ pub struct RouteHopData { #[derive(Clone, Debug)] pub enum 
RouteNode { /// Route node is optimized, no contact method information as this node id has been seen before - NodeId(NodeId), + NodeId(PublicKey), /// Route node with full contact method information to ensure the peer is reachable PeerInfo(PeerInfo), } -impl fmt::Display for RouteNode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{}", - match self { - RouteNode::NodeId(x) => x.key.encode(), - RouteNode::PeerInfo(pi) => pi.node_id.key.encode(), + +impl RouteNode { + pub fn node_ref( + &self, + routing_table: RoutingTable, + crypto_kind: CryptoKind, + ) -> Option { + match self { + RouteNode::NodeId(id) => { + // + routing_table.lookup_node_ref(TypedKey::new(crypto_kind, *id)) } - ) + RouteNode::PeerInfo(pi) => { + // + routing_table.register_node_with_peer_info( + RoutingDomain::PublicInternet, + pi.clone(), + false, + ) + } + } + } + + pub fn describe(&self, crypto_kind: CryptoKind) -> String { + match self { + RouteNode::NodeId(id) => { + format!("{}", TypedKey::new(crypto_kind, *id)) + } + RouteNode::PeerInfo(pi) => match pi.node_ids.get(crypto_kind) { + Some(id) => format!("{}", id), + None => { + format!("({})?{}", crypto_kind, pi.node_ids) + } + }, + } } } @@ -57,14 +82,14 @@ pub enum PrivateRouteHops { #[derive(Clone, Debug)] pub struct PrivateRoute { /// The public key used for the entire route - pub public_key: DHTKey, + pub public_key: TypedKey, pub hop_count: u8, pub hops: PrivateRouteHops, } impl PrivateRoute { /// Empty private route is the form used when receiving the last hop - pub fn new_empty(public_key: DHTKey) -> Self { + pub fn new_empty(public_key: TypedKey) -> Self { Self { public_key, hop_count: 0, @@ -72,7 +97,7 @@ impl PrivateRoute { } } /// Stub route is the form used when no privacy is required, but you need to specify the destination for a safety route - pub fn new_stub(public_key: DHTKey, node: RouteNode) -> Self { + pub fn new_stub(public_key: TypedKey, node: RouteNode) -> Self { Self { public_key, 
hop_count: 1, @@ -91,6 +116,11 @@ impl PrivateRoute { false } + /// Get the crypto kind in use for this route + pub fn crypto_kind(&self) -> CryptoKind { + self.public_key.kind + } + /// Remove the first unencrypted hop if possible pub fn pop_first_hop(&mut self) -> Option { match &mut self.hops { @@ -117,15 +147,15 @@ impl PrivateRoute { } } - pub fn first_hop_node_id(&self) -> Option { + pub fn first_hop_node_id(&self) -> Option { let PrivateRouteHops::FirstHop(pr_first_hop) = &self.hops else { return None; }; // Get the safety route to use from the spec Some(match &pr_first_hop.node { - RouteNode::NodeId(n) => n.key, - RouteNode::PeerInfo(p) => p.node_id.key, + RouteNode::NodeId(n) => TypedKey::new(self.public_key.kind, *n), + RouteNode::PeerInfo(p) => p.node_ids.get(self.public_key.kind).unwrap(), }) } } @@ -138,8 +168,13 @@ impl fmt::Display for PrivateRoute { self.public_key, self.hop_count, match &self.hops { - PrivateRouteHops::FirstHop(fh) => { - format!("->{}", fh.node) + PrivateRouteHops::FirstHop(_) => { + format!( + "->{}", + self.first_hop_node_id() + .map(|n| n.to_string()) + .unwrap_or_else(|| "None".to_owned()) + ) } PrivateRouteHops::Data(_) => { "->?".to_owned() @@ -162,13 +197,14 @@ pub enum SafetyRouteHops { #[derive(Clone, Debug)] pub struct SafetyRoute { - pub public_key: DHTKey, + pub public_key: TypedKey, pub hop_count: u8, pub hops: SafetyRouteHops, } impl SafetyRoute { - pub fn new_stub(public_key: DHTKey, private_route: PrivateRoute) -> Self { + /// Stub route is the form used when no privacy is required, but you need to directly contact a private route + pub fn new_stub(public_key: TypedKey, private_route: PrivateRoute) -> Self { // First hop should have already been popped off for stubbed safety routes since // we are sending directly to the first hop assert!(matches!(private_route.hops, PrivateRouteHops::Data(_))); @@ -178,9 +214,16 @@ impl SafetyRoute { hops: SafetyRouteHops::Private(private_route), } } + + /// Check if this is a 
stub route pub fn is_stub(&self) -> bool { matches!(self.hops, SafetyRouteHops::Private(_)) } + + /// Get the crypto kind in use for this route + pub fn crypto_kind(&self) -> CryptoKind { + self.public_key.kind + } } impl fmt::Display for SafetyRoute { diff --git a/veilid-core/src/routing_table/route_spec_store.rs b/veilid-core/src/routing_table/route_spec_store.rs deleted file mode 100644 index 4c7d8d5b..00000000 --- a/veilid-core/src/routing_table/route_spec_store.rs +++ /dev/null @@ -1,2061 +0,0 @@ -use super::*; -use crate::veilid_api::*; -use rkyv::{ - with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize, -}; - -/// The size of the remote private route cache -const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024; -/// Remote private route cache entries expire in 5 minutes if they haven't been used -const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: TimestampDuration = TimestampDuration::new(300_000_000u64); -/// Amount of time a route can remain idle before it gets tested -const ROUTE_MIN_IDLE_TIME_MS: u32 = 30_000; -/// The size of the compiled route cache -const COMPILED_ROUTE_CACHE_SIZE: usize = 256; - - -// Compiled route key for caching -#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -struct CompiledRouteCacheKey { - sr_pubkey: DHTKey, - pr_pubkey: DHTKey, -} - -/// Compiled route (safety route + private route) -#[derive(Clone, Debug)] -pub struct CompiledRoute { - /// The safety route attached to the private route - pub safety_route: SafetyRoute, - /// The secret used to encrypt the message payload - pub secret: DHTKeySecret, - /// The node ref to the first hop in the compiled route - pub first_hop: NodeRef, -} - -#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct KeyPair { - key: DHTKey, - secret: DHTKeySecret, -} - -#[derive(Clone, Debug, Default, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), 
derive(CheckBytes))] -pub struct RouteStats { - /// Consecutive failed to send count - #[with(Skip)] - pub failed_to_send: u32, - /// Questions lost - #[with(Skip)] - pub questions_lost: u32, - /// Timestamp of when the route was created - pub created_ts: Timestamp, - /// Timestamp of when the route was last checked for validity - #[with(Skip)] - pub last_tested_ts: Option, - /// Timestamp of when the route was last sent to - #[with(Skip)] - pub last_sent_ts: Option, - /// Timestamp of when the route was last received over - #[with(Skip)] - pub last_received_ts: Option, - /// Transfers up and down - pub transfer_stats_down_up: TransferStatsDownUp, - /// Latency stats - pub latency_stats: LatencyStats, - /// Accounting mechanism for this route's RPC latency - #[with(Skip)] - latency_stats_accounting: LatencyStatsAccounting, - /// Accounting mechanism for the bandwidth across this route - #[with(Skip)] - transfer_stats_accounting: TransferStatsAccounting, -} - -impl RouteStats { - /// Make new route stats - pub fn new(created_ts: Timestamp) -> Self { - Self { - created_ts, - ..Default::default() - } - } - /// Mark a route as having failed to send - pub fn record_send_failed(&mut self) { - self.failed_to_send += 1; - } - - /// Mark a route as having lost a question - pub fn record_question_lost(&mut self) { - self.questions_lost += 1; - } - - /// Mark a route as having received something - pub fn record_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) { - self.last_received_ts = Some(cur_ts); - self.last_tested_ts = Some(cur_ts); - self.transfer_stats_accounting.add_down(bytes); - } - - /// Mark a route as having been sent to - pub fn record_sent(&mut self, cur_ts: Timestamp, bytes: ByteCount) { - self.last_sent_ts = Some(cur_ts); - self.transfer_stats_accounting.add_up(bytes); - } - - /// Mark a route as having been sent to - pub fn record_latency(&mut self, latency: TimestampDuration) { - self.latency_stats = 
self.latency_stats_accounting.record_latency(latency); - } - - /// Mark a route as having been tested - pub fn record_tested(&mut self, cur_ts: Timestamp) { - self.last_tested_ts = Some(cur_ts); - - // Reset question_lost and failed_to_send if we test clean - self.failed_to_send = 0; - self.questions_lost = 0; - } - - /// Roll transfers for these route stats - pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) { - self.transfer_stats_accounting.roll_transfers( - last_ts, - cur_ts, - &mut self.transfer_stats_down_up, - ) - } - - /// Get the latency stats - pub fn latency_stats(&self) -> &LatencyStats { - &self.latency_stats - } - - /// Get the transfer stats - pub fn transfer_stats(&self) -> &TransferStatsDownUp { - &self.transfer_stats_down_up - } - - /// Reset stats when network restarts - pub fn reset(&mut self) { - self.last_tested_ts = None; - self.last_sent_ts = None; - self.last_received_ts = None; - } - - /// Check if a route needs testing - pub fn needs_testing(&self, cur_ts: Timestamp) -> bool { - // Has the route had any failures lately? - if self.questions_lost > 0 || self.failed_to_send > 0 { - // If so, always test - return true; - } - - // Has the route been tested within the idle time we'd want to check things? 
- // (also if we've received successfully over the route, this will get set) - if let Some(last_tested_ts) = self.last_tested_ts { - if cur_ts.saturating_sub(last_tested_ts) - > TimestampDuration::new(ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64) - { - return true; - } - } else { - // If this route has never been tested, it needs to be - return true; - } - - false - } -} - -#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct RouteSpecDetail { - /// Secret key - #[with(Skip)] - secret_key: DHTKeySecret, - /// Route hops - hops: Vec, - /// Route noderefs - #[with(Skip)] - hop_node_refs: Vec, - /// Published private route, do not reuse for ephemeral routes - /// Not serialized because all routes should be re-published when restarting - #[with(Skip)] - published: bool, - /// Directions this route is guaranteed to work in - #[with(RkyvEnumSet)] - directions: DirectionSet, - /// Stability preference (prefer reliable nodes over faster) - stability: Stability, - /// Sequencing capability (connection oriented protocols vs datagram) - can_do_sequenced: bool, - /// Stats - stats: RouteStats, -} - -impl RouteSpecDetail { - pub fn get_stats(&self) -> &RouteStats { - &self.stats - } - pub fn get_stats_mut(&mut self) -> &mut RouteStats { - &mut self.stats - } - pub fn is_published(&self) -> bool { - self.published - } - pub fn hop_count(&self) -> usize { - self.hops.len() - } - pub fn get_secret_key(&self) -> DHTKeySecret { - self.secret_key - } - pub fn get_stability(&self) -> Stability { - self.stability - } - pub fn is_sequencing_match(&self, sequencing: Sequencing) -> bool { - match sequencing { - Sequencing::NoPreference => true, - Sequencing::PreferOrdered => true, - Sequencing::EnsureOrdered => { - self.can_do_sequenced - } - } - } -} - -/// The core representation of the RouteSpecStore that can be serialized -#[derive(Debug, Clone, Default, RkyvArchive, RkyvSerialize, RkyvDeserialize)] 
-#[archive_attr(repr(C, align(8)), derive(CheckBytes))] -pub struct RouteSpecStoreContent { - /// All of the routes we have allocated so far - details: HashMap, -} - -/// What remote private routes have seen -#[derive(Debug, Clone, Default)] -pub struct RemotePrivateRouteInfo { - // The private route itself - private_route: Option, - /// Did this remote private route see our node info due to no safety route in use - last_seen_our_node_info_ts: Timestamp, - /// Last time this remote private route was requested for any reason (cache expiration) - last_touched_ts: Timestamp, - /// Stats - stats: RouteStats, -} - -impl RemotePrivateRouteInfo { - pub fn get_stats(&self) -> &RouteStats { - &self.stats - } - pub fn get_stats_mut(&mut self) -> &mut RouteStats { - &mut self.stats - } -} - -/// Ephemeral data used to help the RouteSpecStore operate efficiently -#[derive(Debug)] -pub struct RouteSpecStoreCache { - /// How many times nodes have been used - used_nodes: HashMap, - /// How many times nodes have been used at the terminal point of a route - used_end_nodes: HashMap, - /// Route spec hop cache, used to quickly disqualify routes - hop_cache: HashSet>, - /// Has a remote private route responded to a question and when - remote_private_route_cache: LruCache, - /// Compiled route cache - compiled_route_cache: LruCache, - /// List of dead allocated routes - dead_routes: Vec, - /// List of dead remote routes - dead_remote_routes: Vec, -} - -impl Default for RouteSpecStoreCache { - fn default() -> Self { - Self { - used_nodes: Default::default(), - used_end_nodes: Default::default(), - hop_cache: Default::default(), - remote_private_route_cache: LruCache::new(REMOTE_PRIVATE_ROUTE_CACHE_SIZE), - compiled_route_cache: LruCache::new(COMPILED_ROUTE_CACHE_SIZE), - dead_routes: Default::default(), - dead_remote_routes: Default::default(), - } - } -} - -#[derive(Debug)] -pub struct RouteSpecStoreInner { - /// Serialize RouteSpecStore content - content: RouteSpecStoreContent, - /// 
RouteSpecStore cache - cache: RouteSpecStoreCache, -} - -pub struct RouteSpecStoreUnlockedInner { - /// Handle to routing table - routing_table: RoutingTable, - /// Maximum number of hops in a route - max_route_hop_count: usize, - /// Default number of hops in a route - default_route_hop_count: usize, -} - -impl fmt::Debug for RouteSpecStoreUnlockedInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RouteSpecStoreUnlockedInner") - .field("max_route_hop_count", &self.max_route_hop_count) - .field("default_route_hop_count", &self.default_route_hop_count) - .finish() - } -} - -/// The routing table's storage for private/safety routes -#[derive(Clone, Debug)] -pub struct RouteSpecStore { - inner: Arc>, - unlocked_inner: Arc, -} - -fn route_hops_to_hop_cache(hops: &[DHTKey]) -> Vec { - let mut cache: Vec = Vec::with_capacity(hops.len() * DHT_KEY_LENGTH); - for hop in hops { - cache.extend_from_slice(&hop.bytes); - } - cache -} - -/// get the hop cache key for a particular route permutation -fn route_permutation_to_hop_cache(nodes: &[PeerInfo], perm: &[usize]) -> Vec { - let mut cache: Vec = Vec::with_capacity(perm.len() * DHT_KEY_LENGTH); - for n in perm { - cache.extend_from_slice(&nodes[*n].node_id.key.bytes) - } - cache -} - -/// number of route permutations is the number of unique orderings -/// for a set of nodes, given that the first node is fixed -fn _get_route_permutation_count(hop_count: usize) -> usize { - if hop_count == 0 { - unreachable!(); - } - // a single node or two nodes is always fixed - if hop_count == 1 || hop_count == 2 { - return 1; - } - // more than two nodes has factorial permutation - // hop_count = 3 -> 2! -> 2 - // hop_count = 4 -> 3! 
-> 6 - (3..hop_count).into_iter().fold(2usize, |acc, x| acc * x) -} -type PermReturnType = (Vec, Vec, bool); -type PermFunc<'t> = Box Option + Send + 't>; - -/// get the route permutation at particular 'perm' index, starting at the 'start' index -/// for a set of 'hop_count' nodes. the first node is always fixed, and the maximum -/// number of permutations is given by get_route_permutation_count() - -fn with_route_permutations( - hop_count: usize, - start: usize, - f: &PermFunc, -) -> Option { - if hop_count == 0 { - unreachable!(); - } - // initial permutation - let mut permutation: Vec = Vec::with_capacity(hop_count); - for n in 0..hop_count { - permutation.push(start + n); - } - // if we have one hop or two, then there's only one permutation - if hop_count == 1 || hop_count == 2 { - return f(&permutation); - } - - // heaps algorithm, but skipping the first element - fn heaps_permutation( - permutation: &mut [usize], - size: usize, - f: &PermFunc, - ) -> Option { - if size == 1 { - return f(&permutation); - } - - for i in 0..size { - let out = heaps_permutation(permutation, size - 1, f); - if out.is_some() { - return out; - } - if size % 2 == 1 { - permutation.swap(1, size); - } else { - permutation.swap(1 + i, size); - } - } - - None - } - - // recurse - heaps_permutation(&mut permutation, hop_count - 1, f) -} - -impl RouteSpecStore { - pub fn new(routing_table: RoutingTable) -> Self { - let config = routing_table.network_manager().config(); - let c = config.get(); - - Self { - unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { - max_route_hop_count: c.network.rpc.max_route_hop_count.into(), - default_route_hop_count: c.network.rpc.default_route_hop_count.into(), - routing_table, - }), - inner: Arc::new(Mutex::new(RouteSpecStoreInner { - content: RouteSpecStoreContent { - details: HashMap::new(), - }, - cache: Default::default(), - })), - } - } - - #[instrument(level = "trace", skip(routing_table), err)] - pub async fn load(routing_table: RoutingTable) -> 
EyreResult { - let (max_route_hop_count, default_route_hop_count) = { - let config = routing_table.network_manager().config(); - let c = config.get(); - ( - c.network.rpc.max_route_hop_count as usize, - c.network.rpc.default_route_hop_count as usize, - ) - }; - - // Get frozen blob from table store - let table_store = routing_table.network_manager().table_store(); - let rsstdb = table_store.open("RouteSpecStore", 1).await?; - let mut content: RouteSpecStoreContent = - rsstdb.load_rkyv(0, b"content")?.unwrap_or_default(); - - // Look up all route hop noderefs since we can't serialize those - let mut dead_keys = Vec::new(); - for (k, rsd) in &mut content.details { - for h in &rsd.hops { - let Some(nr) = routing_table.lookup_node_ref(*h) else { - dead_keys.push(*k); - break; - }; - rsd.hop_node_refs.push(nr); - } - } - for k in dead_keys { - log_rtab!(debug "no entry, killing off private route: {}", k.encode()); - content.details.remove(&k); - } - - // Load secrets from pstore - let pstore = routing_table.network_manager().protected_store(); - let out: Vec = pstore - .load_user_secret_rkyv("RouteSpecStore") - .await? 
- .unwrap_or_default(); - - let mut dead_keys = Vec::new(); - for KeyPair { key, secret } in out { - if let Some(rsd) = content.details.get_mut(&key) { - rsd.secret_key = secret; - } else { - dead_keys.push(key); - } - } - for k in dead_keys { - log_rtab!(debug "killing off private route: {}", k.encode()); - content.details.remove(&k); - } - - let mut inner = RouteSpecStoreInner { - content, - cache: Default::default(), - }; - - // Rebuild the routespecstore cache - Self::rebuild_cache(&mut inner); - - let rss = RouteSpecStore { - unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { - max_route_hop_count, - default_route_hop_count, - routing_table, - }), - inner: Arc::new(Mutex::new(inner)), - }; - - Ok(rss) - } - - #[instrument(level = "trace", skip(self), err)] - pub async fn save(&self) -> EyreResult<()> { - let content = { - let inner = self.inner.lock(); - inner.content.clone() - }; - - // Save all the fields we care about to the frozen blob in table storage - let table_store = self - .unlocked_inner - .routing_table - .network_manager() - .table_store(); - let rsstdb = table_store.open("RouteSpecStore", 1).await?; - rsstdb.store_rkyv(0, b"content", &content).await?; - - // // Keep secrets in protected store as well - let pstore = self - .unlocked_inner - .routing_table - .network_manager() - .protected_store(); - - let mut out: Vec = Vec::with_capacity(content.details.len()); - for (k, v) in &content.details { - out.push(KeyPair { - key: *k, - secret: v.secret_key, - }); - } - - let _ = pstore.save_user_secret_rkyv("RouteSpecStore", &out).await?; // ignore if this previously existed or not - - Ok(()) - } - - #[instrument(level = "trace", skip(self))] - pub fn send_route_update(&self) { - let update_callback = self.unlocked_inner.routing_table.update_callback(); - - let (dead_routes, dead_remote_routes) = { - let mut inner = self.inner.lock(); - if inner.cache.dead_routes.is_empty() && inner.cache.dead_remote_routes.is_empty() { - // Nothing to do - return; 
- } - let dead_routes = core::mem::take(&mut inner.cache.dead_routes); - let dead_remote_routes = core::mem::take(&mut inner.cache.dead_remote_routes); - (dead_routes, dead_remote_routes) - }; - - let update = VeilidUpdate::Route(VeilidStateRoute { - dead_routes, - dead_remote_routes, - }); - - update_callback(update); - } - - fn add_to_cache(cache: &mut RouteSpecStoreCache, cache_key: Vec, rsd: &RouteSpecDetail) { - if !cache.hop_cache.insert(cache_key) { - panic!("route should never be inserted twice"); - } - for h in &rsd.hops { - cache - .used_nodes - .entry(*h) - .and_modify(|e| *e += 1) - .or_insert(1); - } - cache - .used_end_nodes - .entry(*rsd.hops.last().unwrap()) - .and_modify(|e| *e += 1) - .or_insert(1); - } - - fn rebuild_cache(inner: &mut RouteSpecStoreInner) { - for v in inner.content.details.values() { - let cache_key = route_hops_to_hop_cache(&v.hops); - Self::add_to_cache(&mut inner.cache, cache_key, &v); - } - } - - fn detail<'a>( - inner: &'a RouteSpecStoreInner, - public_key: &DHTKey, - ) -> Option<&'a RouteSpecDetail> { - inner.content.details.get(public_key) - } - fn detail_mut<'a>( - inner: &'a mut RouteSpecStoreInner, - public_key: &DHTKey, - ) -> Option<&'a mut RouteSpecDetail> { - inner.content.details.get_mut(public_key) - } - - /// Purge the route spec store - pub async fn purge(&self) -> EyreResult<()> { - { - let inner = &mut *self.inner.lock(); - inner.content = Default::default(); - inner.cache = Default::default(); - } - self.save().await - } - - /// Create a new route - /// Prefers nodes that are not currently in use by another route - /// The route is not yet tested for its reachability - /// Returns None if no route could be allocated at this time - #[instrument(level = "trace", skip(self), ret, err)] - pub fn allocate_route( - &self, - stability: Stability, - sequencing: Sequencing, - hop_count: usize, - directions: DirectionSet, - avoid_node_ids: &[DHTKey], - ) -> EyreResult> { - let inner = &mut *self.inner.lock(); - let 
routing_table = self.unlocked_inner.routing_table.clone(); - let rti = &mut *routing_table.inner.write(); - - self.allocate_route_inner( - inner, - rti, - stability, - sequencing, - hop_count, - directions, - avoid_node_ids, - ) - } - - #[instrument(level = "trace", skip(self, inner, rti), ret, err)] - fn allocate_route_inner( - &self, - inner: &mut RouteSpecStoreInner, - rti: &RoutingTableInner, - stability: Stability, - sequencing: Sequencing, - hop_count: usize, - directions: DirectionSet, - avoid_node_ids: &[DHTKey], - ) -> EyreResult> { - use core::cmp::Ordering; - - if hop_count < 1 { - bail!("Not allocating route less than one hop in length"); - } - - if hop_count > self.unlocked_inner.max_route_hop_count { - bail!("Not allocating route longer than max route hop count"); - } - - let Some(our_peer_info) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else { - bail!("Can't allocate route until we have our own peer info"); - }; - - // Get relay node id if we have one - let opt_relay_id = rti - .relay_node(RoutingDomain::PublicInternet) - .map(|nr| nr.node_id()); - - // Get list of all nodes, and sort them for selection - let cur_ts = get_aligned_timestamp(); - let filter = Box::new( - move |rti: &RoutingTableInner, k: DHTKey, v: Option>| -> bool { - // Exclude our own node from routes - if v.is_none() { - return false; - } - let v = v.unwrap(); - - // Exclude our relay if we have one - if let Some(own_relay_id) = opt_relay_id { - if k == own_relay_id { - return false; - } - } - - // Exclude nodes we have specifically chosen to avoid - if avoid_node_ids.contains(&k) { - return false; - } - - // Process node info exclusions - let keep = v.with(rti, |_rti, e| { - // Exclude nodes on our local network - if e.node_info(RoutingDomain::LocalNetwork).is_some() { - return false; - } - // Exclude nodes that have no publicinternet signednodeinfo - let Some(sni) = e.signed_node_info(RoutingDomain::PublicInternet) else { - return false; - }; - // Relay check - if let 
Some(relay_id) = sni.relay_id() { - // Exclude nodes whose relays we have chosen to avoid - if avoid_node_ids.contains(&relay_id.key) { - return false; - } - // Exclude nodes whose relay is our own relay if we have one - if let Some(own_relay_id) = opt_relay_id { - if own_relay_id == relay_id.key { - return false; - } - } - } - return true; - }); - if !keep { - return false; - } - - // Exclude nodes with no publicinternet nodeinfo, or incompatible nodeinfo or node status won't route - v.with(rti, move |_rti, e| { - let node_info_ok = - if let Some(sni) = e.signed_node_info(RoutingDomain::PublicInternet) { - sni.has_sequencing_matched_dial_info(sequencing) - } else { - false - }; - let node_status_ok = - if let Some(ns) = e.node_status(RoutingDomain::PublicInternet) { - ns.will_route() - } else { - false - }; - - node_info_ok && node_status_ok - }) - }, - ) as RoutingTableEntryFilter; - let filters = VecDeque::from([filter]); - let compare = |rti: &RoutingTableInner, - v1: &(DHTKey, Option>), - v2: &(DHTKey, Option>)| - -> Ordering { - // deprioritize nodes that we have already used as end points - let e1_used_end = inner - .cache - .used_end_nodes - .get(&v1.0) - .cloned() - .unwrap_or_default(); - let e2_used_end = inner - .cache - .used_end_nodes - .get(&v2.0) - .cloned() - .unwrap_or_default(); - let cmp_used_end = e1_used_end.cmp(&e2_used_end); - if !matches!(cmp_used_end, Ordering::Equal) { - return cmp_used_end; - } - - // deprioritize nodes we have used already anywhere - let e1_used = inner - .cache - .used_nodes - .get(&v1.0) - .cloned() - .unwrap_or_default(); - let e2_used = inner - .cache - .used_nodes - .get(&v2.0) - .cloned() - .unwrap_or_default(); - let cmp_used = e1_used.cmp(&e2_used); - if !matches!(cmp_used, Ordering::Equal) { - return cmp_used; - } - - // apply sequencing preference - // ensureordered will be taken care of by filter - // and nopreference doesn't care - if matches!(sequencing, Sequencing::PreferOrdered) { - let cmp_seq = 
v1.1.as_ref().unwrap().with(rti, |rti, e1| { - v2.1.as_ref() - .unwrap() - .with(rti, |_rti, e2| { - let e1_can_do_ordered = e1.signed_node_info(RoutingDomain::PublicInternet).map(|sni| sni.has_sequencing_matched_dial_info(sequencing)).unwrap_or(false); - let e2_can_do_ordered = e2.signed_node_info(RoutingDomain::PublicInternet).map(|sni| sni.has_sequencing_matched_dial_info(sequencing)).unwrap_or(false); - e2_can_do_ordered.cmp(&e1_can_do_ordered) - }) - }); - if !matches!(cmp_seq, Ordering::Equal) { - return cmp_seq; - } - } - - // always prioritize reliable nodes, but sort by oldest or fastest - let cmpout = v1.1.as_ref().unwrap().with(rti, |rti, e1| { - v2.1.as_ref() - .unwrap() - .with(rti, |_rti, e2| match stability { - Stability::LowLatency => { - BucketEntryInner::cmp_fastest_reliable(cur_ts, e1, e2) - } - Stability::Reliable => { - BucketEntryInner::cmp_oldest_reliable(cur_ts, e1, e2) - } - }) - }); - cmpout - }; - let transform = - |rti: &RoutingTableInner, k: DHTKey, v: Option>| -> PeerInfo { - // Return the peerinfo for that key - v.unwrap().with(rti, |_rti, e| { - e.make_peer_info(k, RoutingDomain::PublicInternet.into()) - .unwrap() - .clone() - }) - }; - - // Pull the whole routing table in sorted order - let node_count = rti.get_entry_count( - RoutingDomain::PublicInternet.into(), - BucketEntryState::Unreliable, - ); - let nodes = - rti.find_peers_with_sort_and_filter(node_count, cur_ts, filters, compare, transform); - - // If we couldn't find enough nodes, wait until we have more nodes in the routing table - if nodes.len() < hop_count { - log_rtab!(debug "not enough nodes to construct route at this time"); - return Ok(None); - } - - // Now go through nodes and try to build a route we haven't seen yet - let perm_func = Box::new(|permutation: &[usize]| { - // Get the route cache key - let cache_key = route_permutation_to_hop_cache(&nodes, permutation); - - // Skip routes we have already seen - if inner.cache.hop_cache.contains(&cache_key) { - return 
None; - } - - // Ensure the route doesn't contain both a node and its relay - let mut seen_nodes: HashSet = HashSet::new(); - for n in permutation { - let node = nodes.get(*n).unwrap(); - if !seen_nodes.insert(node.node_id.key) { - // Already seen this node, should not be in the route twice - return None; - } - if let Some(relay_id) = node.signed_node_info.relay_id() { - if !seen_nodes.insert(relay_id.key) { - // Already seen this node, should not be in the route twice - return None; - } - } - } - - // Ensure this route is viable by checking that each node can contact the next one - let mut can_do_sequenced = true; - if directions.contains(Direction::Outbound) { - let mut previous_node = &our_peer_info; - let mut reachable = true; - for n in permutation { - let current_node = nodes.get(*n).unwrap(); - let cm = rti.get_contact_method( - RoutingDomain::PublicInternet, - previous_node, - current_node, - DialInfoFilter::all(), - sequencing, - ); - if matches!(cm, ContactMethod::Unreachable) { - reachable = false; - break; - } - - // Check if we can do sequenced specifically - if can_do_sequenced { - let cm = rti.get_contact_method( - RoutingDomain::PublicInternet, - previous_node, - current_node, - DialInfoFilter::all(), - Sequencing::EnsureOrdered, - ); - if matches!(cm, ContactMethod::Unreachable) { - can_do_sequenced = false; - } - } - - previous_node = current_node; - } - if !reachable { - return None; - } - } - if directions.contains(Direction::Inbound) { - let mut next_node = &our_peer_info; - let mut reachable = true; - for n in permutation.iter().rev() { - let current_node = nodes.get(*n).unwrap(); - let cm = rti.get_contact_method( - RoutingDomain::PublicInternet, - next_node, - current_node, - DialInfoFilter::all(), - sequencing, - ); - if matches!(cm, ContactMethod::Unreachable) { - reachable = false; - break; - } - - // Check if we can do sequenced specifically - if can_do_sequenced { - let cm = rti.get_contact_method( - RoutingDomain::PublicInternet, - 
next_node, - current_node, - DialInfoFilter::all(), - Sequencing::EnsureOrdered, - ); - if matches!(cm, ContactMethod::Unreachable) { - can_do_sequenced = false; - } - } - next_node = current_node; - } - if !reachable { - return None; - } - } - // Keep this route - let route_nodes = permutation.to_vec(); - Some((route_nodes, cache_key, can_do_sequenced)) - }) as PermFunc; - - let mut route_nodes: Vec = Vec::new(); - let mut cache_key: Vec = Vec::new(); - let mut can_do_sequenced: bool = true; - - for start in 0..(nodes.len() - hop_count) { - // Try the permutations available starting with 'start' - if let Some((rn, ck, cds)) = with_route_permutations(hop_count, start, &perm_func) { - route_nodes = rn; - cache_key = ck; - can_do_sequenced = cds; - break; - } - } - if route_nodes.is_empty() { - log_rtab!(debug "unable to find unique route at this time"); - return Ok(None); - } - - // Got a unique route, lets build the detail, register it, and return it - let hops: Vec = route_nodes.iter().map(|v| nodes[*v].node_id.key).collect(); - let hop_node_refs = hops - .iter() - .map(|k| { - rti.lookup_node_ref(self.unlocked_inner.routing_table.clone(), *k) - .unwrap() - }) - .collect(); - - let (public_key, secret_key) = generate_secret(); - - - - let rsd = RouteSpecDetail { - secret_key, - hops, - hop_node_refs, - published: false, - directions, - stability, - can_do_sequenced, - stats: RouteStats::new(cur_ts), - }; - - drop(perm_func); - - // Add to cache - Self::add_to_cache(&mut inner.cache, cache_key, &rsd); - - // Keep route in spec store - inner.content.details.insert(public_key, rsd); - - Ok(Some(public_key)) - } - - #[instrument(level = "trace", skip(self, data, callback), ret)] - pub fn with_signature_validated_route( - &self, - public_key: &DHTKey, - signatures: &[DHTSignature], - data: &[u8], - last_hop_id: DHTKey, - callback: F, - ) -> Option - where F: FnOnce(&RouteSpecDetail) -> R, - R: fmt::Debug, - { - let inner = &*self.inner.lock(); - let Some(rsd) = 
Self::detail(inner, &public_key) else { - log_rpc!(debug "route does not exist: {:?}", public_key); - return None; - }; - - // Ensure we have the right number of signatures - if signatures.len() != rsd.hops.len() - 1 { - // Wrong number of signatures - log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key); - return None; - } - // Validate signatures to ensure the route was handled by the nodes and not messed with - // This is in private route (reverse) order as we are receiving over the route - for (hop_n, hop_public_key) in rsd.hops.iter().rev().enumerate() { - // The last hop is not signed, as the whole packet is signed - if hop_n == signatures.len() { - // Verify the node we received the routed operation from is the last hop in our route - if *hop_public_key != last_hop_id { - log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key); - return None; - } - } else { - // Verify a signature for a hop node along the route - if let Err(e) = verify(hop_public_key, data, &signatures[hop_n]) { - log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); - return None; - } - } - } - // We got the correct signatures, return a key and response safety spec - Some(callback(rsd)) - } - - #[instrument(level = "trace", skip(self), ret, err)] - async fn test_allocated_route(&self, key: &DHTKey) -> EyreResult { - // Make loopback route to test with - let dest = { - let private_route = self.assemble_private_route(key, None)?; - - let inner = &mut *self.inner.lock(); - let rsd = Self::detail(inner, &key).ok_or_else(|| eyre!("route does not exist"))?; - - // Match the private route's hop length for safety route length - let hop_count = rsd.hops.len(); - // Always test routes with safety routes that are more likely to 
succeed - let stability = Stability::Reliable; - // Routes can test with whatever sequencing they were allocated with - let sequencing = Sequencing::NoPreference; - - let safety_spec = SafetySpec { - preferred_route: Some(key.clone()), - hop_count, - stability, - sequencing, - }; - let safety_selection = SafetySelection::Safe(safety_spec); - - Destination::PrivateRoute { - private_route, - safety_selection, - } - }; - - // Test with double-round trip ping to self - let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); - let _res = match rpc_processor.rpc_call_status(dest).await? { - NetworkResult::Value(v) => v, - _ => { - // Did not error, but did not come back, just return false - return Ok(false); - } - }; - - Ok(true) - } - - #[instrument(level = "trace", skip(self), ret, err)] - async fn test_remote_route(&self, key: &DHTKey) -> EyreResult { - // Make private route test - let dest = { - // Get the route to test - let private_route = match self.peek_remote_private_route(key) { - Some(pr) => pr, - None => return Ok(false), - }; - - // Get a safety route that is good enough - let safety_spec = SafetySpec { - preferred_route: None, - hop_count: self.unlocked_inner.default_route_hop_count, - stability: Stability::default(), - sequencing: Sequencing::default(), - }; - - let safety_selection = SafetySelection::Safe(safety_spec); - - Destination::PrivateRoute { - private_route, - safety_selection, - } - }; - - // Test with double-round trip ping to self - let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); - let _res = match rpc_processor.rpc_call_status(dest).await? 
{ - NetworkResult::Value(v) => v, - _ => { - // Did not error, but did not come back, just return false - return Ok(false); - } - }; - - Ok(true) - } - - /// Test an allocated route for continuity - #[instrument(level = "trace", skip(self), ret, err)] - pub async fn test_route(&self, key: &DHTKey) -> EyreResult { - let is_remote = { - let inner = &mut *self.inner.lock(); - let cur_ts = get_aligned_timestamp(); - Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some() - }; - if is_remote { - self.test_remote_route(key).await - } else { - self.test_allocated_route(key).await - } - } - - /// Release an allocated route that is no longer in use - #[instrument(level = "trace", skip(self), ret)] - fn release_allocated_route(&self, public_key: &DHTKey) -> bool { - let mut inner = self.inner.lock(); - let Some(detail) = inner.content.details.remove(public_key) else { - return false; - }; - - // Mark it as dead for the update - inner.cache.dead_routes.push(*public_key); - - // Remove from hop cache - let cache_key = route_hops_to_hop_cache(&detail.hops); - if !inner.cache.hop_cache.remove(&cache_key) { - panic!("hop cache should have contained cache key"); - } - // Remove from used nodes cache - for h in &detail.hops { - match inner.cache.used_nodes.entry(*h) { - std::collections::hash_map::Entry::Occupied(mut o) => { - *o.get_mut() -= 1; - if *o.get() == 0 { - o.remove(); - } - } - std::collections::hash_map::Entry::Vacant(_) => { - panic!("used_nodes cache should have contained hop"); - } - } - } - // Remove from end nodes cache - match inner - .cache - .used_end_nodes - .entry(*detail.hops.last().unwrap()) - { - std::collections::hash_map::Entry::Occupied(mut o) => { - *o.get_mut() -= 1; - if *o.get() == 0 { - o.remove(); - } - } - std::collections::hash_map::Entry::Vacant(_) => { - panic!("used_end_nodes cache should have contained hop"); - } - } - true - } - - /// Release an allocated or remote route that is no longer in use - #[instrument(level = 
"trace", skip(self), ret)] - pub fn release_route(&self, key: &DHTKey) -> bool { - - let is_remote = { - let inner = &mut *self.inner.lock(); - - // Release from compiled route cache if it's used there - self.invalidate_compiled_route_cache(inner, key); - - // Check to see if this is a remote route - let cur_ts = get_aligned_timestamp(); - Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some() - }; - - if is_remote { - self.release_remote_private_route(key) - } else { - self.release_allocated_route(key) - } - } - - /// Find first matching unpublished route that fits into the selection criteria - /// Don't pick any routes that have failed and haven't been tested yet - fn first_available_route_inner<'a>( - inner: &'a RouteSpecStoreInner, - min_hop_count: usize, - max_hop_count: usize, - stability: Stability, - sequencing: Sequencing, - directions: DirectionSet, - avoid_node_ids: &[DHTKey], - ) -> Option { - let cur_ts = get_aligned_timestamp(); - - let mut routes = Vec::new(); - - // Get all valid routes, allow routes that need testing - // but definitely prefer routes that have been recently tested - for detail in &inner.content.details { - if detail.1.stability >= stability - && detail.1.is_sequencing_match(sequencing) - && detail.1.hops.len() >= min_hop_count - && detail.1.hops.len() <= max_hop_count - && detail.1.directions.is_superset(directions) - && !detail.1.published - { - let mut avoid = false; - for h in &detail.1.hops { - if avoid_node_ids.contains(h) { - avoid = true; - break; - } - } - if !avoid { - routes.push(detail); - } - } - } - - // Sort the routes by preference - routes.sort_by(|a, b| { - let a_needs_testing = a.1.stats.needs_testing(cur_ts); - let b_needs_testing = b.1.stats.needs_testing(cur_ts); - if !a_needs_testing && b_needs_testing { - return cmp::Ordering::Less; - } - if !b_needs_testing && a_needs_testing { - return cmp::Ordering::Greater; - } - let a_latency = a.1.stats.latency_stats().average; - let b_latency = 
b.1.stats.latency_stats().average; - - a_latency.cmp(&b_latency) - }); - - // Return the best one if we got one - routes.first().map(|r| *r.0) - } - - /// List all allocated routes - pub fn list_allocated_routes(&self, mut filter: F) -> Vec - where - F: FnMut(&DHTKey, &RouteSpecDetail) -> Option, - { - let inner = self.inner.lock(); - let mut out = Vec::with_capacity(inner.content.details.len()); - for detail in &inner.content.details { - if let Some(x) = filter(detail.0, detail.1) { - out.push(x); - } - } - out - } - - /// List all allocated routes - pub fn list_remote_routes(&self, mut filter: F) -> Vec - where - F: FnMut(&DHTKey, &RemotePrivateRouteInfo) -> Option, - { - let inner = self.inner.lock(); - let mut out = Vec::with_capacity(inner.cache.remote_private_route_cache.len()); - for info in &inner.cache.remote_private_route_cache { - if let Some(x) = filter(info.0, info.1) { - out.push(x); - } - } - out - } - - /// Get the debug description of a route - pub fn debug_route(&self, key: &DHTKey) -> Option { - let inner = &mut *self.inner.lock(); - let cur_ts = get_aligned_timestamp(); - // If this is a remote route, print it - if let Some(s) = - Self::with_peek_remote_private_route(inner, cur_ts, key, |rpi| format!("{:#?}", rpi)) - { - return Some(s); - } - // Otherwise check allocated routes - Self::detail(inner, key).map(|rsd| format!("{:#?}", rsd)) - } - - ////////////////////////////////////////////////////////////////////// - - // Route cache - fn add_to_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, pr_pubkey: DHTKey, safety_route: SafetyRoute) - { - let key = CompiledRouteCacheKey { - sr_pubkey: safety_route.public_key, - pr_pubkey, - }; - - if let Some(v) = inner.cache.compiled_route_cache.insert(key, safety_route) { - log_rtab!(error "route cache already contained key: sr_pubkey={:?}, pr_pubkey={:?}", v.public_key, pr_pubkey); - } - } - - fn lookup_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, sr_pubkey: DHTKey, pr_pubkey: 
DHTKey) -> Option { - - let key = CompiledRouteCacheKey { - sr_pubkey, - pr_pubkey, - }; - - inner.cache.compiled_route_cache.get(&key).cloned() - } - - fn invalidate_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, dead_key: &DHTKey) { - let mut dead_entries = Vec::new(); - for (k, _v) in inner.cache.compiled_route_cache.iter() { - if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key { - dead_entries.push(k.clone()); - } - } - for d in dead_entries { - inner.cache.compiled_route_cache.remove(&d); - } - } - - /// Compiles a safety route to the private route, with caching - /// Returns an Err() if the parameters are wrong - /// Returns Ok(None) if no allocation could happen at this time (not an error) - pub fn compile_safety_route( - &self, - safety_selection: SafetySelection, - mut private_route: PrivateRoute, - ) -> EyreResult> { - // let profile_start_ts = get_timestamp(); - - let inner = &mut *self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); - let rti = &mut *routing_table.inner.write(); - - let pr_pubkey = private_route.public_key; - let pr_hopcount = private_route.hop_count as usize; - let max_route_hop_count = self.unlocked_inner.max_route_hop_count; - // Check private route hop count isn't larger than the max route hop count plus one for the 'first hop' header - if pr_hopcount > (max_route_hop_count + 1) { - bail!("private route hop count too long"); - } - // See if we are using a safety route, if not, short circuit this operation - let safety_spec = match safety_selection { - // Safety route spec to use - SafetySelection::Safe(safety_spec) => safety_spec, - // Safety route stub with the node's public key as the safety route key since it's the 0th hop - SafetySelection::Unsafe(sequencing) => { - let Some(pr_first_hop_node) = private_route.pop_first_hop() else { - bail!("compiled private route should have first hop"); - }; - - let opt_first_hop = match pr_first_hop_node { - RouteNode::NodeId(id) => 
rti.lookup_node_ref(routing_table.clone(), id.key), - RouteNode::PeerInfo(pi) => rti.register_node_with_signed_node_info( - routing_table.clone(), - RoutingDomain::PublicInternet, - pi.node_id.key, - pi.signed_node_info.clone(), - false, - ), - }; - if opt_first_hop.is_none() { - // Can't reach this private route any more - log_rtab!(debug "can't reach private route any more"); - return Ok(None); - } - let mut first_hop = opt_first_hop.unwrap(); - - // Set sequencing requirement - first_hop.set_sequencing(sequencing); - - // Return the compiled safety route - //println!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts)); - return Ok(Some(CompiledRoute { - safety_route: SafetyRoute::new_stub(routing_table.node_id(), private_route), - secret: routing_table.node_id_secret(), - first_hop, - })); - } - }; - - // If the safety route requested is also the private route, this is a loopback test, just accept it - let sr_pubkey = if safety_spec.preferred_route == Some(private_route.public_key) { - // Private route is also safety route during loopback test - private_route.public_key - } else { - let Some(avoid_node_id) = private_route.first_hop_node_id() else { - bail!("compiled private route should have first hop"); - }; - let Some(sr_pubkey) = self.get_route_for_safety_spec_inner(inner, rti, &safety_spec, Direction::Outbound.into(), &[avoid_node_id])? 
else { - // No safety route could be found for this spec - return Ok(None); - }; - sr_pubkey - }; - - // Look up a few things from the safety route detail we want for the compiled route and don't borrow inner - let (optimize, first_hop, secret) = { - let safety_rsd = Self::detail(inner, &sr_pubkey).ok_or_else(|| eyre!("route missing"))?; - - // We can optimize the peer info in this safety route if it has been successfully - // communicated over either via an outbound test, or used as a private route inbound - // and we are replying over the same route as our safety route outbound - let optimize = safety_rsd.stats.last_tested_ts.is_some() || safety_rsd.stats.last_received_ts.is_some(); - - // Get the first hop noderef of the safety route - let mut first_hop = safety_rsd.hop_node_refs.first().unwrap().clone(); - // Ensure sequencing requirement is set on first hop - first_hop.set_sequencing(safety_spec.sequencing); - - // Get the safety route secret key - let secret = safety_rsd.secret_key; - - (optimize, first_hop, secret) - }; - - // See if we have a cached route we can use - if optimize { - if let Some(safety_route) = self.lookup_compiled_route_cache(inner, sr_pubkey, pr_pubkey) { - // Build compiled route - let compiled_route = CompiledRoute { - safety_route, - secret, - first_hop, - }; - // Return compiled route - //println!("compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts)); - return Ok(Some(compiled_route)); - } - } - - // Create hops - let hops = { - let safety_rsd = Self::detail(inner, &sr_pubkey).ok_or_else(|| eyre!("route missing"))?; - - // start last blob-to-encrypt data off as private route - let mut blob_data = { - let mut pr_message = ::capnp::message::Builder::new_default(); - let mut pr_builder = pr_message.init_root::(); - encode_private_route(&private_route, &mut pr_builder)?; - let mut blob_data = builder_to_vec(pr_message)?; - - // append the private route tag so we know how to decode it later - 
blob_data.push(1u8); - blob_data - }; - - // Encode each hop from inside to outside - // skips the outermost hop since that's entering the - // safety route and does not include the dialInfo - // (outer hop is a RouteHopData, not a RouteHop). - // Each loop mutates 'nonce', and 'blob_data' - let mut nonce = Crypto::get_random_nonce(); - let crypto = routing_table.network_manager().crypto(); - // Forward order (safety route), but inside-out - for h in (1..safety_rsd.hops.len()).rev() { - // Get blob to encrypt for next hop - blob_data = { - // Encrypt the previous blob ENC(nonce, DH(PKhop,SKsr)) - let dh_secret = crypto - .cached_dh(&safety_rsd.hops[h], &safety_rsd.secret_key) - .wrap_err("dh failed")?; - let enc_msg_data = - Crypto::encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) - .wrap_err("encryption failed")?; - - // Make route hop data - let route_hop_data = RouteHopData { - nonce, - blob: enc_msg_data, - }; - - // Make route hop - let route_hop = RouteHop { - node: if optimize { - // Optimized, no peer info, just the dht key - RouteNode::NodeId(NodeId::new(safety_rsd.hops[h])) - } else { - // Full peer info, required until we are sure the route has been fully established - let node_id = safety_rsd.hops[h]; - let pi = rti - .with_node_entry(node_id, |entry| { - entry.with(rti, |_rti, e| { - e.make_peer_info(node_id, RoutingDomain::PublicInternet) - }) - }) - .flatten(); - if pi.is_none() { - bail!("peer info should exist for route but doesn't"); - } - RouteNode::PeerInfo(pi.unwrap()) - }, - next_hop: Some(route_hop_data), - }; - - // Make next blob from route hop - let mut rh_message = ::capnp::message::Builder::new_default(); - let mut rh_builder = rh_message.init_root::(); - encode_route_hop(&route_hop, &mut rh_builder)?; - let mut blob_data = builder_to_vec(rh_message)?; - - // Append the route hop tag so we know how to decode it later - blob_data.push(0u8); - blob_data - }; - - // Make another nonce for the next hop - nonce = 
Crypto::get_random_nonce(); - } - - // Encode first RouteHopData - let dh_secret = crypto - .cached_dh(&safety_rsd.hops[0], &safety_rsd.secret_key) - .map_err(RPCError::map_internal("dh failed"))?; - let enc_msg_data = Crypto::encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) - .map_err(RPCError::map_internal("encryption failed"))?; - - let route_hop_data = RouteHopData { - nonce, - blob: enc_msg_data, - }; - - SafetyRouteHops::Data(route_hop_data) - }; - - // Build safety route - let safety_route = SafetyRoute { - public_key: sr_pubkey, - hop_count: safety_spec.hop_count as u8, - hops, - }; - - // Add to cache but only if we have an optimized route - if optimize { - self.add_to_compiled_route_cache(inner, pr_pubkey, safety_route.clone()); - } - - // Build compiled route - let compiled_route = CompiledRoute { - safety_route, - secret, - first_hop, - }; - - // Return compiled route - //println!("compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts)); - Ok(Some(compiled_route)) - } - - /// Get a route that matches a particular safety spec - #[instrument(level = "trace", skip(self, inner, rti), ret, err)] - fn get_route_for_safety_spec_inner( - &self, - inner: &mut RouteSpecStoreInner, - rti: &RoutingTableInner, - safety_spec: &SafetySpec, - direction: DirectionSet, - avoid_node_ids: &[DHTKey], - ) -> EyreResult> { - // Ensure the total hop count isn't too long for our config - let max_route_hop_count = self.unlocked_inner.max_route_hop_count; - if safety_spec.hop_count == 0 { - bail!("safety route hop count is zero"); - } - if safety_spec.hop_count > max_route_hop_count { - bail!("safety route hop count too long"); - } - - // See if the preferred route is here - if let Some(preferred_route) = safety_spec.preferred_route { - if let Some(preferred_rsd) = inner.content.details.get(&preferred_route) { - // Only use the preferred route if it doesn't end with the avoid nodes - if 
!avoid_node_ids.contains(preferred_rsd.hops.last().unwrap()) { - return Ok(Some(preferred_route)); - } - } - } - - // Select a safety route from the pool or make one if we don't have one that matches - let sr_pubkey = if let Some(sr_pubkey) = Self::first_available_route_inner( - inner, - safety_spec.hop_count, - safety_spec.hop_count, - safety_spec.stability, - safety_spec.sequencing, - direction, - avoid_node_ids, - ) { - // Found a route to use - sr_pubkey - } else { - // No route found, gotta allocate one - let sr_pubkey = match self - .allocate_route_inner( - inner, - rti, - safety_spec.stability, - safety_spec.sequencing, - safety_spec.hop_count, - direction, - avoid_node_ids, - ) - .map_err(RPCError::internal)? - { - Some(pk) => pk, - None => return Ok(None), - }; - sr_pubkey - }; - Ok(Some(sr_pubkey)) - } - - /// Get a private sroute to use for the answer to question - #[instrument(level = "trace", skip(self), ret, err)] - pub fn get_private_route_for_safety_spec( - &self, - safety_spec: &SafetySpec, - avoid_node_ids: &[DHTKey], - ) -> EyreResult> { - let inner = &mut *self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); - let rti = &*routing_table.inner.read(); - - Ok(self.get_route_for_safety_spec_inner( - inner, - rti, - safety_spec, - Direction::Inbound.into(), - avoid_node_ids, - )?) 
- } - - /// Assemble private route for publication - #[instrument(level = "trace", skip(self), err)] - pub fn assemble_private_route( - &self, - key: &DHTKey, - optimized: Option, - ) -> EyreResult { - let inner = &*self.inner.lock(); - let routing_table = self.unlocked_inner.routing_table.clone(); - let rti = &*routing_table.inner.read(); - - let rsd = Self::detail(inner, key).ok_or_else(|| eyre!("route does not exist"))?; - - // See if we can optimize this compilation yet - // We don't want to include full nodeinfo if we don't have to - let optimized = optimized - .unwrap_or(rsd.stats.last_tested_ts.is_some() || rsd.stats.last_received_ts.is_some()); - - // Make innermost route hop to our own node - let mut route_hop = RouteHop { - node: if optimized { - if !rti.has_valid_own_node_info(RoutingDomain::PublicInternet) { - bail!("can't make private routes until our node info is valid"); - } - RouteNode::NodeId(NodeId::new(routing_table.node_id())) - } else { - let Some(pi) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else { - bail!("can't make private routes until our node info is valid"); - }; - RouteNode::PeerInfo(pi) - }, - next_hop: None, - }; - - let crypto = routing_table.network_manager().crypto(); - // Loop for each hop - let hop_count = rsd.hops.len(); - // iterate hops in private route order (reverse, but inside out) - for h in 0..hop_count { - let nonce = Crypto::get_random_nonce(); - - let blob_data = { - let mut rh_message = ::capnp::message::Builder::new_default(); - let mut rh_builder = rh_message.init_root::(); - encode_route_hop(&route_hop, &mut rh_builder)?; - builder_to_vec(rh_message)? 
- }; - - // Encrypt the previous blob ENC(nonce, DH(PKhop,SKpr)) - let dh_secret = crypto - .cached_dh(&rsd.hops[h], &rsd.secret_key) - .wrap_err("dh failed")?; - let enc_msg_data = Crypto::encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) - .wrap_err("encryption failed")?; - let route_hop_data = RouteHopData { - nonce, - blob: enc_msg_data, - }; - - route_hop = RouteHop { - node: if optimized { - // Optimized, no peer info, just the dht key - RouteNode::NodeId(NodeId::new(rsd.hops[h])) - } else { - // Full peer info, required until we are sure the route has been fully established - let node_id = rsd.hops[h]; - let pi = rti - .with_node_entry(node_id, |entry| { - entry.with(rti, |_rti, e| { - e.make_peer_info(node_id, RoutingDomain::PublicInternet) - }) - }) - .flatten(); - if pi.is_none() { - bail!("peer info should exist for route but doesn't",); - } - RouteNode::PeerInfo(pi.unwrap()) - }, - next_hop: Some(route_hop_data), - } - } - - let private_route = PrivateRoute { - public_key: key.clone(), - // add hop for 'FirstHop' - hop_count: (hop_count + 1).try_into().unwrap(), - hops: PrivateRouteHops::FirstHop(route_hop), - }; - Ok(private_route) - } - - /// Import a remote private route for compilation - #[instrument(level = "trace", skip(self, blob), ret, err)] - pub fn import_remote_private_route(&self, blob: Vec) -> EyreResult { - // decode the pr blob - let private_route = RouteSpecStore::blob_to_private_route(blob)?; - - // ensure private route has first hop - if !matches!(private_route.hops, PrivateRouteHops::FirstHop(_)) { - bail!("private route must have first hop"); - } - - // ensure this isn't also an allocated route - let inner = &mut *self.inner.lock(); - if Self::detail(inner, &private_route.public_key).is_some() { - bail!("should not import allocated route"); - } - - // store the private route in our cache - let cur_ts = get_aligned_timestamp(); - let key = Self::with_create_remote_private_route(inner, cur_ts, private_route, |r| { - 
r.private_route.as_ref().unwrap().public_key.clone() - }); - Ok(key) - } - - /// Release a remote private route that is no longer in use - #[instrument(level = "trace", skip(self), ret)] - fn release_remote_private_route(&self, key: &DHTKey) -> bool { - let inner = &mut *self.inner.lock(); - if inner.cache.remote_private_route_cache.remove(key).is_some() { - // Mark it as dead for the update - inner.cache.dead_remote_routes.push(*key); - true - } else { - false - } - } - - /// Retrieve an imported remote private route by its public key - pub fn get_remote_private_route(&self, key: &DHTKey) -> Option { - let inner = &mut *self.inner.lock(); - let cur_ts = get_aligned_timestamp(); - Self::with_get_remote_private_route(inner, cur_ts, key, |r| { - r.private_route.as_ref().unwrap().clone() - }) - } - - /// Retrieve an imported remote private route by its public key but don't 'touch' it - pub fn peek_remote_private_route(&self, key: &DHTKey) -> Option { - let inner = &mut *self.inner.lock(); - let cur_ts = get_aligned_timestamp(); - Self::with_peek_remote_private_route(inner, cur_ts, key, |r| { - r.private_route.as_ref().unwrap().clone() - }) - } - - // get or create a remote private route cache entry - fn with_create_remote_private_route( - inner: &mut RouteSpecStoreInner, - cur_ts: Timestamp, - private_route: PrivateRoute, - f: F, - ) -> R - where - F: FnOnce(&mut RemotePrivateRouteInfo) -> R, - { - let pr_pubkey = private_route.public_key; - - let rpr = inner - .cache - .remote_private_route_cache - .entry(pr_pubkey) - .and_modify(|rpr| { - if cur_ts.saturating_sub(rpr.last_touched_ts) >= REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY { - // Start fresh if this had expired - rpr.last_seen_our_node_info_ts = Timestamp::new(0); - rpr.last_touched_ts = cur_ts; - rpr.stats = RouteStats::new(cur_ts); - } else { - // If not expired, just mark as being used - rpr.last_touched_ts = cur_ts; - } - }) - .or_insert_with(|| RemotePrivateRouteInfo { - // New remote private route cache entry - 
private_route: Some(private_route), - last_seen_our_node_info_ts: Timestamp::new(0), - last_touched_ts: cur_ts, - stats: RouteStats::new(cur_ts), - }); - - let out = f(rpr); - - // Ensure we LRU out items - if inner.cache.remote_private_route_cache.len() - > inner.cache.remote_private_route_cache.capacity() - { - let (dead_k, _) = inner.cache.remote_private_route_cache.remove_lru().unwrap(); - // Mark it as dead for the update - inner.cache.dead_remote_routes.push(dead_k); - } - - out - } - - // get a remote private route cache entry - fn with_get_remote_private_route( - inner: &mut RouteSpecStoreInner, - cur_ts: Timestamp, - key: &DHTKey, - f: F, - ) -> Option - where - F: FnOnce(&mut RemotePrivateRouteInfo) -> R, - { - let rpr = inner.cache.remote_private_route_cache.get_mut(key)?; - if cur_ts.saturating_sub(rpr.last_touched_ts) < REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY { - rpr.last_touched_ts = cur_ts; - return Some(f(rpr)); - } - inner.cache.remote_private_route_cache.remove(key); - inner.cache.dead_remote_routes.push(*key); - None - } - - // peek a remote private route cache entry - fn with_peek_remote_private_route( - inner: &mut RouteSpecStoreInner, - cur_ts: Timestamp, - key: &DHTKey, - f: F, - ) -> Option - where - F: FnOnce(&mut RemotePrivateRouteInfo) -> R, - { - match inner.cache.remote_private_route_cache.entry(*key) { - hashlink::lru_cache::Entry::Occupied(mut o) => { - let rpr = o.get_mut(); - if cur_ts.saturating_sub(rpr.last_touched_ts) < REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY { - return Some(f(rpr)); - } - o.remove(); - inner.cache.dead_remote_routes.push(*key); - None - } - hashlink::lru_cache::Entry::Vacant(_) => None, - } - } - - /// Check to see if this remote (not ours) private route has seen our current node info yet - /// This happens when you communicate with a private route without a safety route - pub fn has_remote_private_route_seen_our_node_info(&self, key: &DHTKey) -> bool { - let our_node_info_ts = { - let rti = 
&*self.unlocked_inner.routing_table.inner.read(); - let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else { - return false; - }; - ts - }; - - let opt_rpr_node_info_ts = { - let inner = &mut *self.inner.lock(); - let cur_ts = get_aligned_timestamp(); - Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| { - rpr.last_seen_our_node_info_ts - }) - }; - - let Some(rpr_node_info_ts) = opt_rpr_node_info_ts else { - return false; - }; - - our_node_info_ts == rpr_node_info_ts - } - - /// Mark a remote private route as having seen our current node info - /// PRIVACY: - /// We do not accept node info timestamps from remote private routes because this would - /// enable a deanonymization attack, whereby a node could be 'pinged' with a doctored node_info with a - /// special 'timestamp', which then may be sent back over a private route, identifying that it - /// was that node that had the private route. - pub fn mark_remote_private_route_seen_our_node_info( - &self, - key: &DHTKey, - cur_ts: Timestamp, - ) -> EyreResult<()> { - let our_node_info_ts = { - let rti = &*self.unlocked_inner.routing_table.inner.read(); - let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else { - // Node info is invalid, skipping this - return Ok(()); - }; - ts - }; - - let inner = &mut *self.inner.lock(); - // Check for local route. If this is not a remote private route - // then we just skip the recording. We may be running a test and using - // our own local route as the destination private route. 
- if let Some(_) = Self::detail_mut(inner, key) { - return Ok(()); - } - if Self::with_get_remote_private_route(inner, cur_ts, key, |rpr| { - rpr.last_seen_our_node_info_ts = our_node_info_ts; - }) - .is_none() - { - bail!("private route is missing from store: {}", key); - } - Ok(()) - } - - /// Get the route statistics for any route we know about, local or remote - pub fn with_route_stats(&self, cur_ts: Timestamp, key: &DHTKey, f: F) -> Option - where - F: FnOnce(&mut RouteStats) -> R, - { - let inner = &mut *self.inner.lock(); - - // Check for stub route - if *key == self.unlocked_inner.routing_table.node_id() { - return None; - } - // Check for local route - if let Some(rsd) = Self::detail_mut(inner, key) { - return Some(f(&mut rsd.stats)); - } - // Check for remote route - if let Some(res) = - Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| f(&mut rpr.stats)) - { - return Some(res); - } - - None - } - - /// Clear caches when local our local node info changes - #[instrument(level = "trace", skip(self))] - pub fn reset(&self) { - let inner = &mut *self.inner.lock(); - - // Clean up local allocated routes - for (_k, v) in &mut inner.content.details { - // Must republish route now - v.published = false; - // Restart stats for routes so we test the route again - v.stats.reset(); - } - - // Reset private route cache - for (_k, v) in &mut inner.cache.remote_private_route_cache { - // Restart stats for routes so we test the route again - v.stats.reset(); - } - } - - /// Mark route as published - /// When first deserialized, routes must be re-published in order to ensure they remain - /// in the RouteSpecStore. - pub fn mark_route_published(&self, key: &DHTKey, published: bool) -> EyreResult<()> { - let inner = &mut *self.inner.lock(); - Self::detail_mut(inner, key) - .ok_or_else(|| eyre!("route does not exist"))? 
- .published = published; - Ok(()) - } - - /// Process transfer statistics to get averages - pub fn roll_transfers(&self, last_ts: Timestamp, cur_ts: Timestamp) { - let inner = &mut *self.inner.lock(); - - // Roll transfers for locally allocated routes - for rsd in inner.content.details.values_mut() { - rsd.stats.roll_transfers(last_ts, cur_ts); - } - // Roll transfers for remote private routes - for (_k, v) in inner.cache.remote_private_route_cache.iter_mut() { - v.stats.roll_transfers(last_ts, cur_ts); - } - } - - /// Convert private route to binary blob - pub fn private_route_to_blob(private_route: &PrivateRoute) -> EyreResult> { - let mut pr_message = ::capnp::message::Builder::new_default(); - let mut pr_builder = pr_message.init_root::(); - encode_private_route(&private_route, &mut pr_builder) - .wrap_err("failed to encode private route")?; - - let mut buffer = vec![]; - capnp::serialize_packed::write_message(&mut buffer, &pr_message) - .map_err(RPCError::internal) - .wrap_err("failed to convert builder to vec")?; - Ok(buffer) - } - - /// Convert binary blob to private route - pub fn blob_to_private_route(blob: Vec) -> EyreResult { - let reader = capnp::serialize_packed::read_message( - blob.as_slice(), - capnp::message::ReaderOptions::new(), - ) - .map_err(RPCError::internal) - .wrap_err("failed to make message reader")?; - - let pr_reader = reader - .get_root::() - .map_err(RPCError::internal) - .wrap_err("failed to make reader for private_route")?; - decode_private_route(&pr_reader).wrap_err("failed to decode private route") - } -} diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs new file mode 100644 index 00000000..302b2b6f --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -0,0 +1,30 @@ +use super::*; + +mod permutation; +mod remote_private_route_info; +mod route_set_spec_detail; +mod route_spec_store; +mod route_spec_store_cache; +mod 
route_spec_store_content; +mod route_stats; + +pub use remote_private_route_info::*; +pub use route_set_spec_detail::*; +pub use route_spec_store::*; +pub use route_spec_store_cache::*; +pub use route_spec_store_content::*; +pub use route_stats::*; + +use crate::veilid_api::*; +use rkyv::{ + with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize, +}; + +/// The size of the remote private route cache +const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024; +/// Remote private route cache entries expire in 5 minutes if they haven't been used +const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: TimestampDuration = TimestampDuration::new(300_000_000u64); +/// Amount of time a route can remain idle before it gets tested +const ROUTE_MIN_IDLE_TIME_MS: u32 = 30_000; +/// The size of the compiled route cache +const COMPILED_ROUTE_CACHE_SIZE: usize = 256; diff --git a/veilid-core/src/routing_table/route_spec_store/permutation.rs b/veilid-core/src/routing_table/route_spec_store/permutation.rs new file mode 100644 index 00000000..691e75c3 --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/permutation.rs @@ -0,0 +1,70 @@ +use super::*; + +/// number of route permutations is the number of unique orderings +/// for a set of nodes, given that the first node is fixed +fn _get_route_permutation_count(hop_count: usize) -> usize { + if hop_count == 0 { + unreachable!(); + } + // a single node or two nodes is always fixed + if hop_count == 1 || hop_count == 2 { + return 1; + } + // more than two nodes has factorial permutation + // hop_count = 3 -> 2! -> 2 + // hop_count = 4 -> 3! -> 6 + (3..hop_count).into_iter().fold(2usize, |acc, x| acc * x) +} +pub type PermReturnType = (Vec, bool); +pub type PermFunc<'t> = Box Option + Send + 't>; + +/// get the route permutation at particular 'perm' index, starting at the 'start' index +/// for a set of 'hop_count' nodes. 
the first node is always fixed, and the maximum +/// number of permutations is given by get_route_permutation_count() + +pub fn with_route_permutations( + hop_count: usize, + start: usize, + f: &mut PermFunc, +) -> Option { + if hop_count == 0 { + unreachable!(); + } + // initial permutation + let mut permutation: Vec = Vec::with_capacity(hop_count); + for n in 0..hop_count { + permutation.push(start + n); + } + // if we have one hop or two, then there's only one permutation + if hop_count == 1 || hop_count == 2 { + return f(&permutation); + } + + // heaps algorithm, but skipping the first element + fn heaps_permutation( + permutation: &mut [usize], + size: usize, + f: &mut PermFunc, + ) -> Option { + if size == 1 { + return f(&permutation); + } + + for i in 0..size { + let out = heaps_permutation(permutation, size - 1, f); + if out.is_some() { + return out; + } + if size % 2 == 1 { + permutation.swap(1, size); + } else { + permutation.swap(1 + i, size); + } + } + + None + } + + // recurse + heaps_permutation(&mut permutation, hop_count - 1, f) +} diff --git a/veilid-core/src/routing_table/route_spec_store/remote_private_route_info.rs b/veilid-core/src/routing_table/route_spec_store/remote_private_route_info.rs new file mode 100644 index 00000000..58270ade --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/remote_private_route_info.rs @@ -0,0 +1,71 @@ +use super::*; + +/// What remote private routes have seen +#[derive(Debug, Clone, Default)] +pub struct RemotePrivateRouteInfo { + /// The private routes themselves + private_routes: Vec, + /// Did this remote private route see our node info due to no safety route in use + last_seen_our_node_info_ts: Timestamp, + /// Last time this remote private route was requested for any reason (cache expiration) + last_touched_ts: Timestamp, + /// Stats + stats: RouteStats, +} + +impl RemotePrivateRouteInfo { + pub fn new(private_routes: Vec, cur_ts: Timestamp) -> Self { + RemotePrivateRouteInfo { + 
private_routes, + last_seen_our_node_info_ts: Timestamp::new(0), + last_touched_ts: cur_ts, + stats: RouteStats::new(cur_ts), + } + } + pub fn get_private_routes(&self) -> &[PrivateRoute] { + &self.private_routes + } + pub fn best_private_route(&self) -> Option { + self.private_routes + .iter() + .reduce(|acc, x| { + if x.public_key < acc.public_key { + x + } else { + acc + } + }) + .filter(|x| VALID_CRYPTO_KINDS.contains(&x.public_key.kind)) + .cloned() + } + pub fn get_stats(&self) -> &RouteStats { + &self.stats + } + pub fn get_stats_mut(&mut self) -> &mut RouteStats { + &mut self.stats + } + + pub fn has_seen_our_node_info_ts(&mut self, our_node_info_ts: Timestamp) -> bool { + self.last_seen_our_node_info_ts == our_node_info_ts + } + pub fn set_last_seen_our_node_info_ts(&mut self, last_seen_our_node_info_ts: Timestamp) { + self.last_seen_our_node_info_ts = last_seen_our_node_info_ts; + } + + // Check to see if this remote private route has expired + pub fn did_expire(&self, cur_ts: Timestamp) -> bool { + cur_ts.saturating_sub(self.last_touched_ts) >= REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY + } + + /// Start fresh if this had expired + pub fn unexpire(&mut self, cur_ts: Timestamp) { + self.last_seen_our_node_info_ts = Timestamp::new(0); + self.last_touched_ts = cur_ts; + self.stats = RouteStats::new(cur_ts); + } + + /// Note when this was last used + pub fn touch(&mut self, cur_ts: Timestamp) { + self.last_touched_ts = cur_ts; + } +} diff --git a/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs b/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs new file mode 100644 index 00000000..e7858816 --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs @@ -0,0 +1,137 @@ +use super::*; + +#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C, align(8)), derive(CheckBytes))] +pub struct RouteSpecDetail { + /// Crypto kind + pub crypto_kind: CryptoKind, + /// 
Secret key + #[with(Skip)] + pub secret_key: SecretKey, + /// Route hops (node id keys) + pub hops: Vec, +} + +#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C, align(8)), derive(CheckBytes))] +pub struct RouteSetSpecDetail { + /// Route set per crypto kind + route_set: BTreeMap, + /// Route noderefs + #[with(Skip)] + hop_node_refs: Vec, + /// Published private route, do not reuse for ephemeral routes + /// Not serialized because all routes should be re-published when restarting + #[with(Skip)] + published: bool, + /// Directions this route is guaranteed to work in + #[with(RkyvEnumSet)] + directions: DirectionSet, + /// Stability preference (prefer reliable nodes over faster) + stability: Stability, + /// Sequencing capability (connection oriented protocols vs datagram) + can_do_sequenced: bool, + /// Stats + stats: RouteStats, +} + +impl RouteSetSpecDetail { + pub fn new( + cur_ts: Timestamp, + route_set: BTreeMap, + hop_node_refs: Vec, + directions: DirectionSet, + stability: Stability, + can_do_sequenced: bool, + ) -> Self { + Self { + route_set, + hop_node_refs, + published: false, + directions, + stability, + can_do_sequenced, + stats: RouteStats::new(cur_ts), + } + } + pub fn get_route_by_key(&self, key: &PublicKey) -> Option<&RouteSpecDetail> { + self.route_set.get(key) + } + pub fn get_route_by_key_mut(&mut self, key: &PublicKey) -> Option<&mut RouteSpecDetail> { + self.route_set.get_mut(key) + } + pub fn get_route_set_keys(&self) -> TypedKeySet { + let mut tks = TypedKeySet::new(); + for (k, v) in &self.route_set { + tks.add(TypedKey::new(v.crypto_kind, *k)); + } + tks + } + pub fn get_best_route_set_key(&self) -> Option { + self.get_route_set_keys().best().map(|k| k.value) + } + pub fn set_hop_node_refs(&mut self, node_refs: Vec) { + self.hop_node_refs = node_refs; + } + pub fn iter_route_set( + &self, + ) -> alloc::collections::btree_map::Iter { + self.route_set.iter() + } + pub fn iter_route_set_mut( + &mut 
self, + ) -> alloc::collections::btree_map::IterMut { + self.route_set.iter_mut() + } + pub fn get_stats(&self) -> &RouteStats { + &self.stats + } + pub fn get_stats_mut(&mut self) -> &mut RouteStats { + &mut self.stats + } + pub fn is_published(&self) -> bool { + self.published + } + pub fn set_published(&mut self, published: bool) { + self.published = published; + } + pub fn hop_count(&self) -> usize { + self.hop_node_refs.len() + } + pub fn hop_node_ref(&self, idx: usize) -> Option { + self.hop_node_refs.get(idx).cloned() + } + pub fn get_stability(&self) -> Stability { + self.stability + } + pub fn get_directions(&self) -> DirectionSet { + self.directions + } + pub fn is_sequencing_match(&self, sequencing: Sequencing) -> bool { + match sequencing { + Sequencing::NoPreference => true, + Sequencing::PreferOrdered => true, + Sequencing::EnsureOrdered => self.can_do_sequenced, + } + } + pub fn contains_nodes(&self, nodes: &[TypedKey]) -> bool { + for tk in nodes { + for (_pk, rsd) in &self.route_set { + if rsd.crypto_kind == tk.kind && rsd.hops.contains(&tk.value) { + return true; + } + } + } + false + } + + /// Generate a key for the cache that can be used to uniquely identify this route's contents + pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Vec { + let hops = &self.hop_node_refs; + let mut cache: Vec = Vec::with_capacity(hops.len() * PUBLIC_KEY_LENGTH); + for hop in hops { + cache.extend_from_slice(&hop.locked(rti).best_node_id().value.bytes); + } + cache + } +} diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs new file mode 100644 index 00000000..7f5d79ae --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs @@ -0,0 +1,1607 @@ +use super::*; +use permutation::*; + +#[derive(Debug)] +pub struct RouteSpecStoreInner { + /// Serialize RouteSpecStore content + content: RouteSpecStoreContent, + /// RouteSpecStore cache + 
cache: RouteSpecStoreCache, +} + +pub struct RouteSpecStoreUnlockedInner { + /// Handle to routing table + routing_table: RoutingTable, + /// Maximum number of hops in a route + max_route_hop_count: usize, + /// Default number of hops in a route + default_route_hop_count: usize, +} + +impl fmt::Debug for RouteSpecStoreUnlockedInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouteSpecStoreUnlockedInner") + .field("max_route_hop_count", &self.max_route_hop_count) + .field("default_route_hop_count", &self.default_route_hop_count) + .finish() + } +} + +/// The routing table's storage for private/safety routes +#[derive(Clone, Debug)] +pub struct RouteSpecStore { + inner: Arc>, + unlocked_inner: Arc, +} + +impl RouteSpecStore { + pub fn new(routing_table: RoutingTable) -> Self { + let config = routing_table.network_manager().config(); + let c = config.get(); + + Self { + unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { + max_route_hop_count: c.network.rpc.max_route_hop_count.into(), + default_route_hop_count: c.network.rpc.default_route_hop_count.into(), + routing_table, + }), + inner: Arc::new(Mutex::new(RouteSpecStoreInner { + content: RouteSpecStoreContent::new(), + cache: Default::default(), + })), + } + } + + #[instrument(level = "trace", skip(routing_table), err)] + pub async fn load(routing_table: RoutingTable) -> EyreResult { + let (max_route_hop_count, default_route_hop_count) = { + let config = routing_table.network_manager().config(); + let c = config.get(); + ( + c.network.rpc.max_route_hop_count as usize, + c.network.rpc.default_route_hop_count as usize, + ) + }; + + // Get frozen blob from table store + let content = RouteSpecStoreContent::load(routing_table.clone()).await?; + + let mut inner = RouteSpecStoreInner { + content, + cache: Default::default(), + }; + + // Rebuild the routespecstore cache + let rti = &*routing_table.inner.read(); + for (_, rssd) in inner.content.iter_details() { + 
inner.cache.add_to_cache(rti, &rssd); + } + + // Return the loaded RouteSpecStore + let rss = RouteSpecStore { + unlocked_inner: Arc::new(RouteSpecStoreUnlockedInner { + max_route_hop_count, + default_route_hop_count, + routing_table: routing_table.clone(), + }), + inner: Arc::new(Mutex::new(inner)), + }; + + Ok(rss) + } + + + #[instrument(level = "trace", skip(self), err)] + pub async fn save(&self) -> EyreResult<()> { + let content = { + let inner = self.inner.lock(); + inner.content.clone() + }; + + // Save our content + content.save(self.unlocked_inner.routing_table.clone()).await?; + + Ok(()) + } + + #[instrument(level = "trace", skip(self))] + pub fn send_route_update(&self) { + let (dead_routes, dead_remote_routes) = { + let mut inner = self.inner.lock(); + let Some(dr) = inner.cache.take_dead_routes() else { + // Nothing to do + return; + }; + dr + }; + + let update = VeilidUpdate::Route(VeilidStateRoute { + dead_routes, + dead_remote_routes, + }); + + let update_callback = self.unlocked_inner.routing_table.update_callback(); + update_callback(update); + } + + /// Purge the route spec store + pub async fn purge(&self) -> EyreResult<()> { + { + let inner = &mut *self.inner.lock(); + inner.content = Default::default(); + inner.cache = Default::default(); + } + self.save().await + } + + /// Create a new route + /// Prefers nodes that are not currently in use by another route + /// The route is not yet tested for its reachability + /// Returns None if no route could be allocated at this time + /// Returns Some route id string + #[instrument(level = "trace", skip(self), ret, err)] + pub fn allocate_route( + &self, + crypto_kinds: &[CryptoKind], + stability: Stability, + sequencing: Sequencing, + hop_count: usize, + directions: DirectionSet, + avoid_nodes: &[TypedKey], + ) -> EyreResult> { + let inner = &mut *self.inner.lock(); + let routing_table = self.unlocked_inner.routing_table.clone(); + let rti = &mut *routing_table.inner.write(); + + 
self.allocate_route_inner( + inner, + rti, + crypto_kinds, + stability, + sequencing, + hop_count, + directions, + avoid_nodes, + ) + } + + #[instrument(level = "trace", skip(self, inner, rti), ret, err)] + fn allocate_route_inner( + &self, + inner: &mut RouteSpecStoreInner, + rti: &mut RoutingTableInner, + crypto_kinds: &[CryptoKind], + stability: Stability, + sequencing: Sequencing, + hop_count: usize, + directions: DirectionSet, + avoid_nodes: &[TypedKey], + ) -> EyreResult> { + use core::cmp::Ordering; + + if hop_count < 1 { + bail!("Not allocating route less than one hop in length"); + } + + if hop_count > self.unlocked_inner.max_route_hop_count { + bail!("Not allocating route longer than max route hop count"); + } + + let Some(our_peer_info) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else { + bail!("Can't allocate route until we have our own peer info"); + }; + + // Get relay node if we have one + let opt_own_relay_nr = rti.relay_node(RoutingDomain::PublicInternet).map(|nr| nr.locked(rti)); + + // Get list of all nodes, and sort them for selection + let cur_ts = get_aligned_timestamp(); + let filter = Box::new( + |_rti: &RoutingTableInner, entry: Option>| -> bool { + // Exclude our own node from routes + if entry.is_none() { + return false; + } + let entry = entry.unwrap(); + + // Exclude our relay if we have one + if let Some(own_relay_nr) = &opt_own_relay_nr { + if own_relay_nr.same_bucket_entry(&entry) { + return false; + } + } + + // Process node info exclusions + let keep = entry.with_inner(|e| { + + // Exclude nodes that don't have our requested crypto kinds + let common_ck = e.common_crypto_kinds(crypto_kinds); + if common_ck.len() != crypto_kinds.len() { + return false; + } + + // Exclude nodes we have specifically chosen to avoid + if e.node_ids().contains_any(avoid_nodes) { + return false; + } + + // Exclude nodes on our local network + if e.node_info(RoutingDomain::LocalNetwork).is_some() { + return false; + } + + // Exclude nodes that 
have no publicinternet signednodeinfo + let Some(sni) = e.signed_node_info(RoutingDomain::PublicInternet) else { + return false; + }; + + // Relay check + let relay_ids = sni.relay_ids(); + if relay_ids.len() != 0 { + // Exclude nodes whose relays we have chosen to avoid + if relay_ids.contains_any(avoid_nodes) { + return false; + } + // Exclude nodes whose relay is our own relay if we have one + if let Some(own_relay_nr) = &opt_own_relay_nr { + if relay_ids.contains_any(&own_relay_nr.node_ids()) { + return false; + } + } + } + return true; + }); + if !keep { + return false; + } + + // Exclude nodes with no publicinternet nodeinfo, or incompatible nodeinfo or node status won't route + entry.with_inner(|e| { + let node_info_ok = + if let Some(sni) = e.signed_node_info(RoutingDomain::PublicInternet) { + sni.has_sequencing_matched_dial_info(sequencing) + } else { + false + }; + let node_status_ok = + if let Some(ns) = e.node_status(RoutingDomain::PublicInternet) { + ns.will_route() + } else { + false + }; + + node_info_ok && node_status_ok + }) + }, + ) as RoutingTableEntryFilter; + let filters = VecDeque::from([filter]); + let compare = |_rti: &RoutingTableInner, + entry1: &Option>, + entry2: &Option>| + -> Ordering { + + // Our own node is filtered out + let entry1 = entry1.as_ref().unwrap().clone(); + let entry2 = entry2.as_ref().unwrap().clone(); + let entry1_node_ids = entry1.with_inner(|e| e.node_ids()); + let entry2_node_ids = entry2.with_inner(|e| e.node_ids()); + + // deprioritize nodes that we have already used as end points + let e1_used_end = inner.cache.get_used_end_node_count(&entry1_node_ids); + let e2_used_end = inner.cache.get_used_end_node_count(&entry2_node_ids); + let cmp_used_end = e1_used_end.cmp(&e2_used_end); + if !matches!(cmp_used_end, Ordering::Equal) { + return cmp_used_end; + } + + // deprioritize nodes we have used already anywhere + let e1_used = inner.cache.get_used_node_count(&entry1_node_ids); + let e2_used = 
inner.cache.get_used_node_count(&entry2_node_ids); + let cmp_used = e1_used.cmp(&e2_used); + if !matches!(cmp_used, Ordering::Equal) { + return cmp_used; + } + + // apply sequencing preference + // ensureordered will be taken care of by filter + // and nopreference doesn't care + if matches!(sequencing, Sequencing::PreferOrdered) { + let cmp_seq = entry1.with_inner(|e1| { + entry2.with_inner(|e2| { + let e1_can_do_ordered = e1.signed_node_info(RoutingDomain::PublicInternet).map(|sni| sni.has_sequencing_matched_dial_info(sequencing)).unwrap_or(false); + let e2_can_do_ordered = e2.signed_node_info(RoutingDomain::PublicInternet).map(|sni| sni.has_sequencing_matched_dial_info(sequencing)).unwrap_or(false); + e2_can_do_ordered.cmp(&e1_can_do_ordered) + }) + }); + if !matches!(cmp_seq, Ordering::Equal) { + return cmp_seq; + } + } + + // always prioritize reliable nodes, but sort by oldest or fastest + let cmpout = entry1.with_inner(|e1| { + entry2.with_inner(|e2| match stability { + Stability::LowLatency => { + BucketEntryInner::cmp_fastest_reliable(cur_ts, e1, e2) + } + Stability::Reliable => { + BucketEntryInner::cmp_oldest_reliable(cur_ts, e1, e2) + } + }) + }); + cmpout + }; + + let routing_table = self.unlocked_inner.routing_table.clone(); + let transform = + |_rti: &RoutingTableInner, entry: Option>| -> NodeRef { + NodeRef::new(routing_table.clone(), entry.unwrap(), None) + }; + + // Pull the whole routing table in sorted order + let nodes:Vec = + rti.find_peers_with_sort_and_filter(usize::MAX, cur_ts, filters, compare, transform); + + // If we couldn't find enough nodes, wait until we have more nodes in the routing table + if nodes.len() < hop_count { + log_rtab!(debug "not enough nodes to construct route at this time"); + return Ok(None); + } + + // Get peer info for everything + let nodes_pi: Vec = nodes.iter().map(|nr| nr.locked(rti).make_peer_info(RoutingDomain::PublicInternet).unwrap()).collect(); + + // Now go through nodes and try to build a route we 
haven't seen yet + let mut perm_func = Box::new(|permutation: &[usize]| { + + // Get the hop cache key for a particular route permutation + // uses the same algorithm as RouteSetSpecDetail::make_cache_key + let route_permutation_to_hop_cache = |_rti: &RoutingTableInner, nodes: &[NodeRef], perm: &[usize]| -> Vec { + let mut cache: Vec = Vec::with_capacity(perm.len() * PUBLIC_KEY_LENGTH); + for n in perm { + cache.extend_from_slice(&nodes[*n].locked(rti).best_node_id().value.bytes) + } + cache + }; + let cache_key = route_permutation_to_hop_cache(rti, &nodes, permutation); + + // Skip routes we have already seen + if inner.cache.contains_route(&cache_key) { + return None; + } + + // Ensure the route doesn't contain both a node and its relay + let mut seen_nodes: HashSet = HashSet::new(); + for n in permutation { + let node = nodes.get(*n).unwrap(); + if !seen_nodes.insert(node.locked(rti).best_node_id()) { + // Already seen this node, should not be in the route twice + return None; + } + if let Some(relay) = node.locked_mut(rti).relay(RoutingDomain::PublicInternet) { + let relay_id = relay.locked(rti).best_node_id(); + if !seen_nodes.insert(relay_id) { + // Already seen this node, should not be in the route twice + return None; + } + } + } + + // Ensure this route is viable by checking that each node can contact the next one + let mut can_do_sequenced = true; + if directions.contains(Direction::Outbound) { + let mut previous_node = &our_peer_info; + let mut reachable = true; + for n in permutation { + let current_node = nodes_pi.get(*n).unwrap(); + let cm = rti.get_contact_method( + RoutingDomain::PublicInternet, + previous_node, + current_node, + DialInfoFilter::all(), + sequencing, + ); + if matches!(cm, ContactMethod::Unreachable) { + reachable = false; + break; + } + + // Check if we can do sequenced specifically + if can_do_sequenced { + let cm = rti.get_contact_method( + RoutingDomain::PublicInternet, + previous_node, + current_node, + DialInfoFilter::all(), + 
Sequencing::EnsureOrdered, + ); + if matches!(cm, ContactMethod::Unreachable) { + can_do_sequenced = false; + } + } + + previous_node = current_node; + } + if !reachable { + return None; + } + } + if directions.contains(Direction::Inbound) { + let mut next_node = &our_peer_info; + let mut reachable = true; + for n in permutation.iter().rev() { + let current_node = nodes_pi.get(*n).unwrap(); + let cm = rti.get_contact_method( + RoutingDomain::PublicInternet, + next_node, + current_node, + DialInfoFilter::all(), + sequencing, + ); + if matches!(cm, ContactMethod::Unreachable) { + reachable = false; + break; + } + + // Check if we can do sequenced specifically + if can_do_sequenced { + let cm = rti.get_contact_method( + RoutingDomain::PublicInternet, + next_node, + current_node, + DialInfoFilter::all(), + Sequencing::EnsureOrdered, + ); + if matches!(cm, ContactMethod::Unreachable) { + can_do_sequenced = false; + } + } + next_node = current_node; + } + if !reachable { + return None; + } + } + // Keep this route + let route_nodes = permutation.to_vec(); + Some((route_nodes, can_do_sequenced)) + }) as PermFunc; + + let mut route_nodes: Vec = Vec::new(); + let mut can_do_sequenced: bool = true; + + for start in 0..(nodes.len() - hop_count) { + // Try the permutations available starting with 'start' + if let Some((rn, cds)) = with_route_permutations(hop_count, start, &mut perm_func) { + route_nodes = rn; + can_do_sequenced = cds; + break; + } + } + if route_nodes.is_empty() { + log_rtab!(debug "unable to find unique route at this time"); + return Ok(None); + } + + drop(perm_func); + + // Got a unique route, lets build the details, register it, and return it + let hop_node_refs:Vec = route_nodes + .iter() + .map(|k| nodes[*k].clone()) + .collect(); + let mut route_set = BTreeMap::::new(); + for crypto_kind in crypto_kinds.iter().copied() { + let vcrypto = self.unlocked_inner.routing_table.crypto().get(crypto_kind).unwrap(); + let keypair = vcrypto.generate_keypair(); + let 
hops: Vec = route_nodes.iter().map(|v| nodes[*v].locked(rti).node_ids().get(crypto_kind).unwrap().value).collect(); + + route_set.insert(keypair.key, RouteSpecDetail { + crypto_kind, + secret_key: keypair.secret, + hops, + }); + } + + let rssd = RouteSetSpecDetail::new( + cur_ts, + route_set, + hop_node_refs, + directions, + stability, + can_do_sequenced, + ); + + + // make id + let id = self.generate_allocated_route_id(&rssd)?; + + // Add to cache + inner.cache.add_to_cache(rti, &rssd); + + // Keep route in spec store + inner.content.add_detail(id.clone(), rssd); + + Ok(Some(id)) + } + + /// validate data using a private route's key and signature chain + #[instrument(level = "trace", skip(self, data, callback), ret)] + pub fn with_signature_validated_route( + &self, + public_key: &TypedKey, + signatures: &[Signature], + data: &[u8], + last_hop_id: PublicKey, + callback: F, + ) -> Option + where F: FnOnce(&RouteSetSpecDetail, &RouteSpecDetail) -> R, + R: fmt::Debug, + { + let inner = &*self.inner.lock(); + let crypto = self.unlocked_inner.routing_table.crypto(); + let Some(vcrypto) = crypto.get(public_key.kind) else { + log_rpc!(debug "can't handle route with public key: {:?}", public_key); + return None; + }; + + let Some(rsid) = inner.content.get_id_by_key(&public_key.value) else { + log_rpc!(debug "route id does not exist: {:?}", public_key.value); + return None; + }; + let Some(rssd) = inner.content.get_detail(&rsid) else { + log_rpc!(debug "route detail does not exist: {:?}", rsid); + return None; + }; + let Some(rsd) = rssd.get_route_by_key(&public_key.value) else { + log_rpc!(debug "route set {:?} does not have key: {:?}", rsid, public_key.value); + return None; + }; + + // Ensure we have the right number of signatures + if signatures.len() != rsd.hops.len() - 1 { + // Wrong number of signatures + log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key); + return 
None; + } + // Validate signatures to ensure the route was handled by the nodes and not messed with + // This is in private route (reverse) order as we are receiving over the route + for (hop_n, hop_public_key) in rsd.hops.iter().rev().enumerate() { + // The last hop is not signed, as the whole packet is signed + if hop_n == signatures.len() { + // Verify the node we received the routed operation from is the last hop in our route + if *hop_public_key != last_hop_id { + log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key); + return None; + } + } else { + // Verify a signature for a hop node along the route + if let Err(e) = vcrypto.verify(hop_public_key, data, &signatures[hop_n]) { + log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); + return None; + } + } + } + // We got the correct signatures, return a key and response safety spec + Some(callback(rssd, rsd)) + } + + #[instrument(level = "trace", skip(self), ret, err)] + async fn test_allocated_route(&self, private_route_id: RouteId) -> EyreResult { + // Make loopback route to test with + let dest = { + + // Get the best private route for this id + let (key, hop_count) = { + let inner = &mut *self.inner.lock(); + let Some(rssd) = inner.content.get_detail(&private_route_id) else { + bail!("route id not allocated"); + }; + let Some(key) = rssd.get_best_route_set_key() else { + bail!("no best key to test allocated route"); + }; + // Match the private route's hop length for safety route length + let hop_count = rssd.hop_count(); + (key, hop_count) + }; + + // Get the private route to send to + let private_route = self.assemble_private_route(&key, None)?; + // Always test routes with safety routes that are more likely to succeed + let stability = Stability::Reliable; + // Routes can test with whatever sequencing they were allocated with + let 
sequencing = Sequencing::NoPreference; + + let safety_spec = SafetySpec { + preferred_route: Some(private_route_id), + hop_count, + stability, + sequencing, + }; + let safety_selection = SafetySelection::Safe(safety_spec); + + Destination::PrivateRoute { + private_route, + safety_selection, + } + }; + + // Test with double-round trip ping to self + let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); + let _res = match rpc_processor.rpc_call_status(dest).await? { + NetworkResult::Value(v) => v, + _ => { + // Did not error, but did not come back, just return false + return Ok(false); + } + }; + + Ok(true) + } + + #[instrument(level = "trace", skip(self), ret, err)] + async fn test_remote_route(&self, private_route_id: RouteId) -> EyreResult { + + // Make private route test + let dest = { + + // Get the route to test + let Some(private_route) = self.best_remote_private_route(&private_route_id) else { + bail!("no best key to test remote route"); + }; + + // Get a safety route that is good enough + let safety_spec = SafetySpec { + preferred_route: None, + hop_count: self.unlocked_inner.default_route_hop_count, + stability: Stability::default(), + sequencing: Sequencing::default(), + }; + + let safety_selection = SafetySelection::Safe(safety_spec); + + Destination::PrivateRoute { + private_route, + safety_selection, + } + }; + + // Test with double-round trip ping to self + let rpc_processor = self.unlocked_inner.routing_table.rpc_processor(); + let _res = match rpc_processor.rpc_call_status(dest).await? 
{ + NetworkResult::Value(v) => v, + _ => { + // Did not error, but did not come back, just return false + return Ok(false); + } + }; + + Ok(true) + } + + /// Release an allocated route that is no longer in use + #[instrument(level = "trace", skip(self), ret)] + fn release_allocated_route(&self, id: RouteId) -> bool { + let mut inner = self.inner.lock(); + let Some(rssd) = inner.content.remove_detail(&id) else { + return false; + }; + + // Remove from hop cache + let rti = &*self.unlocked_inner.routing_table.inner.read(); + if !inner.cache.remove_from_cache(rti, id, &rssd) { + panic!("hop cache should have contained cache key"); + } + + true + } + + /// Check if a route id is remote or not + pub fn is_route_id_remote(&self, id: &RouteId) -> bool { + let inner = &mut *self.inner.lock(); + let cur_ts = get_aligned_timestamp(); + inner.cache.peek_remote_private_route_mut(cur_ts, &id).is_some() + } + + /// Test an allocated route for continuity + #[instrument(level = "trace", skip(self), ret, err)] + pub async fn test_route(&self, id: RouteId) -> EyreResult { + let is_remote = self.is_route_id_remote(&id); + if is_remote { + self.test_remote_route(id).await + } else { + self.test_allocated_route(id).await + } + } + + /// Release an allocated or remote route that is no longer in use + #[instrument(level = "trace", skip(self), ret)] + pub fn release_route(&self, id: RouteId) -> bool { + let is_remote = self.is_route_id_remote(&id); + if is_remote { + self.release_remote_private_route(id) + } else { + self.release_allocated_route(id) + } + } + + /// Find first matching unpublished route that fits into the selection criteria + /// Don't pick any routes that have failed and haven't been tested yet + fn first_available_route_inner<'a>( + inner: &'a RouteSpecStoreInner, + crypto_kind: CryptoKind, + min_hop_count: usize, + max_hop_count: usize, + stability: Stability, + sequencing: Sequencing, + directions: DirectionSet, + avoid_nodes: &[TypedKey], + ) -> Option { + let cur_ts 
= get_aligned_timestamp(); + + let mut routes = Vec::new(); + + // Get all valid routes, allow routes that need testing + // but definitely prefer routes that have been recently tested + for (id, rssd) in inner.content.iter_details() { + if rssd.get_stability() >= stability + && rssd.is_sequencing_match(sequencing) + && rssd.hop_count() >= min_hop_count + && rssd.hop_count() <= max_hop_count + && rssd.get_directions().is_superset(directions) + && rssd.get_route_set_keys().kinds().contains(&crypto_kind) + && !rssd.is_published() + && !rssd.contains_nodes(avoid_nodes) + { + routes.push((id, rssd)); + } + } + + // Sort the routes by preference + routes.sort_by(|a, b| { + let a_needs_testing = a.1.get_stats().needs_testing(cur_ts); + let b_needs_testing = b.1.get_stats().needs_testing(cur_ts); + if !a_needs_testing && b_needs_testing { + return cmp::Ordering::Less; + } + if !b_needs_testing && a_needs_testing { + return cmp::Ordering::Greater; + } + let a_latency = a.1.get_stats().latency_stats().average; + let b_latency = b.1.get_stats().latency_stats().average; + + a_latency.cmp(&b_latency) + }); + + // Return the best one if we got one + routes.first().map(|r| *r.0) + } + + /// List all allocated routes + pub fn list_allocated_routes(&self, mut filter: F) -> Vec + where + F: FnMut(&RouteId, &RouteSetSpecDetail) -> Option, + { + let inner = self.inner.lock(); + let mut out = Vec::with_capacity(inner.content.get_detail_count()); + for detail in inner.content.iter_details() { + if let Some(x) = filter(detail.0, detail.1) { + out.push(x); + } + } + out + } + + /// List all allocated routes + pub fn list_remote_routes(&self, mut filter: F) -> Vec + where + F: FnMut(&RouteId, &RemotePrivateRouteInfo) -> Option, + { + let inner = self.inner.lock(); + let mut out = Vec::with_capacity(inner.cache.get_remote_private_route_count()); + for info in inner.cache.iter_remote_private_routes() { + if let Some(x) = filter(info.0, info.1) { + out.push(x); + } + } + out + } + + /// Get 
the debug description of a route + pub fn debug_route(&self, id: &RouteId) -> Option { + let inner = &mut *self.inner.lock(); + let cur_ts = get_aligned_timestamp(); + if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, &id) { + return Some(format!("{:#?}", rpri)); + } + if let Some(rssd) = inner.content.get_detail(id) { + return Some(format!("{:#?}", rssd)); + } + None + } + + ////////////////////////////////////////////////////////////////////// + + /// Choose the best private route from a private route set to communicate with + pub fn best_remote_private_route(&self, id: &RouteId) -> Option { + let inner = &mut *self.inner.lock(); + let cur_ts = get_aligned_timestamp(); + let rpri = inner.cache.get_remote_private_route(cur_ts, id)?; + rpri.best_private_route() + } + + + /// Compiles a safety route to the private route, with caching + /// Returns an Err() if the parameters are wrong + /// Returns Ok(None) if no allocation could happen at this time (not an error) + pub fn compile_safety_route( + &self, + safety_selection: SafetySelection, + mut private_route: PrivateRoute, + ) -> EyreResult> { + // let profile_start_ts = get_timestamp(); + let inner = &mut *self.inner.lock(); + let routing_table = self.unlocked_inner.routing_table.clone(); + let rti = &mut *routing_table.inner.write(); + + // Get useful private route properties + let crypto_kind = private_route.crypto_kind(); + let crypto = routing_table.crypto(); + let Some(vcrypto) = crypto.get(crypto_kind) else { + bail!("crypto not supported for route"); + }; + let pr_pubkey = private_route.public_key.value; + let pr_hopcount = private_route.hop_count as usize; + let max_route_hop_count = self.unlocked_inner.max_route_hop_count; + + // Check private route hop count isn't larger than the max route hop count plus one for the 'first hop' header + if pr_hopcount > (max_route_hop_count + 1) { + bail!("private route hop count too long"); + } + // See if we are using a safety route, if not, short 
circuit this operation + let safety_spec = match safety_selection { + // Safety route spec to use + SafetySelection::Safe(safety_spec) => safety_spec, + // Safety route stub with the node's public key as the safety route key since it's the 0th hop + SafetySelection::Unsafe(sequencing) => { + let Some(pr_first_hop_node) = private_route.pop_first_hop() else { + bail!("compiled private route should have first hop"); + }; + + let opt_first_hop = match pr_first_hop_node { + RouteNode::NodeId(id) => rti.lookup_node_ref(routing_table.clone(), TypedKey::new(crypto_kind, id)), + RouteNode::PeerInfo(pi) => rti.register_node_with_peer_info( + routing_table.clone(), + RoutingDomain::PublicInternet, + pi, + false, + ), + }; + if opt_first_hop.is_none() { + // Can't reach this private route any more + log_rtab!(debug "can't reach private route any more"); + return Ok(None); + } + let mut first_hop = opt_first_hop.unwrap(); + + // Set sequencing requirement + first_hop.set_sequencing(sequencing); + + // Return the compiled safety route + //println!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts)); + return Ok(Some(CompiledRoute { + safety_route: SafetyRoute::new_stub(routing_table.node_id(crypto_kind), private_route), + secret: routing_table.node_id_secret_key(crypto_kind), + first_hop, + })); + } + }; + + // If the safety route requested is also the private route, this is a loopback test, just accept it + let opt_private_route_id = inner.content.get_id_by_key(&pr_pubkey); + let sr_pubkey = if opt_private_route_id.is_some() && safety_spec.preferred_route == opt_private_route_id { + // Private route is also safety route during loopback test + pr_pubkey + } else { + let Some(avoid_node_id) = private_route.first_hop_node_id() else { + bail!("compiled private route should have first hop"); + }; + let Some(sr_pubkey) = self.get_route_for_safety_spec_inner(inner, rti, crypto_kind, &safety_spec, Direction::Outbound.into(), &[avoid_node_id])? 
else { + // No safety route could be found for this spec + return Ok(None); + }; + sr_pubkey + }; + + // Look up a few things from the safety route detail we want for the compiled route and don't borrow inner + let Some(safety_route_id) = inner.content.get_id_by_key(&sr_pubkey) else { + bail!("route id missing"); + }; + let Some(safety_rssd) = inner.content.get_detail(&safety_route_id) else { + bail!("route set detail missing"); + }; + let Some(safety_rsd) = safety_rssd.get_route_by_key(&sr_pubkey) else { + bail!("route detail missing"); + }; + + // We can optimize the peer info in this safety route if it has been successfully + // communicated over either via an outbound test, or used as a private route inbound + // and we are replying over the same route as our safety route outbound + let optimize = safety_rssd.get_stats().last_tested_ts.is_some() || safety_rssd.get_stats().last_received_ts.is_some(); + + // Get the first hop noderef of the safety route + let mut first_hop = safety_rssd.hop_node_ref(0).unwrap(); + + // Ensure sequencing requirement is set on first hop + first_hop.set_sequencing(safety_spec.sequencing); + + // Get the safety route secret key + let secret = safety_rsd.secret_key; + + // See if we have a cached route we can use + if optimize { + if let Some(safety_route) = inner.cache.lookup_compiled_route_cache(sr_pubkey, pr_pubkey) { + // Build compiled route + let compiled_route = CompiledRoute { + safety_route, + secret, + first_hop, + }; + // Return compiled route + //println!("compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts)); + return Ok(Some(compiled_route)); + } + } + + // Create hops + let hops = { + // start last blob-to-encrypt data off as private route + let mut blob_data = { + let mut pr_message = ::capnp::message::Builder::new_default(); + let mut pr_builder = pr_message.init_root::(); + encode_private_route(&private_route, &mut pr_builder)?; + let mut blob_data = builder_to_vec(pr_message)?; + + // 
append the private route tag so we know how to decode it later + blob_data.push(1u8); + blob_data + }; + + // Encode each hop from inside to outside + // skips the outermost hop since that's entering the + // safety route and does not include the dialInfo + // (outer hop is a RouteHopData, not a RouteHop). + // Each loop mutates 'nonce', and 'blob_data' + let mut nonce = vcrypto.random_nonce(); + // Forward order (safety route), but inside-out + for h in (1..safety_rsd.hops.len()).rev() { + // Get blob to encrypt for next hop + blob_data = { + // Encrypt the previous blob ENC(nonce, DH(PKhop,SKsr)) + let dh_secret = vcrypto + .cached_dh(&safety_rsd.hops[h], &safety_rsd.secret_key) + .wrap_err("dh failed")?; + let enc_msg_data = + vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) + .wrap_err("encryption failed")?; + + // Make route hop data + let route_hop_data = RouteHopData { + nonce, + blob: enc_msg_data, + }; + + // Make route hop + let route_hop = RouteHop { + node: if optimize { + // Optimized, no peer info, just the dht key + RouteNode::NodeId(safety_rsd.hops[h]) + } else { + // Full peer info, required until we are sure the route has been fully established + let node_id = TypedKey::new(safety_rsd.crypto_kind, safety_rsd.hops[h]); + let pi = rti + .with_node_entry(node_id, |entry| { + entry.with(rti, |_rti, e| { + e.make_peer_info(RoutingDomain::PublicInternet) + }) + }) + .flatten(); + if pi.is_none() { + bail!("peer info should exist for route but doesn't"); + } + RouteNode::PeerInfo(pi.unwrap()) + }, + next_hop: Some(route_hop_data), + }; + + // Make next blob from route hop + let mut rh_message = ::capnp::message::Builder::new_default(); + let mut rh_builder = rh_message.init_root::(); + encode_route_hop(&route_hop, &mut rh_builder)?; + let mut blob_data = builder_to_vec(rh_message)?; + + // Append the route hop tag so we know how to decode it later + blob_data.push(0u8); + blob_data + }; + + // Make another nonce for the next hop + 
nonce = vcrypto.random_nonce(); + } + + // Encode first RouteHopData + let dh_secret = vcrypto + .cached_dh(&safety_rsd.hops[0], &safety_rsd.secret_key) + .map_err(RPCError::map_internal("dh failed"))?; + let enc_msg_data = vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) + .map_err(RPCError::map_internal("encryption failed"))?; + + let route_hop_data = RouteHopData { + nonce, + blob: enc_msg_data, + }; + + SafetyRouteHops::Data(route_hop_data) + }; + + // Build safety route + let safety_route = SafetyRoute { + public_key: TypedKey::new(crypto_kind, sr_pubkey), + hop_count: safety_spec.hop_count as u8, + hops, + }; + + // Add to cache but only if we have an optimized route + if optimize { + inner.cache.add_to_compiled_route_cache( pr_pubkey, safety_route.clone()); + } + + // Build compiled route + let compiled_route = CompiledRoute { + safety_route, + secret, + first_hop, + }; + + // Return compiled route + //println!("compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts)); + Ok(Some(compiled_route)) + } + + /// Get an allocated route that matches a particular safety spec + #[instrument(level = "trace", skip(self, inner, rti), ret, err)] + fn get_route_for_safety_spec_inner( + &self, + inner: &mut RouteSpecStoreInner, + rti: &mut RoutingTableInner, + crypto_kind: CryptoKind, + safety_spec: &SafetySpec, + direction: DirectionSet, + avoid_nodes: &[TypedKey], + ) -> EyreResult> { + // Ensure the total hop count isn't too long for our config + let max_route_hop_count = self.unlocked_inner.max_route_hop_count; + if safety_spec.hop_count == 0 { + bail!("safety route hop count is zero"); + } + if safety_spec.hop_count > max_route_hop_count { + bail!("safety route hop count too long"); + } + + // See if the preferred route is here + if let Some(preferred_route) = safety_spec.preferred_route { + if let Some(preferred_rssd) = inner.content.get_detail(&preferred_route) { + // Only use the preferred route if it has the 
desired crypto kind + if let Some(preferred_key) = preferred_rssd.get_route_set_keys().get(crypto_kind) { + // Only use the preferred route if it doesn't contain the avoid nodes + if !preferred_rssd.contains_nodes(avoid_nodes) { + return Ok(Some(preferred_key.value)); + } + } + } + } + + // Select a safety route from the pool or make one if we don't have one that matches + let sr_route_id = if let Some(sr_route_id) = Self::first_available_route_inner( + inner, + crypto_kind, + safety_spec.hop_count, + safety_spec.hop_count, + safety_spec.stability, + safety_spec.sequencing, + direction, + avoid_nodes, + ) { + // Found a route to use + sr_route_id + } else { + // No route found, gotta allocate one + let Some(sr_route_id) = self + .allocate_route_inner( + inner, + rti, + &[crypto_kind], + safety_spec.stability, + safety_spec.sequencing, + safety_spec.hop_count, + direction, + avoid_nodes, + ) + .map_err(RPCError::internal)? + else { + return Ok(None); + }; + sr_route_id + }; + + let sr_pubkey = inner.content.get_detail(&sr_route_id).unwrap().get_route_set_keys().get(crypto_kind).unwrap().value; + + Ok(Some(sr_pubkey)) + } + + /// Get a private route to use for the answer to question + #[instrument(level = "trace", skip(self), ret, err)] + pub fn get_private_route_for_safety_spec( + &self, + crypto_kind: CryptoKind, + safety_spec: &SafetySpec, + avoid_nodes: &[TypedKey], + ) -> EyreResult> { + let inner = &mut *self.inner.lock(); + let routing_table = self.unlocked_inner.routing_table.clone(); + let rti = &mut *routing_table.inner.write(); + + Ok(self.get_route_for_safety_spec_inner( + inner, + rti, + crypto_kind, + safety_spec, + Direction::Inbound.into(), + avoid_nodes, + )?) 
+ } + + fn assemble_private_route_inner(&self, key: &PublicKey, rsd: &RouteSpecDetail, optimized: bool) -> EyreResult + { + let routing_table = self.unlocked_inner.routing_table.clone(); + let rti = &*routing_table.inner.read(); + + // Ensure we get the crypto for it + let crypto = routing_table.network_manager().crypto(); + let Some(vcrypto) = crypto.get(rsd.crypto_kind) else { + bail!("crypto not supported for route"); + }; + + // Make innermost route hop to our own node + let mut route_hop = RouteHop { + node: if optimized { + if !rti.has_valid_own_node_info(RoutingDomain::PublicInternet) { + bail!("can't make private routes until our node info is valid"); + } + let Some(node_id) = routing_table.node_ids().get(rsd.crypto_kind) else { + bail!("missing node id for crypto kind"); + }; + RouteNode::NodeId(node_id.value) + } else { + let Some(pi) = rti.get_own_peer_info(RoutingDomain::PublicInternet) else { + bail!("can't make private routes until our node info is valid"); + }; + RouteNode::PeerInfo(pi) + }, + next_hop: None, + }; + + // Loop for each hop + let hop_count = rsd.hops.len(); + // iterate hops in private route order (reverse, but inside out) + for h in 0..hop_count { + let nonce = vcrypto.random_nonce(); + + let blob_data = { + let mut rh_message = ::capnp::message::Builder::new_default(); + let mut rh_builder = rh_message.init_root::(); + encode_route_hop(&route_hop, &mut rh_builder)?; + builder_to_vec(rh_message)? 
+ }; + + // Encrypt the previous blob ENC(nonce, DH(PKhop,SKpr)) + let dh_secret = vcrypto + .cached_dh(&rsd.hops[h], &rsd.secret_key) + .wrap_err("dh failed")?; + let enc_msg_data = vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) + .wrap_err("encryption failed")?; + let route_hop_data = RouteHopData { + nonce, + blob: enc_msg_data, + }; + + route_hop = RouteHop { + node: if optimized { + // Optimized, no peer info, just the dht key + RouteNode::NodeId(rsd.hops[h]) + } else { + // Full peer info, required until we are sure the route has been fully established + let node_id = TypedKey::new(rsd.crypto_kind, rsd.hops[h]); + let pi = rti + .with_node_entry(node_id, |entry| { + entry.with(rti, |_rti, e| { + e.make_peer_info(RoutingDomain::PublicInternet) + }) + }) + .flatten(); + if pi.is_none() { + bail!("peer info should exist for route but doesn't",); + } + RouteNode::PeerInfo(pi.unwrap()) + }, + next_hop: Some(route_hop_data), + } + } + + let private_route = PrivateRoute { + public_key: TypedKey::new(rsd.crypto_kind, key.clone()), + // add hop for 'FirstHop' + hop_count: (hop_count + 1).try_into().unwrap(), + hops: PrivateRouteHops::FirstHop(route_hop), + }; + Ok(private_route) + } + + /// Assemble a single private route for publication + /// Returns a PrivateRoute object for an allocated private route key + #[instrument(level = "trace", skip(self), err)] + pub fn assemble_private_route( + &self, + key: &PublicKey, + optimized: Option, + ) -> EyreResult { + let inner = &*self.inner.lock(); + let Some(rsid) = inner.content.get_id_by_key(key) else { + bail!("route key does not exist"); + }; + let Some(rssd) = inner.content.get_detail(&rsid) else { + bail!("route id does not exist"); + }; + + // See if we can optimize this compilation yet + // We don't want to include full nodeinfo if we don't have to + let optimized = optimized + .unwrap_or(rssd.get_stats().last_tested_ts.is_some() || rssd.get_stats().last_received_ts.is_some()); + + let rsd = 
rssd.get_route_by_key(key).expect("route key index is broken"); + + self.assemble_private_route_inner(key, rsd, optimized) + } + + + /// Assemble private route set for publication + /// Returns a vec of PrivateRoute objects for an allocated private route + #[instrument(level = "trace", skip(self), err)] + pub fn assemble_private_routes( + &self, + id: &RouteId, + optimized: Option, + ) -> EyreResult> { + let inner = &*self.inner.lock(); + let Some(rssd) = inner.content.get_detail(id) else { + bail!("route id does not exist"); + }; + + // See if we can optimize this compilation yet + // We don't want to include full nodeinfo if we don't have to + let optimized = optimized + .unwrap_or(rssd.get_stats().last_tested_ts.is_some() || rssd.get_stats().last_received_ts.is_some()); + + let mut out = Vec::new(); + for (key, rsd) in rssd.iter_route_set() { + out.push(self.assemble_private_route_inner(key, rsd, optimized)?); + } + Ok(out) + } + + /// Import a remote private route for compilation + /// It is safe to import the same route more than once and it will return the same route id + /// Returns a route set id + #[instrument(level = "trace", skip(self, blob), ret, err)] + pub fn import_remote_private_route(&self, blob: Vec) -> EyreResult { + let cur_ts = get_aligned_timestamp(); + + // decode the pr blob + let private_routes = RouteSpecStore::blob_to_private_routes(self.unlocked_inner.routing_table.crypto(), blob)?; + + // make the route id + let id = self.generate_remote_route_id(&private_routes)?; + + // validate the private routes + let inner = &mut *self.inner.lock(); + for private_route in &private_routes { + + // ensure private route has first hop + if !matches!(private_route.hops, PrivateRouteHops::FirstHop(_)) { + bail!("private route must have first hop"); + } + + // ensure this isn't also an allocated route + if inner.content.get_id_by_key(&private_route.public_key.value).is_some() { + bail!("should not import allocated route"); + } + } + + 
inner.cache.cache_remote_private_route(cur_ts, id, private_routes); + + Ok(id) + } + + /// Release a remote private route that is no longer in use + #[instrument(level = "trace", skip(self), ret)] + pub fn release_remote_private_route(&self, id: RouteId) -> bool { + let inner = &mut *self.inner.lock(); + inner.cache.remove_remote_private_route(id) + } + + /// Get a route id for a route's public key + pub fn get_route_id_for_key(&self, key: &PublicKey) -> Option + { + let inner = &mut *self.inner.lock(); + // Check for local route + if let Some(id) = inner.content.get_id_by_key(key) { + return Some(id); + } + + // Check for remote route + if let Some(rrid) = inner.cache.get_remote_private_route_id_by_key(key) { + return Some(rrid); + } + + None + } + + /// Check to see if this remote (not ours) private route has seen our current node info yet + /// This happens when you communicate with a private route without a safety route + pub fn has_remote_private_route_seen_our_node_info(&self, key: &PublicKey) -> bool { + + let inner = &mut *self.inner.lock(); + + // Check for local route. If this is not a remote private route, + // we may be running a test and using our own local route as the destination private route. 
+ // In that case we definitely have already seen our own node info + if let Some(_) = inner.content.get_id_by_key(key) { + return true; + } + + if let Some(rrid) = inner.cache.get_remote_private_route_id_by_key(key) { + let cur_ts = get_aligned_timestamp(); + if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, &rrid) + { + let our_node_info_ts = { + let rti = &*self.unlocked_inner.routing_table.inner.read(); + let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else { + // Node info is invalid, skip this + return false; + }; + ts + }; + + return rpri.has_seen_our_node_info_ts(our_node_info_ts); + } + } + + false + } + + /// Mark a remote private route as having seen our current node info + /// PRIVACY: + /// We do not accept node info timestamps from remote private routes because this would + /// enable a deanonymization attack, whereby a node could be 'pinged' with a doctored node_info with a + /// special 'timestamp', which then may be sent back over a private route, identifying that it + /// was that node that had the private route. + pub fn mark_remote_private_route_seen_our_node_info( + &self, + key: &PublicKey, + cur_ts: Timestamp, + ) -> EyreResult<()> { + let our_node_info_ts = { + let rti = &*self.unlocked_inner.routing_table.inner.read(); + let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else { + // Node info is invalid, skipping this + return Ok(()); + }; + ts + }; + + let inner = &mut *self.inner.lock(); + + // Check for local route. If this is not a remote private route + // then we just skip the recording. We may be running a test and using + // our own local route as the destination private route. 
+ if let Some(_) = inner.content.get_id_by_key(key) { + return Ok(()); + } + + if let Some(rrid) = inner.cache.get_remote_private_route_id_by_key(key) { + if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, &rrid) + { + rpri.set_last_seen_our_node_info_ts(our_node_info_ts); + return Ok(()); + } + } + + bail!("private route is missing from store: {}", key); + } + + /// Get the route statistics for any route we know about, local or remote + pub fn with_route_stats(&self, cur_ts: Timestamp, key: &PublicKey, f: F) -> Option + where + F: FnOnce(&mut RouteStats) -> R, + { + let inner = &mut *self.inner.lock(); + + // Check for stub route + if self.unlocked_inner.routing_table.matches_own_node_id_key(key) { + return None; + } + + // Check for local route + if let Some(rsid) = inner.content.get_id_by_key(key) { + if let Some(rsd) = inner.content.get_detail_mut(&rsid) { + return Some(f(rsd.get_stats_mut())); + } + } + + // Check for remote route + if let Some(rrid) = inner.cache.get_remote_private_route_id_by_key(key) { + if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, &rrid) + { + return Some(f(rpri.get_stats_mut())); + } + } + + None + } + + /// Clear caches when local our local node info changes + #[instrument(level = "trace", skip(self))] + pub fn reset(&self) { + let inner = &mut *self.inner.lock(); + + // Clean up local allocated routes + inner.content.reset_details(); + + // Reset private route cache + inner.cache.reset_remote_private_routes(); + } + + /// Mark route as published + /// When first deserialized, routes must be re-published in order to ensure they remain + /// in the RouteSpecStore. 
+ pub fn mark_route_published(&self, id: &RouteId, published: bool) -> EyreResult<()> { + let inner = &mut *self.inner.lock(); + let Some(rssd) = inner.content.get_detail_mut(id) else { + bail!("route does not exist"); + }; + rssd.set_published(published); + Ok(()) + } + + /// Process transfer statistics to get averages + pub fn roll_transfers(&self, last_ts: Timestamp, cur_ts: Timestamp) { + let inner = &mut *self.inner.lock(); + + // Roll transfers for locally allocated routes + inner.content.roll_transfers(last_ts, cur_ts); + + // Roll transfers for remote private routes + inner.cache.roll_transfers(last_ts, cur_ts); + } + + /// Convert private route list to binary blob + pub fn private_routes_to_blob(private_routes: &[PrivateRoute]) -> EyreResult> { + + let mut buffer = vec![]; + + // Serialize count + let pr_count = private_routes.len(); + if pr_count > MAX_CRYPTO_KINDS { + bail!("too many crypto kinds to encode blob"); + } + let pr_count = pr_count as u8; + buffer.push(pr_count); + + // Serialize stream of private routes + for private_route in private_routes { + let mut pr_message = ::capnp::message::Builder::new_default(); + let mut pr_builder = pr_message.init_root::(); + + encode_private_route(private_route, &mut pr_builder) + .wrap_err("failed to encode private route")?; + + capnp::serialize_packed::write_message(&mut buffer, &pr_message) + .map_err(RPCError::internal) + .wrap_err("failed to convert builder to vec")?; + } + Ok(buffer) + } + + /// Convert binary blob to private route + pub fn blob_to_private_routes(crypto: Crypto, blob: Vec) -> EyreResult> { + + // Deserialize count + if blob.is_empty() { + bail!("not deserializing empty private route blob"); + } + + let pr_count = blob[0] as usize; + if pr_count > MAX_CRYPTO_KINDS { + bail!("too many crypto kinds to decode blob"); + } + + // Deserialize stream of private routes + let mut pr_slice = &blob[1..]; + let mut out = Vec::with_capacity(pr_count); + for _ in 0..pr_count { + let reader = 
capnp::serialize_packed::read_message( + &mut pr_slice, + capnp::message::ReaderOptions::new(), + ) + .map_err(RPCError::internal) + .wrap_err("failed to make message reader")?; + + let pr_reader = reader + .get_root::() + .map_err(RPCError::internal) + .wrap_err("failed to make reader for private_route")?; + let private_route = decode_private_route(&pr_reader, crypto.clone()).wrap_err("failed to decode private route")?; + out.push(private_route); + } + + // Don't trust the order of the blob + out.sort_by(|a,b| { + a.public_key.cmp(&b.public_key) + }); + + Ok(out) + } + + /// Generate RouteId from typed key set of route public keys + fn generate_allocated_route_id(&self, rssd: &RouteSetSpecDetail) -> EyreResult { + let route_set_keys = rssd.get_route_set_keys(); + let crypto = self.unlocked_inner.routing_table.crypto(); + + let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * route_set_keys.len()); + let mut best_kind : Option = None; + for tk in route_set_keys.iter() { + if best_kind.is_none() || compare_crypto_kind(&tk.kind, best_kind.as_ref().unwrap()) == cmp::Ordering::Less { + best_kind = Some(tk.kind); + } + idbytes.extend_from_slice(&tk.value.bytes); + } + let Some(best_kind) = best_kind else { + bail!("no compatible crypto kinds in route"); + }; + let vcrypto = crypto.get(best_kind).unwrap(); + + Ok(RouteId::new(vcrypto.generate_hash(&idbytes).bytes)) + + } + + /// Generate RouteId from set of private routes + fn generate_remote_route_id(&self, private_routes: &[PrivateRoute]) -> EyreResult { + let crypto = self.unlocked_inner.routing_table.crypto(); + + let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * private_routes.len()); + let mut best_kind : Option = None; + for private_route in private_routes { + if best_kind.is_none() || compare_crypto_kind(&private_route.public_key.kind, best_kind.as_ref().unwrap()) == cmp::Ordering::Less { + best_kind = Some(private_route.public_key.kind); + } + 
idbytes.extend_from_slice(&private_route.public_key.value.bytes); + } + let Some(best_kind) = best_kind else { + bail!("no compatible crypto kinds in route"); + }; + let vcrypto = crypto.get(best_kind).unwrap(); + + Ok(RouteId::new(vcrypto.generate_hash(&idbytes).bytes)) + } + +} diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs new file mode 100644 index 00000000..e4f9b3ef --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs @@ -0,0 +1,365 @@ +use super::*; + +// Compiled route key for caching +#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +struct CompiledRouteCacheKey { + sr_pubkey: PublicKey, + pr_pubkey: PublicKey, +} + +/// Compiled route (safety route + private route) +#[derive(Clone, Debug)] +pub struct CompiledRoute { + /// The safety route attached to the private route + pub safety_route: SafetyRoute, + /// The secret used to encrypt the message payload + pub secret: SecretKey, + /// The node ref to the first hop in the compiled route + pub first_hop: NodeRef, +} + +/// Ephemeral data used to help the RouteSpecStore operate efficiently +#[derive(Debug)] +pub struct RouteSpecStoreCache { + /// How many times nodes have been used + used_nodes: HashMap, + /// How many times nodes have been used at the terminal point of a route + used_end_nodes: HashMap, + /// Route spec hop cache, used to quickly disqualify routes + hop_cache: HashSet>, + /// Remote private routes we've imported and statistics + remote_private_route_set_cache: LruCache, + /// Remote private routes indexed by public key + remote_private_routes_by_key: HashMap, + /// Compiled route cache + compiled_route_cache: LruCache, + /// List of dead allocated routes + dead_routes: Vec, + /// List of dead remote routes + dead_remote_routes: Vec, +} + +impl RouteSpecStoreCache { + /// add an allocated route set to our cache via its cache key 
+ pub fn add_to_cache(&mut self, rti: &RoutingTableInner, rssd: &RouteSetSpecDetail) { + let cache_key = rssd.make_cache_key(rti); + if !self.hop_cache.insert(cache_key) { + panic!("route should never be inserted twice"); + } + for (_pk, rsd) in rssd.iter_route_set() { + for h in &rsd.hops { + self.used_nodes + .entry(*h) + .and_modify(|e| *e += 1) + .or_insert(1); + } + self.used_end_nodes + .entry(*rsd.hops.last().unwrap()) + .and_modify(|e| *e += 1) + .or_insert(1); + } + } + + /// checks if an allocated route is in our cache + pub fn contains_route(&self, cache_key: &Vec) -> bool { + self.hop_cache.contains(cache_key) + } + + /// removes an allocated route set from our cache + pub fn remove_from_cache( + &mut self, + rti: &RoutingTableInner, + id: RouteId, + rssd: &RouteSetSpecDetail, + ) -> bool { + let cache_key = rssd.make_cache_key(rti); + + // Remove from hop cache + if !self.hop_cache.remove(&cache_key) { + return false; + } + for (pk, rsd) in rssd.iter_route_set() { + for h in &rsd.hops { + // Remove from used nodes cache + match self.used_nodes.entry(*h) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() -= 1; + if *o.get() == 0 { + o.remove(); + } + } + std::collections::hash_map::Entry::Vacant(_) => { + panic!("used_nodes cache should have contained hop"); + } + } + } + // Remove from end nodes cache + match self.used_end_nodes.entry(*rsd.hops.last().unwrap()) { + std::collections::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() -= 1; + if *o.get() == 0 { + o.remove(); + } + } + std::collections::hash_map::Entry::Vacant(_) => { + panic!("used_end_nodes cache should have contained hop"); + } + } + + // Invalidate compiled route cache + self.invalidate_compiled_route_cache(pk); + } + + // Mark it as dead for the update + self.dead_routes.push(id); + + true + } + + /// calculate how many times a node with a particular node id set has been used anywhere in the path of our allocated routes + pub fn get_used_node_count(&self, 
node_ids: &TypedKeySet) -> usize { + node_ids.iter().fold(0usize, |acc, k| { + acc + self.used_nodes.get(&k.value).cloned().unwrap_or_default() + }) + } + + /// calculate how many times a node with a particular node id set has been used at the end of the path of our allocated routes + pub fn get_used_end_node_count(&self, node_ids: &TypedKeySet) -> usize { + node_ids.iter().fold(0usize, |acc, k| { + acc + self + .used_end_nodes + .get(&k.value) + .cloned() + .unwrap_or_default() + }) + } + + /// add remote private route to caches + /// returns a remote private route set id + fn add_remote_private_route( + &mut self, + id: RouteId, + rprinfo: RemotePrivateRouteInfo, + ) -> RouteId { + // also store in id by key table + for private_route in rprinfo.get_private_routes() { + self.remote_private_routes_by_key + .insert(private_route.public_key.value, id.clone()); + } + + let mut dead = None; + self.remote_private_route_set_cache + .insert(id, rprinfo, |dead_id, dead_rpri| { + dead = Some((dead_id, dead_rpri)); + }); + + if let Some((dead_id, dead_rpri)) = dead { + // If anything LRUs out, remove from the by-key table + // Follow the same logic as 'remove_remote_private_route' here + for dead_private_route in dead_rpri.get_private_routes() { + self.remote_private_routes_by_key + .remove(&dead_private_route.public_key.value) + .unwrap(); + self.invalidate_compiled_route_cache(&dead_private_route.public_key.value); + } + self.dead_remote_routes.push(dead_id); + } + + id + } + + /// get count of remote private routes in cache + pub fn get_remote_private_route_count(&self) -> usize { + self.remote_private_route_set_cache.len() + } + + /// iterate all of the remote private routes we have in the cache + pub fn iter_remote_private_routes( + &self, + ) -> hashlink::linked_hash_map::Iter { + self.remote_private_route_set_cache.iter() + } + + /// remote private route cache accessor + /// will LRU entries and may expire entries and not return them if they are stale + pub fn 
get_remote_private_route( + &mut self, + cur_ts: Timestamp, + id: &RouteId, + ) -> Option<&RemotePrivateRouteInfo> { + if let Some(rpri) = self.remote_private_route_set_cache.get_mut(id) { + if !rpri.did_expire(cur_ts) { + rpri.touch(cur_ts); + return Some(rpri); + } + } + None + } + + /// mutable remote private route cache accessor + /// will LRU entries and may expire entries and not return them if they are stale + pub fn get_remote_private_route_mut( + &mut self, + cur_ts: Timestamp, + id: &RouteId, + ) -> Option<&mut RemotePrivateRouteInfo> { + if let Some(rpri) = self.remote_private_route_set_cache.get_mut(id) { + if !rpri.did_expire(cur_ts) { + rpri.touch(cur_ts); + return Some(rpri); + } + } + None + } + + /// mutable remote private route cache accessor without lru action + /// will not LRU entries but may expire entries and not return them if they are stale + pub fn peek_remote_private_route_mut( + &mut self, + cur_ts: Timestamp, + id: &RouteId, + ) -> Option<&mut RemotePrivateRouteInfo> { + if let Some(rpri) = self.remote_private_route_set_cache.peek_mut(id) { + if !rpri.did_expire(cur_ts) { + rpri.touch(cur_ts); + return Some(rpri); + } + } + None + } + + /// look up a remote private route id by one of the route public keys + pub fn get_remote_private_route_id_by_key(&self, key: &PublicKey) -> Option { + self.remote_private_routes_by_key.get(key).cloned() + } + + /// get or create a remote private route cache entry + /// may LRU and/or expire other cache entries to make room for the new one + /// or update an existing entry with the same private route set + /// returns the route set id + pub fn cache_remote_private_route( + &mut self, + cur_ts: Timestamp, + id: RouteId, + private_routes: Vec, + ) { + // get id for this route set + if let Some(rpri) = self.get_remote_private_route_mut(cur_ts, &id) { + if rpri.did_expire(cur_ts) { + // Start fresh if this had expired + rpri.unexpire(cur_ts); + } else { + // If not expired, just mark as being used + 
rpri.touch(cur_ts); + } + } else { + // New remote private route cache entry + let rpri = RemotePrivateRouteInfo::new(private_routes, cur_ts); + + self.add_remote_private_route(id, rpri); + if self.peek_remote_private_route_mut(cur_ts, &id).is_none() { + panic!("remote private route should exist"); + }; + }; + } + + /// remove a remote private route from the cache + pub fn remove_remote_private_route(&mut self, id: RouteId) -> bool { + let Some(rprinfo) = self.remote_private_route_set_cache.remove(&id) else { + return false; + }; + for private_route in rprinfo.get_private_routes() { + self.remote_private_routes_by_key + .remove(&private_route.public_key.value) + .unwrap(); + self.invalidate_compiled_route_cache(&private_route.public_key.value); + } + self.dead_remote_routes.push(id); + true + } + + /// Stores a compiled 'safety + private' route so we don't have to compile it again later + pub fn add_to_compiled_route_cache(&mut self, pr_pubkey: PublicKey, safety_route: SafetyRoute) { + let key = CompiledRouteCacheKey { + sr_pubkey: safety_route.public_key.value, + pr_pubkey, + }; + + if let Some(v) = self + .compiled_route_cache + .insert(key, safety_route, |_k, _v| { + // Do nothing on LRU evict + }) + { + log_rtab!(error "route cache already contained key: sr_pubkey={:?}, pr_pubkey={:?}", v.public_key, pr_pubkey); + } + } + + /// Looks up an existing compiled route from the safety and private route components + pub fn lookup_compiled_route_cache( + &mut self, + sr_pubkey: PublicKey, + pr_pubkey: PublicKey, + ) -> Option { + let key = CompiledRouteCacheKey { + sr_pubkey, + pr_pubkey, + }; + self.compiled_route_cache.get(&key).cloned() + } + + /// When routes are dropped, they should be removed from the compiled route cache + fn invalidate_compiled_route_cache(&mut self, dead_key: &PublicKey) { + let mut dead_entries = Vec::new(); + for (k, _v) in self.compiled_route_cache.iter() { + if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key { + 
dead_entries.push(k.clone()); + } + } + for d in dead_entries { + self.compiled_route_cache.remove(&d); + } + } + + /// Take the dead local and remote routes so we can update clients + pub fn take_dead_routes(&mut self) -> Option<(Vec, Vec)> { + if self.dead_routes.is_empty() && self.dead_remote_routes.is_empty() { + // Nothing to do + return None; + } + let dead_routes = core::mem::take(&mut self.dead_routes); + let dead_remote_routes = core::mem::take(&mut self.dead_remote_routes); + Some((dead_routes, dead_remote_routes)) + } + + /// Clean up imported remote routes + /// Resets statistics for when our node info changes + pub fn reset_remote_private_routes(&mut self) { + // Restart stats for routes so we test the route again + for (_k, v) in self.remote_private_route_set_cache.iter_mut() { + v.get_stats_mut().reset(); + } + } + + /// Roll transfer statistics + pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) { + for (_k, v) in self.remote_private_route_set_cache.iter_mut() { + v.get_stats_mut().roll_transfers(last_ts, cur_ts); + } + } +} + +impl Default for RouteSpecStoreCache { + fn default() -> Self { + Self { + used_nodes: Default::default(), + used_end_nodes: Default::default(), + hop_cache: Default::default(), + remote_private_route_set_cache: LruCache::new(REMOTE_PRIVATE_ROUTE_CACHE_SIZE), + remote_private_routes_by_key: HashMap::new(), + compiled_route_cache: LruCache::new(COMPILED_ROUTE_CACHE_SIZE), + dead_routes: Default::default(), + dead_remote_routes: Default::default(), + } + } +} diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs new file mode 100644 index 00000000..4b5b06ec --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs @@ -0,0 +1,177 @@ +use super::*; + +/// The core representation of the RouteSpecStore that can be serialized +#[derive(Debug, Clone, Default, 
RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C, align(8)), derive(CheckBytes))] +pub struct RouteSpecStoreContent { + /// All of the route sets we have allocated so far indexed by key + id_by_key: HashMap, + /// All of the route sets we have allocated so far + details: HashMap, +} + +impl RouteSpecStoreContent { + pub fn new() -> Self { + Self { + id_by_key: HashMap::new(), + details: HashMap::new(), + } + } + + pub async fn load(routing_table: RoutingTable) -> EyreResult { + // Deserialize what we can + let table_store = routing_table.network_manager().table_store(); + let rsstdb = table_store.open("RouteSpecStore", 1).await?; + let mut content: RouteSpecStoreContent = + rsstdb.load_rkyv(0, b"content")?.unwrap_or_default(); + + // Look up all route hop noderefs since we can't serialize those + let mut dead_ids = Vec::new(); + for (rsid, rssd) in content.details.iter_mut() { + // Get best route since they all should resolve + let Some(pk) = rssd.get_best_route_set_key() else { + dead_ids.push(rsid.clone()); + continue; + }; + let Some(rsd) = rssd.get_route_by_key(&pk) else { + dead_ids.push(rsid.clone()); + continue; + }; + // Go through best route and resolve noderefs + let mut hop_node_refs = Vec::with_capacity(rsd.hops.len()); + for h in &rsd.hops { + let Some(nr) = routing_table.lookup_node_ref(TypedKey::new(rsd.crypto_kind, *h)) else { + dead_ids.push(rsid.clone()); + break; + }; + hop_node_refs.push(nr); + } + + // Apply noderefs + rssd.set_hop_node_refs(hop_node_refs); + } + for id in dead_ids { + log_rtab!(debug "no entry, killing off private route: {}", id); + content.remove_detail(&id); + } + + // Load secrets from pstore + let pstore = routing_table.network_manager().protected_store(); + let secret_key_map: HashMap = pstore + .load_user_secret_rkyv("RouteSpecStore") + .await? 
+ .unwrap_or_default(); + + // Ensure we got secret keys for all the public keys + let mut got_secret_key_ids = HashSet::new(); + for (rsid, rssd) in content.details.iter_mut() { + let mut found_all = true; + for (pk, rsd) in rssd.iter_route_set_mut() { + if let Some(sk) = secret_key_map.get(pk) { + rsd.secret_key = *sk; + } else { + found_all = false; + break; + } + } + if found_all { + got_secret_key_ids.insert(rsid.clone()); + } + } + + // If we missed any, nuke those route ids + let dead_ids: Vec = content + .details + .keys() + .filter_map(|id| { + if !got_secret_key_ids.contains(id) { + Some(*id) + } else { + None + } + }) + .collect(); + for id in dead_ids { + log_rtab!(debug "missing secret key, killing off private route: {}", id); + content.remove_detail(&id); + } + + Ok(content) + } + + pub async fn save(&self, routing_table: RoutingTable) -> EyreResult<()> { + // Save all the fields we care about to the frozen blob in table storage + // This skips #[with(Skip)] saving the secret keys, we save them in the protected store instead + let table_store = routing_table.network_manager().table_store(); + let rsstdb = table_store.open("RouteSpecStore", 1).await?; + rsstdb.store_rkyv(0, b"content", self).await?; + + // Keep secrets in protected store as well + let pstore = routing_table.network_manager().protected_store(); + + let mut out: HashMap = HashMap::new(); + for (_rsid, rssd) in self.details.iter() { + for (pk, rsd) in rssd.iter_route_set() { + out.insert(*pk, rsd.secret_key); + } + } + + let _ = pstore.save_user_secret_rkyv("RouteSpecStore", &out).await?; // ignore if this previously existed or not + + Ok(()) + } + + pub fn add_detail(&mut self, id: RouteId, detail: RouteSetSpecDetail) { + assert!(!self.details.contains_key(&id)); + + // also store in id by key table + for (pk, _) in detail.iter_route_set() { + self.id_by_key.insert(*pk, id.clone()); + } + self.details.insert(id.clone(), detail); + } + pub fn remove_detail(&mut self, id: &RouteId) -> 
Option { + let detail = self.details.remove(id)?; + for (pk, _) in detail.iter_route_set() { + self.id_by_key.remove(&pk).unwrap(); + } + Some(detail) + } + pub fn get_detail_count(&self) -> usize { + self.details.len() + } + pub fn get_detail(&self, id: &RouteId) -> Option<&RouteSetSpecDetail> { + self.details.get(id) + } + pub fn get_detail_mut(&mut self, id: &RouteId) -> Option<&mut RouteSetSpecDetail> { + self.details.get_mut(id) + } + pub fn get_id_by_key(&self, key: &PublicKey) -> Option { + self.id_by_key.get(key).cloned() + } + pub fn iter_ids(&self) -> std::collections::hash_map::Keys { + self.details.keys() + } + pub fn iter_details(&self) -> std::collections::hash_map::Iter { + self.details.iter() + } + + /// Clean up local allocated routes + /// Resets publication status and statistics for when our node info changes + /// Routes must be republished + pub fn reset_details(&mut self) { + for (_k, v) in &mut self.details { + // Must republish route now + v.set_published(false); + // Restart stats for routes so we test the route again + v.get_stats_mut().reset(); + } + } + + /// Roll transfer statistics + pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) { + for rssd in self.details.values_mut() { + rssd.get_stats_mut().roll_transfers(last_ts, cur_ts); + } + } +} diff --git a/veilid-core/src/routing_table/route_spec_store/route_stats.rs b/veilid-core/src/routing_table/route_spec_store/route_stats.rs new file mode 100644 index 00000000..93b4f304 --- /dev/null +++ b/veilid-core/src/routing_table/route_spec_store/route_stats.rs @@ -0,0 +1,129 @@ +use super::*; + +#[derive(Clone, Debug, Default, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct RouteStats { + /// Consecutive failed to send count + #[with(Skip)] + pub failed_to_send: u32, + /// Questions lost + #[with(Skip)] + pub questions_lost: u32, + /// Timestamp of when the route was created + pub created_ts: Timestamp, + /// 
Timestamp of when the route was last checked for validity + #[with(Skip)] + pub last_tested_ts: Option, + /// Timestamp of when the route was last sent to + #[with(Skip)] + pub last_sent_ts: Option, + /// Timestamp of when the route was last received over + #[with(Skip)] + pub last_received_ts: Option, + /// Transfers up and down + pub transfer_stats_down_up: TransferStatsDownUp, + /// Latency stats + pub latency_stats: LatencyStats, + /// Accounting mechanism for this route's RPC latency + #[with(Skip)] + latency_stats_accounting: LatencyStatsAccounting, + /// Accounting mechanism for the bandwidth across this route + #[with(Skip)] + transfer_stats_accounting: TransferStatsAccounting, +} + +impl RouteStats { + /// Make new route stats + pub fn new(created_ts: Timestamp) -> Self { + Self { + created_ts, + ..Default::default() + } + } + /// Mark a route as having failed to send + pub fn record_send_failed(&mut self) { + self.failed_to_send += 1; + } + + /// Mark a route as having lost a question + pub fn record_question_lost(&mut self) { + self.questions_lost += 1; + } + + /// Mark a route as having received something + pub fn record_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) { + self.last_received_ts = Some(cur_ts); + self.last_tested_ts = Some(cur_ts); + self.transfer_stats_accounting.add_down(bytes); + } + + /// Mark a route as having been sent to + pub fn record_sent(&mut self, cur_ts: Timestamp, bytes: ByteCount) { + self.last_sent_ts = Some(cur_ts); + self.transfer_stats_accounting.add_up(bytes); + } + + /// Record a latency measurement for the route + pub fn record_latency(&mut self, latency: TimestampDuration) { + self.latency_stats = self.latency_stats_accounting.record_latency(latency); + } + + /// Mark a route as having been tested + pub fn record_tested(&mut self, cur_ts: Timestamp) { + self.last_tested_ts = Some(cur_ts); + + // Reset question_lost and failed_to_send if we test clean + self.failed_to_send = 0; + self.questions_lost = 0; + } + + 
/// Roll transfers for these route stats + pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) { + self.transfer_stats_accounting.roll_transfers( + last_ts, + cur_ts, + &mut self.transfer_stats_down_up, + ) + } + + /// Get the latency stats + pub fn latency_stats(&self) -> &LatencyStats { + &self.latency_stats + } + + /// Get the transfer stats + pub fn transfer_stats(&self) -> &TransferStatsDownUp { + &self.transfer_stats_down_up + } + + /// Reset stats when network restarts + pub fn reset(&mut self) { + self.last_tested_ts = None; + self.last_sent_ts = None; + self.last_received_ts = None; + } + + /// Check if a route needs testing + pub fn needs_testing(&self, cur_ts: Timestamp) -> bool { + // Has the route had any failures lately? + if self.questions_lost > 0 || self.failed_to_send > 0 { + // If so, always test + return true; + } + + // Has the route been tested within the idle time we'd want to check things? + // (also if we've received successfully over the route, this will get set) + if let Some(last_tested_ts) = self.last_tested_ts { + if cur_ts.saturating_sub(last_tested_ts) + > TimestampDuration::new(ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64) + { + return true; + } + } else { + // If this route has never been tested, it needs to be + return true; + } + + false + } +} diff --git a/veilid-core/src/routing_table/routing_domain_editor.rs b/veilid-core/src/routing_table/routing_domain_editor.rs index 28785c12..d1694ec8 100644 --- a/veilid-core/src/routing_table/routing_domain_editor.rs +++ b/veilid-core/src/routing_table/routing_domain_editor.rs @@ -102,7 +102,7 @@ impl RoutingDomainEditor { let mut changed = false; { - let node_id = self.routing_table.node_id(); + let node_ids = self.routing_table.node_ids(); let mut inner = self.routing_table.inner.write(); inner.with_routing_domain_mut(self.routing_domain, |detail| { @@ -134,9 +134,7 @@ impl RoutingDomainEditor { info!( "{:?} Dial Info: {}@{}", - self.routing_domain, - 
NodeId::new(node_id), - dial_info_detail.dial_info + self.routing_domain, node_ids, dial_info_detail.dial_info ); changed = true; } diff --git a/veilid-core/src/routing_table/routing_domains.rs b/veilid-core/src/routing_table/routing_domains.rs index 21fcfefe..bfd52d88 100644 --- a/veilid-core/src/routing_table/routing_domains.rs +++ b/veilid-core/src/routing_table/routing_domains.rs @@ -10,13 +10,13 @@ pub enum ContactMethod { /// Contact the node directly Direct(DialInfo), /// Request via signal the node connect back directly (relay, target) - SignalReverse(DHTKey, DHTKey), - /// Request via signal the node negotiate a hole punch (relay, target_node) - SignalHolePunch(DHTKey, DHTKey), + SignalReverse(TypedKey, TypedKey), + /// Request via signal the node negotiate a hole punch (relay, target) + SignalHolePunch(TypedKey, TypedKey), /// Must use an inbound relay to reach the node - InboundRelay(DHTKey), + InboundRelay(TypedKey), /// Must use outbound relay to reach the node - OutboundRelay(DHTKey), + OutboundRelay(TypedKey), } #[derive(Debug)] @@ -106,8 +106,8 @@ impl RoutingDomainDetailCommon { network_class: self.network_class.unwrap_or(NetworkClass::Invalid), outbound_protocols: self.outbound_protocols, address_types: self.address_types, - min_version: MIN_CRYPTO_VERSION, - max_version: MAX_CRYPTO_VERSION, + envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), + crypto_support: VALID_CRYPTO_KINDS.to_vec(), dial_info_detail_list: self.dial_info_details.clone(), }; @@ -118,7 +118,7 @@ impl RoutingDomainDetailCommon { let opt_relay_pi = rn.locked(rti).make_peer_info(self.routing_domain); if let Some(relay_pi) = opt_relay_pi { match relay_pi.signed_node_info { - SignedNodeInfo::Direct(d) => Some((relay_pi.node_id, d)), + SignedNodeInfo::Direct(d) => Some((relay_pi.node_ids, d)), SignedNodeInfo::Relayed(_) => { warn!("relay node should not have a relay itself! 
if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped"); None @@ -130,27 +130,27 @@ impl RoutingDomainDetailCommon { }); let signed_node_info = match relay_info { - Some((relay_id, relay_sdni)) => SignedNodeInfo::Relayed( - SignedRelayedNodeInfo::with_secret( - NodeId::new(rti.unlocked_inner.node_id), + Some((relay_ids, relay_sdni)) => SignedNodeInfo::Relayed( + SignedRelayedNodeInfo::make_signatures( + rti.unlocked_inner.crypto(), + rti.unlocked_inner.node_id_typed_key_pairs(), node_info, - relay_id, + relay_ids, relay_sdni, - &rti.unlocked_inner.node_id_secret, ) .unwrap(), ), None => SignedNodeInfo::Direct( - SignedDirectNodeInfo::with_secret( - NodeId::new(rti.unlocked_inner.node_id), + SignedDirectNodeInfo::make_signatures( + rti.unlocked_inner.crypto(), + rti.unlocked_inner.node_id_typed_key_pairs(), node_info, - &rti.unlocked_inner.node_id_secret, ) .unwrap() ), }; - PeerInfo::new(NodeId::new(rti.unlocked_inner.node_id), signed_node_info) + PeerInfo::new(rti.unlocked_inner.node_ids(), signed_node_info) } pub fn with_peer_info(&self, rti: &RoutingTableInner, f: F) -> R @@ -280,7 +280,17 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { // Get the nodeinfos for convenience let node_a = peer_a.signed_node_info.node_info(); let node_b = peer_b.signed_node_info.node_info(); - + + // Get the node ids that would be used between these peers + let cck = common_crypto_kinds(&peer_a.node_ids.kinds(), &peer_b.node_ids.kinds()); + let Some(best_ck) = cck.first().copied() else { + // No common crypto kinds between these nodes, can't contact + return ContactMethod::Unreachable; + }; + + //let node_a_id = peer_a.node_ids.get(best_ck).unwrap(); + let node_b_id = peer_b.node_ids.get(best_ck).unwrap(); + // Get the best match dial info for node B if we have it if let Some(target_did) = first_filtered_dial_info_detail(node_a, node_b, &dial_info_filter, sequencing) @@ -293,15 +303,20 @@ impl 
RoutingDomainDetail for PublicInternetRoutingDomainDetail { // Get the target's inbound relay, it must have one or it is not reachable if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() { - let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap(); // Note that relay_peer_info could be node_a, in which case a connection already exists // and we only get here if the connection had dropped, in which case node_a is unreachable until // it gets a new relay connection up - if node_b_relay_id.key == peer_a.node_id.key { + if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) { return ContactMethod::Existing; } + // Get best node id to contact relay with + let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else { + // No best relay id + return ContactMethod::Unreachable; + }; + // Can node A reach the inbound relay directly? if first_filtered_dial_info_detail( node_a, @@ -329,8 +344,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { // Can we receive a direct reverse connection? 
if !reverse_did.class.requires_signal() { return ContactMethod::SignalReverse( - node_b_relay_id.key, - peer_b.node_id.key, + node_b_relay_id, + node_b_id, ); } } @@ -361,8 +376,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { { // The target and ourselves have a udp dialinfo that they can reach return ContactMethod::SignalHolePunch( - node_b_relay_id.key, - peer_a.node_id.key, + node_b_relay_id, + node_b_id, ); } } @@ -370,21 +385,26 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { // Otherwise we have to inbound relay } - return ContactMethod::InboundRelay(node_b_relay_id.key); + return ContactMethod::InboundRelay(node_b_relay_id); } } } // If the node B has no direct dial info, it needs to have an inbound relay else if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() { - let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap(); - + // Note that relay_peer_info could be node_a, in which case a connection already exists // and we only get here if the connection had dropped, in which case node_a is unreachable until // it gets a new relay connection up - if node_b_relay_id.key == peer_a.node_id.key { + if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) { return ContactMethod::Existing; } + // Get best node id to contact relay with + let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else { + // No best relay id + return ContactMethod::Unreachable; + }; + // Can we reach the full relay? 
if first_filtered_dial_info_detail( node_a, @@ -394,13 +414,13 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { ) .is_some() { - return ContactMethod::InboundRelay(node_b_relay_id.key); + return ContactMethod::InboundRelay(node_b_relay_id); } } // If node A can't reach the node by other means, it may need to use its own relay - if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_id() { - return ContactMethod::OutboundRelay(node_a_relay_id.key); + if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_ids().get(best_ck) { + return ContactMethod::OutboundRelay(node_a_relay_id); } ContactMethod::Unreachable diff --git a/veilid-core/src/routing_table/routing_table_inner.rs b/veilid-core/src/routing_table/routing_table_inner.rs index 50913789..40f2f30d 100644 --- a/veilid-core/src/routing_table/routing_table_inner.rs +++ b/veilid-core/src/routing_table/routing_table_inner.rs @@ -1,7 +1,8 @@ use super::*; +use weak_table::PtrWeakHashSet; const RECENT_PEERS_TABLE_SIZE: usize = 64; - +pub type EntryCounts = BTreeMap<(RoutingDomain, CryptoKind), usize>; ////////////////////////////////////////////////////////////////////////// #[derive(Debug, Clone, Copy)] @@ -13,10 +14,12 @@ pub struct RecentPeersEntry { pub struct RoutingTableInner { /// Extra pointer to unlocked members to simplify access pub(super) unlocked_inner: Arc, - /// Routing table buckets that hold entries - pub(super) buckets: Vec, - /// A fast counter for the number of entries in the table, total - pub(super) bucket_entry_count: usize, + /// Routing table buckets that hold references to entries, per crypto kind + pub(super) buckets: BTreeMap>, + /// A weak set of all the entries we have in the buckets for faster iteration + pub(super) all_entries: PtrWeakHashSet>, + /// A rough count of the entries in the table per routing domain and crypto kind + pub(super) live_entry_count: EntryCounts, /// The public internet routing domain pub(super) public_internet_routing_domain: 
PublicInternetRoutingDomainDetail, /// The dial info we use on the local network @@ -28,7 +31,7 @@ pub struct RoutingTableInner { /// Statistics about the total bandwidth to/from this node pub(super) self_transfer_stats: TransferStatsDownUp, /// Peers we have recently communicated with - pub(super) recent_peers: LruCache, + pub(super) recent_peers: LruCache, /// Storage for private/safety RouteSpecs pub(super) route_spec_store: Option, } @@ -37,10 +40,11 @@ impl RoutingTableInner { pub(super) fn new(unlocked_inner: Arc) -> RoutingTableInner { RoutingTableInner { unlocked_inner, - buckets: Vec::new(), + buckets: BTreeMap::new(), public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(), local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(), - bucket_entry_count: 0, + all_entries: PtrWeakHashSet::new(), + live_entry_count: BTreeMap::new(), self_latency_stats_accounting: LatencyStatsAccounting::new(), self_transfer_stats_accounting: TransferStatsAccounting::new(), self_transfer_stats: TransferStatsDownUp::default(), @@ -49,23 +53,8 @@ impl RoutingTableInner { } } - pub fn network_manager(&self) -> NetworkManager { - self.unlocked_inner.network_manager.clone() - } - pub fn rpc_processor(&self) -> RPCProcessor { - self.network_manager().rpc_processor() - } - - pub fn node_id(&self) -> DHTKey { - self.unlocked_inner.node_id - } - - pub fn node_id_secret(&self) -> DHTKeySecret { - self.unlocked_inner.node_id_secret - } - - pub fn config(&self) -> VeilidConfig { - self.unlocked_inner.config.clone() + pub fn bucket_entry_count(&self) -> usize { + self.all_entries.len() } pub fn transfer_stats_accounting(&mut self) -> &mut TransferStatsAccounting { @@ -228,7 +217,7 @@ impl RoutingTableInner { pub fn reset_all_updated_since_last_network_change(&mut self) { let cur_ts = get_aligned_timestamp(); - self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| { + self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, v| { 
v.with_mut(rti, |_rti, e| { e.set_updated_since_last_network_change(false) }); @@ -309,8 +298,8 @@ impl RoutingTableInner { .with_dial_info_filter(dif) } - fn bucket_depth(index: usize) -> usize { - match index { + fn bucket_depth(bucket_index: BucketIndex) -> usize { + match bucket_index.1 { 0 => 256, 1 => 128, 2 => 64, @@ -323,13 +312,16 @@ impl RoutingTableInner { } } - pub fn init_buckets(&mut self, routing_table: RoutingTable) { - // Size the buckets (one per bit) + pub fn init_buckets(&mut self) { + // Size the buckets (one per bit), one bucket set per crypto kind self.buckets.clear(); - self.buckets.reserve(DHT_KEY_LENGTH * 8); - for _ in 0..DHT_KEY_LENGTH * 8 { - let bucket = Bucket::new(routing_table.clone()); - self.buckets.push(bucket); + for ck in VALID_CRYPTO_KINDS { + let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8); + for _ in 0..PUBLIC_KEY_LENGTH * 8 { + let bucket = Bucket::new(ck); + ckbuckets.push(bucket); + } + self.buckets.insert(ck, ckbuckets); } } @@ -346,7 +338,7 @@ impl RoutingTableInner { // If the local network topology has changed, nuke the existing local node info and let new local discovery happen if changed { let cur_ts = get_aligned_timestamp(); - self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| { + self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, e| { e.with_mut(rti, |_rti, e| { e.clear_signed_node_info(RoutingDomain::LocalNetwork); e.set_updated_since_last_network_change(false); @@ -361,14 +353,18 @@ impl RoutingTableInner { pub fn purge_buckets(&mut self) { log_rtab!( "Starting routing table buckets purge. Table currently has {} nodes", - self.bucket_entry_count + self.bucket_entry_count() ); - for bucket in &mut self.buckets { - bucket.kick(0); + for ck in VALID_CRYPTO_KINDS { + for bucket in self.buckets.get_mut(&ck).unwrap().iter_mut() { + bucket.kick(0); + } } + self.all_entries.remove_expired(); + log_rtab!(debug - "Routing table buckets purge complete. 
Routing table now has {} nodes", - self.bucket_entry_count + "Routing table buckets purge complete. Routing table now has {} nodes", + self.bucket_entry_count() ); } @@ -376,32 +372,36 @@ impl RoutingTableInner { pub fn purge_last_connections(&mut self) { log_rtab!( "Starting routing table last_connections purge. Table currently has {} nodes", - self.bucket_entry_count + self.bucket_entry_count() ); - for bucket in &self.buckets { - for entry in bucket.entries() { - entry.1.with_mut_inner(|e| { - e.clear_last_connections(); - }); + for ck in VALID_CRYPTO_KINDS { + for bucket in &self.buckets[&ck] { + for entry in bucket.entries() { + entry.1.with_mut_inner(|e| { + e.clear_last_connections(); + }); + } } } + self.all_entries.remove_expired(); log_rtab!(debug - "Routing table last_connections purge complete. Routing table now has {} nodes", - self.bucket_entry_count + "Routing table last_connections purge complete. Routing table now has {} nodes", + self.bucket_entry_count() ); } /// Attempt to settle buckets and remove entries down to the desired number /// which may not be possible due extant NodeRefs - pub fn kick_bucket(&mut self, idx: usize) { - let bucket = &mut self.buckets[idx]; - let bucket_depth = Self::bucket_depth(idx); + pub fn kick_bucket(&mut self, bucket_index: BucketIndex) { + let bucket = self.get_bucket_mut(bucket_index); + let bucket_depth = Self::bucket_depth(bucket_index); - if let Some(dead_node_ids) = bucket.kick(bucket_depth) { - // Remove counts - self.bucket_entry_count -= dead_node_ids.len(); - log_rtab!(debug "Routing table now has {} nodes", self.bucket_entry_count); + if let Some(_dead_node_ids) = bucket.kick(bucket_depth) { + // Remove expired entries + self.all_entries.remove_expired(); + + log_rtab!(debug "Bucket {}:{} kicked Routing table now has {} nodes", bucket_index.0, bucket_index.1, self.bucket_entry_count()); // Now purge the routing table inner vectors //let filter = |k: &DHTKey| dead_node_ids.contains(k); @@ -412,23 +412,54 
@@ impl RoutingTableInner { } } - pub fn find_bucket_index(&self, node_id: DHTKey) -> usize { - distance(&node_id, &self.unlocked_inner.node_id) - .first_nonzero_bit() - .unwrap() + /// Build the counts of entries per routing domain and crypto kind and cache them + /// Only considers entries that have valid signed node info + pub fn refresh_cached_entry_counts(&mut self) -> EntryCounts { + self.live_entry_count.clear(); + let cur_ts = get_aligned_timestamp(); + self.with_entries_mut(cur_ts, BucketEntryState::Unreliable, |rti, entry| { + entry.with_inner(|e| { + // Tally per routing domain and crypto kind + for rd in RoutingDomain::all() { + if let Some(sni) = e.signed_node_info(rd) { + // Only consider entries that have valid signed node info in this domain + if sni.has_any_signature() { + // Tally + for crypto_kind in e.crypto_kinds() { + rti.live_entry_count + .entry((rd, crypto_kind)) + .and_modify(|x| *x += 1) + .or_insert(1); + } + } + } + } + }); + Option::<()>::None + }); + self.live_entry_count.clone() } + /// Return the last cached entry counts + /// Only considers entries that have valid signed node info + pub fn cached_entry_counts(&self) -> EntryCounts { + self.live_entry_count.clone() + } + + /// Count entries that match some criteria pub fn get_entry_count( &self, routing_domain_set: RoutingDomainSet, min_state: BucketEntryState, + crypto_kinds: &[CryptoKind], ) -> usize { let mut count = 0usize; let cur_ts = get_aligned_timestamp(); - self.with_entries(cur_ts, min_state, |rti, _, e| { - if e.with(rti, |rti, e| e.best_routing_domain(rti, routing_domain_set)) - .is_some() - { + self.with_entries(cur_ts, min_state, |rti, e| { + if e.with_inner(|e| { + e.best_routing_domain(rti, routing_domain_set).is_some() + && !common_crypto_kinds(&e.crypto_kinds(), crypto_kinds).is_empty() + }) { count += 1; } Option::<()>::None @@ -436,52 +467,68 @@ impl RoutingTableInner { count } - pub fn with_entries) -> Option>( + /// Count entries per crypto kind that match 
some criteria + pub fn get_entry_count_per_crypto_kind( + &self, + routing_domain_set: RoutingDomainSet, + min_state: BucketEntryState, + ) -> BTreeMap { + let mut counts = BTreeMap::new(); + let cur_ts = get_aligned_timestamp(); + self.with_entries(cur_ts, min_state, |rti, e| { + if let Some(crypto_kinds) = e.with_inner(|e| { + if e.best_routing_domain(rti, routing_domain_set).is_some() { + Some(e.crypto_kinds()) + } else { + None + } + }) { + // Got crypto kinds, add to map + for ck in crypto_kinds { + counts.entry(ck).and_modify(|x| *x += 1).or_insert(1); + } + } + Option::<()>::None + }); + counts + } + + /// Iterate entries with a filter + pub fn with_entries) -> Option>( &self, cur_ts: Timestamp, min_state: BucketEntryState, mut f: F, ) -> Option { - let mut entryvec = Vec::with_capacity(self.bucket_entry_count); - for bucket in &self.buckets { - for entry in bucket.entries() { - if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) { - entryvec.push((*entry.0, entry.1.clone())); + for entry in &self.all_entries { + if entry.with_inner(|e| e.state(cur_ts) >= min_state) { + if let Some(out) = f(self, entry) { + return Some(out); } } } - for entry in entryvec { - if let Some(out) = f(self, entry.0, entry.1) { - return Some(out); - } - } None } - pub fn with_entries_mut< - T, - F: FnMut(&mut RoutingTableInner, DHTKey, Arc) -> Option, - >( + /// Iterate entries with a filter mutably + pub fn with_entries_mut) -> Option>( &mut self, cur_ts: Timestamp, min_state: BucketEntryState, mut f: F, ) -> Option { - let mut entryvec = Vec::with_capacity(self.bucket_entry_count); - for bucket in &self.buckets { - for entry in bucket.entries() { - if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) { - entryvec.push((*entry.0, entry.1.clone())); - } + let mut entries = Vec::with_capacity(self.all_entries.len()); + for entry in self.all_entries.iter() { + if entry.with_inner(|e| e.state(cur_ts) >= min_state) { + entries.push(entry); } } - for entry in entryvec 
{ - if let Some(out) = f(self, entry.0, entry.1) { + for entry in entries { + if let Some(out) = f(self, entry) { return Some(out); } } - None } @@ -492,21 +539,24 @@ impl RoutingTableInner { cur_ts: Timestamp, ) -> Vec { // Collect relay nodes - let opt_relay_id = self.with_routing_domain(routing_domain, |rd| { - rd.common().relay_node().map(|rn| rn.node_id()) - }); + let opt_relay = self.with_routing_domain(routing_domain, |rd| rd.common().relay_node()); let own_node_info_ts = self.get_own_node_info_ts(routing_domain); // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow - let mut node_refs = Vec::::with_capacity(self.bucket_entry_count); - self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| { - if v.with(rti, |rti, e| { + let mut node_refs = Vec::::with_capacity(self.bucket_entry_count()); + self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| { + if entry.with_inner(|e| { // If this isn't in the routing domain we are checking, don't include it if !e.exists_in_routing_domain(rti, routing_domain) { return false; } // If we need a ping via the normal timing mechanism, then do it - if e.needs_ping(cur_ts, opt_relay_id == Some(k)) { + // or if this node is our own relay, then we keep it alive + let is_our_relay = opt_relay + .as_ref() + .map(|nr| nr.same_bucket_entry(&entry)) + .unwrap_or(false); + if e.needs_ping(cur_ts, is_our_relay) { return true; } // If we need a ping because this node hasn't seen our latest node info, then do it @@ -519,8 +569,7 @@ impl RoutingTableInner { }) { node_refs.push(NodeRef::new( outer_self.clone(), - k, - v, + entry, Some(NodeRefFilter::new().with_routing_domain(routing_domain)), )); } @@ -530,90 +579,169 @@ impl RoutingTableInner { } pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: Timestamp) -> Vec { - let mut node_refs = Vec::::with_capacity(self.bucket_entry_count); - self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, k, v| { - 
node_refs.push(NodeRef::new(outer_self.clone(), k, v, None)); + let mut node_refs = Vec::::with_capacity(self.bucket_entry_count()); + self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, entry| { + node_refs.push(NodeRef::new(outer_self.clone(), entry, None)); Option::<()>::None }); node_refs } + fn get_bucket_mut(&mut self, bucket_index: BucketIndex) -> &mut Bucket { + self.buckets + .get_mut(&bucket_index.0) + .unwrap() + .get_mut(bucket_index.1) + .unwrap() + } + + fn get_bucket(&self, bucket_index: BucketIndex) -> &Bucket { + self.buckets + .get(&bucket_index.0) + .unwrap() + .get(bucket_index.1) + .unwrap() + } + + // Update buckets with new node ids we may have learned belong to this entry + fn update_bucket_entries(&mut self, entry: Arc, node_ids: &[TypedKey]) { + entry.with_mut_inner(|e| { + let existing_node_ids = e.node_ids(); + for node_id in node_ids { + if !existing_node_ids.contains(node_id) { + // Add new node id to entry + if let Some(old_node_id) = e.add_node_id(*node_id) { + // Remove any old node id for this crypto kind + let bucket_index = self.unlocked_inner.calculate_bucket_index(&old_node_id); + let bucket = self.get_bucket_mut(bucket_index); + bucket.remove_entry(&old_node_id.value); + self.unlocked_inner.kick_queue.lock().insert(bucket_index); + } + + // Bucket the entry appropriately + let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id); + let bucket = self.get_bucket_mut(bucket_index); + bucket.add_existing_entry(node_id.value, entry.clone()); + + // Kick bucket + self.unlocked_inner.kick_queue.lock().insert(bucket_index); + } + } + }) + } + /// Create a node reference, possibly creating a bucket entry /// the 'update_func' closure is called on the node, and, if created, /// in a locked fashion as to ensure the bucket entry state is always valid - pub fn create_node_ref( + fn create_node_ref( &mut self, outer_self: RoutingTable, - node_id: DHTKey, + node_ids: &[TypedKey], update_func: F, ) -> Option where F: 
FnOnce(&mut RoutingTableInner, &mut BucketEntryInner), { // Ensure someone isn't trying register this node itself - if node_id == self.node_id() { + if self.unlocked_inner.matches_own_node_id(node_ids) { log_rtab!(debug "can't register own node"); return None; } - // Look up existing entry - let idx = self.find_bucket_index(node_id); - let noderef = { - let bucket = &self.buckets[idx]; - let entry = bucket.entry(&node_id); - entry.map(|e| NodeRef::new(outer_self.clone(), node_id, e, None)) - }; - - // If one doesn't exist, insert into bucket, possibly evicting a bucket member - let noderef = match noderef { - None => { - // Make new entry - self.bucket_entry_count += 1; - let cnt = self.bucket_entry_count; - let bucket = &mut self.buckets[idx]; - let nr = bucket.add_entry(node_id); - - // Update the entry - let entry = bucket.entry(&node_id).unwrap(); - entry.with_mut(self, update_func); - - // Kick the bucket - self.unlocked_inner.kick_queue.lock().insert(idx); - log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable)); - - nr + // Look up all bucket entries and make sure we only have zero or one + // If we have more than one, pick the one with the best cryptokind to add node ids to + let mut best_entry: Option> = None; + for node_id in node_ids { + if !VALID_CRYPTO_KINDS.contains(&node_id.kind) { + log_rtab!(error "can't look up node id with invalid crypto kind"); + return None; } - Some(nr) => { - // Update the entry - let bucket = &mut self.buckets[idx]; - let entry = bucket.entry(&node_id).unwrap(); - entry.with_mut(self, update_func); + let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id); + let bucket = self.get_bucket(bucket_index); + if let Some(entry) = bucket.entry(&node_id.value) { + // Best entry is the first one in sorted order that exists from the node id list + // Everything else that matches will be overwritten in the bucket and the + // existing 
noderefs will eventually unref and drop the old unindexed bucketentry + // We do this instead of merging for now. We could 'kill' entries and have node_refs + // rewrite themselves to point to the merged entry upon dereference. The use case for this + // may not be worth the effort. + best_entry = Some(entry); + break; + }; + } - nr - } - }; + // If the entry does exist already, update it + if let Some(best_entry) = best_entry { + // Update the entry with all of the node ids + self.update_bucket_entries(best_entry.clone(), node_ids); - Some(noderef) + // Make a noderef to return + let nr = NodeRef::new(outer_self.clone(), best_entry.clone(), None); + + // Update the entry with the update func + best_entry.with_mut_inner(|e| update_func(self, e)); + + // Return the noderef + return Some(nr); + } + + // If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member + let first_node_id = node_ids[0]; + let bucket_entry = self.unlocked_inner.calculate_bucket_index(&first_node_id); + let bucket = self.get_bucket_mut(bucket_entry); + let new_entry = bucket.add_new_entry(first_node_id.value); + self.all_entries.insert(new_entry.clone()); + self.unlocked_inner.kick_queue.lock().insert(bucket_entry); + + // Update the other bucket entries with the remaining node ids + self.update_bucket_entries(new_entry.clone(), node_ids); + + // Make node ref to return + let nr = NodeRef::new(outer_self.clone(), new_entry.clone(), None); + + // Update the entry with the update func + new_entry.with_mut_inner(|e| update_func(self, e)); + + // Kick the bucket + log_rtab!(debug "Routing table now has {} nodes, {} live", self.bucket_entry_count(), self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable, &VALID_CRYPTO_KINDS)); + + Some(nr) + } + + /// Resolve an existing routing table entry using any crypto kind and return a reference to it + pub fn lookup_any_node_ref( + &self, + outer_self: RoutingTable, + node_id_key: PublicKey, + ) -> Option { 
+ VALID_CRYPTO_KINDS.iter().find_map(|ck| { + self.lookup_node_ref(outer_self.clone(), TypedKey::new(*ck, node_id_key)) + }) } /// Resolve an existing routing table entry and return a reference to it - pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: DHTKey) -> Option { - if node_id == self.unlocked_inner.node_id { + pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: TypedKey) -> Option { + if self.unlocked_inner.matches_own_node_id(&[node_id]) { log_rtab!(error "can't look up own node id in routing table"); return None; } - let idx = self.find_bucket_index(node_id); - let bucket = &self.buckets[idx]; + if !VALID_CRYPTO_KINDS.contains(&node_id.kind) { + log_rtab!(error "can't look up node id with invalid crypto kind"); + return None; + } + + let bucket_index = self.unlocked_inner.calculate_bucket_index(&node_id); + let bucket = self.get_bucket(bucket_index); bucket - .entry(&node_id) - .map(|e| NodeRef::new(outer_self, node_id, e, None)) + .entry(&node_id.value) + .map(|e| NodeRef::new(outer_self, e, None)) } /// Resolve an existing routing table entry and return a filtered reference to it pub fn lookup_and_filter_noderef( &self, outer_self: RoutingTable, - node_id: DHTKey, + node_id: TypedKey, routing_domain_set: RoutingDomainSet, dial_info_filter: DialInfoFilter, ) -> Option { @@ -628,60 +756,64 @@ impl RoutingTableInner { } /// Resolve an existing routing table entry and call a function on its entry without using a noderef - pub fn with_node_entry(&self, node_id: DHTKey, f: F) -> Option + pub fn with_node_entry(&self, node_id: TypedKey, f: F) -> Option where F: FnOnce(Arc) -> R, { - if node_id == self.unlocked_inner.node_id { + if self.unlocked_inner.matches_own_node_id(&[node_id]) { log_rtab!(error "can't look up own node id in routing table"); return None; } - let idx = self.find_bucket_index(node_id); - let bucket = &self.buckets[idx]; - if let Some(e) = bucket.entry(&node_id) { - return Some(f(e)); + if 
!VALID_CRYPTO_KINDS.contains(&node_id.kind) { + log_rtab!(error "can't look up node id with invalid crypto kind"); + return None; } - None + let bucket_entry = self.unlocked_inner.calculate_bucket_index(&node_id); + let bucket = self.get_bucket(bucket_entry); + bucket.entry(&node_id.value).map(f) } /// Shortcut function to add a node to our routing table if it doesn't exist /// and add the dial info we have for it. Returns a noderef filtered to /// the routing domain in which this node was registered for convenience. - pub fn register_node_with_signed_node_info( + pub fn register_node_with_peer_info( &mut self, outer_self: RoutingTable, routing_domain: RoutingDomain, - node_id: DHTKey, - signed_node_info: SignedNodeInfo, + peer_info: PeerInfo, allow_invalid: bool, ) -> Option { - // validate signed node info is not something malicious - if node_id == self.node_id() { + // if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table + if self.unlocked_inner.matches_own_node_id(&peer_info.node_ids) { log_rtab!(debug "can't register own node id in routing table"); return None; } - if let Some(relay_id) = signed_node_info.relay_id() { - if relay_id.key == node_id { - log_rtab!(debug "node can not be its own relay"); - return None; - } + + // node can not be its own relay + let rids = peer_info.signed_node_info.relay_ids(); + if self.unlocked_inner.matches_own_node_id(&rids) { + log_rtab!(debug "node can not be its own relay"); + return None; } + if !allow_invalid { // verify signature - if !signed_node_info.has_valid_signature() { - log_rtab!(debug "signed node info for {} has invalid signature", node_id); + if !peer_info.signed_node_info.has_any_signature() { + log_rtab!(debug "signed node info for {:?} has invalid signature", &peer_info.node_ids); return None; } // verify signed node info is valid in this routing domain - if !self.signed_node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info) - { - 
log_rtab!(debug "signed node info for {} not valid in the {:?} routing domain", node_id, routing_domain); + if !self.signed_node_info_is_valid_in_routing_domain( + routing_domain, + &peer_info.signed_node_info, + ) { + log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids, routing_domain); return None; } } - self.create_node_ref(outer_self, node_id, |_rti, e| { - e.update_signed_node_info(routing_domain, signed_node_info); + self.create_node_ref(outer_self, &peer_info.node_ids, |_rti, e| { + e.update_signed_node_info(routing_domain, peer_info.signed_node_info); }) .map(|mut nr| { nr.set_filter(Some( @@ -696,11 +828,11 @@ impl RoutingTableInner { pub fn register_node_with_existing_connection( &mut self, outer_self: RoutingTable, - node_id: DHTKey, + node_id: TypedKey, descriptor: ConnectionDescriptor, timestamp: Timestamp, ) -> Option { - let out = self.create_node_ref(outer_self, node_id, |_rti, e| { + let out = self.create_node_ref(outer_self, &[node_id], |_rti, e| { // this node is live because it literally just connected to us e.touch_last_seen(timestamp); }); @@ -721,18 +853,16 @@ impl RoutingTableInner { let mut dead_entry_count: usize = 0; let cur_ts = get_aligned_timestamp(); - for bucket in &self.buckets { - for (_, v) in bucket.entries() { - match v.with(self, |_rti, e| e.state(cur_ts)) { - BucketEntryState::Reliable => { - reliable_entry_count += 1; - } - BucketEntryState::Unreliable => { - unreliable_entry_count += 1; - } - BucketEntryState::Dead => { - dead_entry_count += 1; - } + for entry in self.all_entries.iter() { + match entry.with_inner(|e| e.state(cur_ts)) { + BucketEntryState::Reliable => { + reliable_entry_count += 1; + } + BucketEntryState::Unreliable => { + unreliable_entry_count += 1; + } + BucketEntryState::Dead => { + dead_entry_count += 1; } } } @@ -757,9 +887,11 @@ impl RoutingTableInner { } } - pub fn touch_recent_peer(&mut self, node_id: DHTKey, last_connection: ConnectionDescriptor) { + 
pub fn touch_recent_peer(&mut self, node_id: TypedKey, last_connection: ConnectionDescriptor) { self.recent_peers - .insert(node_id, RecentPeersEntry { last_connection }); + .insert(node_id, RecentPeersEntry { last_connection }, |_k, _v| { + // do nothing on lru eviction + }); } ////////////////////////////////////////////////////////////////////// @@ -772,10 +904,10 @@ impl RoutingTableInner { node_count: usize, mut filters: VecDeque, ) -> Vec { - let public_node_filter = Box::new( - |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { + let public_node_filter = + Box::new(|_rti: &RoutingTableInner, v: Option>| { let entry = v.unwrap(); - entry.with(rti, |_rti, e| { + entry.with_inner(|e| { // skip nodes on local network if e.node_info(RoutingDomain::LocalNetwork).is_some() { return false; @@ -786,15 +918,14 @@ impl RoutingTableInner { } true }) - }, - ) as RoutingTableEntryFilter; + }) as RoutingTableEntryFilter; filters.push_front(public_node_filter); self.find_fastest_nodes( node_count, filters, - |_rti: &RoutingTableInner, k: DHTKey, v: Option>| { - NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None) + |_rti: &RoutingTableInner, v: Option>| { + NodeRef::new(outer_self.clone(), v.unwrap().clone(), None) }, ) } @@ -803,13 +934,13 @@ impl RoutingTableInner { &self, routing_domain: RoutingDomain, has_valid_own_node_info: bool, - v: Option>, + entry: Option>, ) -> bool { - match v { + match entry { None => has_valid_own_node_info, - Some(entry) => entry.with(self, |_rti, e| { + Some(entry) => entry.with_inner(|e| { e.signed_node_info(routing_domain.into()) - .map(|sni| sni.has_valid_signature()) + .map(|sni| sni.has_any_signature()) .unwrap_or(false) }), } @@ -819,12 +950,11 @@ impl RoutingTableInner { &self, routing_domain: RoutingDomain, own_peer_info: &PeerInfo, - k: DHTKey, - v: Option>, + entry: Option>, ) -> PeerInfo { - match v { + match entry { None => own_peer_info.clone(), - Some(entry) => entry.with(self, |_rti, e| e.make_peer_info(k, 
routing_domain).unwrap()), + Some(entry) => entry.with_inner(|e| e.make_peer_info(routing_domain).unwrap()), } } @@ -839,36 +969,40 @@ impl RoutingTableInner { where C: for<'a, 'b> FnMut( &'a RoutingTableInner, - &'b (DHTKey, Option>), - &'b (DHTKey, Option>), + &'b Option>, + &'b Option>, ) -> core::cmp::Ordering, - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O, + T: for<'r, 't> FnMut(&'r RoutingTableInner, Option>) -> O, { // collect all the nodes for sorting let mut nodes = - Vec::<(DHTKey, Option>)>::with_capacity(self.bucket_entry_count + 1); + Vec::>>::with_capacity(self.bucket_entry_count() + 1); // add our own node (only one of there with the None entry) let mut filtered = false; for filter in &mut filters { - if !filter(self, self.unlocked_inner.node_id, None) { + if !filter(self, None) { filtered = true; break; } } if !filtered { - nodes.push((self.unlocked_inner.node_id, None)); + nodes.push(None); } - // add all nodes from buckets - self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| { + // add all nodes that match filter + self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, v| { // Apply filter + let mut filtered = false; for filter in &mut filters { - if filter(rti, k, Some(v.clone())) { - nodes.push((k, Some(v.clone()))); + if !filter(rti, Some(v.clone())) { + filtered = true; break; } } + if !filtered { + nodes.push(Some(v.clone())); + } Option::<()>::None }); @@ -879,7 +1013,7 @@ impl RoutingTableInner { let cnt = usize::min(node_count, nodes.len()); let mut out = Vec::::with_capacity(cnt); for node in nodes { - let val = transform(self, node.0, node.1); + let val = transform(self, node); out.push(val); } @@ -893,16 +1027,16 @@ impl RoutingTableInner { transform: T, ) -> Vec where - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O, { let cur_ts = get_aligned_timestamp(); // Add filter to remove dead nodes always let filter_dead = 
Box::new( - move |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { + move |_rti: &RoutingTableInner, v: Option>| { if let Some(entry) = &v { // always filter out dead nodes - if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) { + if entry.with_inner(|e| e.state(cur_ts) == BucketEntryState::Dead) { false } else { true @@ -916,13 +1050,20 @@ impl RoutingTableInner { filters.push_front(filter_dead); // Fastest sort - let sort = |rti: &RoutingTableInner, - (a_key, a_entry): &(DHTKey, Option>), - (b_key, b_entry): &(DHTKey, Option>)| { + let sort = |_rti: &RoutingTableInner, + a_entry: &Option>, + b_entry: &Option>| { // same nodes are always the same - if a_key == b_key { + if let Some(a_entry) = a_entry { + if let Some(b_entry) = b_entry { + if Arc::ptr_eq(&a_entry, &b_entry) { + return core::cmp::Ordering::Equal; + } + } + } else if b_entry.is_none() { return core::cmp::Ordering::Equal; } + // our own node always comes last (should not happen, here for completeness) if a_entry.is_none() { return core::cmp::Ordering::Greater; @@ -933,8 +1074,8 @@ impl RoutingTableInner { // reliable nodes come first let ae = a_entry.as_ref().unwrap(); let be = b_entry.as_ref().unwrap(); - ae.with(rti, |rti, ae| { - be.with(rti, |_rti, be| { + ae.with_inner(|ae| { + be.with_inner(|be| { let ra = ae.check_reliable(cur_ts); let rb = be.check_reliable(cur_ts); if ra != rb { @@ -973,36 +1114,56 @@ impl RoutingTableInner { pub fn find_closest_nodes( &self, - node_id: DHTKey, - filters: VecDeque, + node_count: usize, + node_id: TypedKey, + mut filters: VecDeque, transform: T, ) -> Vec where - T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option>) -> O, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O, { let cur_ts = get_aligned_timestamp(); - let node_count = { - let config = self.config(); - let c = config.get(); - c.network.dht.max_find_node_count as usize - }; - // closest sort - let sort = |rti: &RoutingTableInner, - (a_key, a_entry): &(DHTKey, Option>), - 
(b_key, b_entry): &(DHTKey, Option>)| { + // Get the crypto kind + let crypto_kind = node_id.kind; + let vcrypto = self.unlocked_inner.crypto().get(crypto_kind).unwrap(); + + // Filter to ensure entries support the crypto kind in use + + let filter = Box::new( + move |_rti: &RoutingTableInner, opt_entry: Option>| { + if let Some(entry) = opt_entry { + entry.with_inner(|e| e.crypto_kinds().contains(&crypto_kind)) + } else { + VALID_CRYPTO_KINDS.contains(&crypto_kind) + } + }, + ) as RoutingTableEntryFilter; + filters.push_front(filter); + + // Closest sort + // Distance is done using the node id's distance metric which may vary based on crypto system + let sort = |_rti: &RoutingTableInner, + a_entry: &Option>, + b_entry: &Option>| { // same nodes are always the same - if a_key == b_key { + if let Some(a_entry) = a_entry { + if let Some(b_entry) = b_entry { + if Arc::ptr_eq(&a_entry, &b_entry) { + return core::cmp::Ordering::Equal; + } + } + } else if b_entry.is_none() { return core::cmp::Ordering::Equal; } // reliable nodes come first, pessimistically treating our own node as unreliable let ra = a_entry .as_ref() - .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts))); + .map_or(false, |x| x.with_inner(|x| x.check_reliable(cur_ts))); let rb = b_entry .as_ref() - .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts))); + .map_or(false, |x| x.with_inner(|x| x.check_reliable(cur_ts))); if ra != rb { if ra { return core::cmp::Ordering::Less; @@ -1011,9 +1172,24 @@ impl RoutingTableInner { } } + // get keys + let a_key = if let Some(a_entry) = a_entry { + a_entry.with_inner(|e| e.node_ids().get(crypto_kind).unwrap()) + } else { + self.unlocked_inner.node_id(crypto_kind) + }; + let b_key = if let Some(b_entry) = b_entry { + b_entry.with_inner(|e| e.node_ids().get(crypto_kind).unwrap()) + } else { + self.unlocked_inner.node_id(crypto_kind) + }; + // distance is the next metric, closer nodes first - let da = distance(a_key, &node_id); - let db = 
distance(b_key, &node_id); + // since multiple cryptosystems are in use, the distance for a key is the shortest + // distance to that key over all supported cryptosystems + + let da = vcrypto.distance(&a_key.value, &node_id.value); + let db = vcrypto.distance(&b_key.value, &node_id.value); da.cmp(&db) }; diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 6b16f84a..f467c2ba 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -3,23 +3,123 @@ use super::*; use futures_util::stream::{FuturesUnordered, StreamExt}; use stop_token::future::FutureExt as StopFutureExt; -pub const BOOTSTRAP_TXT_VERSION: u8 = 0; +pub const BOOTSTRAP_TXT_VERSION_0: u8 = 0; #[derive(Clone, Debug)] pub struct BootstrapRecord { - min_version: u8, - max_version: u8, + node_ids: TypedKeySet, + envelope_support: Vec, dial_info_details: Vec, } -pub type BootstrapRecordMap = BTreeMap; +impl BootstrapRecord { + pub fn merge(&mut self, other: BootstrapRecord) { + self.node_ids.add_all(&other.node_ids); + for x in other.envelope_support { + if !self.envelope_support.contains(&x) { + self.envelope_support.push(x); + self.envelope_support.sort(); + } + } + for did in other.dial_info_details { + if !self.dial_info_details.contains(&did) { + self.dial_info_details.push(did); + } + } + } +} impl RoutingTable { + /// Process bootstrap version 0 + async fn process_bootstrap_records_v0( + &self, + records: Vec, + ) -> EyreResult> { + // Bootstrap TXT Record Format Version 0: + // txt_version|envelope_support|node_ids|hostname|dialinfoshort* + // + // Split bootstrap node record by '|' and then lists by ','. 
Example: + // 0|0|VLD0:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ|bootstrap-1.dev.veilid.net|T5150,U5150,W5150/ws + + if records.len() != 5 { + bail!("invalid number of fields in bootstrap v0 txt record"); + } + + // Envelope support + let mut envelope_support = Vec::new(); + for ess in records[1].split(",") { + let ess = ess.trim(); + let es = match ess.parse::() { + Ok(v) => v, + Err(e) => { + bail!( + "invalid envelope version specified in bootstrap node txt record: {}", + e + ); + } + }; + envelope_support.push(es); + } + envelope_support.dedup(); + envelope_support.sort(); + + // Node Id + let mut node_ids = TypedKeySet::new(); + for node_id_str in records[2].split(",") { + let node_id_str = node_id_str.trim(); + let node_id = match TypedKey::from_str(&node_id_str) { + Ok(v) => v, + Err(e) => { + bail!( + "Invalid node id in bootstrap node record {}: {}", + node_id_str, + e + ); + } + }; + node_ids.add(node_id); + } + + // If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node + if self.unlocked_inner.matches_own_node_id(&node_ids) { + return Ok(None); + } + + // Hostname + let hostname_str = records[3].trim(); + + // Resolve each record and store in node dial infos list + let mut dial_info_details = Vec::new(); + for rec in records[4].split(",") { + let rec = rec.trim(); + let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) { + Ok(dis) => dis, + Err(e) => { + warn!("Couldn't resolve bootstrap node dial info {}: {}", rec, e); + continue; + } + }; + + for di in dial_infos { + dial_info_details.push(DialInfoDetail { + dial_info: di, + class: DialInfoClass::Direct, + }); + } + } + + Ok(Some(BootstrapRecord { + node_ids, + envelope_support, + dial_info_details, + })) + } + // Bootstrap lookup process #[instrument(level = "trace", skip(self), ret, err)] pub(crate) async fn resolve_bootstrap( &self, bootstrap: Vec, - ) -> EyreResult { + ) -> EyreResult> { // Resolve from bootstrap root to bootstrap hostnames 
let mut bsnames = Vec::::new(); for bh in bootstrap { @@ -58,22 +158,14 @@ impl RoutingTable { Ok(v) => v, }; // for each record resolve into key/bootstraprecord pairs - let mut bootstrap_records: Vec<(DHTKey, BootstrapRecord)> = Vec::new(); + let mut bootstrap_records: Vec = Vec::new(); for bsnirecord in bsnirecords { - // Bootstrap TXT Record Format Version 0: - // txt_version,min_version,max_version,nodeid,hostname,dialinfoshort* - // - // Split bootstrap node record by commas. Example: - // 0,0,0,7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ,bootstrap-1.dev.veilid.net,T5150,U5150,W5150/ws + // All formats split on '|' character let records: Vec = bsnirecord .trim() - .split(',') + .split('|') .map(|x| x.trim().to_owned()) .collect(); - if records.len() < 6 { - warn!("invalid number of fields in bootstrap txt record"); - continue; - } // Bootstrap TXT record version let txt_version: u8 = match records[0].parse::() { @@ -86,81 +178,30 @@ impl RoutingTable { continue; } }; - if txt_version != BOOTSTRAP_TXT_VERSION { - warn!("unsupported bootstrap txt record version"); - continue; - } - - // Min/Max wire protocol version - let min_version: u8 = match records[1].parse::() { - Ok(v) => v, - Err(e) => { - warn!( - "invalid min_version specified in bootstrap node txt record: {}", - e - ); - continue; - } - }; - let max_version: u8 = match records[2].parse::() { - Ok(v) => v, - Err(e) => { - warn!( - "invalid max_version specified in bootstrap node txt record: {}", - e - ); - continue; - } - }; - - // Node Id - let node_id_str = &records[3]; - let node_id_key = match DHTKey::try_decode(node_id_str) { - Ok(v) => v, - Err(e) => { - warn!( - "Invalid node id in bootstrap node record {}: {}", - node_id_str, e - ); - continue; - } - }; - - // Hostname - let hostname_str = &records[4]; - - // If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node - if self.node_id() == node_id_key { - continue; - } - - // Resolve each record and store in node 
dial infos list - let mut bootstrap_record = BootstrapRecord { - min_version, - max_version, - dial_info_details: Vec::new(), - }; - for rec in &records[5..] { - let rec = rec.trim(); - let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) { - Ok(dis) => dis, - Err(e) => { - warn!( - "Couldn't resolve bootstrap node dial info {}: {}", - rec, e - ); - continue; + let bootstrap_record = match txt_version { + BOOTSTRAP_TXT_VERSION_0 => { + match self.process_bootstrap_records_v0(records).await { + Err(e) => { + warn!( + "couldn't process v0 bootstrap records from {}: {}", + bsname, e + ); + continue; + } + Ok(Some(v)) => v, + Ok(None) => { + // skipping + continue; + } } - }; - - for di in dial_infos { - bootstrap_record.dial_info_details.push(DialInfoDetail { - dial_info: di, - class: DialInfoClass::Direct, - }); } - } - bootstrap_records.push((node_id_key, bootstrap_record)); + _ => { + warn!("unsupported bootstrap txt record version"); + continue; + } + }; + + bootstrap_records.push(bootstrap_record); } Some(bootstrap_records) } @@ -168,21 +209,35 @@ impl RoutingTable { ); } - let mut bsmap = BootstrapRecordMap::new(); + let mut merged_bootstrap_records: Vec = Vec::new(); while let Some(bootstrap_records) = unord.next().await { - if let Some(bootstrap_records) = bootstrap_records { - for (bskey, mut bsrec) in bootstrap_records { - let rec = bsmap.entry(bskey).or_insert_with(|| BootstrapRecord { - min_version: bsrec.min_version, - max_version: bsrec.max_version, - dial_info_details: Vec::new(), - }); - rec.dial_info_details.append(&mut bsrec.dial_info_details); + let Some(bootstrap_records) = bootstrap_records else { + continue; + }; + for mut bsrec in bootstrap_records { + let mut mbi = 0; + while mbi < merged_bootstrap_records.len() { + let mbr = &mut merged_bootstrap_records[mbi]; + if mbr.node_ids.contains_any(&bsrec.node_ids) { + // Merge record, pop this one out + let mbr = merged_bootstrap_records.remove(mbi); + bsrec.merge(mbr); + } else { + 
// No overlap, go to next record + mbi += 1; + } } + // Append merged record + merged_bootstrap_records.push(bsrec); } } - Ok(bsmap) + // ensure dial infos are sorted + for mbr in &mut merged_bootstrap_records { + mbr.dial_info_details.sort(); + } + + Ok(merged_bootstrap_records) } // 'direct' bootstrap task routine for systems incapable of resolving TXT records, such as browser WASM @@ -203,21 +258,20 @@ impl RoutingTable { // Got peer info, let's add it to the routing table for pi in peer_info { - let k = pi.node_id.key; // Register the node - if let Some(nr) = self.register_node_with_signed_node_info( - RoutingDomain::PublicInternet, - k, - pi.signed_node_info, - false, - ) { + if let Some(nr) = + self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, false) + { // Add this our futures to process in parallel - let routing_table = self.clone(); - unord.push( - // lets ask bootstrap to find ourselves now - async move { routing_table.reverse_find_node(nr, true).await } - .instrument(Span::current()), - ); + for crypto_kind in VALID_CRYPTO_KINDS { + let routing_table = self.clone(); + let nr = nr.clone(); + unord.push( + // lets ask bootstrap to find ourselves now + async move { routing_table.reverse_find_node(crypto_kind, nr, true).await } + .instrument(Span::current()), + ); + } } } } @@ -230,112 +284,100 @@ impl RoutingTable { #[instrument(level = "trace", skip(self), err)] pub(crate) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> { - let (bootstrap, bootstrap_nodes) = self.with_config(|c| { - ( - c.network.bootstrap.clone(), - c.network.bootstrap_nodes.clone(), - ) - }); + let bootstrap = self + .unlocked_inner + .with_config(|c| c.network.routing_table.bootstrap.clone()); + + // Don't bother if bootstraps aren't configured + if bootstrap.is_empty() { + return Ok(()); + } log_rtab!(debug "--- bootstrap_task"); + // Get counts by crypto kind + let entry_count = self.inner.read().cached_entry_counts(); + // See if we 
are specifying a direct dialinfo for bootstrap, if so use the direct mechanism - if !bootstrap.is_empty() && bootstrap_nodes.is_empty() { - let mut bootstrap_dialinfos = Vec::::new(); - for b in &bootstrap { - if let Ok(bootstrap_di_vec) = DialInfo::try_vec_from_url(&b) { - for bootstrap_di in bootstrap_di_vec { - bootstrap_dialinfos.push(bootstrap_di); - } + let mut bootstrap_dialinfos = Vec::::new(); + for b in &bootstrap { + if let Ok(bootstrap_di_vec) = DialInfo::try_vec_from_url(&b) { + for bootstrap_di in bootstrap_di_vec { + bootstrap_dialinfos.push(bootstrap_di); } } - if bootstrap_dialinfos.len() > 0 { - return self - .direct_bootstrap_task_routine(stop_token, bootstrap_dialinfos) - .await; - } + } + if bootstrap_dialinfos.len() > 0 { + return self + .direct_bootstrap_task_routine(stop_token, bootstrap_dialinfos) + .await; } - // If we aren't specifying a bootstrap node list explicitly, then pull from the bootstrap server(s) - let bsmap: BootstrapRecordMap = if !bootstrap_nodes.is_empty() { - let mut bsmap = BootstrapRecordMap::new(); - let mut bootstrap_node_dial_infos = Vec::new(); - for b in bootstrap_nodes { - let (id_str, di_str) = b - .split_once('@') - .ok_or_else(|| eyre!("Invalid node dial info in bootstrap entry"))?; - let node_id = - NodeId::from_str(id_str).wrap_err("Invalid node id in bootstrap entry")?; - let dial_info = - DialInfo::from_str(di_str).wrap_err("Invalid dial info in bootstrap entry")?; - bootstrap_node_dial_infos.push((node_id, dial_info)); - } - for (node_id, dial_info) in bootstrap_node_dial_infos { - bsmap - .entry(node_id.key) - .or_insert_with(|| BootstrapRecord { - min_version: MIN_CRYPTO_VERSION, - max_version: MAX_CRYPTO_VERSION, - dial_info_details: Vec::new(), - }) - .dial_info_details - .push(DialInfoDetail { - dial_info, - class: DialInfoClass::Direct, // Bootstraps are always directly reachable - }); - } - bsmap - } else { - // Resolve bootstrap servers and recurse their TXT entries - 
self.resolve_bootstrap(bootstrap).await? - }; - - // Map all bootstrap entries to a single key with multiple dialinfo + // If not direct, resolve bootstrap servers and recurse their TXT entries + let bsrecs = self.resolve_bootstrap(bootstrap).await?; // Run all bootstrap operations concurrently let mut unord = FuturesUnordered::new(); - for (k, mut v) in bsmap { - // Sort dial info so we get the preferred order correct - v.dial_info_details.sort(); + for bsrec in bsrecs { + log_rtab!( + "--- bootstrapping {} with {:?}", + &bsrec.node_ids, + &bsrec.dial_info_details + ); - log_rtab!("--- bootstrapping {} with {:?}", k.encode(), &v); + // Get crypto support from list of node ids + let crypto_support = bsrec.node_ids.kinds(); - // Make invalid signed node info (no signature) - if let Some(nr) = self.register_node_with_signed_node_info( - RoutingDomain::PublicInternet, - k, - SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo { - network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable - outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled - address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable - min_version: v.min_version, // Minimum crypto version specified in txt record - max_version: v.max_version, // Maximum crypto version specified in txt record - dial_info_detail_list: v.dial_info_details, // Dial info is as specified in the bootstrap list - })), - true, - ) { + // Make unsigned SignedNodeInfo + let sni = SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo { + network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable + outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled + address_types: 
AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable + envelope_support: bsrec.envelope_support, // Envelope support is as specified in the bootstrap list + crypto_support, // Crypto support is derived from list of node ids + dial_info_detail_list: bsrec.dial_info_details, // Dial info is as specified in the bootstrap list + })); + + let pi = PeerInfo::new(bsrec.node_ids, sni); + + if let Some(nr) = + self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) + { // Add this our futures to process in parallel - let routing_table = self.clone(); - unord.push( - async move { - // Need VALID signed peer info, so ask bootstrap to find_node of itself - // which will ensure it has the bootstrap's signed peer info as part of the response - let _ = routing_table.find_target(nr.clone()).await; - - // Ensure we got the signed peer info - if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) { - log_rtab!(warn - "bootstrap at {:?} did not return valid signed node info", - nr - ); - // If this node info is invalid, it will time out after being unpingable - } else { - // otherwise this bootstrap is valid, lets ask it to find ourselves now - routing_table.reverse_find_node(nr, true).await - } + for crypto_kind in VALID_CRYPTO_KINDS { + // Do we need to bootstrap this crypto kind? 
+ let eckey = (RoutingDomain::PublicInternet, crypto_kind); + let cnt = entry_count.get(&eckey).copied().unwrap_or_default(); + if cnt != 0 { + continue; } - .instrument(Span::current()), - ); + + // Bootstrap this crypto kind + let nr = nr.clone(); + let routing_table = self.clone(); + unord.push( + async move { + // Need VALID signed peer info, so ask bootstrap to find_node of itself + // which will ensure it has the bootstrap's signed peer info as part of the response + let _ = routing_table.find_target(crypto_kind, nr.clone()).await; + + // Ensure we got the signed peer info + if !nr + .signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) + { + log_rtab!(warn + "bootstrap at {:?} did not return valid signed node info", + nr + ); + // If this node info is invalid, it will time out after being unpingable + } else { + // otherwise this bootstrap is valid, lets ask it to find ourselves now + routing_table.reverse_find_node(crypto_kind, nr, true).await + } + } + .instrument(Span::current()), + ); + } } } diff --git a/veilid-core/src/routing_table/tasks/kick_buckets.rs b/veilid-core/src/routing_table/tasks/kick_buckets.rs index 318f1915..186e9ebb 100644 --- a/veilid-core/src/routing_table/tasks/kick_buckets.rs +++ b/veilid-core/src/routing_table/tasks/kick_buckets.rs @@ -10,12 +10,13 @@ impl RoutingTable { _last_ts: Timestamp, cur_ts: Timestamp, ) -> EyreResult<()> { - let kick_queue: Vec = core::mem::take(&mut *self.unlocked_inner.kick_queue.lock()) - .into_iter() - .collect(); + let kick_queue: Vec = + core::mem::take(&mut *self.unlocked_inner.kick_queue.lock()) + .into_iter() + .collect(); let mut inner = self.inner.write(); - for idx in kick_queue { - inner.kick_bucket(idx) + for bucket_index in kick_queue { + inner.kick_bucket(bucket_index) } Ok(()) } diff --git a/veilid-core/src/routing_table/tasks/mod.rs b/veilid-core/src/routing_table/tasks/mod.rs index 1cc3e317..b7841b78 100644 --- a/veilid-core/src/routing_table/tasks/mod.rs +++ 
b/veilid-core/src/routing_table/tasks/mod.rs @@ -134,21 +134,30 @@ impl RoutingTable { self.unlocked_inner.kick_buckets_task.tick().await?; } - // See how many live PublicInternet entries we have - let live_public_internet_entry_count = self.get_entry_count( - RoutingDomain::PublicInternet.into(), - BucketEntryState::Unreliable, - ); + // Refresh entry counts + let entry_counts = { + let mut inner = self.inner.write(); + inner.refresh_cached_entry_counts() + }; + let min_peer_count = self.with_config(|c| c.network.dht.min_peer_count as usize); - // If none, then add the bootstrap nodes to it - if live_public_internet_entry_count == 0 { + // Figure out which tables need bootstrap or peer minimum refresh + let mut needs_bootstrap = false; + let mut needs_peer_minimum_refresh = false; + for ck in VALID_CRYPTO_KINDS { + let eckey = (RoutingDomain::PublicInternet, ck); + let cnt = entry_counts.get(&eckey).copied().unwrap_or_default(); + if cnt == 0 { + needs_bootstrap = true; + } else if cnt < min_peer_count { + needs_peer_minimum_refresh = true; + } + } + if needs_bootstrap { self.unlocked_inner.bootstrap_task.tick().await?; } - // If we still don't have enough peers, find nodes until we do - else if !self.unlocked_inner.bootstrap_task.is_running() - && live_public_internet_entry_count < min_peer_count - { + if needs_peer_minimum_refresh { self.unlocked_inner.peer_minimum_refresh_task.tick().await?; } diff --git a/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs b/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs index 157e6030..97cc9c01 100644 --- a/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs +++ b/veilid-core/src/routing_table/tasks/peer_minimum_refresh.rs @@ -16,26 +16,60 @@ impl RoutingTable { self, stop_token: StopToken, ) -> EyreResult<()> { + // Get counts by crypto kind + let entry_count = self.inner.read().cached_entry_counts(); + let min_peer_count = self.with_config(|c| c.network.dht.min_peer_count as usize); // For the 
PublicInternet routing domain, get list of all peers we know about // even the unreliable ones, and ask them to find nodes close to our node too - let routing_table = self.clone(); - let noderefs = routing_table.find_fastest_nodes( - min_peer_count, - VecDeque::new(), - |_rti, k: DHTKey, v: Option>| { - NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None) - }, - ); let mut ord = FuturesOrdered::new(); - for nr in noderefs { + + for crypto_kind in VALID_CRYPTO_KINDS { + // Do we need to peer minimum refresh this crypto kind? + let eckey = (RoutingDomain::PublicInternet, crypto_kind); + let cnt = entry_count.get(&eckey).copied().unwrap_or_default(); + if cnt == 0 || cnt > min_peer_count { + // If we have enough nodes, skip it + // If we have zero nodes, bootstrap will get it + continue; + } + let routing_table = self.clone(); - ord.push_back( - async move { routing_table.reverse_find_node(nr, false).await } - .instrument(Span::current()), + + let mut filters = VecDeque::new(); + let filter = Box::new( + move |_rti: &RoutingTableInner, opt_entry: Option>| { + // Keep only the entries that contain the crypto kind we're looking for + if let Some(entry) = opt_entry { + entry.with_inner(|e| e.crypto_kinds().contains(&crypto_kind)) + } else { + VALID_CRYPTO_KINDS.contains(&crypto_kind) + } + }, + ) as RoutingTableEntryFilter; + filters.push_front(filter); + + let noderefs = routing_table.find_fastest_nodes( + min_peer_count, + filters, + |_rti, entry: Option>| { + NodeRef::new(routing_table.clone(), entry.unwrap().clone(), None) + }, ); + + for nr in noderefs { + let routing_table = self.clone(); + ord.push_back( + async move { + routing_table + .reverse_find_node(crypto_kind, nr, false) + .await + } + .instrument(Span::current()), + ); + } } // do peer minimum search in order from fastest to slowest diff --git a/veilid-core/src/routing_table/tasks/ping_validator.rs b/veilid-core/src/routing_table/tasks/ping_validator.rs index f94f603c..b539d205 100644 --- 
a/veilid-core/src/routing_table/tasks/ping_validator.rs +++ b/veilid-core/src/routing_table/tasks/ping_validator.rs @@ -25,7 +25,6 @@ impl RoutingTable { // Get the PublicInternet relay if we are using one let opt_relay_nr = self.relay_node(RoutingDomain::PublicInternet); - let opt_relay_id = opt_relay_nr.map(|nr| nr.node_id()); // Get our publicinternet dial info let dids = self.all_filtered_dial_info_details( @@ -35,38 +34,42 @@ impl RoutingTable { // For all nodes needing pings, figure out how many and over what protocols for nr in node_refs { - // If this is a relay, let's check for NAT keepalives + // If this is our relay, let's check for NAT keepalives let mut did_pings = false; - if Some(nr.node_id()) == opt_relay_id { - // Relay nodes get pinged over all protocols we have inbound dialinfo for - // This is so we can preserve the inbound NAT mappings at our router - for did in &dids { - // Do we need to do this ping? - // Check if we have already pinged over this low-level-protocol/address-type/port combo - // We want to ensure we do the bare minimum required here - let pt = did.dial_info.protocol_type(); - let at = did.dial_info.address_type(); - let needs_ping = if let Some((llpt, port)) = - mapped_port_info.protocol_to_port.get(&(pt, at)) - { - mapped_port_info - .low_level_protocol_ports - .remove(&(*llpt, at, *port)) - } else { - false - }; - if needs_ping { - let rpc = rpc.clone(); - let dif = did.dial_info.make_filter(); - let nr_filtered = - nr.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dif)); - log_net!("--> Keepalive ping to {:?}", nr_filtered); - unord.push( - async move { rpc.rpc_call_status(Destination::direct(nr_filtered)).await } + if let Some(relay_nr) = &opt_relay_nr { + if nr.same_entry(relay_nr) { + // Relay nodes get pinged over all protocols we have inbound dialinfo for + // This is so we can preserve the inbound NAT mappings at our router + for did in &dids { + // Do we need to do this ping? 
+ // Check if we have already pinged over this low-level-protocol/address-type/port combo + // We want to ensure we do the bare minimum required here + let pt = did.dial_info.protocol_type(); + let at = did.dial_info.address_type(); + let needs_ping = if let Some((llpt, port)) = + mapped_port_info.protocol_to_port.get(&(pt, at)) + { + mapped_port_info + .low_level_protocol_ports + .remove(&(*llpt, at, *port)) + } else { + false + }; + if needs_ping { + let rpc = rpc.clone(); + let dif = did.dial_info.make_filter(); + let nr_filtered = + nr.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dif)); + log_net!("--> Keepalive ping to {:?}", nr_filtered); + unord.push( + async move { + rpc.rpc_call_status(Destination::direct(nr_filtered)).await + } .instrument(Span::current()) .boxed(), - ); - did_pings = true; + ); + did_pings = true; + } } } } diff --git a/veilid-core/src/routing_table/tasks/private_route_management.rs b/veilid-core/src/routing_table/tasks/private_route_management.rs index b54b73ae..3f44a529 100644 --- a/veilid-core/src/routing_table/tasks/private_route_management.rs +++ b/veilid-core/src/routing_table/tasks/private_route_management.rs @@ -8,7 +8,7 @@ const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2; impl RoutingTable { /// Fastest routes sort - fn route_sort_latency_fn(a: &(DHTKey, u64), b: &(DHTKey, u64)) -> cmp::Ordering { + fn route_sort_latency_fn(a: &(RouteId, u64), b: &(RouteId, u64)) -> cmp::Ordering { let mut al = a.1; let mut bl = b.1; // Treat zero latency as uncalculated @@ -35,14 +35,14 @@ impl RoutingTable { /// /// If a route doesn't 'need_testing', then we neither test nor drop it #[instrument(level = "trace", skip(self))] - fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec { + fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec { let default_route_hop_count = self.with_config(|c| c.network.rpc.default_route_hop_count as usize); let rss = self.route_spec_store(); - let mut must_test_routes = 
Vec::::new(); - let mut unpublished_routes = Vec::<(DHTKey, u64)>::new(); - let mut expired_routes = Vec::::new(); + let mut must_test_routes = Vec::::new(); + let mut unpublished_routes = Vec::<(RouteId, u64)>::new(); + let mut expired_routes = Vec::::new(); rss.list_allocated_routes(|k, v| { let stats = v.get_stats(); // Ignore nodes that don't need testing @@ -81,7 +81,7 @@ impl RoutingTable { } // Process dead routes - for r in &expired_routes { + for r in expired_routes { log_rtab!(debug "Expired route: {}", r); rss.release_route(r); } @@ -95,7 +95,7 @@ impl RoutingTable { async fn test_route_set( &self, stop_token: StopToken, - routes_needing_testing: Vec, + routes_needing_testing: Vec, ) -> EyreResult<()> { if routes_needing_testing.is_empty() { return Ok(()); @@ -107,43 +107,45 @@ impl RoutingTable { #[derive(Default, Debug)] struct TestRouteContext { failed: bool, - dead_routes: Vec, + dead_routes: Vec, } - let mut unord = FuturesUnordered::new(); let ctx = Arc::new(Mutex::new(TestRouteContext::default())); - for r in routes_needing_testing { - let rss = rss.clone(); - let ctx = ctx.clone(); - unord.push( - async move { - let success = match rss.test_route(&r).await { - Ok(v) => v, - Err(e) => { - log_rtab!(error "Test route failed: {}", e); - ctx.lock().failed = true; + { + let mut unord = FuturesUnordered::new(); + for r in routes_needing_testing { + let rss = rss.clone(); + let ctx = ctx.clone(); + unord.push( + async move { + let success = match rss.test_route(r).await { + Ok(v) => v, + Err(e) => { + log_rtab!(error "Test route failed: {}", e); + ctx.lock().failed = true; + return; + } + }; + if success { + // Route is okay, leave it alone return; } - }; - if success { - // Route is okay, leave it alone - return; + // Route test failed + ctx.lock().dead_routes.push(r); } - // Route test failed - ctx.lock().dead_routes.push(r); - } - .instrument(Span::current()) - .boxed(), - ); + .instrument(Span::current()) + .boxed(), + ); + } + + // Wait for 
test_route futures to complete in parallel + while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {} } - // Wait for test_route futures to complete in parallel - while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {} - // Process failed routes - let ctx = &mut *ctx.lock(); - for r in &ctx.dead_routes { - log_rtab!(debug "Dead route failed to test: {}", &r); + let ctx = Arc::try_unwrap(ctx).unwrap().into_inner(); + for r in ctx.dead_routes { + log_rtab!(debug "Dead route failed to test: {}", r); rss.release_route(r); } @@ -176,13 +178,16 @@ impl RoutingTable { .await?; } - // Ensure we have a minimum of N allocated local, unpublished routes with the default number of hops + // Ensure we have a minimum of N allocated local, unpublished routes with the default number of hops and all our supported crypto kinds let default_route_hop_count = self.with_config(|c| c.network.rpc.default_route_hop_count as usize); let mut local_unpublished_route_count = 0usize; let rss = self.route_spec_store(); rss.list_allocated_routes(|_k, v| { - if !v.is_published() && v.hop_count() == default_route_hop_count { + if !v.is_published() + && v.hop_count() == default_route_hop_count + && v.get_route_set_keys().kinds() == VALID_CRYPTO_KINDS + { local_unpublished_route_count += 1; } Option::<()>::None @@ -196,6 +201,7 @@ impl RoutingTable { // Parameters here must be the default safety route spec // These will be used by test_remote_route as well if let Some(k) = rss.allocate_route( + &VALID_CRYPTO_KINDS, Stability::default(), Sequencing::default(), default_route_hop_count, diff --git a/veilid-core/src/routing_table/tasks/relay_management.rs b/veilid-core/src/routing_table/tasks/relay_management.rs index 4e685c02..ca80b81f 100644 --- a/veilid-core/src/routing_table/tasks/relay_management.rs +++ b/veilid-core/src/routing_table/tasks/relay_management.rs @@ -51,10 +51,9 @@ impl RoutingTable { // The outbound relay is the host of the PWA if let 
Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await { // Register new outbound relay - if let Some(nr) = self.register_node_with_signed_node_info( + if let Some(nr) = self.register_node_with_peer_info( RoutingDomain::PublicInternet, - outbound_relay_peerinfo.node_id.key, - outbound_relay_peerinfo.signed_node_info, + outbound_relay_peerinfo, false, ) { info!("Outbound relay node selected: {}", nr); diff --git a/veilid-core/src/routing_table/tasks/rolling_transfers.rs b/veilid-core/src/routing_table/tasks/rolling_transfers.rs index 436381ec..6ea87bdb 100644 --- a/veilid-core/src/routing_table/tasks/rolling_transfers.rs +++ b/veilid-core/src/routing_table/tasks/rolling_transfers.rs @@ -21,13 +21,9 @@ impl RoutingTable { ); // Roll all bucket entry transfers - let entries: Vec> = inner - .buckets - .iter() - .flat_map(|b| b.entries().map(|(_k, v)| v.clone())) - .collect(); - for v in entries { - v.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts)); + let all_entries: Vec> = inner.all_entries.iter().collect(); + for entry in all_entries { + entry.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts)); } } diff --git a/veilid-core/src/rpc_processor/coders/dht_key.rs b/veilid-core/src/rpc_processor/coders/key256.rs similarity index 60% rename from veilid-core/src/rpc_processor/coders/dht_key.rs rename to veilid-core/src/rpc_processor/coders/key256.rs index bc1e3f22..41a02cc5 100644 --- a/veilid-core/src/rpc_processor/coders/dht_key.rs +++ b/veilid-core/src/rpc_processor/coders/key256.rs @@ -1,7 +1,7 @@ use super::*; use core::convert::TryInto; -pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey { +pub fn decode_key256(public_key: &veilid_capnp::key256::Reader) -> PublicKey { let u0 = public_key.get_u0().to_be_bytes(); let u1 = public_key.get_u1().to_be_bytes(); let u2 = public_key.get_u2().to_be_bytes(); @@ -13,32 +13,28 @@ pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey { 
x[16..24].copy_from_slice(&u2); x[24..32].copy_from_slice(&u3); - DHTKey::new(x) + PublicKey::new(x) } -pub fn encode_dht_key( - key: &DHTKey, - builder: &mut veilid_capnp::key256::Builder, -) -> Result<(), RPCError> { +pub fn encode_key256(key: &PublicKey, builder: &mut veilid_capnp::key256::Builder) { builder.set_u0(u64::from_be_bytes( key.bytes[0..8] .try_into() - .map_err(RPCError::map_protocol("slice with incorrect length"))?, + .expect("slice with incorrect length"), )); builder.set_u1(u64::from_be_bytes( key.bytes[8..16] .try_into() - .map_err(RPCError::map_protocol("slice with incorrect length"))?, + .expect("slice with incorrect length"), )); builder.set_u2(u64::from_be_bytes( key.bytes[16..24] .try_into() - .map_err(RPCError::map_protocol("slice with incorrect length"))?, + .expect("slice with incorrect length"), )); builder.set_u3(u64::from_be_bytes( key.bytes[24..32] .try_into() - .map_err(RPCError::map_protocol("slice with incorrect length"))?, + .expect("slice with incorrect length"), )); - Ok(()) } diff --git a/veilid-core/src/rpc_processor/coders/mod.rs b/veilid-core/src/rpc_processor/coders/mod.rs index dafc01f2..8c8ce160 100644 --- a/veilid-core/src/rpc_processor/coders/mod.rs +++ b/veilid-core/src/rpc_processor/coders/mod.rs @@ -1,10 +1,9 @@ mod address; mod address_type_set; -mod dht_key; -mod dht_signature; mod dial_info; mod dial_info_class; mod dial_info_detail; +mod key256; mod network_class; mod node_info; mod node_status; @@ -16,21 +15,22 @@ mod protocol_type_set; mod sender_info; mod sequencing; mod signal_info; +mod signature512; mod signed_direct_node_info; mod signed_node_info; mod signed_relayed_node_info; mod socket_address; mod tunnel; +mod typed_key; +mod typed_signature; mod value_data; -mod value_key; pub use address::*; pub use address_type_set::*; -pub use dht_key::*; -pub use dht_signature::*; pub use dial_info::*; pub use dial_info_class::*; pub use dial_info_detail::*; +pub use key256::*; pub use network_class::*; pub use 
node_info::*; pub use node_status::*; @@ -42,12 +42,14 @@ pub use protocol_type_set::*; pub use sender_info::*; pub use sequencing::*; pub use signal_info::*; +pub use signature512::*; pub use signed_direct_node_info::*; pub use signed_node_info::*; pub use signed_relayed_node_info::*; pub use socket_address::*; pub use tunnel::*; +pub use typed_key::*; +pub use typed_signature::*; pub use value_data::*; -pub use value_key::*; use super::*; diff --git a/veilid-core/src/rpc_processor/coders/node_info.rs b/veilid-core/src/rpc_processor/coders/node_info.rs index 14ab95b3..6d2d87c6 100644 --- a/veilid-core/src/rpc_processor/coders/node_info.rs +++ b/veilid-core/src/rpc_processor/coders/node_info.rs @@ -12,8 +12,24 @@ pub fn encode_node_info( let mut ats_builder = builder.reborrow().init_address_types(); encode_address_type_set(&node_info.address_types, &mut ats_builder)?; - builder.set_min_version(node_info.min_version); - builder.set_max_version(node_info.max_version); + let mut es_builder = builder + .reborrow() + .init_envelope_support(node_info.envelope_support.len() as u32); + if let Some(s) = es_builder.as_slice() { + s.clone_from_slice(&node_info.envelope_support); + } + + let mut cs_builder = builder + .reborrow() + .init_crypto_support(node_info.crypto_support.len() as u32); + if let Some(s) = cs_builder.as_slice() { + let csvec: Vec = node_info + .crypto_support + .iter() + .map(|x| u32::from_be_bytes(x.0)) + .collect(); + s.clone_from_slice(&csvec); + } let mut didl_builder = builder.reborrow().init_dial_info_detail_list( node_info @@ -55,8 +71,51 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result MAX_ENVELOPE_VERSIONS { + return Err(RPCError::protocol("too many envelope versions")); + } + if envelope_support.len() == 0 { + return Err(RPCError::protocol("no envelope versions")); + } + + let crypto_support: Vec = reader + .reborrow() + .get_crypto_support() + .map_err(RPCError::protocol)? 
+ .as_slice() + .map(|s| s.iter().map(|x| FourCC::from(x.to_be_bytes())).collect()) + .unwrap_or_default(); + + // Ensure crypto kinds are not duplicated + // Unsorted is okay, some nodes may have a different crypto order preference + // But nothing should show up more than once + let mut ckinds = crypto_support.clone(); + ckinds.dedup(); + if ckinds.len() != crypto_support.len() { + return Err(RPCError::protocol("duplicate crypto kinds")); + } + if crypto_support.len() > MAX_CRYPTO_KINDS { + return Err(RPCError::protocol("too many crypto kinds")); + } + if crypto_support.len() == 0 { + return Err(RPCError::protocol("no crypto kinds")); + } let didl_reader = reader .reborrow() @@ -76,8 +135,8 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result Nonce { let u1 = reader.get_u1().to_be_bytes(); let u2 = reader.get_u2().to_be_bytes(); - [ + Nonce::new([ u0[0], u0[1], u0[2], u0[3], u0[4], u0[5], u0[6], u0[7], // u0 u1[0], u1[1], u1[2], u1[3], u1[4], u1[5], u1[6], u1[7], // u1 u2[0], u2[1], u2[2], u2[3], u2[4], u2[5], u2[6], u2[7], // u2 - ] + ]) } diff --git a/veilid-core/src/rpc_processor/coders/operations/answer.rs b/veilid-core/src/rpc_processor/coders/operations/answer.rs index 7dd429a2..d940c736 100644 --- a/veilid-core/src/rpc_processor/coders/operations/answer.rs +++ b/veilid-core/src/rpc_processor/coders/operations/answer.rs @@ -15,9 +15,12 @@ impl RPCAnswer { pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode(reader: &veilid_capnp::answer::Reader) -> Result { + pub fn decode( + reader: &veilid_capnp::answer::Reader, + crypto: Crypto, + ) -> Result { let d_reader = reader.get_detail(); - let detail = RPCAnswerDetail::decode(&d_reader)?; + let detail = RPCAnswerDetail::decode(&d_reader, crypto)?; Ok(RPCAnswer { detail }) } pub fn encode(&self, builder: &mut veilid_capnp::answer::Builder) -> Result<(), RPCError> { @@ -60,6 +63,7 @@ impl RPCAnswerDetail { pub fn decode( reader: &veilid_capnp::answer::detail::Reader, 
+ crypto: Crypto, ) -> Result { let which_reader = reader.which().map_err(RPCError::protocol)?; let out = match which_reader { @@ -70,7 +74,7 @@ impl RPCAnswerDetail { } veilid_capnp::answer::detail::FindNodeA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationFindNodeA::decode(&op_reader)?; + let out = RPCOperationFindNodeA::decode(&op_reader, crypto)?; RPCAnswerDetail::FindNodeA(out) } veilid_capnp::answer::detail::AppCallA(r) => { @@ -80,27 +84,27 @@ impl RPCAnswerDetail { } veilid_capnp::answer::detail::GetValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationGetValueA::decode(&op_reader)?; + let out = RPCOperationGetValueA::decode(&op_reader, crypto)?; RPCAnswerDetail::GetValueA(out) } veilid_capnp::answer::detail::SetValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSetValueA::decode(&op_reader)?; + let out = RPCOperationSetValueA::decode(&op_reader, crypto)?; RPCAnswerDetail::SetValueA(out) } veilid_capnp::answer::detail::WatchValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationWatchValueA::decode(&op_reader)?; + let out = RPCOperationWatchValueA::decode(&op_reader, crypto)?; RPCAnswerDetail::WatchValueA(out) } veilid_capnp::answer::detail::SupplyBlockA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSupplyBlockA::decode(&op_reader)?; + let out = RPCOperationSupplyBlockA::decode(&op_reader, crypto)?; RPCAnswerDetail::SupplyBlockA(out) } veilid_capnp::answer::detail::FindBlockA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationFindBlockA::decode(&op_reader)?; + let out = RPCOperationFindBlockA::decode(&op_reader, crypto)?; RPCAnswerDetail::FindBlockA(out) } veilid_capnp::answer::detail::StartTunnelA(r) => { diff --git a/veilid-core/src/rpc_processor/coders/operations/operation.rs b/veilid-core/src/rpc_processor/coders/operations/operation.rs index f23c1df8..d27b4d88 
100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation.rs @@ -16,22 +16,25 @@ impl RPCOperationKind { } } - pub fn decode(kind_reader: &veilid_capnp::operation::kind::Reader) -> Result { + pub fn decode( + kind_reader: &veilid_capnp::operation::kind::Reader, + crypto: Crypto, + ) -> Result { let which_reader = kind_reader.which().map_err(RPCError::protocol)?; let out = match which_reader { veilid_capnp::operation::kind::Which::Question(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCQuestion::decode(&q_reader)?; + let out = RPCQuestion::decode(&q_reader, crypto)?; RPCOperationKind::Question(out) } veilid_capnp::operation::kind::Which::Statement(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCStatement::decode(&q_reader)?; + let out = RPCStatement::decode(&q_reader, crypto)?; RPCOperationKind::Statement(out) } veilid_capnp::operation::kind::Which::Answer(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCAnswer::decode(&q_reader)?; + let out = RPCAnswer::decode(&q_reader, crypto)?; RPCOperationKind::Answer(out) } }; @@ -54,31 +57,25 @@ impl RPCOperationKind { #[derive(Debug, Clone)] pub struct RPCOperation { op_id: OperationId, - sender_node_info: Option, + opt_sender_peer_info: Option, target_node_info_ts: Timestamp, kind: RPCOperationKind, } impl RPCOperation { - pub fn new_question( - question: RPCQuestion, - sender_signed_node_info: SenderSignedNodeInfo, - ) -> Self { + pub fn new_question(question: RPCQuestion, sender_peer_info: SenderPeerInfo) -> Self { Self { op_id: OperationId::new(get_random_u64()), - sender_node_info: sender_signed_node_info.signed_node_info, - target_node_info_ts: sender_signed_node_info.target_node_info_ts, + opt_sender_peer_info: sender_peer_info.opt_sender_peer_info, + target_node_info_ts: sender_peer_info.target_node_info_ts, kind: RPCOperationKind::Question(question), } } - pub fn 
new_statement( - statement: RPCStatement, - sender_signed_node_info: SenderSignedNodeInfo, - ) -> Self { + pub fn new_statement(statement: RPCStatement, sender_peer_info: SenderPeerInfo) -> Self { Self { op_id: OperationId::new(get_random_u64()), - sender_node_info: sender_signed_node_info.signed_node_info, - target_node_info_ts: sender_signed_node_info.target_node_info_ts, + opt_sender_peer_info: sender_peer_info.opt_sender_peer_info, + target_node_info_ts: sender_peer_info.target_node_info_ts, kind: RPCOperationKind::Statement(statement), } } @@ -86,12 +83,12 @@ impl RPCOperation { pub fn new_answer( request: &RPCOperation, answer: RPCAnswer, - sender_signed_node_info: SenderSignedNodeInfo, + sender_peer_info: SenderPeerInfo, ) -> Self { Self { op_id: request.op_id, - sender_node_info: sender_signed_node_info.signed_node_info, - target_node_info_ts: sender_signed_node_info.target_node_info_ts, + opt_sender_peer_info: sender_peer_info.opt_sender_peer_info, + target_node_info_ts: sender_peer_info.target_node_info_ts, kind: RPCOperationKind::Answer(answer), } } @@ -100,8 +97,8 @@ impl RPCOperation { self.op_id } - pub fn sender_node_info(&self) -> Option<&SignedNodeInfo> { - self.sender_node_info.as_ref() + pub fn sender_peer_info(&self) -> Option<&PeerInfo> { + self.opt_sender_peer_info.as_ref() } pub fn target_node_info_ts(&self) -> Timestamp { self.target_node_info_ts @@ -117,20 +114,16 @@ impl RPCOperation { pub fn decode( operation_reader: &veilid_capnp::operation::Reader, - opt_sender_node_id: Option<&DHTKey>, + crypto: Crypto, ) -> Result { let op_id = OperationId::new(operation_reader.get_op_id()); - let sender_node_info = if operation_reader.has_sender_node_info() { - if let Some(sender_node_id) = opt_sender_node_id { - let sni_reader = operation_reader - .get_sender_node_info() - .map_err(RPCError::protocol)?; - let sni = decode_signed_node_info(&sni_reader, sender_node_id)?; - Some(sni) - } else { - None - } + let sender_peer_info = if 
operation_reader.has_sender_peer_info() { + let pi_reader = operation_reader + .get_sender_peer_info() + .map_err(RPCError::protocol)?; + let pi = decode_peer_info(&pi_reader, crypto.clone())?; + Some(pi) } else { None }; @@ -138,11 +131,11 @@ impl RPCOperation { let target_node_info_ts = Timestamp::new(operation_reader.get_target_node_info_ts()); let kind_reader = operation_reader.get_kind(); - let kind = RPCOperationKind::decode(&kind_reader)?; + let kind = RPCOperationKind::decode(&kind_reader, crypto)?; Ok(RPCOperation { op_id, - sender_node_info, + opt_sender_peer_info: sender_peer_info, target_node_info_ts, kind, }) @@ -150,9 +143,9 @@ impl RPCOperation { pub fn encode(&self, builder: &mut veilid_capnp::operation::Builder) -> Result<(), RPCError> { builder.set_op_id(self.op_id.as_u64()); - if let Some(sender_info) = &self.sender_node_info { - let mut si_builder = builder.reborrow().init_sender_node_info(); - encode_signed_node_info(&sender_info, &mut si_builder)?; + if let Some(sender_peer_info) = &self.opt_sender_peer_info { + let mut pi_builder = builder.reborrow().init_sender_peer_info(); + encode_peer_info(&sender_peer_info, &mut pi_builder)?; } builder.set_target_node_info_ts(self.target_node_info_ts.as_u64()); let mut k_builder = builder.reborrow().init_kind(); diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs b/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs index ce42da3f..b5ecab45 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs @@ -2,7 +2,7 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationFindBlockQ { - pub block_id: DHTKey, + pub block_id: TypedKey, } impl RPCOperationFindBlockQ { @@ -10,7 +10,7 @@ impl RPCOperationFindBlockQ { reader: &veilid_capnp::operation_find_block_q::Reader, ) -> Result { let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?; - 
let block_id = decode_dht_key(&bi_reader); + let block_id = decode_typed_key(&bi_reader)?; Ok(RPCOperationFindBlockQ { block_id }) } @@ -19,7 +19,7 @@ impl RPCOperationFindBlockQ { builder: &mut veilid_capnp::operation_find_block_q::Builder, ) -> Result<(), RPCError> { let mut bi_builder = builder.reborrow().init_block_id(); - encode_dht_key(&self.block_id, &mut bi_builder)?; + encode_typed_key(&self.block_id, &mut bi_builder); Ok(()) } @@ -35,6 +35,7 @@ pub struct RPCOperationFindBlockA { impl RPCOperationFindBlockA { pub fn decode( reader: &veilid_capnp::operation_find_block_a::Reader, + crypto: Crypto, ) -> Result { let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); @@ -46,7 +47,7 @@ impl RPCOperationFindBlockA { .map_err(RPCError::map_internal("too many suppliers"))?, ); for s in suppliers_reader.iter() { - let peer_info = decode_peer_info(&s)?; + let peer_info = decode_peer_info(&s, crypto.clone())?; suppliers.push(peer_info); } @@ -58,7 +59,7 @@ impl RPCOperationFindBlockA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs index cf6bb675..c3511efa 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs @@ -2,7 +2,7 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationFindNodeQ { - pub node_id: DHTKey, + pub node_id: TypedKey, } impl RPCOperationFindNodeQ { @@ -10,7 +10,7 @@ impl RPCOperationFindNodeQ { reader: &veilid_capnp::operation_find_node_q::Reader, ) -> Result { let ni_reader = reader.get_node_id().map_err(RPCError::protocol)?; - let node_id = decode_dht_key(&ni_reader); + let node_id = 
decode_typed_key(&ni_reader)?; Ok(RPCOperationFindNodeQ { node_id }) } pub fn encode( @@ -18,7 +18,7 @@ impl RPCOperationFindNodeQ { builder: &mut veilid_capnp::operation_find_node_q::Builder, ) -> Result<(), RPCError> { let mut ni_builder = builder.reborrow().init_node_id(); - encode_dht_key(&self.node_id, &mut ni_builder)?; + encode_typed_key(&self.node_id, &mut ni_builder); Ok(()) } } @@ -31,6 +31,7 @@ pub struct RPCOperationFindNodeA { impl RPCOperationFindNodeA { pub fn decode( reader: &veilid_capnp::operation_find_node_a::Reader, + crypto: Crypto, ) -> Result { let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; let mut peers = Vec::::with_capacity( @@ -40,7 +41,7 @@ impl RPCOperationFindNodeA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs index f9fc9959..5db1f993 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs @@ -2,7 +2,8 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationGetValueQ { - pub key: ValueKey, + pub key: TypedKey, + pub subkey: ValueSubkey, } impl RPCOperationGetValueQ { @@ -10,15 +11,17 @@ impl RPCOperationGetValueQ { reader: &veilid_capnp::operation_get_value_q::Reader, ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; - let key = decode_value_key(&k_reader)?; - Ok(RPCOperationGetValueQ { key }) + let key = decode_typed_key(&k_reader)?; + let subkey = reader.get_subkey(); + Ok(RPCOperationGetValueQ { key, subkey }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_get_value_q::Builder, ) -> Result<(), RPCError> { let mut k_builder = 
builder.reborrow().init_key(); - encode_value_key(&self.key, &mut k_builder)?; + encode_typed_key(&self.key, &mut k_builder); + builder.set_subkey(self.subkey); Ok(()) } } @@ -32,6 +35,7 @@ pub enum RPCOperationGetValueA { impl RPCOperationGetValueA { pub fn decode( reader: &veilid_capnp::operation_get_value_a::Reader, + crypto: Crypto, ) -> Result { match reader.which().map_err(RPCError::protocol)? { veilid_capnp::operation_get_value_a::Which::Data(r) => { @@ -47,7 +51,7 @@ impl RPCOperationGetValueA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_route.rs b/veilid-core/src/rpc_processor/coders/operations/operation_route.rs index 88d65fe9..67333819 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_route.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_route.rs @@ -2,17 +2,15 @@ use super::*; #[derive(Debug, Clone)] pub struct RoutedOperation { - pub version: u8, pub sequencing: Sequencing, - pub signatures: Vec, + pub signatures: Vec, pub nonce: Nonce, pub data: Vec, } impl RoutedOperation { - pub fn new(version: u8, sequencing: Sequencing, nonce: Nonce, data: Vec) -> Self { + pub fn new(sequencing: Sequencing, nonce: Nonce, data: Vec) -> Self { Self { - version, sequencing, signatures: Vec::new(), nonce, @@ -24,25 +22,23 @@ impl RoutedOperation { reader: &veilid_capnp::routed_operation::Reader, ) -> Result { let sigs_reader = reader.get_signatures().map_err(RPCError::protocol)?; - let mut signatures = Vec::::with_capacity( + let mut signatures = Vec::::with_capacity( sigs_reader .len() .try_into() .map_err(RPCError::map_internal("too many signatures"))?, ); for s in sigs_reader.iter() { - let sig = decode_signature(&s); + let sig = decode_signature512(&s); signatures.push(sig); } - let version 
= reader.get_version(); let sequencing = decode_sequencing(reader.get_sequencing().map_err(RPCError::protocol)?); let n_reader = reader.get_nonce().map_err(RPCError::protocol)?; let nonce = decode_nonce(&n_reader); let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); Ok(RoutedOperation { - version, sequencing, signatures, nonce, @@ -54,7 +50,6 @@ impl RoutedOperation { &self, builder: &mut veilid_capnp::routed_operation::Builder, ) -> Result<(), RPCError> { - builder.reborrow().set_version(self.version); builder .reborrow() .set_sequencing(encode_sequencing(self.sequencing)); @@ -66,7 +61,7 @@ impl RoutedOperation { ); for (i, sig) in self.signatures.iter().enumerate() { let mut sig_builder = sigs_builder.reborrow().get(i as u32); - encode_signature(sig, &mut sig_builder); + encode_signature512(sig, &mut sig_builder); } let mut n_builder = builder.reborrow().init_nonce(); encode_nonce(&self.nonce, &mut n_builder); @@ -85,9 +80,10 @@ pub struct RPCOperationRoute { impl RPCOperationRoute { pub fn decode( reader: &veilid_capnp::operation_route::Reader, + crypto: Crypto, ) -> Result { let sr_reader = reader.get_safety_route().map_err(RPCError::protocol)?; - let safety_route = decode_safety_route(&sr_reader)?; + let safety_route = decode_safety_route(&sr_reader, crypto)?; let o_reader = reader.get_operation().map_err(RPCError::protocol)?; let operation = RoutedOperation::decode(&o_reader)?; diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs index 23a34421..4f1c5763 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs @@ -2,7 +2,8 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationSetValueQ { - pub key: ValueKey, + pub key: TypedKey, + pub subkey: ValueSubkey, pub value: ValueData, } @@ -11,17 +12,19 @@ impl RPCOperationSetValueQ { 
reader: &veilid_capnp::operation_set_value_q::Reader, ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; - let key = decode_value_key(&k_reader)?; + let key = decode_typed_key(&k_reader)?; + let subkey = reader.get_subkey(); let v_reader = reader.get_value().map_err(RPCError::protocol)?; let value = decode_value_data(&v_reader)?; - Ok(RPCOperationSetValueQ { key, value }) + Ok(RPCOperationSetValueQ { key, subkey, value }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_set_value_q::Builder, ) -> Result<(), RPCError> { let mut k_builder = builder.reborrow().init_key(); - encode_value_key(&self.key, &mut k_builder)?; + encode_typed_key(&self.key, &mut k_builder); + builder.set_subkey(self.subkey); let mut v_builder = builder.reborrow().init_value(); encode_value_data(&self.value, &mut v_builder)?; Ok(()) @@ -37,6 +40,7 @@ pub enum RPCOperationSetValueA { impl RPCOperationSetValueA { pub fn decode( reader: &veilid_capnp::operation_set_value_a::Reader, + crypto: Crypto, ) -> Result { match reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::operation_set_value_a::Which::Data(r) => { @@ -52,7 +56,7 @@ impl RPCOperationSetValueA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs index 4b8a6fd3..a414e300 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs @@ -8,8 +8,9 @@ pub struct RPCOperationSignal { impl RPCOperationSignal { pub fn decode( reader: &veilid_capnp::operation_signal::Reader, + crypto: Crypto, ) -> Result { - let signal_info = decode_signal_info(reader)?; + let signal_info = decode_signal_info(reader, crypto)?; Ok(RPCOperationSignal { signal_info }) } pub fn encode( diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs b/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs index d593650d..f68de596 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs @@ -2,7 +2,7 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationSupplyBlockQ { - pub block_id: DHTKey, + pub block_id: TypedKey, } impl RPCOperationSupplyBlockQ { @@ -10,7 +10,7 @@ impl RPCOperationSupplyBlockQ { reader: &veilid_capnp::operation_supply_block_q::Reader, ) -> Result { let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?; - let block_id = decode_dht_key(&bi_reader); + let block_id = decode_typed_key(&bi_reader)?; Ok(RPCOperationSupplyBlockQ { block_id }) } @@ -19,7 +19,7 @@ impl RPCOperationSupplyBlockQ { builder: &mut veilid_capnp::operation_supply_block_q::Builder, ) -> Result<(), RPCError> { let mut bi_builder = 
builder.reborrow().init_block_id(); - encode_dht_key(&self.block_id, &mut bi_builder)?; + encode_typed_key(&self.block_id, &mut bi_builder); Ok(()) } @@ -34,6 +34,7 @@ pub enum RPCOperationSupplyBlockA { impl RPCOperationSupplyBlockA { pub fn decode( reader: &veilid_capnp::operation_supply_block_a::Reader, + crypto: Crypto, ) -> Result { match reader.which().map_err(RPCError::protocol)? { veilid_capnp::operation_supply_block_a::Which::Expiration(r) => { @@ -48,7 +49,7 @@ impl RPCOperationSupplyBlockA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs b/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs index 3d1c08cf..86bc9c69 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs @@ -2,7 +2,9 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationValueChanged { - pub key: ValueKey, + pub key: TypedKey, + pub subkeys: Vec, + pub count: u32, pub value: ValueData, } @@ -11,17 +13,60 @@ impl RPCOperationValueChanged { reader: &veilid_capnp::operation_value_changed::Reader, ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; - let key = decode_value_key(&k_reader)?; + let key = decode_typed_key(&k_reader)?; + + let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?; + let mut subkeys = Vec::::with_capacity( + sk_reader + .len() + .try_into() + .map_err(RPCError::map_protocol("too many subkey ranges"))?, + ); + for skr in sk_reader.iter() { + let vskr = (skr.get_start(), skr.get_end()); + if vskr.0 > vskr.1 { + return Err(RPCError::protocol("invalid subkey range")); + } + if let Some(lvskr) = subkeys.last() { + if lvskr.1 >= vskr.0 { + return 
Err(RPCError::protocol( + "subkey range out of order or not merged", + )); + } + } + subkeys.push(vskr); + } + let count = reader.get_count(); let v_reader = reader.get_value().map_err(RPCError::protocol)?; let value = decode_value_data(&v_reader)?; - Ok(RPCOperationValueChanged { key, value }) + Ok(RPCOperationValueChanged { + key, + subkeys, + count, + value, + }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_value_changed::Builder, ) -> Result<(), RPCError> { let mut k_builder = builder.reborrow().init_key(); - encode_value_key(&self.key, &mut k_builder)?; + encode_typed_key(&self.key, &mut k_builder); + + let mut sk_builder = builder.reborrow().init_subkeys( + self.subkeys + .len() + .try_into() + .map_err(RPCError::map_internal("invalid subkey range list length"))?, + ); + for (i, skr) in self.subkeys.iter().enumerate() { + let mut skr_builder = sk_builder.reborrow().get(i as u32); + skr_builder.set_start(skr.0); + skr_builder.set_end(skr.1); + } + + builder.set_count(self.count); + let mut v_builder = builder.reborrow().init_value(); encode_value_data(&self.value, &mut v_builder)?; Ok(()) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs index 00c0199c..7812b98b 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs @@ -2,7 +2,10 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationWatchValueQ { - pub key: ValueKey, + pub key: TypedKey, + pub subkeys: Vec, + pub expiration: u64, + pub count: u32, } impl RPCOperationWatchValueQ { @@ -10,15 +13,60 @@ impl RPCOperationWatchValueQ { reader: &veilid_capnp::operation_watch_value_q::Reader, ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; - let key = decode_value_key(&k_reader)?; - Ok(RPCOperationWatchValueQ { key }) + let key = 
decode_typed_key(&k_reader)?; + + let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?; + let mut subkeys = Vec::::with_capacity( + sk_reader + .len() + .try_into() + .map_err(RPCError::map_protocol("too many subkey ranges"))?, + ); + for skr in sk_reader.iter() { + let vskr = (skr.get_start(), skr.get_end()); + if vskr.0 > vskr.1 { + return Err(RPCError::protocol("invalid subkey range")); + } + if let Some(lvskr) = subkeys.last() { + if lvskr.1 >= vskr.0 { + return Err(RPCError::protocol( + "subkey range out of order or not merged", + )); + } + } + subkeys.push(vskr); + } + + let expiration = reader.get_expiration(); + let count = reader.get_count(); + + Ok(RPCOperationWatchValueQ { + key, + subkeys, + expiration, + count, + }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_watch_value_q::Builder, ) -> Result<(), RPCError> { let mut k_builder = builder.reborrow().init_key(); - encode_value_key(&self.key, &mut k_builder)?; + encode_typed_key(&self.key, &mut k_builder); + + let mut sk_builder = builder.reborrow().init_subkeys( + self.subkeys + .len() + .try_into() + .map_err(RPCError::map_internal("invalid subkey range list length"))?, + ); + for (i, skr) in self.subkeys.iter().enumerate() { + let mut skr_builder = sk_builder.reborrow().get(i as u32); + skr_builder.set_start(skr.0); + skr_builder.set_end(skr.1); + } + builder.set_expiration(self.expiration); + builder.set_count(self.count); Ok(()) } } @@ -32,6 +80,7 @@ pub struct RPCOperationWatchValueA { impl RPCOperationWatchValueA { pub fn decode( reader: &veilid_capnp::operation_watch_value_a::Reader, + crypto: Crypto, ) -> Result { let expiration = reader.get_expiration(); let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; @@ -42,7 +91,7 @@ impl RPCOperationWatchValueA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p)?; + let peer_info = decode_peer_info(&p, crypto.clone())?; 
peers.push(peer_info); } diff --git a/veilid-core/src/rpc_processor/coders/operations/question.rs b/veilid-core/src/rpc_processor/coders/operations/question.rs index 4e7e3966..e3f50776 100644 --- a/veilid-core/src/rpc_processor/coders/operations/question.rs +++ b/veilid-core/src/rpc_processor/coders/operations/question.rs @@ -19,9 +19,12 @@ impl RPCQuestion { pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode(reader: &veilid_capnp::question::Reader) -> Result { + pub fn decode( + reader: &veilid_capnp::question::Reader, + crypto: Crypto, + ) -> Result { let rt_reader = reader.get_respond_to(); - let respond_to = RespondTo::decode(&rt_reader)?; + let respond_to = RespondTo::decode(&rt_reader, crypto)?; let d_reader = reader.get_detail(); let detail = RPCQuestionDetail::decode(&d_reader)?; Ok(RPCQuestion { respond_to, detail }) diff --git a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs index f05b6d08..43d6f871 100644 --- a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs +++ b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs @@ -23,12 +23,15 @@ impl RespondTo { Ok(()) } - pub fn decode(reader: &veilid_capnp::question::respond_to::Reader) -> Result { + pub fn decode( + reader: &veilid_capnp::question::respond_to::Reader, + crypto: Crypto, + ) -> Result { let respond_to = match reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::question::respond_to::Sender(()) => RespondTo::Sender, veilid_capnp::question::respond_to::PrivateRoute(pr_reader) => { let pr_reader = pr_reader.map_err(RPCError::protocol)?; - let pr = decode_private_route(&pr_reader)?; + let pr = decode_private_route(&pr_reader, crypto)?; RespondTo::PrivateRoute(pr) } }; diff --git a/veilid-core/src/rpc_processor/coders/operations/statement.rs b/veilid-core/src/rpc_processor/coders/operations/statement.rs index 1c2ddf59..2108b373 100644 --- a/veilid-core/src/rpc_processor/coders/operations/statement.rs +++ b/veilid-core/src/rpc_processor/coders/operations/statement.rs @@ -18,9 +18,12 @@ impl RPCStatement { pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode(reader: &veilid_capnp::statement::Reader) -> Result { + pub fn decode( + reader: &veilid_capnp::statement::Reader, + crypto: Crypto, + ) -> Result { let d_reader = reader.get_detail(); - let detail = RPCStatementDetail::decode(&d_reader)?; + let detail = RPCStatementDetail::decode(&d_reader, crypto)?; Ok(RPCStatement { detail }) } pub fn encode(&self, builder: &mut veilid_capnp::statement::Builder) -> Result<(), RPCError> { @@ -52,6 +55,7 @@ impl RPCStatementDetail { } pub fn decode( reader: &veilid_capnp::statement::detail::Reader, + crypto: Crypto, ) -> Result { let which_reader = reader.which().map_err(RPCError::protocol)?; let out = match which_reader { @@ -62,7 +66,7 @@ impl RPCStatementDetail { } veilid_capnp::statement::detail::Route(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationRoute::decode(&op_reader)?; + let out = RPCOperationRoute::decode(&op_reader, crypto)?; RPCStatementDetail::Route(out) } veilid_capnp::statement::detail::ValueChanged(r) => { @@ -72,7 +76,7 @@ impl RPCStatementDetail { } veilid_capnp::statement::detail::Signal(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSignal::decode(&op_reader)?; + let out = RPCOperationSignal::decode(&op_reader, 
crypto)?; RPCStatementDetail::Signal(out) } veilid_capnp::statement::detail::ReturnReceipt(r) => { diff --git a/veilid-core/src/rpc_processor/coders/peer_info.rs b/veilid-core/src/rpc_processor/coders/peer_info.rs index 5844b8ab..14a1cdd1 100644 --- a/veilid-core/src/rpc_processor/coders/peer_info.rs +++ b/veilid-core/src/rpc_processor/coders/peer_info.rs @@ -5,28 +5,50 @@ pub fn encode_peer_info( builder: &mut veilid_capnp::peer_info::Builder, ) -> Result<(), RPCError> { // - let mut nid_builder = builder.reborrow().init_node_id(); - encode_dht_key(&peer_info.node_id.key, &mut nid_builder)?; + let mut nids_builder = builder.reborrow().init_node_ids( + peer_info + .node_ids + .len() + .try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ); + for (i, nid) in peer_info.node_ids.iter().enumerate() { + encode_typed_key( + nid, + &mut nids_builder.reborrow().get( + i.try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ), + ); + } let mut sni_builder = builder.reborrow().init_signed_node_info(); encode_signed_node_info(&peer_info.signed_node_info, &mut sni_builder)?; Ok(()) } -pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result { - let nid_reader = reader +pub fn decode_peer_info( + reader: &veilid_capnp::peer_info::Reader, + crypto: Crypto, +) -> Result { + let nids_reader = reader .reborrow() - .get_node_id() + .get_node_ids() .map_err(RPCError::protocol)?; let sni_reader = reader .reborrow() .get_signed_node_info() .map_err(RPCError::protocol)?; - let node_id = NodeId::new(decode_dht_key(&nid_reader)); - let signed_node_info = decode_signed_node_info(&sni_reader, &node_id.key)?; - + let mut node_ids = TypedKeySet::with_capacity(nids_reader.len() as usize); + for nid_reader in nids_reader.iter() { + node_ids.add(decode_typed_key(&nid_reader)?); + } + let signed_node_info = decode_signed_node_info(&sni_reader, crypto, &mut node_ids)?; + if node_ids.len() == 0 { + return 
Err(RPCError::protocol("no verified node ids")); + } Ok(PeerInfo { - node_id, + node_ids, signed_node_info, }) } diff --git a/veilid-core/src/rpc_processor/coders/private_safety_route.rs b/veilid-core/src/rpc_processor/coders/private_safety_route.rs index 1edbb9fd..86d63d03 100644 --- a/veilid-core/src/rpc_processor/coders/private_safety_route.rs +++ b/veilid-core/src/rpc_processor/coders/private_safety_route.rs @@ -53,7 +53,7 @@ pub fn encode_route_hop( match &route_hop.node { RouteNode::NodeId(ni) => { let mut ni_builder = node_builder.init_node_id(); - encode_dht_key(&ni.key, &mut ni_builder)?; + encode_key256(&ni, &mut ni_builder); } RouteNode::PeerInfo(pi) => { let mut pi_builder = node_builder.init_peer_info(); @@ -67,17 +67,20 @@ pub fn encode_route_hop( Ok(()) } -pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result { +pub fn decode_route_hop( + reader: &veilid_capnp::route_hop::Reader, + crypto: Crypto, +) -> Result { let n_reader = reader.reborrow().get_node(); let node = match n_reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::route_hop::node::Which::NodeId(ni) => { let ni_reader = ni.map_err(RPCError::protocol)?; - RouteNode::NodeId(NodeId::new(decode_dht_key(&ni_reader))) + RouteNode::NodeId(decode_key256(&ni_reader)) } veilid_capnp::route_hop::node::Which::PeerInfo(pi) => { let pi_reader = pi.map_err(RPCError::protocol)?; RouteNode::PeerInfo( - decode_peer_info(&pi_reader) + decode_peer_info(&pi_reader, crypto) .map_err(RPCError::map_protocol("invalid peer info in route hop"))?, ) } @@ -101,10 +104,10 @@ pub fn encode_private_route( private_route: &PrivateRoute, builder: &mut veilid_capnp::private_route::Builder, ) -> Result<(), RPCError> { - encode_dht_key( + encode_typed_key( &private_route.public_key, &mut builder.reborrow().init_public_key(), - )?; + ); builder.set_hop_count(private_route.hop_count); let mut h_builder = builder.reborrow().init_hops(); match &private_route.hops { @@ -125,16 +128,17 @@ pub fn encode_private_route( pub fn decode_private_route( reader: &veilid_capnp::private_route::Reader, + crypto: Crypto, ) -> Result { - let public_key = decode_dht_key(&reader.get_public_key().map_err(RPCError::map_protocol( - "invalid public key in private route", - ))?); + let public_key = decode_typed_key(&reader.get_public_key().map_err( + RPCError::map_protocol("invalid public key in private route"), + )?)?; let hop_count = reader.get_hop_count(); let hops = match reader.get_hops().which().map_err(RPCError::protocol)? { veilid_capnp::private_route::hops::Which::FirstHop(rh_reader) => { let rh_reader = rh_reader.map_err(RPCError::protocol)?; - PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader)?) + PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader, crypto)?) 
} veilid_capnp::private_route::hops::Which::Data(rhd_reader) => { let rhd_reader = rhd_reader.map_err(RPCError::protocol)?; @@ -156,10 +160,10 @@ pub fn encode_safety_route( safety_route: &SafetyRoute, builder: &mut veilid_capnp::safety_route::Builder, ) -> Result<(), RPCError> { - encode_dht_key( + encode_typed_key( &safety_route.public_key, &mut builder.reborrow().init_public_key(), - )?; + ); builder.set_hop_count(safety_route.hop_count); let h_builder = builder.reborrow().init_hops(); match &safety_route.hops { @@ -178,12 +182,13 @@ pub fn encode_safety_route( pub fn decode_safety_route( reader: &veilid_capnp::safety_route::Reader, + crypto: Crypto, ) -> Result { - let public_key = decode_dht_key( + let public_key = decode_typed_key( &reader .get_public_key() .map_err(RPCError::map_protocol("invalid public key in safety route"))?, - ); + )?; let hop_count = reader.get_hop_count(); let hops = match reader.get_hops().which().map_err(RPCError::protocol)? { veilid_capnp::safety_route::hops::Which::Data(rhd_reader) => { @@ -192,7 +197,7 @@ pub fn decode_safety_route( } veilid_capnp::safety_route::hops::Which::Private(pr_reader) => { let pr_reader = pr_reader.map_err(RPCError::protocol)?; - SafetyRouteHops::Private(decode_private_route(&pr_reader)?) + SafetyRouteHops::Private(decode_private_route(&pr_reader, crypto)?) 
} }; diff --git a/veilid-core/src/rpc_processor/coders/signal_info.rs b/veilid-core/src/rpc_processor/coders/signal_info.rs index 5e9edc84..0f51257e 100644 --- a/veilid-core/src/rpc_processor/coders/signal_info.rs +++ b/veilid-core/src/rpc_processor/coders/signal_info.rs @@ -34,6 +34,7 @@ pub fn encode_signal_info( pub fn decode_signal_info( reader: &veilid_capnp::operation_signal::Reader, + crypto: Crypto, ) -> Result { Ok( match reader @@ -52,7 +53,7 @@ pub fn decode_signal_info( let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol( "invalid peer info in hole punch signal info", ))?; - let peer_info = decode_peer_info(&pi_reader)?; + let peer_info = decode_peer_info(&pi_reader, crypto)?; SignalInfo::HolePunch { receipt, peer_info } } @@ -68,7 +69,7 @@ pub fn decode_signal_info( let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol( "invalid peer info in reverse connect signal info", ))?; - let peer_info = decode_peer_info(&pi_reader)?; + let peer_info = decode_peer_info(&pi_reader, crypto)?; SignalInfo::ReverseConnect { receipt, peer_info } } diff --git a/veilid-core/src/rpc_processor/coders/dht_signature.rs b/veilid-core/src/rpc_processor/coders/signature512.rs similarity index 91% rename from veilid-core/src/rpc_processor/coders/dht_signature.rs rename to veilid-core/src/rpc_processor/coders/signature512.rs index 5b7427b2..25212fe6 100644 --- a/veilid-core/src/rpc_processor/coders/dht_signature.rs +++ b/veilid-core/src/rpc_processor/coders/signature512.rs @@ -1,6 +1,6 @@ use super::*; -pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signature512::Builder) { +pub fn encode_signature512(sig: &Signature, builder: &mut veilid_capnp::signature512::Builder) { let sig = &sig.bytes; builder.set_u0(u64::from_be_bytes( @@ -29,7 +29,7 @@ pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signatur )); } -pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSignature { +pub fn 
decode_signature512(reader: &veilid_capnp::signature512::Reader) -> Signature { let u0 = reader.get_u0().to_be_bytes(); let u1 = reader.get_u1().to_be_bytes(); let u2 = reader.get_u2().to_be_bytes(); @@ -39,7 +39,7 @@ pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSigna let u6 = reader.get_u6().to_be_bytes(); let u7 = reader.get_u7().to_be_bytes(); - DHTSignature::new([ + Signature::new([ u0[0], u0[1], u0[2], u0[3], u0[4], u0[5], u0[6], u0[7], // u0 u1[0], u1[1], u1[2], u1[3], u1[4], u1[5], u1[6], u1[7], // u1 u2[0], u2[1], u2[2], u2[3], u2[4], u2[5], u2[6], u2[7], // u2 diff --git a/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs index bca21bb5..c15ddfb8 100644 --- a/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs @@ -12,18 +12,30 @@ pub fn encode_signed_direct_node_info( .reborrow() .set_timestamp(signed_direct_node_info.timestamp.into()); - let mut sig_builder = builder.reborrow().init_signature(); - let Some(signature) = &signed_direct_node_info.signature else { - return Err(RPCError::internal("Should not encode SignedDirectNodeInfo without signature!")); - }; - encode_signature(signature, &mut sig_builder); + let mut sigs_builder = builder.reborrow().init_signatures( + signed_direct_node_info + .signatures + .len() + .try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ); + for (i, typed_signature) in signed_direct_node_info.signatures.iter().enumerate() { + encode_typed_signature( + typed_signature, + &mut sigs_builder.reborrow().get( + i.try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ), + ); + } Ok(()) } pub fn decode_signed_direct_node_info( reader: &veilid_capnp::signed_direct_node_info::Reader, - node_id: &DHTKey, + crypto: Crypto, + node_ids: &mut TypedKeySet, ) -> Result { let ni_reader = reader 
.reborrow() @@ -31,15 +43,24 @@ pub fn decode_signed_direct_node_info( .map_err(RPCError::protocol)?; let node_info = decode_node_info(&ni_reader)?; - let sig_reader = reader - .reborrow() - .get_signature() - .map_err(RPCError::protocol)?; - let timestamp = reader.reborrow().get_timestamp().into(); - let signature = decode_signature(&sig_reader); + let sigs_reader = reader + .reborrow() + .get_signatures() + .map_err(RPCError::protocol)?; - SignedDirectNodeInfo::new(NodeId::new(*node_id), node_info, timestamp, signature) + let sig_count = sigs_reader.len() as usize; + if sig_count > MAX_CRYPTO_KINDS { + return Err(RPCError::protocol("too many signatures")); + } + + let mut typed_signatures = Vec::with_capacity(sig_count); + for sig_reader in sigs_reader { + let typed_signature = decode_typed_signature(&sig_reader)?; + typed_signatures.push(typed_signature); + } + + SignedDirectNodeInfo::new(crypto, node_ids, node_info, timestamp, typed_signatures) .map_err(RPCError::protocol) } diff --git a/veilid-core/src/rpc_processor/coders/signed_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_node_info.rs index 2af7cefd..aeede197 100644 --- a/veilid-core/src/rpc_processor/coders/signed_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_node_info.rs @@ -20,20 +20,21 @@ pub fn encode_signed_node_info( pub fn decode_signed_node_info( reader: &veilid_capnp::signed_node_info::Reader, - node_id: &DHTKey, + crypto: Crypto, + node_ids: &mut TypedKeySet, ) -> Result { match reader .which() - .map_err(RPCError::map_internal("invalid signal operation"))? + .map_err(RPCError::map_internal("invalid signed node info"))? 
{ veilid_capnp::signed_node_info::Direct(d) => { let d_reader = d.map_err(RPCError::protocol)?; - let sdni = decode_signed_direct_node_info(&d_reader, node_id)?; + let sdni = decode_signed_direct_node_info(&d_reader, crypto, node_ids)?; Ok(SignedNodeInfo::Direct(sdni)) } veilid_capnp::signed_node_info::Relayed(r) => { let r_reader = r.map_err(RPCError::protocol)?; - let srni = decode_signed_relayed_node_info(&r_reader, node_id)?; + let srni = decode_signed_relayed_node_info(&r_reader, crypto, node_ids)?; Ok(SignedNodeInfo::Relayed(srni)) } } diff --git a/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs index 21d3266b..4264e853 100644 --- a/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs @@ -8,8 +8,22 @@ pub fn encode_signed_relayed_node_info( let mut ni_builder = builder.reborrow().init_node_info(); encode_node_info(&signed_relayed_node_info.node_info, &mut ni_builder)?; - let mut rid_builder = builder.reborrow().init_relay_id(); - encode_dht_key(&signed_relayed_node_info.relay_id.key, &mut rid_builder)?; + let mut rids_builder = builder.reborrow().init_relay_ids( + signed_relayed_node_info + .relay_ids + .len() + .try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ); + for (i, typed_key) in signed_relayed_node_info.relay_ids.iter().enumerate() { + encode_typed_key( + typed_key, + &mut rids_builder.reborrow().get( + i.try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ), + ); + } let mut ri_builder = builder.reborrow().init_relay_info(); encode_signed_direct_node_info(&signed_relayed_node_info.relay_info, &mut ri_builder)?; @@ -18,15 +32,30 @@ pub fn encode_signed_relayed_node_info( .reborrow() .set_timestamp(signed_relayed_node_info.timestamp.into()); - let mut sig_builder = builder.reborrow().init_signature(); - 
encode_signature(&signed_relayed_node_info.signature, &mut sig_builder); + let mut sigs_builder = builder.reborrow().init_signatures( + signed_relayed_node_info + .signatures + .len() + .try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ); + for (i, typed_signature) in signed_relayed_node_info.signatures.iter().enumerate() { + encode_typed_signature( + typed_signature, + &mut sigs_builder.reborrow().get( + i.try_into() + .map_err(RPCError::map_invalid_format("out of bound error"))?, + ), + ); + } Ok(()) } pub fn decode_signed_relayed_node_info( reader: &veilid_capnp::signed_relayed_node_info::Reader, - node_id: &DHTKey, + crypto: Crypto, + node_ids: &mut TypedKeySet, ) -> Result { let ni_reader = reader .reborrow() @@ -34,33 +63,64 @@ pub fn decode_signed_relayed_node_info( .map_err(RPCError::protocol)?; let node_info = decode_node_info(&ni_reader)?; - let rid_reader = reader + let rids_reader = reader .reborrow() - .get_relay_id() + .get_relay_ids() .map_err(RPCError::protocol)?; - let relay_id = decode_dht_key(&rid_reader); + let rid_count = rids_reader.len() as usize; + if rid_count > MAX_CRYPTO_KINDS { + return Err(RPCError::protocol("too many relay ids")); + } + let mut relay_ids = TypedKeySet::with_capacity(rid_count); + for rid_reader in rids_reader { + let relay_id = decode_typed_key(&rid_reader)?; + relay_ids.add(relay_id); + } let ri_reader = reader .reborrow() .get_relay_info() .map_err(RPCError::protocol)?; - let relay_info = decode_signed_direct_node_info(&ri_reader, &relay_id)?; + let relay_info = decode_signed_direct_node_info(&ri_reader, crypto.clone(), &mut relay_ids)?; + + // Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying + if common_crypto_kinds( + &node_info.crypto_support, + &relay_info.node_info.crypto_support, + ) + .len() + != node_info.crypto_support.len() + { + return Err(RPCError::protocol( + "relay should have superset of node crypto kinds", + )); + } - let 
sig_reader = reader - .reborrow() - .get_signature() - .map_err(RPCError::protocol)?; let timestamp = reader.reborrow().get_timestamp().into(); - let signature = decode_signature(&sig_reader); + let sigs_reader = reader + .reborrow() + .get_signatures() + .map_err(RPCError::protocol)?; + let sig_count = sigs_reader.len() as usize; + if sig_count > MAX_CRYPTO_KINDS { + return Err(RPCError::protocol("too many signatures")); + } + + let mut typed_signatures = Vec::with_capacity(sig_count); + for sig_reader in sigs_reader { + let typed_signature = decode_typed_signature(&sig_reader)?; + typed_signatures.push(typed_signature); + } SignedRelayedNodeInfo::new( - NodeId::new(*node_id), + crypto, + node_ids, node_info, - NodeId::new(relay_id), + relay_ids, relay_info, timestamp, - signature, + typed_signatures, ) .map_err(RPCError::protocol) } diff --git a/veilid-core/src/rpc_processor/coders/typed_key.rs b/veilid-core/src/rpc_processor/coders/typed_key.rs new file mode 100644 index 00000000..d6908b94 --- /dev/null +++ b/veilid-core/src/rpc_processor/coders/typed_key.rs @@ -0,0 +1,19 @@ +use super::*; + +pub fn decode_typed_key(typed_key: &veilid_capnp::typed_key::Reader) -> Result { + let key_reader = typed_key + .get_key() + .map_err(RPCError::map_invalid_format("invalid typed key"))?; + let kind = typed_key.get_kind(); + + Ok(TypedKey::new( + CryptoKind::from(kind.to_be_bytes()), + decode_key256(&key_reader), + )) +} + +pub fn encode_typed_key(typed_key: &TypedKey, builder: &mut veilid_capnp::typed_key::Builder) { + builder.set_kind(u32::from_be_bytes(typed_key.kind.0)); + let mut key_builder = builder.reborrow().init_key(); + encode_key256(&typed_key.value, &mut key_builder); +} diff --git a/veilid-core/src/rpc_processor/coders/typed_signature.rs b/veilid-core/src/rpc_processor/coders/typed_signature.rs new file mode 100644 index 00000000..1a54669d --- /dev/null +++ b/veilid-core/src/rpc_processor/coders/typed_signature.rs @@ -0,0 +1,24 @@ +use super::*; + +pub fn 
decode_typed_signature( + typed_signature: &veilid_capnp::typed_signature::Reader, +) -> Result { + let sig_reader = typed_signature + .get_signature() + .map_err(RPCError::map_invalid_format("invalid typed signature"))?; + let kind = typed_signature.get_kind(); + + Ok(TypedSignature::new( + CryptoKind::from(kind.to_be_bytes()), + decode_signature512(&sig_reader), + )) +} + +pub fn encode_typed_signature( + typed_signature: &TypedSignature, + builder: &mut veilid_capnp::typed_signature::Builder, +) { + builder.set_kind(u32::from_be_bytes(typed_signature.kind.0)); + let mut sig_builder = builder.reborrow().init_signature(); + encode_signature512(&typed_signature.value, &mut sig_builder); +} diff --git a/veilid-core/src/rpc_processor/coders/value_data.rs b/veilid-core/src/rpc_processor/coders/value_data.rs index ba859423..70cbf0a4 100644 --- a/veilid-core/src/rpc_processor/coders/value_data.rs +++ b/veilid-core/src/rpc_processor/coders/value_data.rs @@ -5,6 +5,7 @@ pub fn encode_value_data( builder: &mut veilid_capnp::value_data::Builder, ) -> Result<(), RPCError> { builder.set_data(&value_data.data); + builder.set_schema(u32::from_be_bytes(value_data.schema.0)); builder.set_seq(value_data.seq); Ok(()) } @@ -12,5 +13,6 @@ pub fn encode_value_data( pub fn decode_value_data(reader: &veilid_capnp::value_data::Reader) -> Result { let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); let seq = reader.get_seq(); - Ok(ValueData { data, seq }) + let schema = FourCC::from(reader.get_schema().to_be_bytes()); + Ok(ValueData { data, schema, seq }) } diff --git a/veilid-core/src/rpc_processor/coders/value_key.rs b/veilid-core/src/rpc_processor/coders/value_key.rs deleted file mode 100644 index fcb1c0a2..00000000 --- a/veilid-core/src/rpc_processor/coders/value_key.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::*; - -pub fn encode_value_key( - value_key: &ValueKey, - builder: &mut veilid_capnp::value_key::Builder, -) -> Result<(), RPCError> { - let mut pk_builder = 
builder.reborrow().init_public_key(); - encode_dht_key(&value_key.key, &mut pk_builder)?; - if let Some(subkey) = &value_key.subkey { - builder.set_subkey(subkey); - } - Ok(()) -} - -pub fn decode_value_key(reader: &veilid_capnp::value_key::Reader) -> Result { - let pk_reader = reader.get_public_key().map_err(RPCError::protocol)?; - let key = decode_dht_key(&pk_reader); - let subkey = if !reader.has_subkey() { - None - } else { - let subkey = reader.get_subkey().map_err(RPCError::protocol)?; - Some(subkey.to_owned()) - }; - Ok(ValueKey { key, subkey }) -} diff --git a/veilid-core/src/rpc_processor/destination.rs b/veilid-core/src/rpc_processor/destination.rs index f5c7069f..ae19a4b0 100644 --- a/veilid-core/src/rpc_processor/destination.rs +++ b/veilid-core/src/rpc_processor/destination.rs @@ -15,11 +15,11 @@ pub enum Destination { /// The relay to send to relay: NodeRef, /// The final destination the relay should send to - target: DHTKey, + target: NodeRef, /// Require safety route or not safety_selection: SafetySelection, }, - /// Send to private route (privateroute) + /// Send to private route PrivateRoute { /// A private route to send to private_route: PrivateRoute, @@ -36,7 +36,7 @@ impl Destination { safety_selection: SafetySelection::Unsafe(sequencing), } } - pub fn relay(relay: NodeRef, target: DHTKey) -> Self { + pub fn relay(relay: NodeRef, target: NodeRef) -> Self { let sequencing = relay.sequencing(); Self::Relay { relay, @@ -124,7 +124,7 @@ impl fmt::Display for Destination { "" }; - write!(f, "{}@{}{}", target.encode(), relay, sr) + write!(f, "{}@{}{}", target, relay, sr) } Destination::PrivateRoute { private_route, @@ -136,7 +136,7 @@ impl fmt::Display for Destination { "" }; - write!(f, "{}{}", private_route, sr) + write!(f, "{}{}", private_route.public_key, sr) } } } @@ -162,8 +162,9 @@ impl RPCProcessor { } SafetySelection::Safe(safety_spec) => { // Sent directly but with a safety route, respond to private route + let crypto_kind = 
target.best_node_id().kind; let Some(pr_key) = rss - .get_private_route_for_safety_spec(safety_spec, &[target.node_id()]) + .get_private_route_for_safety_spec(crypto_kind, safety_spec, &target.node_ids()) .map_err(RPCError::internal)? else { return Ok(NetworkResult::no_connection_other("no private route for response at this time")); }; @@ -187,11 +188,15 @@ impl RPCProcessor { } SafetySelection::Safe(safety_spec) => { // Sent via a relay but with a safety route, respond to private route + let crypto_kind = target.best_node_id().kind; + + let mut avoid_nodes = relay.node_ids(); + avoid_nodes.add_all(&target.node_ids()); let Some(pr_key) = rss - .get_private_route_for_safety_spec(safety_spec, &[relay.node_id(), *target]) - .map_err(RPCError::internal)? else { - return Ok(NetworkResult::no_connection_other("no private route for response at this time")); - }; + .get_private_route_for_safety_spec(crypto_kind, safety_spec, &avoid_nodes) + .map_err(RPCError::internal)? else { + return Ok(NetworkResult::no_connection_other("no private route for response at this time")); + }; // Get the assembled route for response let private_route = rss @@ -205,50 +210,50 @@ impl RPCProcessor { private_route, safety_selection, } => { + let Some(avoid_node_id) = private_route.first_hop_node_id() else { return Err(RPCError::internal("destination private route must have first hop")); }; + let crypto_kind = private_route.public_key.kind; + match safety_selection { SafetySelection::Unsafe(_) => { // Sent to a private route with no safety route, use a stub safety route for the response // Determine if we can use optimized nodeinfo - let route_node = match rss - .has_remote_private_route_seen_our_node_info(&private_route.public_key) + let route_node = if rss + .has_remote_private_route_seen_our_node_info(&private_route.public_key.value) { - true => { if !routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet) { return Ok(NetworkResult::no_connection_other("Own node info must be 
valid to use private route")); } - RouteNode::NodeId(NodeId::new(routing_table.node_id())) - } - false => { + RouteNode::NodeId(routing_table.node_id(crypto_kind).value) + } else { let Some(own_peer_info) = routing_table.get_own_peer_info(RoutingDomain::PublicInternet) else { return Ok(NetworkResult::no_connection_other("Own peer info must be valid to use private route")); }; RouteNode::PeerInfo(own_peer_info) - }, }; Ok(NetworkResult::value(RespondTo::PrivateRoute( - PrivateRoute::new_stub(routing_table.node_id(), route_node), + PrivateRoute::new_stub(routing_table.node_id(crypto_kind), route_node), ))) } SafetySelection::Safe(safety_spec) => { // Sent to a private route via a safety route, respond to private route - + // Check for loopback test - let pr_key = if safety_spec.preferred_route - == Some(private_route.public_key) + let opt_private_route_id = rss.get_route_id_for_key(&private_route.public_key.value); + let pr_key = if opt_private_route_id.is_some() && safety_spec.preferred_route == opt_private_route_id { // Private route is also safety route during loopback test - private_route.public_key + private_route.public_key.value } else { - // Get the privat route to respond to that matches the safety route spec we sent the request with + // Get the private route to respond to that matches the safety route spec we sent the request with let Some(pr_key) = rss - .get_private_route_for_safety_spec(safety_spec, &[avoid_node_id]) + .get_private_route_for_safety_spec(crypto_kind, safety_spec, &[avoid_node_id]) .map_err(RPCError::internal)? 
else { return Ok(NetworkResult::no_connection_other("no private route for response at this time")); }; @@ -296,17 +301,25 @@ impl RPCProcessor { }; // Reply directly to the request's source - let sender_id = detail.envelope.get_sender_id(); + let sender_node_id = TypedKey::new(detail.envelope.get_crypto_kind(), detail.envelope.get_sender_id()); // This may be a different node's reference than the 'sender' in the case of a relay let peer_noderef = detail.peer_noderef.clone(); // If the sender_id is that of the peer, then this is a direct reply // else it is a relayed reply through the peer - if peer_noderef.node_id() == sender_id { + if peer_noderef.node_ids().contains(&sender_node_id) { NetworkResult::value(Destination::direct(peer_noderef)) } else { - NetworkResult::value(Destination::relay(peer_noderef, sender_id)) + // Look up the sender node, we should have added it via senderNodeInfo before getting here. + if let Some(sender_noderef) = self.routing_table.lookup_node_ref(sender_node_id) { + NetworkResult::value(Destination::relay(peer_noderef, sender_noderef)) + } else { + return NetworkResult::invalid_message( + "not responding to sender that has no node info", + ); + } + } } RespondTo::PrivateRoute(pr) => { diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index eebcd017..45966de0 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -52,8 +52,10 @@ struct RPCMessageHeaderDetailDirect { /// Header details for rpc messages received over only a safety route but not a private route #[derive(Debug, Clone)] struct RPCMessageHeaderDetailSafetyRouted { + /// Direct header + direct: RPCMessageHeaderDetailDirect, /// Remote safety route used - remote_safety_route: DHTKey, + remote_safety_route: PublicKey, /// The sequencing used for this route sequencing: Sequencing, } @@ -61,10 +63,12 @@ struct RPCMessageHeaderDetailSafetyRouted { /// Header details for rpc messages received over a 
private route #[derive(Debug, Clone)] struct RPCMessageHeaderDetailPrivateRouted { + /// Direct header + direct: RPCMessageHeaderDetailDirect, /// Remote safety route used (or possibly node id the case of no safety route) - remote_safety_route: DHTKey, + remote_safety_route: PublicKey, /// The private route we received the rpc over - private_route: DHTKey, + private_route: PublicKey, // The safety spec for replying to this private routed rpc safety_spec: SafetySpec, } @@ -87,7 +91,16 @@ struct RPCMessageHeader { detail: RPCMessageHeaderDetail, } -impl RPCMessageHeader {} +impl RPCMessageHeader { + /// The crypto kind used on the RPC + pub fn crypto_kind(&self) -> CryptoKind { + match &self.detail { + RPCMessageHeaderDetail::Direct(d) => d.envelope.get_crypto_kind(), + RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.envelope.get_crypto_kind(), + RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_crypto_kind(), + } + } +} #[derive(Debug)] pub struct RPCMessageData { @@ -141,9 +154,9 @@ struct WaitableReply { node_ref: NodeRef, send_ts: Timestamp, send_data_kind: SendDataKind, - safety_route: Option, - remote_private_route: Option, - reply_private_route: Option, + safety_route: Option, + remote_private_route: Option, + reply_private_route: Option, } ///////////////////////////////////////////////////////////////////// @@ -163,38 +176,38 @@ impl Answer { struct RenderedOperation { /// The rendered operation bytes message: Vec, - /// Destination node id we're sending to - node_id: DHTKey, - /// Node to send envelope to (may not be destination node id in case of relay) + /// Destination node we're sending to + destination_node_ref: NodeRef, + /// Node to send envelope to (may not be destination node in case of relay) node_ref: NodeRef, /// Total safety + private route hop count + 1 hop for the initial send hop_count: usize, /// The safety route used to send the message - safety_route: Option, + safety_route: Option, /// The private route used to send 
the message - remote_private_route: Option, + remote_private_route: Option, /// The private route requested to receive the reply - reply_private_route: Option, + reply_private_route: Option, } /// Node information exchanged during every RPC message #[derive(Default, Debug, Clone)] -pub struct SenderSignedNodeInfo { - /// The current signed node info of the sender if required - signed_node_info: Option, +pub struct SenderPeerInfo { + /// The current peer info of the sender if required + opt_sender_peer_info: Option, /// The last timestamp of the target's node info to assist remote node with sending its latest node info target_node_info_ts: Timestamp, } -impl SenderSignedNodeInfo { - pub fn new_no_sni(target_node_info_ts: Timestamp) -> Self { +impl SenderPeerInfo { + pub fn new_no_peer_info(target_node_info_ts: Timestamp) -> Self { Self { - signed_node_info: None, + opt_sender_peer_info: None, target_node_info_ts, } } - pub fn new(sender_signed_node_info: SignedNodeInfo, target_node_info_ts: Timestamp) -> Self { + pub fn new(sender_peer_info: PeerInfo, target_node_info_ts: Timestamp) -> Self { Self { - signed_node_info: Some(sender_signed_node_info), + opt_sender_peer_info: Some(sender_peer_info), target_node_info_ts, } } @@ -371,7 +384,7 @@ impl RPCProcessor { /// If no node was found in the timeout, this returns None pub async fn search_dht_single_key( &self, - _node_id: DHTKey, + _node_id: PublicKey, _count: u32, _fanout: u32, _timeout: Option, @@ -386,7 +399,7 @@ impl RPCProcessor { /// Search the DHT for the 'count' closest nodes to a key, adding them all to the routing table if they are not there and returning their node references pub async fn search_dht_multi_key( &self, - _node_id: DHTKey, + _node_id: PublicKey, _count: u32, _fanout: u32, _timeout: Option, @@ -399,14 +412,14 @@ impl RPCProcessor { /// Note: This routine can possible be recursive, hence the SendPinBoxFuture async form pub fn resolve_node( &self, - node_id: DHTKey, + node_id: PublicKey, ) -> 
SendPinBoxFuture, RPCError>> { let this = self.clone(); Box::pin(async move { let routing_table = this.routing_table(); // First see if we have the node in our routing table already - if let Some(nr) = routing_table.lookup_node_ref(node_id) { + if let Some(nr) = routing_table.lookup_any_node_ref(node_id) { // ensure we have some dial info for the entry already, // if not, we should do the find_node anyway if nr.has_any_dial_info() { @@ -429,7 +442,7 @@ impl RPCProcessor { .await?; if let Some(nr) = &nr { - if nr.node_id() != node_id { + if nr.node_ids().contains_key(&node_id) { // found a close node, but not exact within our configured resolve_node timeout return Ok(None); } @@ -483,14 +496,20 @@ impl RPCProcessor { &self, safety_selection: SafetySelection, remote_private_route: PrivateRoute, - reply_private_route: Option, + reply_private_route: Option, message_data: Vec, ) -> Result, RPCError> { let routing_table = self.routing_table(); let rss = routing_table.route_spec_store(); + + // Get useful private route properties let pr_is_stub = remote_private_route.is_stub(); let pr_hop_count = remote_private_route.hop_count; - let pr_pubkey = remote_private_route.public_key; + let pr_pubkey = remote_private_route.public_key.value; + let crypto_kind = remote_private_route.crypto_kind(); + let Some(vcrypto) = self.crypto.get(crypto_kind) else { + return Err(RPCError::internal("crypto not available for selected private route")); + }; // Compile the safety route with the private route let compiled_route: CompiledRoute = match rss @@ -505,27 +524,21 @@ impl RPCProcessor { } }; let sr_is_stub = compiled_route.safety_route.is_stub(); - let sr_pubkey = compiled_route.safety_route.public_key; + let sr_pubkey = compiled_route.safety_route.public_key.value; // Encrypt routed operation // Xmsg + ENC(Xmsg, DH(PKapr, SKbsr)) - // xxx use factory method, get version from somewhere... 
- let nonce = Crypto::get_random_nonce(); - let dh_secret = self - .crypto + let nonce = vcrypto.random_nonce(); + let dh_secret = vcrypto .cached_dh(&pr_pubkey, &compiled_route.secret) .map_err(RPCError::map_internal("dh failed"))?; - let enc_msg_data = Crypto::encrypt_aead(&message_data, &nonce, &dh_secret, None) + let enc_msg_data = vcrypto + .encrypt_aead(&message_data, &nonce, &dh_secret, None) .map_err(RPCError::map_internal("encryption failed"))?; // Make the routed operation - // xxx: replace MAX_CRYPTO_VERSION with the version from the factory - let operation = RoutedOperation::new( - MAX_CRYPTO_VERSION, - safety_selection.get_sequencing(), - nonce, - enc_msg_data, - ); + let operation = + RoutedOperation::new(safety_selection.get_sequencing(), nonce, enc_msg_data); // Prepare route operation let sr_hop_count = compiled_route.safety_route.hop_count; @@ -533,8 +546,8 @@ impl RPCProcessor { safety_route: compiled_route.safety_route, operation, }; - let ssni_route = self - .get_sender_signed_node_info(&Destination::direct(compiled_route.first_hop.clone()))?; + let ssni_route = + self.get_sender_peer_info(&Destination::direct(compiled_route.first_hop.clone())); let operation = RPCOperation::new_statement( RPCStatement::new(RPCStatementDetail::Route(route_operation)), ssni_route, @@ -547,12 +560,11 @@ impl RPCProcessor { let out_message = builder_to_vec(route_msg)?; // Get the first hop this is going to - let out_node_id = compiled_route.first_hop.node_id(); let out_hop_count = (1 + sr_hop_count + pr_hop_count) as usize; let out = RenderedOperation { message: out_message, - node_id: out_node_id, + destination_node_ref: compiled_route.first_hop.clone(), node_ref: compiled_route.first_hop, hop_count: out_hop_count, safety_route: if sr_is_stub { None } else { Some(sr_pubkey) }, @@ -587,7 +599,7 @@ impl RPCProcessor { let reply_private_route = match operation.kind() { RPCOperationKind::Question(q) => match q.respond_to() { RespondTo::Sender => None, - 
RespondTo::PrivateRoute(pr) => Some(pr.public_key), + RespondTo::PrivateRoute(pr) => Some(pr.public_key.value), }, RPCOperationKind::Statement(_) | RPCOperationKind::Answer(_) => None, }; @@ -607,16 +619,15 @@ impl RPCProcessor { // -------------------------------------- // Get the actual destination node id accounting for relays - let (node_ref, node_id) = if let Destination::Relay { + let (node_ref, destination_node_ref) = if let Destination::Relay { relay: _, - target: ref dht_key, + ref target, safety_selection: _, } = dest { - (node_ref.clone(), dht_key.clone()) + (node_ref.clone(), target.clone()) } else { - let node_id = node_ref.node_id(); - (node_ref.clone(), node_id) + (node_ref.clone(), node_ref.clone()) }; // Handle the existence of safety route @@ -635,7 +646,7 @@ impl RPCProcessor { // route, we can use a direct envelope instead of routing out = NetworkResult::value(RenderedOperation { message, - node_id, + destination_node_ref, node_ref, hop_count: 1, safety_route: None, @@ -646,7 +657,9 @@ impl RPCProcessor { SafetySelection::Safe(_) => { // No private route was specified for the request // but we are using a safety route, so we must create an empty private route - let peer_info = match node_ref.make_peer_info(RoutingDomain::PublicInternet) + // Destination relay is ignored for safety routed operations + let peer_info = match destination_node_ref + .make_peer_info(RoutingDomain::PublicInternet) { None => { return Ok(NetworkResult::no_connection_other( @@ -655,8 +668,10 @@ impl RPCProcessor { } Some(pi) => pi, }; - let private_route = - PrivateRoute::new_stub(node_id, RouteNode::PeerInfo(peer_info)); + let private_route = PrivateRoute::new_stub( + destination_node_ref.best_node_id(), + RouteNode::PeerInfo(peer_info), + ); // Wrap with safety route out = self.wrap_with_route( @@ -689,18 +704,17 @@ impl RPCProcessor { /// Get signed node info to package with RPC messages to improve /// routing table caching when it is okay to do so - #[instrument(level 
= "trace", skip(self), ret, err)] - fn get_sender_signed_node_info( - &self, - dest: &Destination, - ) -> Result { + /// Also check target's timestamp of our own node info, to see if we should send that + /// And send our timestamp of the target's node info so they can determine if they should update us on their next rpc + #[instrument(level = "trace", skip(self), ret)] + fn get_sender_peer_info(&self, dest: &Destination) -> SenderPeerInfo { // Don't do this if the sender is to remain private // Otherwise we would be attaching the original sender's identity to the final destination, // thus defeating the purpose of the safety route entirely :P match dest.get_safety_selection() { SafetySelection::Unsafe(_) => {} SafetySelection::Safe(_) => { - return Ok(SenderSignedNodeInfo::default()); + return SenderPeerInfo::default(); } } @@ -715,25 +729,19 @@ impl RPCProcessor { relay: _, target, safety_selection: _, - } => { - if let Some(target) = routing_table.lookup_node_ref(*target) { - target - } else { - // Target was not in our routing table - return Ok(SenderSignedNodeInfo::default()); - } - } + } => target.clone(), Destination::PrivateRoute { private_route: _, safety_selection: _, } => { - return Ok(SenderSignedNodeInfo::default()); + return SenderPeerInfo::default(); } }; let Some(routing_domain) = target.best_routing_domain() else { - // No routing domain for target? 
- return Err(RPCError::internal(format!("No routing domain for target: {}", target))); + // No routing domain for target, no node info + // Only a stale connection or no connection exists + return SenderPeerInfo::default(); }; // Get the target's node info timestamp @@ -741,7 +749,7 @@ impl RPCProcessor { // Don't return our node info if it's not valid yet let Some(own_peer_info) = routing_table.get_own_peer_info(routing_domain) else { - return Ok(SenderSignedNodeInfo::new_no_sni(target_node_info_ts)); + return SenderPeerInfo::new_no_peer_info(target_node_info_ts); }; // Get our node info timestamp @@ -749,13 +757,10 @@ impl RPCProcessor { // If the target has seen our node info already don't send it again if target.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) { - return Ok(SenderSignedNodeInfo::new_no_sni(target_node_info_ts)); + return SenderPeerInfo::new_no_peer_info(target_node_info_ts); } - Ok(SenderSignedNodeInfo::new( - own_peer_info.signed_node_info, - target_node_info_ts, - )) + SenderPeerInfo::new(own_peer_info, target_node_info_ts) } /// Record failure to send to node or route @@ -764,8 +769,8 @@ impl RPCProcessor { rpc_kind: RPCKind, send_ts: Timestamp, node_ref: NodeRef, - safety_route: Option, - remote_private_route: Option, + safety_route: Option, + remote_private_route: Option, ) { let wants_answer = matches!(rpc_kind, RPCKind::Question); @@ -793,9 +798,9 @@ impl RPCProcessor { &self, send_ts: Timestamp, node_ref: NodeRef, - safety_route: Option, - remote_private_route: Option, - private_route: Option, + safety_route: Option, + remote_private_route: Option, + private_route: Option, ) { // Record for node if this was not sent via a route if safety_route.is_none() && remote_private_route.is_none() { @@ -833,8 +838,8 @@ impl RPCProcessor { send_ts: Timestamp, bytes: ByteCount, node_ref: NodeRef, - safety_route: Option, - remote_private_route: Option, + safety_route: Option, + remote_private_route: Option, ) { let wants_answer = 
matches!(rpc_kind, RPCKind::Question); @@ -870,9 +875,9 @@ impl RPCProcessor { recv_ts: Timestamp, bytes: ByteCount, node_ref: NodeRef, - safety_route: Option, - remote_private_route: Option, - reply_private_route: Option, + safety_route: Option, + remote_private_route: Option, + reply_private_route: Option, ) { // Record stats for remote node if this was direct if safety_route.is_none() && remote_private_route.is_none() && reply_private_route.is_none() @@ -999,11 +1004,11 @@ impl RPCProcessor { dest: Destination, question: RPCQuestion, ) -> Result, RPCError> { - // Get sender signed node info if we should send that - let ssni = self.get_sender_signed_node_info(&dest)?; + // Get sender peer info if we should send that + let spi = self.get_sender_peer_info(&dest); // Wrap question in operation - let operation = RPCOperation::new_question(question, ssni); + let operation = RPCOperation::new_question(question, spi); let op_id = operation.op_id(); // Log rpc send @@ -1012,7 +1017,7 @@ impl RPCProcessor { // Produce rendered operation let RenderedOperation { message, - node_id, + destination_node_ref, node_ref, hop_count, safety_route, @@ -1032,7 +1037,7 @@ impl RPCProcessor { let send_ts = get_aligned_timestamp(); let send_data_kind = network_result_try!(self .network_manager() - .send_envelope(node_ref.clone(), Some(node_id), message) + .send_envelope(node_ref.clone(), Some(destination_node_ref), message) .await .map_err(|e| { // If we're returning an error, clean up @@ -1074,11 +1079,11 @@ impl RPCProcessor { dest: Destination, statement: RPCStatement, ) -> Result, RPCError> { - // Get sender signed node info if we should send that - let ssni = self.get_sender_signed_node_info(&dest)?; + // Get sender peer info if we should send that + let spi = self.get_sender_peer_info(&dest); // Wrap statement in operation - let operation = RPCOperation::new_statement(statement, ssni); + let operation = RPCOperation::new_statement(statement, spi); // Log rpc send trace!(target: 
"rpc_message", dir = "send", kind = "statement", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest); @@ -1086,7 +1091,7 @@ impl RPCProcessor { // Produce rendered operation let RenderedOperation { message, - node_id, + destination_node_ref, node_ref, hop_count: _, safety_route, @@ -1099,7 +1104,7 @@ impl RPCProcessor { let send_ts = get_aligned_timestamp(); let _send_data_kind = network_result_try!(self .network_manager() - .send_envelope(node_ref.clone(), Some(node_id), message) + .send_envelope(node_ref.clone(), Some(destination_node_ref), message) .await .map_err(|e| { // If we're returning an error, clean up @@ -1136,10 +1141,10 @@ impl RPCProcessor { let dest = network_result_try!(self.get_respond_to_destination(&request)); // Get sender signed node info if we should send that - let ssni = self.get_sender_signed_node_info(&dest)?; + let spi = self.get_sender_peer_info(&dest); // Wrap answer in operation - let operation = RPCOperation::new_answer(&request.operation, answer, ssni); + let operation = RPCOperation::new_answer(&request.operation, answer, spi); // Log rpc send trace!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest); @@ -1147,7 +1152,7 @@ impl RPCProcessor { // Produce rendered operation let RenderedOperation { message, - node_id, + destination_node_ref, node_ref, hop_count: _, safety_route, @@ -1159,7 +1164,7 @@ impl RPCProcessor { let bytes: ByteCount = (message.len() as u64).into(); let send_ts = get_aligned_timestamp(); network_result_try!(self.network_manager() - .send_envelope(node_ref.clone(), Some(node_id), message) + .send_envelope(node_ref.clone(), Some(destination_node_ref), message) .await .map_err(|e| { // If we're returning an error, clean up @@ -1197,7 +1202,10 @@ impl RPCProcessor { let routing_domain = detail.routing_domain; // Decode the operation - let sender_node_id = detail.envelope.get_sender_id(); + let sender_node_id = 
TypedKey::new( + detail.envelope.get_crypto_kind(), + detail.envelope.get_sender_id(), + ); // Decode the RPC message let operation = { @@ -1206,22 +1214,23 @@ impl RPCProcessor { .get_root::() .map_err(RPCError::protocol) .map_err(logthru_rpc!())?; - RPCOperation::decode(&op_reader, Some(&sender_node_id))? + RPCOperation::decode(&op_reader, self.crypto.clone())? }; - // Get the sender noderef, incorporating and 'sender node info' + // Get the sender noderef, incorporating sender's peer info let mut opt_sender_nr: Option = None; - if let Some(sender_node_info) = operation.sender_node_info() { - // Sender NodeInfo was specified, update our routing table with it - if !self.filter_node_info(routing_domain, &sender_node_info) { + if let Some(sender_peer_info) = operation.sender_peer_info() { + // Ensure the sender peer info is for the actual sender specified in the envelope + + // Sender PeerInfo was specified, update our routing table with it + if !self.filter_node_info(routing_domain, &sender_peer_info.signed_node_info) { return Err(RPCError::invalid_format( - "sender signednodeinfo has invalid peer scope", + "sender peerinfo has invalid peer scope", )); } - opt_sender_nr = self.routing_table().register_node_with_signed_node_info( + opt_sender_nr = self.routing_table().register_node_with_peer_info( routing_domain, - sender_node_id, - sender_node_info.clone(), + sender_peer_info.clone(), false, ); } @@ -1252,7 +1261,7 @@ impl RPCProcessor { .get_root::() .map_err(RPCError::protocol) .map_err(logthru_rpc!())?; - RPCOperation::decode(&op_reader, None)? + RPCOperation::decode(&op_reader, self.crypto.clone())? 
}; // Make the RPC message @@ -1386,15 +1395,17 @@ impl RPCProcessor { } #[instrument(level = "trace", skip(self, body), err)] - pub fn enqueue_safety_routed_message( + fn enqueue_safety_routed_message( &self, - remote_safety_route: DHTKey, + direct: RPCMessageHeaderDetailDirect, + remote_safety_route: PublicKey, sequencing: Sequencing, body: Vec, ) -> EyreResult<()> { let msg = RPCMessageEncoded { header: RPCMessageHeader { detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted { + direct, remote_safety_route, sequencing, }), @@ -1415,10 +1426,11 @@ impl RPCProcessor { } #[instrument(level = "trace", skip(self, body), err)] - pub fn enqueue_private_routed_message( + fn enqueue_private_routed_message( &self, - remote_safety_route: DHTKey, - private_route: DHTKey, + direct: RPCMessageHeaderDetailDirect, + remote_safety_route: PublicKey, + private_route: PublicKey, safety_spec: SafetySpec, body: Vec, ) -> EyreResult<()> { @@ -1426,6 +1438,7 @@ impl RPCProcessor { header: RPCMessageHeader { detail: RPCMessageHeaderDetail::PrivateRouted( RPCMessageHeaderDetailPrivateRouted { + direct, remote_safety_route, private_route, safety_spec, diff --git a/veilid-core/src/rpc_processor/rpc_app_call.rs b/veilid-core/src/rpc_processor/rpc_app_call.rs index b0506be9..4d6b78c4 100644 --- a/veilid-core/src/rpc_processor/rpc_app_call.rs +++ b/veilid-core/src/rpc_processor/rpc_app_call.rs @@ -53,15 +53,20 @@ impl RPCProcessor { _ => panic!("not a question"), }; + // Get the crypto kind used to send this question + let crypto_kind = msg.header.crypto_kind(); + + // Get the sender node id this came from + let sender = msg + .opt_sender_nr + .as_ref() + .map(|nr| nr.node_ids().get(crypto_kind).unwrap().value); + // Register a waiter for this app call let id = msg.operation.op_id(); let handle = self.unlocked_inner.waiting_app_call_table.add_op_waiter(id); // Pass the call up through the update callback - let sender = msg - .opt_sender_nr - .as_ref() - .map(|nr| 
NodeId::new(nr.node_id())); let message = app_call_q.message.clone(); (self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(VeilidAppCall { sender, diff --git a/veilid-core/src/rpc_processor/rpc_app_message.rs b/veilid-core/src/rpc_processor/rpc_app_message.rs index 6793803f..0150a205 100644 --- a/veilid-core/src/rpc_processor/rpc_app_message.rs +++ b/veilid-core/src/rpc_processor/rpc_app_message.rs @@ -30,8 +30,16 @@ impl RPCProcessor { _ => panic!("not a statement"), }; + // Get the crypto kind used to send this question + let crypto_kind = msg.header.crypto_kind(); + + // Get the sender node id this came from + let sender = msg + .opt_sender_nr + .as_ref() + .map(|nr| nr.node_ids().get(crypto_kind).unwrap().value); + // Pass the message up through the update callback - let sender = msg.opt_sender_nr.map(|nr| NodeId::new(nr.node_id())); let message = app_message.message; (self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(VeilidAppMessage { sender, diff --git a/veilid-core/src/rpc_processor/rpc_find_node.rs b/veilid-core/src/rpc_processor/rpc_find_node.rs index 098c9714..a3a5af6e 100644 --- a/veilid-core/src/rpc_processor/rpc_find_node.rs +++ b/veilid-core/src/rpc_processor/rpc_find_node.rs @@ -11,7 +11,7 @@ impl RPCProcessor { pub async fn rpc_call_find_node( self, dest: Destination, - key: DHTKey, + node_id: TypedKey, ) -> Result>>, RPCError> { // Ensure destination never has a private route if matches!( @@ -26,8 +26,7 @@ impl RPCProcessor { )); } - let find_node_q_detail = - RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ { node_id: key }); + let find_node_q_detail = RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ { node_id }); let find_node_q = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), find_node_q_detail, @@ -98,20 +97,30 @@ impl RPCProcessor { }; // find N nodes closest to the target node in our routing table - let filter = Box::new( - move |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { - 
rti.filter_has_valid_signed_node_info(RoutingDomain::PublicInternet, true, v) + move |rti: &RoutingTableInner, opt_entry: Option>| { + // Ensure only things that are valid/signed in the PublicInternet domain are returned + rti.filter_has_valid_signed_node_info( + RoutingDomain::PublicInternet, + true, + opt_entry, + ) }, ) as RoutingTableEntryFilter; let filters = VecDeque::from([filter]); + let node_count = { + let c = self.config.get(); + c.network.dht.max_find_node_count as usize + }; + let closest_nodes = routing_table.find_closest_nodes( + node_count, find_node_q.node_id, filters, // transform - |rti, k, v| { - rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, k, v) + |rti, entry| { + rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, entry) }, ); diff --git a/veilid-core/src/rpc_processor/rpc_route.rs b/veilid-core/src/rpc_processor/rpc_route.rs index ad5c8164..5ba33922 100644 --- a/veilid-core/src/rpc_processor/rpc_route.rs +++ b/veilid-core/src/rpc_processor/rpc_route.rs @@ -26,31 +26,11 @@ impl RPCProcessor { } // Get next hop node ref - let mut next_hop_nr = match route_hop.node { - RouteNode::NodeId(id) => { - // - let Some(nr) = self.routing_table.lookup_node_ref(id.key) else { - return Ok(NetworkResult::invalid_message(format!("node hop {} not found", id.key))); - }; - nr - } - RouteNode::PeerInfo(pi) => { - // - let Some(nr) = self.routing_table - .register_node_with_signed_node_info( - RoutingDomain::PublicInternet, - pi.node_id.key, - pi.signed_node_info, - false, - ) else - { - return Ok(NetworkResult::invalid_message(format!( - "node hop {} could not be registered", - pi.node_id.key - ))); - }; - nr - } + let Some(mut next_hop_nr) = route_hop.node.node_ref(self.routing_table.clone(), safety_route.public_key.kind) else { + return Err(RPCError::network(format!( + "could not get route node hop ref: {}", + route_hop.node.describe(safety_route.public_key.kind) + ))); }; // Apply sequencing preference @@ 
-77,7 +57,7 @@ impl RPCProcessor { &self, routed_operation: RoutedOperation, next_route_node: RouteNode, - safety_route_public_key: DHTKey, + safety_route_public_key: TypedKey, next_private_route: PrivateRoute, ) -> Result, RPCError> { // Make sure hop count makes sense @@ -88,30 +68,12 @@ impl RPCProcessor { } // Get next hop node ref - let mut next_hop_nr = match &next_route_node { - RouteNode::NodeId(id) => { - // - self.routing_table - .lookup_node_ref(id.key) - .ok_or_else(|| RPCError::network(format!("node hop {} not found", id.key))) - } - RouteNode::PeerInfo(pi) => { - // - self.routing_table - .register_node_with_signed_node_info( - RoutingDomain::PublicInternet, - pi.node_id.key, - pi.signed_node_info.clone(), - false, - ) - .ok_or_else(|| { - RPCError::network(format!( - "node hop {} could not be registered", - pi.node_id.key - )) - }) - } - }?; + let Some(mut next_hop_nr) = next_route_node.node_ref(self.routing_table.clone(), safety_route_public_key.kind) else { + return Err(RPCError::network(format!( + "could not get route node hop ref: {}", + next_route_node.describe(safety_route_public_key.kind) + ))); + }; // Apply sequencing preference next_hop_nr.set_sequencing(routed_operation.sequencing); @@ -140,19 +102,18 @@ impl RPCProcessor { #[instrument(level = "trace", skip_all, err)] fn process_safety_routed_operation( &self, - _detail: RPCMessageHeaderDetailDirect, + detail: RPCMessageHeaderDetailDirect, + vcrypto: CryptoSystemVersion, routed_operation: RoutedOperation, - remote_sr_pubkey: DHTKey, + remote_sr_pubkey: TypedKey, ) -> Result, RPCError> { - // Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret) // xxx: punish nodes that send messages that fail to decrypt eventually? How to do this for safety routes? 
- let node_id_secret = self.routing_table.node_id_secret(); - let dh_secret = self - .crypto - .cached_dh(&remote_sr_pubkey, &node_id_secret) + let node_id_secret = self.routing_table.node_id_secret_key(remote_sr_pubkey.kind); + let dh_secret = vcrypto + .cached_dh(&remote_sr_pubkey.value, &node_id_secret) .map_err(RPCError::protocol)?; - let body = match Crypto::decrypt_aead( + let body = match vcrypto.decrypt_aead( &routed_operation.data, &routed_operation.nonce, &dh_secret, @@ -160,13 +121,21 @@ impl RPCProcessor { ) { Ok(v) => v, Err(e) => { - return Ok(NetworkResult::invalid_message(format!("decryption of routed operation failed: {}", e))); + return Ok(NetworkResult::invalid_message(format!( + "decryption of routed operation failed: {}", + e + ))); } }; - + // Pass message to RPC system - self.enqueue_safety_routed_message(remote_sr_pubkey, routed_operation.sequencing, body) - .map_err(RPCError::internal)?; + self.enqueue_safety_routed_message( + detail, + remote_sr_pubkey.value, + routed_operation.sequencing, + body, + ) + .map_err(RPCError::internal)?; Ok(NetworkResult::value(())) } @@ -176,9 +145,10 @@ impl RPCProcessor { fn process_private_routed_operation( &self, detail: RPCMessageHeaderDetailDirect, + vcrypto: CryptoSystemVersion, routed_operation: RoutedOperation, - remote_sr_pubkey: DHTKey, - pr_pubkey: DHTKey, + remote_sr_pubkey: TypedKey, + pr_pubkey: TypedKey, ) -> Result, RPCError> { // Get sender id let sender_id = detail.envelope.get_sender_id(); @@ -186,19 +156,20 @@ impl RPCProcessor { // Look up the private route and ensure it's one in our spec store // Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences let rss = self.routing_table.route_spec_store(); + let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value); let Some((secret_key, safety_spec)) = rss .with_signature_validated_route( &pr_pubkey, &routed_operation.signatures, &routed_operation.data, sender_id, - |rsd| { + |rssd, rsd| { 
( - rsd.get_secret_key(), + rsd.secret_key, SafetySpec { - preferred_route: Some(pr_pubkey), - hop_count: rsd.hop_count(), - stability: rsd.get_stability(), + preferred_route, + hop_count: rssd.hop_count(), + stability: rssd.get_stability(), sequencing: routed_operation.sequencing, }, ) @@ -208,26 +179,31 @@ impl RPCProcessor { return Ok(NetworkResult::invalid_message("signatures did not validate for private route")); }; - // Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret) // xxx: punish nodes that send messages that fail to decrypt eventually. How to do this for private routes? - let dh_secret = self - .crypto - .cached_dh(&remote_sr_pubkey, &secret_key) + let dh_secret = vcrypto + .cached_dh(&remote_sr_pubkey.value, &secret_key) .map_err(RPCError::protocol)?; - let body = Crypto::decrypt_aead( - &routed_operation.data, - &routed_operation.nonce, - &dh_secret, - None, - ) - .map_err(RPCError::map_internal( - "decryption of routed operation failed", - ))?; + let body = vcrypto + .decrypt_aead( + &routed_operation.data, + &routed_operation.nonce, + &dh_secret, + None, + ) + .map_err(RPCError::map_internal( + "decryption of routed operation failed", + ))?; // Pass message to RPC system - self.enqueue_private_routed_message(remote_sr_pubkey, pr_pubkey, safety_spec, body) - .map_err(RPCError::internal)?; + self.enqueue_private_routed_message( + detail, + remote_sr_pubkey.value, + pr_pubkey.value, + safety_spec, + body, + ) + .map_err(RPCError::internal)?; Ok(NetworkResult::value(())) } @@ -236,20 +212,26 @@ impl RPCProcessor { fn process_routed_operation( &self, detail: RPCMessageHeaderDetailDirect, + vcrypto: CryptoSystemVersion, routed_operation: RoutedOperation, - remote_sr_pubkey: DHTKey, - pr_pubkey: DHTKey, + remote_sr_pubkey: TypedKey, + pr_pubkey: TypedKey, ) -> Result, RPCError> { - // If the private route public key is our node id, then this was sent via safety route to our node 
directly // so there will be no signatures to validate - if pr_pubkey == self.routing_table.node_id() { + if self.routing_table.node_ids().contains(&pr_pubkey) { // The private route was a stub - self.process_safety_routed_operation(detail, routed_operation, remote_sr_pubkey) + self.process_safety_routed_operation( + detail, + vcrypto, + routed_operation, + remote_sr_pubkey, + ) } else { // Both safety and private routes used, should reply with a safety route self.process_private_routed_operation( detail, + vcrypto, routed_operation, remote_sr_pubkey, pr_pubkey, @@ -260,7 +242,7 @@ impl RPCProcessor { pub(crate) async fn process_private_route_first_hop( &self, mut routed_operation: RoutedOperation, - sr_pubkey: DHTKey, + sr_pubkey: TypedKey, mut private_route: PrivateRoute, ) -> Result, RPCError> { let Some(pr_first_hop) = private_route.pop_first_hop() else { @@ -275,8 +257,12 @@ impl RPCProcessor { }; // Decrypt route hop data - let route_hop = network_result_try!(self.decrypt_private_route_hop_data(&route_hop_data, &private_route.public_key, &mut routed_operation)?); - + let route_hop = network_result_try!(self.decrypt_private_route_hop_data( + &route_hop_data, + &private_route.public_key, + &mut routed_operation + )?); + // Ensure hop count > 0 if private_route.hop_count == 0 { return Ok(NetworkResult::invalid_message( @@ -285,20 +271,21 @@ impl RPCProcessor { } // Make next PrivateRoute and pass it on - return self.process_route_private_route_hop( - routed_operation, - route_hop.node, - sr_pubkey, - PrivateRoute { - public_key: private_route.public_key, - hop_count: private_route.hop_count - 1, - hops: route_hop - .next_hop - .map(|rhd| PrivateRouteHops::Data(rhd)) - .unwrap_or(PrivateRouteHops::Empty), - }, - ) - .await; + return self + .process_route_private_route_hop( + routed_operation, + route_hop.node, + sr_pubkey, + PrivateRoute { + public_key: private_route.public_key, + hop_count: private_route.hop_count - 1, + hops: route_hop + .next_hop + .map(|rhd| 
PrivateRouteHops::Data(rhd)) + .unwrap_or(PrivateRouteHops::Empty), + }, + ) + .await; } // Switching to private route from safety route @@ -312,15 +299,26 @@ impl RPCProcessor { } /// Decrypt route hop data and sign routed operation - pub(crate) fn decrypt_private_route_hop_data(&self, route_hop_data: &RouteHopData, pr_pubkey: &DHTKey, route_operation: &mut RoutedOperation) -> Result, RPCError> - { + pub(crate) fn decrypt_private_route_hop_data( + &self, + route_hop_data: &RouteHopData, + pr_pubkey: &TypedKey, + route_operation: &mut RoutedOperation, + ) -> Result, RPCError> { + // Get crypto kind + let crypto_kind = pr_pubkey.kind; + let Some(vcrypto) = self.crypto.get(crypto_kind) else { + return Ok(NetworkResult::invalid_message( + "private route hop data crypto is not supported", + )); + }; + // Decrypt the blob with DEC(nonce, DH(the PR's public key, this hop's secret) - let node_id_secret = self.routing_table.node_id_secret(); - let dh_secret = self - .crypto - .cached_dh(&pr_pubkey, &node_id_secret) + let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind); + let dh_secret = vcrypto + .cached_dh(&pr_pubkey.value, &node_id_secret) .map_err(RPCError::protocol)?; - let dec_blob_data = match Crypto::decrypt_aead( + let dec_blob_data = match vcrypto.decrypt_aead( &route_hop_data.blob, &route_hop_data.nonce, &dh_secret, @@ -328,7 +326,10 @@ impl RPCProcessor { ) { Ok(v) => v, Err(e) => { - return Ok(NetworkResult::invalid_message(format!("unable to decrypt private route hop data: {}", e))); + return Ok(NetworkResult::invalid_message(format!( + "unable to decrypt private route hop data: {}", + e + ))); } }; let dec_blob_reader = RPCMessageData::new(dec_blob_data).get_reader()?; @@ -338,15 +339,16 @@ impl RPCProcessor { let rh_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_route_hop(&rh_reader)? + decode_route_hop(&rh_reader, self.crypto.clone())? 
}; // Sign the operation if this is not our last hop // as the last hop is already signed by the envelope if route_hop.next_hop.is_some() { - let node_id = self.routing_table.node_id(); - let node_id_secret = self.routing_table.node_id_secret(); - let sig = sign(&node_id, &node_id_secret, &route_operation.data) + let node_id = self.routing_table.node_id(crypto_kind); + let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind); + let sig = vcrypto + .sign(&node_id.value, &node_id_secret, &route_operation.data) .map_err(RPCError::internal)?; route_operation.signatures.push(sig); } @@ -378,25 +380,30 @@ impl RPCProcessor { _ => panic!("not a statement"), }; - // Process routed operation version - // xxx switch this to a Crypto trait factory method per issue#140 - if route.operation.version != MAX_CRYPTO_VERSION { + // Get crypto kind + let crypto_kind = route.safety_route.crypto_kind(); + let Some(vcrypto) = self.crypto.get(crypto_kind) else { return Ok(NetworkResult::invalid_message( - "routes operation crypto is not valid version", + "routed operation crypto is not supported", )); - } + }; // See what kind of safety route we have going on here match route.safety_route.hops { // There is a safety route hop SafetyRouteHops::Data(ref route_hop_data) => { // Decrypt the blob with DEC(nonce, DH(the SR's public key, this hop's secret) - let node_id_secret = self.routing_table.node_id_secret(); - let dh_secret = self - .crypto - .cached_dh(&route.safety_route.public_key, &node_id_secret) + let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind); + let dh_secret = vcrypto + .cached_dh(&route.safety_route.public_key.value, &node_id_secret) .map_err(RPCError::protocol)?; - let mut dec_blob_data = Crypto::decrypt_aead(&route_hop_data.blob, &route_hop_data.nonce, &dh_secret, None) + let mut dec_blob_data = vcrypto + .decrypt_aead( + &route_hop_data.blob, + &route_hop_data.nonce, + &dh_secret, + None, + ) .map_err(RPCError::protocol)?; // See if 
this is last hop in safety route, if so, we're decoding a PrivateRoute not a RouteHop @@ -413,28 +420,36 @@ impl RPCProcessor { let pr_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_private_route(&pr_reader)? + decode_private_route(&pr_reader, self.crypto.clone())? }; // Switching from full safety route to private route first hop - network_result_try!(self.process_private_route_first_hop( - route.operation, - route.safety_route.public_key, - private_route, - ) - .await?); + network_result_try!( + self.process_private_route_first_hop( + route.operation, + route.safety_route.public_key, + private_route, + ) + .await? + ); } else if dec_blob_tag == 0 { // RouteHop let route_hop = { let rh_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_route_hop(&rh_reader)? + decode_route_hop(&rh_reader, self.crypto.clone())? }; // Continue the full safety route with another hop - network_result_try!(self.process_route_safety_route_hop(route.operation, route_hop, route.safety_route) - .await?); + network_result_try!( + self.process_route_safety_route_hop( + route.operation, + route_hop, + route.safety_route + ) + .await? + ); } else { return Ok(NetworkResult::invalid_message("invalid blob tag")); } @@ -445,18 +460,23 @@ impl RPCProcessor { match private_route.hops { PrivateRouteHops::FirstHop(_) => { // Safety route was a stub, start with the beginning of the private route - network_result_try!(self.process_private_route_first_hop( - route.operation, - route.safety_route.public_key, - private_route, - ) - .await?); + network_result_try!( + self.process_private_route_first_hop( + route.operation, + route.safety_route.public_key, + private_route, + ) + .await? 
+ ); } PrivateRouteHops::Data(route_hop_data) => { - // Decrypt route hop data - let route_hop = network_result_try!(self.decrypt_private_route_hop_data(&route_hop_data, &private_route.public_key, &mut route.operation)?); - + let route_hop = network_result_try!(self.decrypt_private_route_hop_data( + &route_hop_data, + &private_route.public_key, + &mut route.operation + )?); + // Ensure hop count > 0 if private_route.hop_count == 0 { return Ok(NetworkResult::invalid_message( @@ -465,20 +485,22 @@ impl RPCProcessor { } // Make next PrivateRoute and pass it on - network_result_try!(self.process_route_private_route_hop( - route.operation, - route_hop.node, - route.safety_route.public_key, - PrivateRoute { - public_key: private_route.public_key, - hop_count: private_route.hop_count - 1, - hops: route_hop - .next_hop - .map(|rhd| PrivateRouteHops::Data(rhd)) - .unwrap_or(PrivateRouteHops::Empty), - }, - ) - .await?); + network_result_try!( + self.process_route_private_route_hop( + route.operation, + route_hop.node, + route.safety_route.public_key, + PrivateRoute { + public_key: private_route.public_key, + hop_count: private_route.hop_count - 1, + hops: route_hop + .next_hop + .map(|rhd| PrivateRouteHops::Data(rhd)) + .unwrap_or(PrivateRouteHops::Empty), + }, + ) + .await? 
+ ); } PrivateRouteHops::Empty => { // Ensure hop count == 0 @@ -496,6 +518,7 @@ impl RPCProcessor { // No hops left, time to process the routed operation network_result_try!(self.process_routed_operation( detail, + vcrypto, route.operation, route.safety_route.public_key, private_route.public_key, diff --git a/veilid-core/src/rpc_processor/rpc_status.rs b/veilid-core/src/rpc_processor/rpc_status.rs index ef03f631..2335100a 100644 --- a/veilid-core/src/rpc_processor/rpc_status.rs +++ b/veilid-core/src/rpc_processor/rpc_status.rs @@ -42,7 +42,6 @@ impl RPCProcessor { target, safety_selection: _, } => { - let opt_target_nr = self.routing_table.lookup_node_ref(*target); let routing_domain = match relay.best_routing_domain() { Some(rd) => rd, None => { @@ -51,7 +50,7 @@ impl RPCProcessor { )) } }; - (opt_target_nr, routing_domain) + (Some(target.clone()), routing_domain) } Destination::PrivateRoute { private_route: _, diff --git a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs index df1a58a2..7535779e 100644 --- a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs +++ b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs @@ -88,7 +88,10 @@ impl RPCProcessor { // Use the address type though, to ensure we reach an ipv6 capable node if this is // an ipv6 address let routing_table = self.routing_table(); - let sender_id = detail.envelope.get_sender_id(); + let sender_node_id = TypedKey::new( + detail.envelope.get_crypto_kind(), + detail.envelope.get_sender_id(), + ); let routing_domain = detail.routing_domain; let node_count = { let c = self.config.get(); @@ -102,7 +105,7 @@ impl RPCProcessor { dial_info.clone(), ); let will_validate_dial_info_filter = Box::new( - move |rti: &RoutingTableInner, _k: DHTKey, v: Option>| { + move |rti: &RoutingTableInner, v: Option>| { let entry = v.unwrap(); entry.with(rti, move |_rti, e| { if let Some(status) = &e.node_status(routing_domain) { @@ -129,7 +132,7 @@ 
impl RPCProcessor { } for peer in peers { // Ensure the peer is not the one asking for the validation - if peer.node_id() == sender_id { + if peer.node_ids().contains(&sender_node_id) { continue; } diff --git a/veilid-core/src/tests/common/test_table_store.rs b/veilid-core/src/tests/common/test_table_store.rs index 659e7527..d7af7094 100644 --- a/veilid-core/src/tests/common/test_table_store.rs +++ b/veilid-core/src/tests/common/test_table_store.rs @@ -123,24 +123,24 @@ pub async fn test_store_delete_load(ts: TableStore) { assert_eq!(db.load(2, b"baz").unwrap(), Some(b"QWERTY".to_vec())); } -pub async fn test_frozen(ts: TableStore) { +pub async fn test_frozen(vcrypto: CryptoSystemVersion, ts: TableStore) { trace!("test_frozen"); let _ = ts.delete("test"); let db = ts.open("test", 3).await.expect("should have opened"); - let (dht_key, _) = generate_secret(); + let keypair = vcrypto.generate_keypair(); - assert!(db.store_rkyv(0, b"asdf", &dht_key).await.is_ok()); + assert!(db.store_rkyv(0, b"asdf", &keypair).await.is_ok()); - assert_eq!(db.load_rkyv::(0, b"qwer").unwrap(), None); + assert_eq!(db.load_rkyv::(0, b"qwer").unwrap(), None); - let d = match db.load_rkyv::(0, b"asdf") { + let d = match db.load_rkyv::(0, b"asdf") { Ok(x) => x, Err(e) => { panic!("couldn't decode: {}", e); } }; - assert_eq!(d, Some(dht_key), "keys should be equal"); + assert_eq!(d, Some(keypair), "keys should be equal"); assert!( db.store(1, b"foo", b"1234567890").await.is_ok(), @@ -148,19 +148,23 @@ pub async fn test_frozen(ts: TableStore) { ); assert!( - db.load_rkyv::(1, b"foo").is_err(), + db.load_rkyv::(1, b"foo").is_err(), "should fail to unfreeze" ); } pub async fn test_all() { let api = startup().await; + let crypto = api.crypto().unwrap(); let ts = api.table_store().unwrap(); - test_delete_open_delete(ts.clone()).await; - test_store_delete_load(ts.clone()).await; - test_frozen(ts.clone()).await; - let _ = ts.delete("test").await; + for ck in VALID_CRYPTO_KINDS { + let vcrypto = 
crypto.get(ck).unwrap(); + test_delete_open_delete(ts.clone()).await; + test_store_delete_load(ts.clone()).await; + test_frozen(vcrypto, ts.clone()).await; + let _ = ts.delete("test").await; + } shutdown(api).await; } diff --git a/veilid-core/src/tests/common/test_veilid_config.rs b/veilid-core/src/tests/common/test_veilid_config.rs index 3743ca7e..9347a7a2 100644 --- a/veilid-core/src/tests/common/test_veilid_config.rs +++ b/veilid-core/src/tests/common/test_veilid_config.rs @@ -192,10 +192,9 @@ fn config_callback(key: String) -> ConfigCallbackReturn { "network.client_whitelist_timeout_ms" => Ok(Box::new(300_000u32)), "network.reverse_connection_receipt_time_ms" => Ok(Box::new(5_000u32)), "network.hole_punch_receipt_time_ms" => Ok(Box::new(5_000u32)), - "network.node_id" => Ok(Box::new(Option::::None)), - "network.node_id_secret" => Ok(Box::new(Option::::None)), - "network.bootstrap" => Ok(Box::new(Vec::::new())), - "network.bootstrap_nodes" => Ok(Box::new(Vec::::new())), + "network.routing_table.node_id" => Ok(Box::new(TypedKeySet::new())), + "network.routing_table.node_id_secret" => Ok(Box::new(TypedSecretSet::new())), + "network.routing_table.bootstrap" => Ok(Box::new(Vec::::new())), "network.routing_table.limit_over_attached" => Ok(Box::new(64u32)), "network.routing_table.limit_fully_attached" => Ok(Box::new(32u32)), "network.routing_table.limit_attached_strong" => Ok(Box::new(16u32)), @@ -316,15 +315,14 @@ pub async fn test_config() { assert_eq!(inner.network.client_whitelist_timeout_ms, 300_000u32); assert_eq!(inner.network.reverse_connection_receipt_time_ms, 5_000u32); assert_eq!(inner.network.hole_punch_receipt_time_ms, 5_000u32); - assert!(inner.network.node_id.is_none()); - assert!(inner.network.node_id_secret.is_none()); - assert_eq!(inner.network.bootstrap, Vec::::new()); - assert_eq!(inner.network.bootstrap_nodes, Vec::::new()); assert_eq!(inner.network.rpc.concurrency, 2u32); assert_eq!(inner.network.rpc.queue_size, 1024u32); 
assert_eq!(inner.network.rpc.timeout_ms, 10_000u32); assert_eq!(inner.network.rpc.max_route_hop_count, 4u8); assert_eq!(inner.network.rpc.default_route_hop_count, 1u8); + assert_eq!(inner.network.routing_table.node_id.len(), 0); + assert_eq!(inner.network.routing_table.node_id_secret.len(), 0); + assert_eq!(inner.network.routing_table.bootstrap, Vec::::new()); assert_eq!(inner.network.routing_table.limit_over_attached, 64u32); assert_eq!(inner.network.routing_table.limit_fully_attached, 32u32); assert_eq!(inner.network.routing_table.limit_attached_strong, 16u32); diff --git a/veilid-core/src/tests/common/test_veilid_core.rs b/veilid-core/src/tests/common/test_veilid_core.rs index 27bf3e2e..eab1dde3 100644 --- a/veilid-core/src/tests/common/test_veilid_core.rs +++ b/veilid-core/src/tests/common/test_veilid_core.rs @@ -50,64 +50,155 @@ pub async fn test_signed_node_info() { .await .expect("startup failed"); - // Test direct - let node_info = NodeInfo { - network_class: NetworkClass::InboundCapable, - outbound_protocols: ProtocolTypeSet::all(), - address_types: AddressTypeSet::all(), - min_version: 0, - max_version: 0, - dial_info_detail_list: vec![DialInfoDetail { - class: DialInfoClass::Mapped, - dial_info: DialInfo::udp(SocketAddress::default()), - }], - }; + let crypto = api.crypto().unwrap(); + for ck in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(ck).unwrap(); - let (pkey, skey) = generate_secret(); + // Test direct + let node_info = NodeInfo { + network_class: NetworkClass::InboundCapable, + outbound_protocols: ProtocolTypeSet::all(), + address_types: AddressTypeSet::all(), + envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), + crypto_support: VALID_CRYPTO_KINDS.to_vec(), + dial_info_detail_list: vec![DialInfoDetail { + class: DialInfoClass::Mapped, + dial_info: DialInfo::udp(SocketAddress::default()), + }], + }; - let sni = - SignedDirectNodeInfo::with_secret(NodeId::new(pkey.clone()), node_info.clone(), &skey) - .unwrap(); - let _ = 
SignedDirectNodeInfo::new( - NodeId::new(pkey), - node_info.clone(), - sni.timestamp, - sni.signature.unwrap(), - ) - .unwrap(); + // Test correct validation + let keypair = vcrypto.generate_keypair(); + let sni = SignedDirectNodeInfo::make_signatures( + crypto.clone(), + vec![TypedKeyPair::new(ck, keypair)], + node_info.clone(), + ) + .unwrap(); + let mut tks: TypedKeySet = TypedKey::new(ck, keypair.key).into(); + let oldtkslen = tks.len(); + let _ = SignedDirectNodeInfo::new( + crypto.clone(), + &mut tks, + node_info.clone(), + sni.timestamp, + sni.signatures.clone(), + ) + .unwrap(); + assert_eq!(tks.len(), oldtkslen); + assert_eq!(tks.len(), sni.signatures.len()); - // Test relayed - let node_info2 = NodeInfo { - network_class: NetworkClass::OutboundOnly, - outbound_protocols: ProtocolTypeSet::all(), - address_types: AddressTypeSet::all(), - min_version: 0, - max_version: 0, - dial_info_detail_list: vec![DialInfoDetail { - class: DialInfoClass::Blocked, - dial_info: DialInfo::udp(SocketAddress::default()), - }], - }; + // Test incorrect validation + let keypair1 = vcrypto.generate_keypair(); + let mut tks1: TypedKeySet = TypedKey::new(ck, keypair1.key).into(); + let oldtks1len = tks1.len(); + let _ = SignedDirectNodeInfo::new( + crypto.clone(), + &mut tks1, + node_info.clone(), + sni.timestamp, + sni.signatures.clone(), + ) + .unwrap_err(); + assert_eq!(tks1.len(), oldtks1len); + assert_eq!(tks1.len(), sni.signatures.len()); - let (pkey2, skey2) = generate_secret(); + // Test unsupported cryptosystem validation + let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); + let mut tksfake: TypedKeySet = TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); + let mut sigsfake = sni.signatures.clone(); + sigsfake.push(TypedSignature::new(fake_crypto_kind, Signature::default())); + tksfake.add(TypedKey::new(ck, keypair.key)); + let sdnifake = SignedDirectNodeInfo::new( + crypto.clone(), + &mut tksfake, + node_info.clone(), + sni.timestamp, + 
sigsfake.clone(), + ) + .unwrap(); + assert_eq!(tksfake.len(), 1); + assert_eq!(sdnifake.signatures.len(), sigsfake.len()); - let sni2 = SignedRelayedNodeInfo::with_secret( - NodeId::new(pkey2.clone()), - node_info2.clone(), - NodeId::new(pkey.clone()), - sni.clone(), - &skey2, - ) - .unwrap(); - let _ = SignedRelayedNodeInfo::new( - NodeId::new(pkey2), - node_info2, - NodeId::new(pkey), - sni, - sni2.timestamp, - sni2.signature, - ) - .unwrap(); + // Test relayed + let node_info2 = NodeInfo { + network_class: NetworkClass::OutboundOnly, + outbound_protocols: ProtocolTypeSet::all(), + address_types: AddressTypeSet::all(), + envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), + crypto_support: VALID_CRYPTO_KINDS.to_vec(), + dial_info_detail_list: vec![DialInfoDetail { + class: DialInfoClass::Blocked, + dial_info: DialInfo::udp(SocketAddress::default()), + }], + }; + + // Test correct validation + let keypair2 = vcrypto.generate_keypair(); + let mut tks2: TypedKeySet = TypedKey::new(ck, keypair2.key).into(); + let oldtks2len = tks2.len(); + + let sni2 = SignedRelayedNodeInfo::make_signatures( + crypto.clone(), + vec![TypedKeyPair::new(ck, keypair2)], + node_info2.clone(), + tks.clone(), + sni.clone(), + ) + .unwrap(); + let _ = SignedRelayedNodeInfo::new( + crypto.clone(), + &mut tks2, + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp, + sni2.signatures.clone(), + ) + .unwrap(); + + assert_eq!(tks2.len(), oldtks2len); + assert_eq!(tks2.len(), sni2.signatures.len()); + + // Test incorrect validation + let keypair3 = vcrypto.generate_keypair(); + let mut tks3: TypedKeySet = TypedKey::new(ck, keypair3.key).into(); + let oldtks3len = tks3.len(); + + let _ = SignedRelayedNodeInfo::new( + crypto.clone(), + &mut tks3, + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp, + sni2.signatures.clone(), + ) + .unwrap_err(); + + assert_eq!(tks3.len(), oldtks3len); + assert_eq!(tks3.len(), sni2.signatures.len()); + + // Test unsupported 
cryptosystem validation + let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); + let mut tksfake3: TypedKeySet = + TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); + let mut sigsfake3 = sni2.signatures.clone(); + sigsfake3.push(TypedSignature::new(fake_crypto_kind, Signature::default())); + tksfake3.add(TypedKey::new(ck, keypair2.key)); + let srnifake = SignedRelayedNodeInfo::new( + crypto.clone(), + &mut tksfake3, + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp, + sigsfake3.clone(), + ) + .unwrap(); + assert_eq!(tksfake3.len(), 1); + assert_eq!(srnifake.signatures.len(), sigsfake3.len()); + } api.shutdown().await; } diff --git a/veilid-core/src/tests/native/mod.rs b/veilid-core/src/tests/native/mod.rs index a05f1116..ccb7d420 100644 --- a/veilid-core/src/tests/native/mod.rs +++ b/veilid-core/src/tests/native/mod.rs @@ -12,7 +12,7 @@ pub async fn run_all_tests() { info!("TEST: test_host_interface"); test_host_interface::test_all().await; info!("TEST: test_dht_key"); - test_dht_key::test_all().await; + test_types::test_all().await; info!("TEST: test_veilid_core"); test_veilid_core::test_all().await; info!("TEST: test_veilid_config"); @@ -85,7 +85,7 @@ cfg_if! 
{ fn run_test_dht_key() { setup(); block_on(async { - test_dht_key::test_all().await; + test_types::test_all().await; }); } diff --git a/veilid-core/src/veilid_api/api.rs b/veilid-core/src/veilid_api/api.rs index a9a572e9..ec50636e 100644 --- a/veilid-core/src/veilid_api/api.rs +++ b/veilid-core/src/veilid_api/api.rs @@ -116,7 +116,7 @@ impl VeilidAPI { //////////////////////////////////////////////////////////////// // Attach/Detach - // get a full copy of the current state + /// Get a full copy of the current state pub async fn get_state(&self) -> Result { let attachment_manager = self.attachment_manager()?; let network_manager = attachment_manager.network_manager(); @@ -133,9 +133,7 @@ impl VeilidAPI { }) } - // get network connectedness - - // connect to the network + /// Connect to the network #[instrument(level = "debug", err, skip_all)] pub async fn attach(&self) -> Result<(), VeilidAPIError> { let attachment_manager = self.attachment_manager()?; @@ -145,7 +143,7 @@ impl VeilidAPI { Ok(()) } - // disconnect from the network + /// Disconnect from the network #[instrument(level = "debug", err, skip_all)] pub async fn detach(&self) -> Result<(), VeilidAPIError> { let attachment_manager = self.attachment_manager()?; @@ -166,18 +164,27 @@ impl VeilidAPI { //////////////////////////////////////////////////////////////// // Private route allocation + /// Allocate a new private route set with default cryptography and network options + /// Returns a route id and a publishable 'blob' with the route encrypted with each crypto kind + /// Those nodes importing the blob will have their choice of which crypto kind to use #[instrument(level = "debug", skip(self))] - pub async fn new_private_route(&self) -> Result<(DHTKey, Vec), VeilidAPIError> { - self.new_custom_private_route(Stability::default(), Sequencing::default()) - .await + pub async fn new_private_route(&self) -> Result<(RouteId, Vec), VeilidAPIError> { + self.new_custom_private_route( + &VALID_CRYPTO_KINDS, + 
Stability::default(), + Sequencing::default(), + ) + .await } + /// #[instrument(level = "debug", skip(self))] pub async fn new_custom_private_route( &self, + crypto_kinds: &[CryptoKind], stability: Stability, sequencing: Sequencing, - ) -> Result<(DHTKey, Vec), VeilidAPIError> { + ) -> Result<(RouteId, Vec), VeilidAPIError> { let default_route_hop_count: usize = { let config = self.config()?; let c = config.get(); @@ -187,6 +194,7 @@ impl VeilidAPI { let rss = self.routing_table()?.route_spec_store(); let r = rss .allocate_route( + &crypto_kinds, stability, sequencing, default_route_hop_count, @@ -194,53 +202,48 @@ impl VeilidAPI { &[], ) .map_err(VeilidAPIError::internal)?; - let Some(pr_pubkey) = r else { + let Some(route_id) = r else { apibail_generic!("unable to allocate route"); }; if !rss - .test_route(&pr_pubkey) + .test_route(route_id.clone()) .await .map_err(VeilidAPIError::no_connection)? { - rss.release_route(&pr_pubkey); + rss.release_route(route_id); apibail_generic!("allocated route failed to test"); } - let private_route = rss - .assemble_private_route(&pr_pubkey, Some(true)) + let private_routes = rss + .assemble_private_routes(&route_id, Some(true)) .map_err(VeilidAPIError::generic)?; - let blob = match RouteSpecStore::private_route_to_blob(&private_route) { + let blob = match RouteSpecStore::private_routes_to_blob(&private_routes) { Ok(v) => v, Err(e) => { - rss.release_route(&pr_pubkey); + rss.release_route(route_id); apibail_internal!(e); } }; - rss.mark_route_published(&pr_pubkey, true) + rss.mark_route_published(&route_id, true) .map_err(VeilidAPIError::internal)?; - Ok((pr_pubkey, blob)) + Ok((route_id, blob)) } #[instrument(level = "debug", skip(self))] - pub fn import_remote_private_route(&self, blob: Vec) -> Result { + pub fn import_remote_private_route(&self, blob: Vec) -> Result { let rss = self.routing_table()?.route_spec_store(); rss.import_remote_private_route(blob) .map_err(|e| VeilidAPIError::invalid_argument(e, "blob", "private 
route blob")) } #[instrument(level = "debug", skip(self))] - pub fn release_private_route(&self, key: &DHTKey) -> Result<(), VeilidAPIError> { + pub fn release_private_route(&self, route_id: RouteId) -> Result<(), VeilidAPIError> { let rss = self.routing_table()?.route_spec_store(); - if rss.release_route(key) { - Ok(()) - } else { - Err(VeilidAPIError::invalid_argument( - "release_private_route", - "key", - key, - )) + if !rss.release_route(route_id) { + apibail_invalid_argument!("release_private_route", "key", route_id); } + Ok(()) } //////////////////////////////////////////////////////////////// diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index 986a3c5d..db5faee7 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -7,7 +7,7 @@ use routing_table::*; #[derive(Default, Debug)] struct DebugCache { - imported_routes: Vec, + imported_routes: Vec, } static DEBUG_CACHE: Mutex = Mutex::new(DebugCache { @@ -30,17 +30,23 @@ fn get_string(text: &str) -> Option { Some(text.to_owned()) } -fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option { +fn get_route_id(rss: RouteSpecStore, allow_remote: bool) -> impl Fn(&str) -> Option { return move |text: &str| { if text.is_empty() { return None; } - match DHTKey::try_decode(text).ok() { + match RouteId::from_str(text).ok() { Some(key) => { let routes = rss.list_allocated_routes(|k, _| Some(*k)); if routes.contains(&key) { return Some(key); } + if allow_remote { + let rroutes = rss.list_remote_routes(|k, _| Some(*k)); + if rroutes.contains(&key) { + return Some(key); + } + } } None => { let routes = rss.list_allocated_routes(|k, _| Some(*k)); @@ -50,6 +56,15 @@ fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option { return Some(r); } } + if allow_remote { + let routes = rss.list_remote_routes(|k, _| Some(*k)); + for r in routes { + let rkey = r.encode(); + if rkey.starts_with(text) { + return Some(r); + } + } + } } } None @@ 
-74,7 +89,7 @@ fn get_safety_selection(text: &str, routing_table: RoutingTable) -> Option impl FnOnce(&str) -> Option impl FnOnce(&str) -> Option impl FnOnce(&str) -> Option impl FnOnce(&str) -> Option Option { usize::from_str(text).ok() } -fn get_dht_key(text: &str) -> Option { - DHTKey::try_decode(text).ok() +fn get_typed_key(text: &str) -> Option { + TypedKey::from_str(text).ok() +} +fn get_public_key(text: &str) -> Option { + PublicKey::from_str(text).ok() } fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option { @@ -198,8 +215,13 @@ fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option Result { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); + let routing_table = self.network_manager()?.routing_table(); - let node_id = get_debug_argument_at(&args, 0, "debug_entry", "node_id", get_dht_key)?; + let node_ref = get_debug_argument_at( + &args, + 0, + "debug_entry", + "node_id", + get_node_ref(routing_table), + )?; // Dump routing table entry let routing_table = self.network_manager()?.routing_table(); - Ok(routing_table.debug_info_entry(node_id)) + Ok(routing_table.debug_info_entry(node_ref)) } async fn debug_nodeinfo(&self, _args: String) -> Result { @@ -442,13 +471,14 @@ impl VeilidAPI { // Purge connection table let connection_manager = self.network_manager()?.connection_manager(); connection_manager.shutdown().await; - connection_manager.startup().await; // Eliminate last_connections from routing table entries self.network_manager()? 
.routing_table() .purge_last_connections(); + connection_manager.startup().await; + Ok("Connections purged".to_owned()) } else if args[0] == "routes" { // Purge route spec store @@ -607,8 +637,15 @@ impl VeilidAPI { } // Allocate route - let out = match rss.allocate_route(stability, sequencing, hop_count, directions, &[]) { - Ok(Some(v)) => format!("{}", v.encode()), + let out = match rss.allocate_route( + &VALID_CRYPTO_KINDS, + stability, + sequencing, + hop_count, + directions, + &[], + ) { + Ok(Some(v)) => format!("{}", v), Ok(None) => format!(""), Err(e) => { format!("Route allocation failed: {}", e) @@ -623,11 +660,27 @@ impl VeilidAPI { let routing_table = netman.routing_table(); let rss = routing_table.route_spec_store(); - let route_id = get_debug_argument_at(&args, 1, "debug_route", "route_id", get_dht_key)?; + let route_id = get_debug_argument_at( + &args, + 1, + "debug_route", + "route_id", + get_route_id(rss.clone(), true), + )?; // Release route - let out = match rss.release_route(&route_id) { - true => "Released".to_owned(), + let out = match rss.release_route(route_id) { + true => { + // release imported + let mut dc = DEBUG_CACHE.lock(); + for (n, ir) in dc.imported_routes.iter().enumerate() { + if *ir == route_id { + dc.imported_routes.remove(n); + break; + } + } + "Released".to_owned() + } false => "Route does not exist".to_owned(), }; @@ -639,7 +692,13 @@ impl VeilidAPI { let routing_table = netman.routing_table(); let rss = routing_table.route_spec_store(); - let route_id = get_debug_argument_at(&args, 1, "debug_route", "route_id", get_dht_key)?; + let route_id = get_debug_argument_at( + &args, + 1, + "debug_route", + "route_id", + get_route_id(rss.clone(), false), + )?; let full = { if args.len() > 2 { let full_val = get_debug_argument_at(&args, 2, "debug_route", "full", get_string)? 
@@ -655,13 +714,13 @@ impl VeilidAPI { }; // Publish route - let out = match rss.assemble_private_route(&route_id, Some(!full)) { - Ok(private_route) => { + let out = match rss.assemble_private_routes(&route_id, Some(!full)) { + Ok(private_routes) => { if let Err(e) = rss.mark_route_published(&route_id, true) { return Ok(format!("Couldn't mark route published: {}", e)); } // Convert to blob - let blob_data = RouteSpecStore::private_route_to_blob(&private_route) + let blob_data = RouteSpecStore::private_routes_to_blob(&private_routes) .map_err(VeilidAPIError::internal)?; let out = BASE64URL_NOPAD.encode(&blob_data); info!( @@ -685,7 +744,13 @@ impl VeilidAPI { let routing_table = netman.routing_table(); let rss = routing_table.route_spec_store(); - let route_id = get_debug_argument_at(&args, 1, "debug_route", "route_id", get_dht_key)?; + let route_id = get_debug_argument_at( + &args, + 1, + "debug_route", + "route_id", + get_route_id(rss.clone(), false), + )?; // Unpublish route let out = if let Err(e) = rss.mark_route_published(&route_id, false) { @@ -701,7 +766,13 @@ impl VeilidAPI { let routing_table = netman.routing_table(); let rss = routing_table.route_spec_store(); - let route_id = get_debug_argument_at(&args, 1, "debug_route", "route_id", get_dht_key)?; + let route_id = get_debug_argument_at( + &args, + 1, + "debug_route", + "route_id", + get_route_id(rss.clone(), true), + )?; match rss.debug_route(&route_id) { Some(s) => Ok(s), @@ -739,14 +810,14 @@ impl VeilidAPI { .decode(blob.as_bytes()) .map_err(VeilidAPIError::generic)?; let rss = self.routing_table()?.route_spec_store(); - let pr_pubkey = rss + let route_id = rss .import_remote_private_route(blob_dec) .map_err(VeilidAPIError::generic)?; let mut dc = DEBUG_CACHE.lock(); let n = dc.imported_routes.len(); - let out = format!("Private route #{} imported: {}", n, pr_pubkey); - dc.imported_routes.push(pr_pubkey); + let out = format!("Private route #{} imported: {}", n, route_id); + 
dc.imported_routes.push(route_id); return Ok(out); } @@ -757,10 +828,16 @@ impl VeilidAPI { let routing_table = netman.routing_table(); let rss = routing_table.route_spec_store(); - let route_id = get_debug_argument_at(&args, 1, "debug_route", "route_id", get_dht_key)?; + let route_id = get_debug_argument_at( + &args, + 1, + "debug_route", + "route_id", + get_route_id(rss.clone(), true), + )?; let success = rss - .test_route(&route_id) + .test_route(route_id) .await .map_err(VeilidAPIError::internal)?; diff --git a/veilid-core/src/veilid_api/error.rs b/veilid-core/src/veilid_api/error.rs index aece21d4..7e04b947 100644 --- a/veilid-core/src/veilid_api/error.rs +++ b/veilid-core/src/veilid_api/error.rs @@ -66,9 +66,17 @@ macro_rules! apibail_no_connection { #[allow(unused_macros)] #[macro_export] -macro_rules! apibail_key_not_found { +macro_rules! apibail_invalid_target { + () => { + return Err(VeilidAPIError::invalid_target()) + }; +} + +#[allow(unused_macros)] +#[macro_export] +macro_rules! 
apibail_route_not_found { ($x:expr) => { - return Err(VeilidAPIError::key_not_found($x)) + return Err(VeilidAPIError::route_not_found($x)) }; } @@ -107,12 +115,12 @@ pub enum VeilidAPIError { TryAgain, #[error("Shutdown")] Shutdown, - #[error("Key not found: {key}")] - KeyNotFound { key: DHTKey }, + #[error("Invalid target")] + InvalidTarget, #[error("No connection: {message}")] NoConnection { message: String }, #[error("No peer info: {node_id}")] - NoPeerInfo { node_id: NodeId }, + NoPeerInfo { node_id: TypedKey }, #[error("Internal: {message}")] Internal { message: String }, #[error("Unimplemented: {message}")] @@ -147,15 +155,15 @@ impl VeilidAPIError { pub fn shutdown() -> Self { Self::Shutdown } - pub fn key_not_found(key: DHTKey) -> Self { - Self::KeyNotFound { key } + pub fn invalid_target() -> Self { + Self::InvalidTarget } pub fn no_connection(msg: T) -> Self { Self::NoConnection { message: msg.to_string(), } } - pub fn no_peer_info(node_id: NodeId) -> Self { + pub fn no_peer_info(node_id: TypedKey) -> Self { Self::NoPeerInfo { node_id } } pub fn internal(msg: T) -> Self { diff --git a/veilid-core/src/veilid_api/mod.rs b/veilid-core/src/veilid_api/mod.rs index a63adbb2..02e1e308 100644 --- a/veilid-core/src/veilid_api/mod.rs +++ b/veilid-core/src/veilid_api/mod.rs @@ -19,8 +19,7 @@ pub use types::*; pub use alloc::string::ToString; pub use attachment_manager::AttachmentManager; pub use core::str::FromStr; -pub use crypto::Crypto; -pub use crypto::{generate_secret, sign, verify, DHTKey, DHTKeySecret, DHTSignature, Nonce}; +pub use crypto::*; pub use intf::BlockStore; pub use intf::ProtectedStore; pub use intf::{TableDB, TableDBTransaction, TableStore}; diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index 3cffd867..981fb197 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -4,8 +4,8 @@ use super::*; #[derive(Clone, Debug)] pub enum 
Target { - NodeId(NodeId), - PrivateRoute(DHTKey), + NodeId(PublicKey), // Node by any of its public keys + PrivateRoute(RouteId), // Remote private route by its id } pub struct RoutingContextInner {} @@ -66,7 +66,7 @@ impl RoutingContext { }), }) } - + pub fn with_sequencing(self, sequencing: Sequencing) -> Self { Self { api: self.api.clone(), @@ -105,9 +105,9 @@ impl RoutingContext { match target { Target::NodeId(node_id) => { // Resolve node - let mut nr = match rpc_processor.resolve_node(node_id.key).await { + let mut nr = match rpc_processor.resolve_node(node_id).await { Ok(Some(nr)) => nr, - Ok(None) => apibail_key_not_found!(node_id.key), + Ok(None) => apibail_invalid_target!(), Err(e) => return Err(e.into()), }; // Apply sequencing to match safety selection @@ -118,14 +118,13 @@ impl RoutingContext { safety_selection: self.unlocked_inner.safety_selection, }) } - Target::PrivateRoute(pr) => { + Target::PrivateRoute(rsid) => { // Get remote private route let rss = self.api.routing_table()?.route_spec_store(); - let Some(private_route) = rss - .get_remote_private_route(&pr) - else { - apibail_key_not_found!(pr); - }; + + let Some(private_route) = rss.best_remote_private_route(&rsid) else { + apibail_invalid_target!(); + }; Ok(rpc_processor::Destination::PrivateRoute { private_route, @@ -198,38 +197,49 @@ impl RoutingContext { /////////////////////////////////// /// DHT Values - pub async fn get_value(&self, _value_key: ValueKey) -> Result, VeilidAPIError> { + pub async fn get_value( + &self, + _key: TypedKey, + _subkey: ValueSubkey, + ) -> Result { panic!("unimplemented"); } pub async fn set_value( &self, - _value_key: ValueKey, - _value: Vec, + _key: TypedKey, + _subkey: ValueSubkey, + _value: ValueData, ) -> Result { panic!("unimplemented"); } pub async fn watch_value( &self, - _value_key: ValueKey, - _callback: ValueChangeCallback, + _key: TypedKey, + _subkeys: &[ValueSubkeyRange], + _expiration: Timestamp, + _count: u32, ) -> Result { 
panic!("unimplemented"); } - pub async fn cancel_watch_value(&self, _value_key: ValueKey) -> Result { + pub async fn cancel_watch_value( + &self, + _key: TypedKey, + _subkeys: &[ValueSubkeyRange], + ) -> Result { panic!("unimplemented"); } /////////////////////////////////// /// Block Store - pub async fn find_block(&self, _block_id: BlockId) -> Result, VeilidAPIError> { + pub async fn find_block(&self, _block_id: PublicKey) -> Result, VeilidAPIError> { panic!("unimplemented"); } - pub async fn supply_block(&self, _block_id: BlockId) -> Result { + pub async fn supply_block(&self, _block_id: PublicKey) -> Result { panic!("unimplemented"); } } diff --git a/veilid-core/src/veilid_api/types.rs b/veilid-core/src/veilid_api/types.rs index 72f9ba7d..57bfd6d1 100644 --- a/veilid-core/src/veilid_api/types.rs +++ b/veilid-core/src/veilid_api/types.rs @@ -15,7 +15,67 @@ pub type OperationId = AlignedU64; pub type ByteCount = AlignedU64; /// Tunnel identifier pub type TunnelId = AlignedU64; +/// Value schema +pub type ValueSchema = FourCC; +/// Value subkey +pub type ValueSubkey = u32; +/// Value subkey range +pub type ValueSubkeyRange = (u32, u32); +/// Value sequence number +pub type ValueSeqNum = u32; +/// FOURCC code +#[derive( + Copy, + Default, + Clone, + Hash, + PartialOrd, + Ord, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq, Hash))] +pub struct FourCC(pub [u8; 4]); + +impl From<[u8; 4]> for FourCC { + fn from(b: [u8; 4]) -> Self { + Self(b) + } +} +impl TryFrom<&[u8]> for FourCC { + type Error = VeilidAPIError; + fn try_from(b: &[u8]) -> Result { + Ok(Self(b.try_into().map_err(VeilidAPIError::generic)?)) + } +} + +impl fmt::Display for FourCC { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", String::from_utf8_lossy(&self.0)) + } +} +impl fmt::Debug for FourCC { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", String::from_utf8_lossy(&self.0)) + } +} + +impl FromStr for FourCC { + type Err = VeilidAPIError; + fn from_str(s: &str) -> Result { + Ok(Self( + s.as_bytes().try_into().map_err(VeilidAPIError::generic)?, + )) + } +} + +/// Log level for VeilidCore #[derive( Debug, Clone, @@ -91,6 +151,7 @@ impl fmt::Display for VeilidLogLevel { } } +/// A VeilidCore log message with optional backtrace #[derive( Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, )] @@ -101,6 +162,7 @@ pub struct VeilidLog { pub backtrace: Option, } +/// Direct statement blob passed to hosting application for processing #[derive( Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, )] @@ -108,12 +170,13 @@ pub struct VeilidLog { pub struct VeilidAppMessage { /// Some(sender) if the message was sent directly, None if received via a private/safety route #[serde(with = "opt_json_as_string")] - pub sender: Option, + pub sender: Option, /// The content of the message to deliver to the application #[serde(with = "json_as_base64")] pub message: Vec, } +/// Direct question blob passed to hosting application for processing to send an eventual AppReply #[derive( Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, )] @@ -121,7 +184,7 @@ pub struct VeilidAppMessage { pub struct VeilidAppCall { /// Some(sender) if the request was sent directly, None if received via a private/safety route #[serde(with = "opt_json_as_string")] - pub sender: Option, + pub sender: Option, /// The content of the request to deliver to the application #[serde(with = "json_as_base64")] pub message: Vec, @@ -130,6 +193,7 @@ pub struct VeilidAppCall { pub id: OperationId, } +/// Attachment abstraction for network 'signal strength' #[derive( Debug, PartialEq, @@ -203,7 +267,7 @@ pub struct VeilidStateAttachment { )] 
#[archive_attr(repr(C), derive(CheckBytes))] pub struct PeerTableData { - pub node_id: DHTKey, + pub node_ids: TypedKeySet, pub peer_address: PeerAddress, pub peer_stats: PeerStats, } @@ -226,8 +290,8 @@ pub struct VeilidStateNetwork { )] #[archive_attr(repr(C), derive(CheckBytes))] pub struct VeilidStateRoute { - pub dead_routes: Vec, - pub dead_remote_routes: Vec, + pub dead_routes: Vec, + pub dead_remote_routes: Vec, } #[derive( @@ -238,6 +302,17 @@ pub struct VeilidStateConfig { pub config: VeilidConfigInner, } +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidValueChange { + key: TypedKey, + subkeys: Vec, + count: u32, + value: ValueData, +} + #[derive(Debug, Clone, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(u8), derive(CheckBytes))] #[serde(tag = "kind")] @@ -249,6 +324,7 @@ pub enum VeilidUpdate { Network(VeilidStateNetwork), Config(VeilidStateConfig), Route(VeilidStateRoute), + ValueChange(VeilidValueChange), Shutdown, } @@ -262,77 +338,6 @@ pub struct VeilidState { ///////////////////////////////////////////////////////////////////////////////////////////////////// /// -#[derive( - Clone, - Debug, - Default, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct NodeId { - pub key: DHTKey, -} -impl NodeId { - pub fn new(key: DHTKey) -> Self { - Self { key } - } -} -impl fmt::Display for NodeId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.key.encode()) - } -} -impl FromStr for NodeId { - type Err = VeilidAPIError; - fn from_str(s: &str) -> Result { - Ok(Self { - key: DHTKey::try_decode(s)?, - }) - } -} - -#[derive( - Clone, - Debug, - Default, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, 
- RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct ValueKey { - pub key: DHTKey, - pub subkey: Option, -} -impl ValueKey { - pub fn new(key: DHTKey) -> Self { - Self { key, subkey: None } - } - pub fn new_subkey(key: DHTKey, subkey: String) -> Self { - Self { - key, - subkey: if subkey.is_empty() { - None - } else { - Some(subkey) - }, - } - } -} #[derive( Clone, @@ -350,15 +355,20 @@ impl ValueKey { )] #[archive_attr(repr(C), derive(CheckBytes))] pub struct ValueData { + pub seq: ValueSeqNum, + pub schema: ValueSchema, pub data: Vec, - pub seq: u32, } impl ValueData { - pub fn new(data: Vec) -> Self { - Self { data, seq: 0 } + pub fn new(schema: ValueSchema, data: Vec) -> Self { + Self { + seq: 0, + schema, + data, + } } - pub fn new_with_seq(data: Vec, seq: u32) -> Self { - Self { data, seq } + pub fn new_with_seq(seq: ValueSeqNum, schema: ValueSchema, data: Vec) -> Self { + Self { seq, schema, data } } pub fn change(&mut self, data: Vec) { self.data = data; @@ -366,30 +376,6 @@ impl ValueData { } } -#[derive( - Clone, - Debug, - Default, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct BlockId { - pub key: DHTKey, -} -impl BlockId { - pub fn new(key: DHTKey) -> Self { - Self { key } - } -} - ///////////////////////////////////////////////////////////////////////////////////////////////////// // Keep member order appropriate for sorting < preference @@ -549,8 +535,8 @@ impl SafetySelection { )] #[archive_attr(repr(C), derive(CheckBytes))] pub struct SafetySpec { - /// preferred safety route if it still exists - pub preferred_route: Option, + /// preferred safety route set id if it still exists + pub preferred_route: Option, /// must be greater than 0 pub hop_count: usize, /// prefer reliability over speed @@ -713,8 +699,8 @@ pub struct NodeInfo { pub outbound_protocols: 
ProtocolTypeSet, #[with(RkyvEnumSet)] pub address_types: AddressTypeSet, - pub min_version: u8, - pub max_version: u8, + pub envelope_support: Vec, + pub crypto_support: Vec, pub dial_info_detail_list: Vec, } @@ -1879,43 +1865,57 @@ impl MatchesDialInfoFilter for DialInfo { ////////////////////////////////////////////////////////////////////////// -// Signed NodeInfo that can be passed around amongst peers and verifiable +/// Signed NodeInfo that can be passed around amongst peers and verifiable #[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(C), derive(CheckBytes))] pub struct SignedDirectNodeInfo { pub node_info: NodeInfo, pub timestamp: Timestamp, - pub signature: Option, + pub signatures: Vec, } - impl SignedDirectNodeInfo { + /// Returns a new SignedDirectNodeInfo that has its signatures validated. + /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. + /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. 
pub fn new( - node_id: NodeId, + crypto: Crypto, + node_ids: &mut TypedKeySet, node_info: NodeInfo, timestamp: Timestamp, - signature: DHTSignature, + typed_signatures: Vec, ) -> Result { let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?; - verify(&node_id.key, &node_info_bytes, &signature)?; + + // Verify the signatures that we can + let validated_node_ids = + crypto.verify_signatures(node_ids, &node_info_bytes, &typed_signatures)?; + *node_ids = validated_node_ids; + if node_ids.len() == 0 { + apibail_generic!("no valid node ids in direct node info"); + } + Ok(Self { node_info, timestamp, - signature: Some(signature), + signatures: typed_signatures, }) } - pub fn with_secret( - node_id: NodeId, + pub fn make_signatures( + crypto: Crypto, + typed_key_pairs: Vec, node_info: NodeInfo, - secret: &DHTKeySecret, ) -> Result { let timestamp = get_aligned_timestamp(); let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?; - let signature = sign(&node_id.key, secret, &node_info_bytes)?; + let typed_signatures = + crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { + TypedSignature::new(kp.kind, s) + })?; Ok(Self { node_info, timestamp, - signature: Some(signature), + signatures: typed_signatures, }) } @@ -1940,13 +1940,13 @@ impl SignedDirectNodeInfo { pub fn with_no_signature(node_info: NodeInfo) -> Self { Self { node_info, - signature: None, timestamp: get_aligned_timestamp(), + signatures: Vec::new(), } } - pub fn has_valid_signature(&self) -> bool { - self.signature.is_some() + pub fn has_any_signature(&self) -> bool { + !self.signatures.is_empty() } } @@ -1955,56 +1955,69 @@ impl SignedDirectNodeInfo { #[archive_attr(repr(C), derive(CheckBytes))] pub struct SignedRelayedNodeInfo { pub node_info: NodeInfo, - pub relay_id: NodeId, + pub relay_ids: TypedKeySet, pub relay_info: SignedDirectNodeInfo, pub timestamp: Timestamp, - pub signature: DHTSignature, + pub signatures: Vec, } impl SignedRelayedNodeInfo { 
+ /// Returns a new SignedRelayedNodeInfo that has its signatures validated. + /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. + /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. pub fn new( - node_id: NodeId, + crypto: Crypto, + node_ids: &mut TypedKeySet, node_info: NodeInfo, - relay_id: NodeId, + relay_ids: TypedKeySet, relay_info: SignedDirectNodeInfo, timestamp: Timestamp, - signature: DHTSignature, + typed_signatures: Vec, ) -> Result { let node_info_bytes = - Self::make_signature_bytes(&node_info, &relay_id, &relay_info, timestamp)?; - verify(&node_id.key, &node_info_bytes, &signature)?; + Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?; + let validated_node_ids = + crypto.verify_signatures(node_ids, &node_info_bytes, &typed_signatures)?; + *node_ids = validated_node_ids; + if node_ids.len() == 0 { + apibail_generic!("no valid node ids in relayed node info"); + } + Ok(Self { node_info, - relay_id, + relay_ids, relay_info, - signature, timestamp, + signatures: typed_signatures, }) } - pub fn with_secret( - node_id: NodeId, + pub fn make_signatures( + crypto: Crypto, + typed_key_pairs: Vec, node_info: NodeInfo, - relay_id: NodeId, + relay_ids: TypedKeySet, relay_info: SignedDirectNodeInfo, - secret: &DHTKeySecret, ) -> Result { let timestamp = get_aligned_timestamp(); let node_info_bytes = - Self::make_signature_bytes(&node_info, &relay_id, &relay_info, timestamp)?; - let signature = sign(&node_id.key, secret, &node_info_bytes)?; + Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?; + let typed_signatures = + crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { + TypedSignature::new(kp.kind, s) + })?; Ok(Self { node_info, - relay_id, + relay_ids, relay_info, - signature, timestamp, + signatures: typed_signatures, }) } fn make_signature_bytes( node_info: &NodeInfo, 
- relay_id: &NodeId, + relay_ids: &[TypedKey], relay_info: &SignedDirectNodeInfo, timestamp: Timestamp, ) -> Result, VeilidAPIError> { @@ -2016,11 +2029,13 @@ impl SignedRelayedNodeInfo { encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?; sig_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?); - // Add relay id to signature - let mut rid_msg = ::capnp::message::Builder::new_default(); - let mut rid_builder = rid_msg.init_root::(); - encode_dht_key(&relay_id.key, &mut rid_builder).map_err(VeilidAPIError::internal)?; - sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?); + // Add relay ids to signature + for relay_id in relay_ids { + let mut rid_msg = ::capnp::message::Builder::new_default(); + let mut rid_builder = rid_msg.init_root::(); + encode_typed_key(relay_id, &mut rid_builder); + sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?); + } // Add relay info to signature let mut ri_msg = ::capnp::message::Builder::new_default(); @@ -2034,6 +2049,10 @@ impl SignedRelayedNodeInfo { Ok(sig_bytes) } + + pub fn has_any_signature(&self) -> bool { + !self.signatures.is_empty() + } } #[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] @@ -2044,10 +2063,10 @@ pub enum SignedNodeInfo { } impl SignedNodeInfo { - pub fn has_valid_signature(&self) -> bool { + pub fn has_any_signature(&self) -> bool { match self { - SignedNodeInfo::Direct(d) => d.has_valid_signature(), - SignedNodeInfo::Relayed(_) => true, + SignedNodeInfo::Direct(d) => d.has_any_signature(), + SignedNodeInfo::Relayed(r) => r.has_any_signature(), } } @@ -2063,10 +2082,10 @@ impl SignedNodeInfo { SignedNodeInfo::Relayed(r) => &r.node_info, } } - pub fn relay_id(&self) -> Option { + pub fn relay_ids(&self) -> TypedKeySet { match self { - SignedNodeInfo::Direct(_) => None, - SignedNodeInfo::Relayed(r) => Some(r.relay_id.clone()), + SignedNodeInfo::Direct(_) => 
TypedKeySet::new(), + SignedNodeInfo::Relayed(r) => r.relay_ids.clone(), } } pub fn relay_info(&self) -> Option<&NodeInfo> { @@ -2079,7 +2098,7 @@ impl SignedNodeInfo { match self { SignedNodeInfo::Direct(_) => None, SignedNodeInfo::Relayed(r) => Some(PeerInfo::new( - r.relay_id.clone(), + r.relay_ids.clone(), SignedNodeInfo::Direct(r.relay_info.clone()), )), } @@ -2127,14 +2146,15 @@ impl SignedNodeInfo { #[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] #[archive_attr(repr(C), derive(CheckBytes))] pub struct PeerInfo { - pub node_id: NodeId, + pub node_ids: TypedKeySet, pub signed_node_info: SignedNodeInfo, } impl PeerInfo { - pub fn new(node_id: NodeId, signed_node_info: SignedNodeInfo) -> Self { + pub fn new(node_ids: TypedKeySet, signed_node_info: SignedNodeInfo) -> Self { + assert!(node_ids.len() > 0 && node_ids.len() <= MAX_CRYPTO_KINDS); Self { - node_id, + node_ids, signed_node_info, } } @@ -2380,9 +2400,6 @@ pub struct PeerStats { pub transfer: TransferStatsDownUp, // Stats for communications with the peer } -pub type ValueChangeCallback = - Arc) -> SendPinBoxFuture<()> + Send + Sync + 'static>; - ///////////////////////////////////////////////////////////////////////////////////////////////////// #[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] diff --git a/veilid-core/src/veilid_config.rs b/veilid-core/src/veilid_config.rs index 8b207370..b4e9aed5 100644 --- a/veilid-core/src/veilid_config.rs +++ b/veilid-core/src/veilid_config.rs @@ -331,6 +331,9 @@ pub struct VeilidConfigRPC { RkyvDeserialize, )] pub struct VeilidConfigRoutingTable { + pub node_id: TypedKeySet, + pub node_id_secret: TypedSecretSet, + pub bootstrap: Vec, pub limit_over_attached: u32, pub limit_fully_attached: u32, pub limit_attached_strong: u32, @@ -362,10 +365,6 @@ pub struct VeilidConfigNetwork { pub client_whitelist_timeout_ms: u32, pub reverse_connection_receipt_time_ms: u32, pub 
hole_punch_receipt_time_ms: u32, - pub node_id: Option, - pub node_id_secret: Option, - pub bootstrap: Vec, - pub bootstrap_nodes: Vec, pub routing_table: VeilidConfigRoutingTable, pub rpc: VeilidConfigRPC, pub dht: VeilidConfigDHT, @@ -596,21 +595,26 @@ impl VeilidConfig { cb: ConfigCallback, update_cb: UpdateCallback, ) -> Result<(), VeilidAPIError> { - macro_rules! get_config { - ($key:expr) => { - let keyname = &stringify!($key)[6..]; - let v = cb(keyname.to_owned())?; - $key = match v.downcast() { - Ok(v) => *v, - Err(_) => { - apibail_generic!(format!("incorrect type for key {}", keyname)) - } - }; - }; - } - self.update_cb = Some(update_cb); self.with_mut(|inner| { + // Simple config transformation + macro_rules! get_config { + ($key:expr) => { + let keyname = &stringify!($key)[6..]; + let v = cb(keyname.to_owned())?; + $key = match v.downcast() { + Ok(v) => *v, + Err(e) => { + apibail_generic!(format!( + "incorrect type for key {}: {:?}", + keyname, + type_name_of_val(&*e) + )) + } + }; + }; + } + get_config!(inner.program_name); get_config!(inner.namespace); get_config!(inner.capabilities.protocol_udp); @@ -628,8 +632,6 @@ impl VeilidConfig { get_config!(inner.protected_store.always_use_insecure_storage); get_config!(inner.protected_store.insecure_fallback_directory); get_config!(inner.protected_store.delete); - get_config!(inner.network.node_id); - get_config!(inner.network.node_id_secret); get_config!(inner.network.connection_initial_timeout_ms); get_config!(inner.network.connection_inactivity_timeout_ms); get_config!(inner.network.max_connections_per_ip4); @@ -639,8 +641,9 @@ impl VeilidConfig { get_config!(inner.network.client_whitelist_timeout_ms); get_config!(inner.network.reverse_connection_receipt_time_ms); get_config!(inner.network.hole_punch_receipt_time_ms); - get_config!(inner.network.bootstrap); - get_config!(inner.network.bootstrap_nodes); + get_config!(inner.network.routing_table.node_id); + 
get_config!(inner.network.routing_table.node_id_secret); + get_config!(inner.network.routing_table.bootstrap); get_config!(inner.network.routing_table.limit_over_attached); get_config!(inner.network.routing_table.limit_fully_attached); get_config!(inner.network.routing_table.limit_attached_strong); @@ -716,11 +719,20 @@ impl VeilidConfig { self.inner.read() } + fn safe_config(&self) -> VeilidConfigInner { + let mut safe_cfg = self.inner.read().clone(); + + // Remove secrets + safe_cfg.network.routing_table.node_id_secret = TypedSecretSet::new(); + + safe_cfg + } + pub fn with_mut(&self, f: F) -> Result where F: FnOnce(&mut VeilidConfigInner) -> Result, { - let (out, config) = { + let out = { let inner = &mut *self.inner.write(); // Edit a copy let mut editedinner = inner.clone(); @@ -730,12 +742,13 @@ impl VeilidConfig { Self::validate(&mut editedinner)?; // Commit changes *inner = editedinner.clone(); - (out, editedinner) + out }; // Send configuration update to clients if let Some(update_cb) = &self.update_cb { - update_cb(VeilidUpdate::Config(VeilidStateConfig { config })); + let safe_cfg = self.safe_config(); + update_cb(VeilidUpdate::Config(VeilidStateConfig { config: safe_cfg })); } Ok(out) @@ -908,76 +921,115 @@ impl VeilidConfig { // Get the node id from config if one is specified // Must be done -after- protected store startup - pub async fn init_node_id( + pub async fn init_node_ids( &self, + crypto: Crypto, protected_store: intf::ProtectedStore, ) -> Result<(), VeilidAPIError> { - let mut node_id = self.inner.read().network.node_id; - let mut node_id_secret = self.inner.read().network.node_id_secret; - // See if node id was previously stored in the protected store - if node_id.is_none() { - debug!("pulling node id from storage"); - if let Some(s) = protected_store - .load_user_secret_string("node_id") - .await - .map_err(VeilidAPIError::internal)? 
- { - debug!("node id found in storage"); - node_id = Some(DHTKey::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?); - } else { - debug!("node id not found in storage"); - } - } + let mut out_node_id = TypedKeySet::new(); + let mut out_node_id_secret = TypedSecretSet::new(); - // See if node id secret was previously stored in the protected store - if node_id_secret.is_none() { - debug!("pulling node id secret from storage"); - if let Some(s) = protected_store - .load_user_secret_string("node_id_secret") - .await - .map_err(VeilidAPIError::internal)? - { - debug!("node id secret found in storage"); - node_id_secret = - Some(DHTKeySecret::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?); - } else { - debug!("node id secret not found in storage"); - } - } + for ck in VALID_CRYPTO_KINDS { + let vcrypto = crypto + .get(ck) + .expect("Valid crypto kind is not actually valid."); - // If we have a node id from storage, check it - let (node_id, node_id_secret) = - if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) { - // Validate node id - if !crypto::validate_key(&node_id, &node_id_secret) { - apibail_generic!("node id secret and node id key don't match"); + let mut node_id = self.inner.read().network.routing_table.node_id.get(ck); + let mut node_id_secret = self + .inner + .read() + .network + .routing_table + .node_id_secret + .get(ck); + + // See if node id was previously stored in the protected store + if node_id.is_none() { + debug!("pulling node_id_{} from storage", ck); + if let Some(s) = protected_store + .load_user_secret_string(format!("node_id_{}", ck)) + .await + .map_err(VeilidAPIError::internal)? 
+ { + debug!("node_id_{} found in storage", ck); + node_id = match TypedKey::from_str(s.as_str()) { + Ok(v) => Some(v), + Err(_) => { + debug!("node id in protected store is not valid"); + None + } + } + } else { + debug!("node_id_{} not found in storage", ck); } - (node_id, node_id_secret) - } else { - // If we still don't have a valid node id, generate one - debug!("generating new node id"); - generate_secret() - }; - info!("Node Id is {}", node_id.encode()); - // info!("Node Id Secret is {}", node_id_secret.encode()); + } - // Save the node id / secret in storage - protected_store - .save_user_secret_string("node_id", node_id.encode().as_str()) - .await - .map_err(VeilidAPIError::internal)?; - protected_store - .save_user_secret_string("node_id_secret", node_id_secret.encode().as_str()) - .await - .map_err(VeilidAPIError::internal)?; + // See if node id secret was previously stored in the protected store + if node_id_secret.is_none() { + debug!("pulling node id secret from storage"); + if let Some(s) = protected_store + .load_user_secret_string(format!("node_id_secret_{}", ck)) + .await + .map_err(VeilidAPIError::internal)? 
+ { + debug!("node_id_secret_{} found in storage", ck); + node_id_secret = match TypedSecret::from_str(s.as_str()) { + Ok(v) => Some(v), + Err(_) => { + debug!("node id secret in protected store is not valid"); + None + } + } + } else { + debug!("node_id_secret_{} not found in storage", ck); + } + } + // If we have a node id from storage, check it + let (node_id, node_id_secret) = + if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) { + // Validate node id + if !vcrypto.validate_keypair(&node_id.value, &node_id_secret.value) { + apibail_generic!(format!( + "node_id_secret_{} and node_id_key_{} don't match", + ck, ck + )); + } + (node_id, node_id_secret) + } else { + // If we still don't have a valid node id, generate one + debug!("generating new node_id_{}", ck); + let kp = vcrypto.generate_keypair(); + (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) + }; + info!("Node Id: {}", node_id); + + // Save the node id / secret in storage + protected_store + .save_user_secret_string(format!("node_id_{}", ck), node_id.to_string()) + .await + .map_err(VeilidAPIError::internal)?; + protected_store + .save_user_secret_string( + format!("node_id_secret_{}", ck), + node_id_secret.to_string(), + ) + .await + .map_err(VeilidAPIError::internal)?; + + // Save for config + out_node_id.add(node_id); + out_node_id_secret.add(node_id_secret); + } + + // Commit back to config self.with_mut(|c| { - c.network.node_id = Some(node_id); - c.network.node_id_secret = Some(node_id_secret); + c.network.routing_table.node_id = out_node_id; + c.network.routing_table.node_id_secret = out_node_id_secret; Ok(()) })?; - trace!("init_node_id complete"); + trace!("init_node_ids complete"); Ok(()) } diff --git a/veilid-flutter/example/lib/app.dart b/veilid-flutter/example/lib/app.dart index 4cb9dfbc..00e6600a 100644 --- a/veilid-flutter/example/lib/app.dart +++ b/veilid-flutter/example/lib/app.dart @@ -118,15 +118,18 @@ class _MyAppState extends State with UiLoggy { }); 
await Veilid.instance.attach(); } else if (!startup && _startedUp) { - await Veilid.instance.shutdownVeilidCore(); - if (_updateProcessor != null) { - await _updateProcessor; + try { + await Veilid.instance.shutdownVeilidCore(); + if (_updateProcessor != null) { + await _updateProcessor; + } + } finally { + setState(() { + _updateProcessor = null; + _updateStream = null; + _startedUp = false; + }); } - setState(() { - _updateProcessor = null; - _updateStream = null; - _startedUp = false; - }); } } diff --git a/veilid-flutter/lib/default_config.dart b/veilid-flutter/lib/default_config.dart index 51800a03..90a5f245 100644 --- a/veilid-flutter/lib/default_config.dart +++ b/veilid-flutter/lib/default_config.dart @@ -46,13 +46,12 @@ Future getDefaultVeilidConfig(String programName) async { clientWhitelistTimeoutMs: 300000, reverseConnectionReceiptTimeMs: 5000, holePunchReceiptTimeMs: 5000, - nodeId: null, - nodeIdSecret: null, - bootstrap: kIsWeb - ? ["ws://bootstrap.dev.veilid.net:5150/ws"] - : ["bootstrap.dev.veilid.net"], - bootstrapNodes: [], routingTable: VeilidConfigRoutingTable( + nodeId: [], + nodeIdSecret: [], + bootstrap: kIsWeb + ? 
["ws://bootstrap.dev.veilid.net:5150/ws"] + : ["bootstrap.dev.veilid.net"], limitOverAttached: 64, limitFullyAttached: 32, limitAttachedStrong: 16, diff --git a/veilid-flutter/lib/veilid.dart b/veilid-flutter/lib/veilid.dart index 3a8baea4..8d9a8c90 100644 --- a/veilid-flutter/lib/veilid.dart +++ b/veilid-flutter/lib/veilid.dart @@ -697,6 +697,9 @@ class VeilidConfigRPC { //////////// class VeilidConfigRoutingTable { + List nodeId; + List nodeIdSecret; + List bootstrap; int limitOverAttached; int limitFullyAttached; int limitAttachedStrong; @@ -704,6 +707,9 @@ class VeilidConfigRoutingTable { int limitAttachedWeak; VeilidConfigRoutingTable({ + required this.nodeId, + required this.nodeIdSecret, + required this.bootstrap, required this.limitOverAttached, required this.limitFullyAttached, required this.limitAttachedStrong, @@ -713,6 +719,9 @@ class VeilidConfigRoutingTable { Map get json { return { + 'node_id': nodeId.map((p) => p).toList(), + 'node_id_secret': nodeIdSecret.map((p) => p).toList(), + 'bootstrap': bootstrap.map((p) => p).toList(), 'limit_over_attached': limitOverAttached, 'limit_fully_attached': limitFullyAttached, 'limit_attached_strong': limitAttachedStrong, @@ -722,7 +731,10 @@ class VeilidConfigRoutingTable { } VeilidConfigRoutingTable.fromJson(dynamic json) - : limitOverAttached = json['limit_over_attached'], + : nodeId = List.from(json['node_id'].map((j) => j)), + nodeIdSecret = List.from(json['node_id_secret'].map((j) => j)), + bootstrap = List.from(json['bootstrap'].map((j) => j)), + limitOverAttached = json['limit_over_attached'], limitFullyAttached = json['limit_fully_attached'], limitAttachedStrong = json['limit_attached_strong'], limitAttachedGood = json['limit_attached_good'], @@ -741,10 +753,6 @@ class VeilidConfigNetwork { int clientWhitelistTimeoutMs; int reverseConnectionReceiptTimeMs; int holePunchReceiptTimeMs; - String? nodeId; - String? 
nodeIdSecret; - List bootstrap; - List bootstrapNodes; VeilidConfigRoutingTable routingTable; VeilidConfigRPC rpc; VeilidConfigDHT dht; @@ -765,10 +773,6 @@ class VeilidConfigNetwork { required this.clientWhitelistTimeoutMs, required this.reverseConnectionReceiptTimeMs, required this.holePunchReceiptTimeMs, - required this.nodeId, - required this.nodeIdSecret, - required this.bootstrap, - required this.bootstrapNodes, required this.routingTable, required this.rpc, required this.dht, @@ -791,10 +795,6 @@ class VeilidConfigNetwork { 'client_whitelist_timeout_ms': clientWhitelistTimeoutMs, 'reverse_connection_receipt_time_ms': reverseConnectionReceiptTimeMs, 'hole_punch_receipt_time_ms': holePunchReceiptTimeMs, - 'node_id': nodeId, - 'node_id_secret': nodeIdSecret, - 'bootstrap': bootstrap, - 'bootstrap_nodes': bootstrapNodes, 'routing_table': routingTable.json, 'rpc': rpc.json, 'dht': dht.json, @@ -820,10 +820,6 @@ class VeilidConfigNetwork { reverseConnectionReceiptTimeMs = json['reverse_connection_receipt_time_ms'], holePunchReceiptTimeMs = json['hole_punch_receipt_time_ms'], - nodeId = json['node_id'], - nodeIdSecret = json['node_id_secret'], - bootstrap = json['bootstrap'], - bootstrapNodes = json['bootstrap_nodes'], routingTable = VeilidConfigRoutingTable.fromJson(json['routing_table']), rpc = VeilidConfigRPC.fromJson(json['rpc']), dht = VeilidConfigDHT.fromJson(json['dht']), @@ -1163,26 +1159,26 @@ class PeerStats { //////////// class PeerTableData { - String nodeId; + List nodeIds; PeerAddress peerAddress; PeerStats peerStats; PeerTableData({ - required this.nodeId, + required this.nodeIds, required this.peerAddress, required this.peerStats, }); Map get json { return { - 'node_id': nodeId, + 'node_ids': nodeIds.map((p) => p).toList(), 'peer_address': peerAddress.json, 'peer_stats': peerStats.json, }; } PeerTableData.fromJson(dynamic json) - : nodeId = json['node_id'], + : nodeIds = List.from(json['node_ids'].map((j) => j)), peerAddress = 
PeerAddress.fromJson(json['peer_address']), peerStats = PeerStats.fromJson(json['peer_stats']); } @@ -1822,19 +1818,19 @@ Sequencing sequencingFromJson(String j) { } ////////////////////////////////////// -/// KeyBlob -class KeyBlob { - final String key; +/// RouteBlob +class RouteBlob { + final String routeId; final Uint8List blob; - KeyBlob(this.key, this.blob); + RouteBlob(this.routeId, this.blob); - KeyBlob.fromJson(dynamic json) - : key = json['key'], + RouteBlob.fromJson(dynamic json) + : routeId = json['route_id'], blob = base64UrlNoPadDecode(json['blob']); Map get json { - return {'key': key, 'blob': base64UrlNoPadEncode(blob)}; + return {'route_id': routeId, 'blob': base64UrlNoPadEncode(blob)}; } } @@ -1922,8 +1918,8 @@ abstract class Veilid { Future routingContext(); // Private route allocation - Future newPrivateRoute(); - Future newCustomPrivateRoute( + Future newPrivateRoute(); + Future newCustomPrivateRoute( Stability stability, Sequencing sequencing); Future importRemotePrivateRoute(Uint8List blob); Future releasePrivateRoute(String key); diff --git a/veilid-flutter/lib/veilid_ffi.dart b/veilid-flutter/lib/veilid_ffi.dart index 4af42a26..dd0b13a4 100644 --- a/veilid-flutter/lib/veilid_ffi.dart +++ b/veilid-flutter/lib/veilid_ffi.dart @@ -847,22 +847,23 @@ class VeilidFFI implements Veilid { } @override - Future newPrivateRoute() { + Future newPrivateRoute() { final recvPort = ReceivePort("new_private_route"); final sendPort = recvPort.sendPort; _newPrivateRoute(sendPort.nativePort); - return processFutureJson(KeyBlob.fromJson, recvPort.first); + return processFutureJson(RouteBlob.fromJson, recvPort.first); } @override - Future newCustomPrivateRoute( + Future newCustomPrivateRoute( Stability stability, Sequencing sequencing) async { final recvPort = ReceivePort("new_custom_private_route"); final sendPort = recvPort.sendPort; _newCustomPrivateRoute(sendPort.nativePort, stability.json.toNativeUtf8(), sequencing.json.toNativeUtf8()); - final keyblob = 
await processFutureJson(KeyBlob.fromJson, recvPort.first); - return keyblob; + final routeBlob = + await processFutureJson(RouteBlob.fromJson, recvPort.first); + return routeBlob; } @override diff --git a/veilid-flutter/lib/veilid_js.dart b/veilid-flutter/lib/veilid_js.dart index fbfcecba..963a5fca 100644 --- a/veilid-flutter/lib/veilid_js.dart +++ b/veilid-flutter/lib/veilid_js.dart @@ -65,15 +65,15 @@ class VeilidRoutingContextJS implements VeilidRoutingContext { var encodedRequest = base64UrlNoPadEncode(request); return base64UrlNoPadDecode(await _wrapApiPromise(js_util.callMethod( - wasm, "routing_context_app_call", [_ctx.id, encodedRequest]))); + wasm, "routing_context_app_call", [_ctx.id, target, encodedRequest]))); } @override Future appMessage(String target, Uint8List message) { var encodedMessage = base64UrlNoPadEncode(message); - return _wrapApiPromise(js_util.callMethod( - wasm, "routing_context_app_message", [_ctx.id, encodedMessage])); + return _wrapApiPromise(js_util.callMethod(wasm, + "routing_context_app_message", [_ctx.id, target, encodedMessage])); } } @@ -267,14 +267,14 @@ class VeilidJS implements Veilid { } @override - Future newPrivateRoute() async { + Future newPrivateRoute() async { Map blobJson = jsonDecode(await _wrapApiPromise( js_util.callMethod(wasm, "new_private_route", []))); - return KeyBlob.fromJson(blobJson); + return RouteBlob.fromJson(blobJson); } @override - Future newCustomPrivateRoute( + Future newCustomPrivateRoute( Stability stability, Sequencing sequencing) async { var stabilityString = jsonEncode(stability, toEncodable: veilidApiToEncodable); @@ -284,7 +284,7 @@ class VeilidJS implements Veilid { Map blobJson = jsonDecode(await _wrapApiPromise(js_util .callMethod( wasm, "new_private_route", [stabilityString, sequencingString]))); - return KeyBlob.fromJson(blobJson); + return RouteBlob.fromJson(blobJson); } @override diff --git a/veilid-flutter/rust/src/dart_ffi.rs b/veilid-flutter/rust/src/dart_ffi.rs index 
3956f11b..4650844d 100644 --- a/veilid-flutter/rust/src/dart_ffi.rs +++ b/veilid-flutter/rust/src/dart_ffi.rs @@ -10,12 +10,14 @@ use opentelemetry::*; use opentelemetry_otlp::WithExportConfig; use parking_lot::Mutex; use serde::*; +use std::str::FromStr; use std::collections::BTreeMap; use std::os::raw::c_char; use std::sync::Arc; use tracing::*; use tracing_subscriber::prelude::*; use tracing_subscriber::*; +use veilid_core::Encodable as _; // Globals lazy_static! { @@ -56,6 +58,29 @@ define_string_destructor!(free_string); type APIResult = Result; const APIRESULT_VOID: APIResult<()> = APIResult::Ok(()); +// Parse target +async fn parse_target(s: String) -> APIResult { + + // Is this a route id? + if let Ok(rrid) = veilid_core::RouteId::from_str(&s) { + let veilid_api = get_veilid_api().await?; + let routing_table = veilid_api.routing_table()?; + let rss = routing_table.route_spec_store(); + + // Is this a valid remote route id? (can't target allocated routes) + if rss.is_route_id_remote(&rrid) { + return Ok(veilid_core::Target::PrivateRoute(rrid)); + } + } + + // Is this a node id? 
+ if let Ok(nid) = veilid_core::PublicKey::from_str(&s) { + return Ok(veilid_core::Target::NodeId(nid)); + } + + Err(veilid_core::VeilidAPIError::invalid_target()) +} + ///////////////////////////////////////// // FFI-specific @@ -92,8 +117,8 @@ pub struct VeilidFFIConfig { } #[derive(Debug, Deserialize, Serialize)] -pub struct VeilidFFIKeyBlob { - pub key: veilid_core::DHTKey, +pub struct VeilidFFIRouteBlob { + pub route_id: veilid_core::RouteId, #[serde(with = "veilid_core::json_as_base64")] pub blob: Vec, } @@ -415,10 +440,10 @@ pub extern "C" fn routing_context_with_sequencing(id: u32, sequencing: FfiStr) - new_id } + #[no_mangle] pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, request: FfiStr) { - let target: veilid_core::DHTKey = - veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap(); + let target_string: String = target.into_opt_string().unwrap(); let request: Vec = data_encoding::BASE64URL_NOPAD .decode( request.into_opt_string() @@ -427,10 +452,6 @@ pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, r ) .unwrap(); DartIsolateWrapper::new(port).spawn_result(async move { - let veilid_api = get_veilid_api().await?; - let routing_table = veilid_api.routing_table()?; - let rss = routing_table.route_spec_store(); - let routing_context = { let rc = ROUTING_CONTEXTS.lock(); let Some(routing_context) = rc.get(&id) else { @@ -439,12 +460,7 @@ pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, r routing_context.clone() }; - let target = if rss.get_remote_private_route(&target).is_some() { - veilid_core::Target::PrivateRoute(target) - } else { - veilid_core::Target::NodeId(veilid_core::NodeId::new(target)) - }; - + let target = parse_target(target_string).await?; let answer = routing_context.app_call(target, request).await?; let answer = data_encoding::BASE64URL_NOPAD.encode(&answer); APIResult::Ok(answer) @@ -453,8 +469,7 @@ pub extern "C" fn 
routing_context_app_call(port: i64, id: u32, target: FfiStr, r #[no_mangle] pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr, message: FfiStr) { - let target: veilid_core::DHTKey = - veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap(); + let target_string: String = target.into_opt_string().unwrap(); let message: Vec = data_encoding::BASE64URL_NOPAD .decode( message.into_opt_string() @@ -462,11 +477,7 @@ pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr .as_bytes(), ) .unwrap(); - DartIsolateWrapper::new(port).spawn_result(async move { - let veilid_api = get_veilid_api().await?; - let routing_table = veilid_api.routing_table()?; - let rss = routing_table.route_spec_store(); - + DartIsolateWrapper::new(port).spawn_result(async move { let routing_context = { let rc = ROUTING_CONTEXTS.lock(); let Some(routing_context) = rc.get(&id) else { @@ -475,12 +486,7 @@ pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr routing_context.clone() }; - let target = if rss.get_remote_private_route(&target).is_some() { - veilid_core::Target::PrivateRoute(target) - } else { - veilid_core::Target::NodeId(veilid_core::NodeId::new(target)) - }; - + let target = parse_target(target_string).await?; routing_context.app_message(target, message).await?; APIRESULT_VOID }); @@ -491,11 +497,11 @@ pub extern "C" fn new_private_route(port: i64) { DartIsolateWrapper::new(port).spawn_result_json(async move { let veilid_api = get_veilid_api().await?; - let (key, blob) = veilid_api.new_private_route().await?; + let (route_id, blob) = veilid_api.new_private_route().await?; - let keyblob = VeilidFFIKeyBlob { key, blob }; + let route_blob = VeilidFFIRouteBlob { route_id, blob }; - APIResult::Ok(keyblob) + APIResult::Ok(route_blob) }); } @@ -509,13 +515,13 @@ pub extern "C" fn new_custom_private_route(port: i64, stability: FfiStr, sequenc DartIsolateWrapper::new(port).spawn_result_json(async move { 
let veilid_api = get_veilid_api().await?; - let (key, blob) = veilid_api - .new_custom_private_route(stability, sequencing) + let (route_id, blob) = veilid_api + .new_custom_private_route(&veilid_core::VALID_CRYPTO_KINDS, stability, sequencing) .await?; - let keyblob = VeilidFFIKeyBlob { key, blob }; + let route_blob = VeilidFFIRouteBlob { route_id, blob }; - APIResult::Ok(keyblob) + APIResult::Ok(route_blob) }); } @@ -531,19 +537,19 @@ pub extern "C" fn import_remote_private_route(port: i64, blob: FfiStr) { DartIsolateWrapper::new(port).spawn_result(async move { let veilid_api = get_veilid_api().await?; - let key = veilid_api.import_remote_private_route(blob)?; + let route_id = veilid_api.import_remote_private_route(blob)?; - APIResult::Ok(key.encode()) + APIResult::Ok(route_id.encode()) }); } #[no_mangle] -pub extern "C" fn release_private_route(port: i64, key: FfiStr) { - let key: veilid_core::DHTKey = - veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); +pub extern "C" fn release_private_route(port: i64, route_id: FfiStr) { + let route_id: veilid_core::RouteId = + veilid_core::deserialize_opt_json(route_id.into_opt_string()).unwrap(); DartIsolateWrapper::new(port).spawn_result(async move { let veilid_api = get_veilid_api().await?; - veilid_api.release_private_route(&key)?; + veilid_api.release_private_route(route_id)?; APIRESULT_VOID }); } diff --git a/veilid-server/src/cmdline.rs b/veilid-server/src/cmdline.rs index 1ef31d99..9f37de05 100644 --- a/veilid-server/src/cmdline.rs +++ b/veilid-server/src/cmdline.rs @@ -4,7 +4,7 @@ use clap::{Arg, ArgMatches, Command}; use std::ffi::OsStr; use std::path::Path; use std::str::FromStr; -use veilid_core::{DHTKey, DHTKeySecret}; +use veilid_core::{TypedKeySet, TypedSecretSet}; fn do_clap_matches(default_config_path: &OsStr) -> Result { let matches = Command::new("veilid-server") @@ -78,17 +78,21 @@ fn do_clap_matches(default_config_path: &OsStr) -> Result Result EyreResult<(Settings, ArgMatches)> { 
settingsrw.logging.terminal.enabled = false; // Split or get secret - let (k, s) = if let Some((k, s)) = v.split_once(':') { - let k = DHTKey::try_decode(k).wrap_err("failed to decode node id from command line")?; - let s = DHTKeySecret::try_decode(s)?; - (k, s) - } else { - let k = DHTKey::try_decode(v)?; - let buffer = rpassword::prompt_password("Enter secret key (will not echo): ") - .wrap_err("invalid secret key")?; - let buffer = buffer.trim().to_string(); - let s = DHTKeySecret::try_decode(&buffer)?; - (k, s) - }; - settingsrw.core.network.node_id = Some(k); - settingsrw.core.network.node_id_secret = Some(s); + let tks = + TypedKeySet::from_str(v).wrap_err("failed to decode node id set from command line")?; + + let buffer = rpassword::prompt_password("Enter secret key set (will not echo): ") + .wrap_err("invalid secret key")?; + let buffer = buffer.trim().to_string(); + let tss = TypedSecretSet::from_str(&buffer).wrap_err("failed to decode secret set")?; + + settingsrw.core.network.routing_table.node_id = Some(tks); + settingsrw.core.network.routing_table.node_id_secret = Some(tss); } if matches.occurrences_of("bootstrap") != 0 { @@ -276,29 +268,7 @@ pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> { bail!("value not specified for bootstrap"); } }; - settingsrw.core.network.bootstrap = bootstrap_list; - } - - if matches.occurrences_of("bootstrap-nodes") != 0 { - let bootstrap_list = match matches.value_of("bootstrap-nodes") { - Some(x) => { - println!("Overriding bootstrap node list with: "); - let mut out: Vec = Vec::new(); - for x in x.split(',') { - let x = x.trim(); - println!(" {}", x); - out.push( - ParsedNodeDialInfo::from_str(x) - .wrap_err("unable to parse dial info in bootstrap node list")?, - ); - } - out - } - None => { - bail!("value not specified for bootstrap node list"); - } - }; - settingsrw.core.network.bootstrap_nodes = bootstrap_list; + settingsrw.core.network.routing_table.bootstrap = bootstrap_list; } #[cfg(feature = 
"rt-tokio")] diff --git a/veilid-server/src/main.rs b/veilid-server/src/main.rs index 7dd76d54..19b08b71 100644 --- a/veilid-server/src/main.rs +++ b/veilid-server/src/main.rs @@ -18,6 +18,7 @@ use cfg_if::*; #[allow(unused_imports)] use color_eyre::eyre::{bail, ensure, eyre, Result as EyreResult, WrapErr}; use server::*; +use std::str::FromStr; use tools::*; use tracing::*; use veilid_logs::*; @@ -42,10 +43,16 @@ fn main() -> EyreResult<()> { } // --- Generate DHT Key --- - if matches.occurrences_of("generate-dht-key") != 0 { - let (key, secret) = veilid_core::generate_secret(); - println!("Public: {}\nSecret: {}", key.encode(), secret.encode()); - return Ok(()); + if matches.occurrences_of("generate-key-pair") != 0 { + if let Some(ckstr) = matches.get_one::("generate-key-pair") { + let ck: veilid_core::CryptoKind = + veilid_core::FourCC::from_str(ckstr).wrap_err("couldn't parse crypto kind")?; + let tkp = veilid_core::Crypto::generate_keypair(ck).wrap_err("invalid crypto kind")?; + println!("{}", tkp.to_string()); + return Ok(()); + } else { + bail!("missing crypto kind"); + } } // See if we're just running a quick command diff --git a/veilid-server/src/settings.rs b/veilid-server/src/settings.rs index 9a6e6259..3867edbb 100644 --- a/veilid-server/src/settings.rs +++ b/veilid-server/src/settings.rs @@ -64,11 +64,10 @@ core: client_whitelist_timeout_ms: 300000 reverse_connection_receipt_time_ms: 5000 hole_punch_receipt_time_ms: 5000 - node_id: null - node_id_secret: null - bootstrap: ['bootstrap.dev.veilid.net'] - bootstrap_nodes: [] routing_table: + node_id: null + node_id_secret: null + bootstrap: ['bootstrap.dev.veilid.net'] limit_over_attached: 64 limit_fully_attached: 32 limit_attached_strong: 16 @@ -300,66 +299,6 @@ impl serde::Serialize for ParsedUrl { } } -#[derive(Debug, Clone, PartialEq)] -pub struct ParsedNodeDialInfo { - pub node_dial_info_string: String, - pub node_id: NodeId, - pub dial_info: DialInfo, -} - -// impl ParsedNodeDialInfo { -// pub fn 
offset_port(&mut self, offset: u16) -> Result<(), ()> { -// // Bump port on dial_info -// self.dial_info -// .set_port(self.dial_info.port() + 1); -// self.node_dial_info_string = format!("{}@{}",self.node_id, self.dial_info); -// Ok(()) -// } -// } - -impl FromStr for ParsedNodeDialInfo { - type Err = veilid_core::VeilidAPIError; - fn from_str( - node_dial_info_string: &str, - ) -> Result { - let (id_str, di_str) = node_dial_info_string.split_once('@').ok_or_else(|| { - VeilidAPIError::invalid_argument( - "Invalid node dial info in bootstrap entry", - "node_dial_info_string", - node_dial_info_string, - ) - })?; - let node_id = NodeId::from_str(id_str) - .map_err(|e| VeilidAPIError::invalid_argument(e, "node_id", id_str))?; - let dial_info = DialInfo::from_str(di_str) - .map_err(|e| VeilidAPIError::invalid_argument(e, "dial_info", id_str))?; - Ok(Self { - node_dial_info_string: node_dial_info_string.to_owned(), - node_id, - dial_info, - }) - } -} - -impl<'de> serde::Deserialize<'de> for ParsedNodeDialInfo { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - ParsedNodeDialInfo::from_str(s.as_str()).map_err(serde::de::Error::custom) - } -} - -impl serde::Serialize for ParsedNodeDialInfo { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.node_dial_info_string.serialize(serializer) - } -} - #[derive(Debug, PartialEq)] pub struct NamedSocketAddrs { pub name: String, @@ -577,6 +516,9 @@ pub struct Dht { #[derive(Debug, Deserialize, Serialize)] pub struct RoutingTable { + pub node_id: Option, + pub node_id_secret: Option, + pub bootstrap: Vec, pub limit_over_attached: u32, pub limit_fully_attached: u32, pub limit_attached_strong: u32, @@ -595,10 +537,6 @@ pub struct Network { pub client_whitelist_timeout_ms: u32, pub reverse_connection_receipt_time_ms: u32, pub hole_punch_receipt_time_ms: u32, - pub node_id: Option, - pub node_id_secret: Option, - 
pub bootstrap: Vec, - pub bootstrap_nodes: Vec, pub routing_table: RoutingTable, pub rpc: Rpc, pub dht: Dht, @@ -919,6 +857,7 @@ impl Settings { } }}; } + set_config_value!(inner.daemon.enabled, value); set_config_value!(inner.client_api.enabled, value); set_config_value!(inner.client_api.listen_address, value); @@ -964,10 +903,9 @@ impl Settings { set_config_value!(inner.core.network.client_whitelist_timeout_ms, value); set_config_value!(inner.core.network.reverse_connection_receipt_time_ms, value); set_config_value!(inner.core.network.hole_punch_receipt_time_ms, value); - set_config_value!(inner.core.network.node_id, value); - set_config_value!(inner.core.network.node_id_secret, value); - set_config_value!(inner.core.network.bootstrap, value); - set_config_value!(inner.core.network.bootstrap_nodes, value); + set_config_value!(inner.core.network.routing_table.node_id, value); + set_config_value!(inner.core.network.routing_table.node_id_secret, value); + set_config_value!(inner.core.network.routing_table.bootstrap, value); set_config_value!(inner.core.network.routing_table.limit_over_attached, value); set_config_value!(inner.core.network.routing_table.limit_fully_attached, value); set_config_value!( @@ -1119,19 +1057,27 @@ impl Settings { "network.hole_punch_receipt_time_ms" => { Ok(Box::new(inner.core.network.hole_punch_receipt_time_ms)) } - "network.node_id" => Ok(Box::new(inner.core.network.node_id)), - "network.node_id_secret" => Ok(Box::new(inner.core.network.node_id_secret)), - "network.bootstrap" => Ok(Box::new(inner.core.network.bootstrap.clone())), - "network.bootstrap_nodes" => Ok(Box::new( + "network.routing_table.node_id" => Ok(Box::new( inner .core .network - .bootstrap_nodes + .routing_table + .node_id .clone() - .into_iter() - .map(|e| e.node_dial_info_string) - .collect::>(), + .unwrap_or_default(), )), + "network.routing_table.node_id_secret" => Ok(Box::new( + inner + .core + .network + .routing_table + .node_id_secret + .clone() + 
.unwrap_or_default(), + )), + "network.routing_table.bootstrap" => { + Ok(Box::new(inner.core.network.routing_table.bootstrap.clone())) + } "network.routing_table.limit_over_attached" => Ok(Box::new( inner.core.network.routing_table.limit_over_attached, )), @@ -1488,14 +1434,13 @@ mod tests { assert_eq!(s.core.network.client_whitelist_timeout_ms, 300_000u32); assert_eq!(s.core.network.reverse_connection_receipt_time_ms, 5_000u32); assert_eq!(s.core.network.hole_punch_receipt_time_ms, 5_000u32); - assert_eq!(s.core.network.node_id, None); - assert_eq!(s.core.network.node_id_secret, None); + assert_eq!(s.core.network.routing_table.node_id, None); + assert_eq!(s.core.network.routing_table.node_id_secret, None); // assert_eq!( - s.core.network.bootstrap, + s.core.network.routing_table.bootstrap, vec!["bootstrap.dev.veilid.net".to_owned()] ); - assert_eq!(s.core.network.bootstrap_nodes, vec![]); // assert_eq!(s.core.network.rpc.concurrency, 0); assert_eq!(s.core.network.rpc.queue_size, 1024); diff --git a/veilid-tools/src/tools.rs b/veilid-tools/src/tools.rs index 632336a1..a58a1fd6 100644 --- a/veilid-tools/src/tools.rs +++ b/veilid-tools/src/tools.rs @@ -299,6 +299,14 @@ pub unsafe fn aligned_8_u8_vec_uninit(n_bytes: usize) -> Vec { ) } +pub unsafe fn unaligned_u8_vec_uninit(n_bytes: usize) -> Vec { + let mut unaligned: Vec = Vec::with_capacity(n_bytes); + let ptr = unaligned.as_mut_ptr(); + mem::forget(unaligned); + + Vec::from_raw_parts(ptr as *mut u8, n_bytes, n_bytes) +} + pub fn debug_backtrace() -> String { let bt = backtrace::Backtrace::new(); format!("{:?}", bt) @@ -328,3 +336,7 @@ pub fn is_debug_backtrace_enabled() -> bool { } } } + +pub fn type_name_of_val(_val: &T) -> &'static str { + std::any::type_name::() +} diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 33c965aa..26da17bf 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -79,6 +79,28 @@ pub fn from_json( deserialize_json(&s) } +// Parse target +fn parse_target(s: 
String) -> APIResult { + // Is this a route id? + if let Ok(rrid) = veilid_core::RouteId::from_str(&s) { + let veilid_api = get_veilid_api()?; + let routing_table = veilid_api.routing_table()?; + let rss = routing_table.route_spec_store(); + + // Is this a valid remote route id? (can't target allocated routes) + if rss.is_route_id_remote(&rrid) { + return Ok(veilid_core::Target::PrivateRoute(rrid)); + } + } + + // Is this a node id? + if let Ok(nid) = veilid_core::PublicKey::from_str(&s) { + return Ok(veilid_core::Target::NodeId(nid)); + } + + Err(veilid_core::VeilidAPIError::invalid_target()) +} + // Utility types for async API results type APIResult = Result; const APIRESULT_UNDEFINED: APIResult<()> = APIResult::Ok(()); @@ -136,8 +158,8 @@ pub struct VeilidWASMConfig { } #[derive(Debug, Deserialize, Serialize)] -pub struct VeilidKeyBlob { - pub key: veilid_core::DHTKey, +pub struct VeilidRouteBlob { + pub route_id: veilid_core::RouteId, #[serde(with = "veilid_core::json_as_base64")] pub blob: Vec, } @@ -355,7 +377,6 @@ pub fn routing_context_app_call(id: u32, target: String, request: String) -> Pro wrap_api_future_plain(async move { let veilid_api = get_veilid_api()?; let routing_table = veilid_api.routing_table()?; - let rss = routing_table.route_spec_store(); let routing_context = { let rc = (*ROUTING_CONTEXTS).borrow(); @@ -365,15 +386,7 @@ pub fn routing_context_app_call(id: u32, target: String, request: String) -> Pro routing_context.clone() }; - let target: DHTKey = - DHTKey::try_decode(&target).map_err(|e| VeilidAPIError::parse_error(e, &target))?; - - let target = if rss.get_remote_private_route(&target).is_some() { - veilid_core::Target::PrivateRoute(target) - } else { - veilid_core::Target::NodeId(veilid_core::NodeId::new(target)) - }; - + let target = parse_target(target)?; let answer = routing_context.app_call(target, request).await?; let answer = data_encoding::BASE64URL_NOPAD.encode(&answer); APIResult::Ok(answer) @@ -388,7 +401,6 @@ pub fn 
routing_context_app_message(id: u32, target: String, message: String) -> wrap_api_future_void(async move { let veilid_api = get_veilid_api()?; let routing_table = veilid_api.routing_table()?; - let rss = routing_table.route_spec_store(); let routing_context = { let rc = (*ROUTING_CONTEXTS).borrow(); @@ -398,15 +410,7 @@ pub fn routing_context_app_message(id: u32, target: String, message: String) -> routing_context.clone() }; - let target: DHTKey = - DHTKey::try_decode(&target).map_err(|e| VeilidAPIError::parse_error(e, &target))?; - - let target = if rss.get_remote_private_route(&target).is_some() { - veilid_core::Target::PrivateRoute(target) - } else { - veilid_core::Target::NodeId(veilid_core::NodeId::new(target)) - }; - + let target = parse_target(target)?; routing_context.app_message(target, message).await?; APIRESULT_UNDEFINED }) @@ -417,11 +421,11 @@ pub fn new_private_route() -> Promise { wrap_api_future_json(async move { let veilid_api = get_veilid_api()?; - let (key, blob) = veilid_api.new_private_route().await?; + let (route_id, blob) = veilid_api.new_private_route().await?; - let keyblob = VeilidKeyBlob { key, blob }; + let route_blob = VeilidRouteBlob { route_id, blob }; - APIResult::Ok(keyblob) + APIResult::Ok(route_blob) }) } @@ -433,13 +437,13 @@ pub fn new_custom_private_route(stability: String, sequencing: String) -> Promis wrap_api_future_json(async move { let veilid_api = get_veilid_api()?; - let (key, blob) = veilid_api - .new_custom_private_route(stability, sequencing) + let (route_id, blob) = veilid_api + .new_custom_private_route(&veilid_core::VALID_CRYPTO_KINDS, stability, sequencing) .await?; - let keyblob = VeilidKeyBlob { key, blob }; + let route_blob = VeilidRouteBlob { route_id, blob }; - APIResult::Ok(keyblob) + APIResult::Ok(route_blob) }) } @@ -458,11 +462,11 @@ pub fn import_remote_private_route(blob: String) -> Promise { } #[wasm_bindgen()] -pub fn release_private_route(key: String) -> Promise { - let key: veilid_core::DHTKey = 
veilid_core::deserialize_json(&key).unwrap(); +pub fn release_private_route(route_id: String) -> Promise { + let route_id: veilid_core::RouteId = veilid_core::deserialize_json(&route_id).unwrap(); wrap_api_future_void(async move { let veilid_api = get_veilid_api()?; - veilid_api.release_private_route(&key)?; + veilid_api.release_private_route(route_id)?; APIRESULT_UNDEFINED }) } diff --git a/veilid-wasm/tests/web.rs b/veilid-wasm/tests/web.rs index a986d126..2ae8ca70 100644 --- a/veilid-wasm/tests/web.rs +++ b/veilid-wasm/tests/web.rs @@ -37,9 +37,14 @@ fn init_callbacks() { case "capabilities.protocol_connect_wss": return true; case "capabilities.protocol_accept_wss": return false; case "tablestore.directory": return ""; - case "network.node_id": return "ZLd4uMYdP4qYLtxF6GqrzBb32Z6T3rE2FWMkWup1pdY"; - case "network.node_id_secret": return "s2Gvq6HJOxgQh-3xIgfWSL3I-DWZ2c1RjZLJl2Xmg2E"; - case "network.bootstrap": return []; + case "network.routing_table.node_id": return []; + case "network.routing_table.node_id_secret": return []; + case "network.routing_table.bootstrap": return []; + case "network.routing_table.limit_over_attached": return 64; + case "network.routing_table.limit_fully_attached": return 32; + case "network.routing_table.limit_attached_strong": return 16; + case "network.routing_table.limit_attached_good": return 8; + case "network.routing_table.limit_attached_weak": return 4; case "network.rpc.concurrency": return 2; case "network.rpc.queue_size": return 128; case "network.rpc.max_timestamp_behind": return 10000000;