Mirror of https://gitlab.com/veilid/veilid.git
Synced 2025-02-03 01:50:17 -05:00

Merge branch 'json-rpc' into 'main'

Add support for JSON API and veilid-python

See merge request veilid/veilid!25

This commit is contained in: commit 79b4593ce8
.vscode/launch.json (vendored): 7 changed lines

@@ -11,6 +11,13 @@
         }
     ],
     "configurations": [
+        {
+            "name": "Python: Attach using Process Id",
+            "type": "python",
+            "request": "attach",
+            "processId": "${command:pickProcess}",
+            "justMyCode": true
+        },
         {
             "type": "lldb",
             "request": "attach",
Cargo.lock (generated): 97 changed lines

@@ -845,27 +845,6 @@ version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "13e2d432d1601d61d1e11140d04e9d239b5cf7316fa1106523c3d86eea19c29d"
-
-[[package]]
-name = "capnp-futures"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71d520e0af228b92de357f230f4987ee4f9786f2b8aa24b9cfe53f5b11c17198"
-dependencies = [
- "capnp",
- "futures",
-]
-
-[[package]]
-name = "capnp-rpc"
-version = "0.17.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ab8e869783e491cbcc350427a5e775aa4d8a1deaa5198d74332957cfa430779"
-dependencies = [
- "capnp",
- "capnp-futures",
- "futures",
-]
 
 [[package]]
 name = "capnpc"
 version = "0.17.1"
@@ -1678,6 +1657,12 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257"
 
+[[package]]
+name = "dyn-clone"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30"
+
 [[package]]
 name = "ed25519"
 version = "1.5.3"
@@ -4689,6 +4674,30 @@ dependencies = [
  "winapi-util",
 ]
 
+[[package]]
+name = "schemars"
+version = "0.8.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
+dependencies = [
+ "dyn-clone",
+ "schemars_derive",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "schemars_derive"
+version = "0.8.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "serde_derive_internals",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "scoped-tls"
 version = "1.0.1"
@@ -4854,6 +4863,17 @@ dependencies = [
  "syn 2.0.18",
 ]
 
+[[package]]
+name = "serde_derive_internals"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "serde_json"
 version = "1.0.96"
@@ -5886,6 +5906,16 @@ dependencies = [
  "wasm-bindgen",
 ]
 
+[[package]]
+name = "triomphe"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+]
+
 [[package]]
 name = "trust-dns-proto"
 version = "0.22.0"
@@ -6120,9 +6150,6 @@ dependencies = [
  "async-std",
  "async-tungstenite 0.8.0",
  "bugsalot",
- "capnp",
- "capnp-rpc",
- "capnpc",
  "cfg-if 1.0.0",
  "clap 3.2.25",
  "config",
@@ -6131,8 +6158,10 @@ dependencies = [
  "cursive-flexi-logger-view",
  "cursive_buffered_backend",
  "cursive_table_view",
+ "data-encoding",
  "directories",
  "flexi_logger",
+ "flume",
  "futures",
  "hex",
  "json",
@@ -6141,10 +6170,11 @@ dependencies = [
  "serde",
  "serde_derive",
  "serial_test",
+ "stop-token",
  "thiserror",
  "tokio 1.28.2",
  "tokio-util",
- "veilid-core",
+ "veilid-tools",
 ]
 
 [[package]]
@@ -6206,12 +6236,14 @@ dependencies = [
  "owo-colors",
  "paranoid-android",
  "parking_lot 0.12.1",
+ "paste",
  "range-set-blaze",
  "rkyv",
  "rtnetlink",
  "rusqlite",
  "rustls 0.19.1",
  "rustls-pemfile 0.2.1",
+ "schemars",
  "secrecy",
  "send_wrapper 0.6.0",
  "serde",
@@ -6287,9 +6319,6 @@ dependencies = [
  "async-tungstenite 0.22.2",
  "backtrace",
  "bugsalot",
- "capnp",
- "capnp-rpc",
- "capnpc",
  "cfg-if 1.0.0",
  "clap 3.2.25",
  "color-eyre",
@@ -6327,6 +6356,7 @@ dependencies = [
  "tracing-subscriber",
  "url",
  "veilid-core",
+ "wg",
  "windows-service",
 ]
 
@@ -6637,6 +6667,17 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "wg"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f390449c16e0679435fc97a6b49d24e67f09dd05fea1de54db1b60902896d273"
+dependencies = [
+ "atomic-waker",
+ "parking_lot 0.12.1",
+ "triomphe",
+]
+
 [[package]]
 name = "which"
 version = "4.4.0"
veilid-cli Cargo.toml

@@ -3,7 +3,6 @@ name = "veilid-cli"
 version = "0.1.0"
 authors = ["John Smith <jsmith@example.com>"]
 edition = "2021"
-build = "build.rs"
 license = "LGPL-2.0-or-later OR MPL-2.0 OR (MIT AND BSD-3-Clause)"
 
 [[bin]]
@@ -13,8 +12,8 @@ path = "src/main.rs"
 [features]
 default = [ "rt-tokio" ]
 macos = [ "cursive/ncurses-backend" ]
-rt-async-std = [ "async-std", "veilid-core/rt-async-std", "cursive/rt-async-std" ]
-rt-tokio = [ "tokio", "tokio-util", "veilid-core/rt-tokio", "cursive/rt-tokio" ]
+rt-async-std = [ "async-std", "veilid-tools/rt-async-std", "cursive/rt-async-std" ]
+rt-tokio = [ "tokio", "tokio-util", "veilid-tools/rt-tokio", "cursive/rt-tokio" ]
 
 [dependencies]
 async-std = { version = "^1.9", features = ["unstable", "attributes"], optional = true }
@@ -36,19 +35,17 @@ serde = "^1"
 serde_derive = "^1"
 parking_lot = "^0"
 cfg-if = "^1"
-capnp = "^0"
-capnp-rpc = "^0"
 config = { version = "^0", features = ["yaml"] }
 bugsalot = { git = "https://github.com/crioux/bugsalot.git" }
 flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
 thiserror = "^1"
 crossbeam-channel = "^0"
 hex = "^0"
-veilid-core = { path = "../veilid-core" }
+veilid-tools = { path = "../veilid-tools" }
 json = "^0"
+stop-token = { version = "^0", default-features = false }
+flume = { version = "^0", features = ["async"] }
+data-encoding = { version = "^2" }
 
 [dev-dependencies]
 serial_test = "^0"
 
-[build-dependencies]
-capnpc = "^0"
Deleted build script (the CLI crate's build.rs, which compiled the Cap'n Proto client schema)

@@ -1,7 +0,0 @@
-fn main() {
-    ::capnpc::CompilerCommand::new()
-        .file("../veilid-server/proto/veilid-client.capnp")
-        .src_prefix("../veilid-server/")
-        .run()
-        .expect("compiling schema");
-}
Client API connection module, rewritten from the Cap'n Proto RPC client to the JSON API

@@ -1,511 +1,392 @@
 use crate::command_processor::*;
 use crate::tools::*;
-use crate::veilid_client_capnp::*;
-use capnp::capability::Promise;
-use capnp_rpc::{pry, rpc_twoparty_capnp, twoparty, Disconnector, RpcSystem};
-use futures::future::FutureExt;
-use serde::de::DeserializeOwned;
-use std::cell::RefCell;
+use futures::stream::FuturesUnordered;
+use futures::StreamExt;
 use std::net::SocketAddr;
-use std::rc::Rc;
-use veilid_core::tools::*;
-use veilid_core::*;
+use std::time::SystemTime;
+use stop_token::{future::FutureExt as _, StopSource};
 
-macro_rules! capnp_failed {
-    ($ex:expr) => {{
-        let msg = format!("Capnp Error: {}", $ex);
-        error!("{}", msg);
-        Promise::err(capnp::Error::failed(msg))
-    }};
-}
-
-macro_rules! pry_result {
-    ($ex:expr) => {
-        match $ex {
-            Ok(v) => v,
-            Err(e) => {
-                return capnp_failed!(e);
-            }
-        }
-    };
-}
-
-fn map_to_internal_error<T: ToString>(e: T) -> VeilidAPIError {
-    VeilidAPIError::Internal {
-        message: e.to_string(),
-    }
-}
-
-fn decode_api_result<T: DeserializeOwned + fmt::Debug>(
-    reader: &api_result::Reader,
-) -> VeilidAPIResult<T> {
-    match reader.which().map_err(map_to_internal_error)? {
-        api_result::Which::Ok(v) => {
-            let ok_val = v.map_err(map_to_internal_error)?;
-            let res: T = veilid_core::deserialize_json(ok_val).map_err(map_to_internal_error)?;
-            Ok(res)
-        }
-        api_result::Which::Err(e) => {
-            let err_val = e.map_err(map_to_internal_error)?;
-            let res: VeilidAPIError =
-                veilid_core::deserialize_json(err_val).map_err(map_to_internal_error)?;
-            Err(res)
-        }
-    }
-}
-
-struct VeilidClientImpl {
-    comproc: CommandProcessor,
-}
-
-impl VeilidClientImpl {
-    pub fn new(comproc: CommandProcessor) -> Self {
-        Self { comproc }
-    }
-}
-
-impl veilid_client::Server for VeilidClientImpl {
-    fn update(
-        &mut self,
-        params: veilid_client::UpdateParams,
-        _results: veilid_client::UpdateResults,
-    ) -> Promise<(), ::capnp::Error> {
-        let veilid_update = pry!(pry!(params.get()).get_veilid_update());
-        let veilid_update: VeilidUpdate = pry_result!(deserialize_json(veilid_update));
-
-        match veilid_update {
-            VeilidUpdate::Log(log) => {
-                self.comproc.update_log(log);
-            }
-            VeilidUpdate::AppMessage(msg) => {
-                self.comproc.update_app_message(msg);
-            }
-            VeilidUpdate::AppCall(call) => {
-                self.comproc.update_app_call(call);
-            }
-            VeilidUpdate::Attachment(attachment) => {
-                self.comproc.update_attachment(attachment);
-            }
-            VeilidUpdate::Network(network) => {
-                self.comproc.update_network_status(network);
-            }
-            VeilidUpdate::Config(config) => {
-                self.comproc.update_config(config);
-            }
-            VeilidUpdate::RouteChange(route) => {
-                self.comproc.update_route(route);
-            }
-            VeilidUpdate::Shutdown => self.comproc.update_shutdown(),
-            VeilidUpdate::ValueChange(value_change) => {
-                self.comproc.update_value_change(value_change);
-            }
-        }
-
-        Promise::ok(())
-    }
-}
+cfg_if! {
+    if #[cfg(feature="rt-async-std")] {
+        use async_std::io::prelude::BufReadExt;
+        use async_std::io::WriteExt;
+        use async_std::io::BufReader;
+    } else if #[cfg(feature="rt-tokio")] {
+        use tokio::io::AsyncBufReadExt;
+        use tokio::io::AsyncWriteExt;
+        use tokio::io::BufReader;
+    }
+}
 
 struct ClientApiConnectionInner {
     comproc: CommandProcessor,
     connect_addr: Option<SocketAddr>,
-    disconnector: Option<Disconnector<rpc_twoparty_capnp::Side>>,
-    server: Option<Rc<RefCell<veilid_server::Client>>>,
-    server_settings: Option<String>,
+    request_sender: Option<flume::Sender<String>>,
+    disconnector: Option<StopSource>,
     disconnect_requested: bool,
-    cancel_eventual: Eventual,
+    reply_channels: HashMap<u32, flume::Sender<json::JsonValue>>,
+    next_req_id: u32,
 }
 
-type Handle<T> = Rc<RefCell<T>>;
-
 #[derive(Clone)]
 pub struct ClientApiConnection {
-    inner: Handle<ClientApiConnectionInner>,
+    inner: Arc<Mutex<ClientApiConnectionInner>>,
 }
 
 impl ClientApiConnection {
     pub fn new(comproc: CommandProcessor) -> Self {
         Self {
-            inner: Rc::new(RefCell::new(ClientApiConnectionInner {
+            inner: Arc::new(Mutex::new(ClientApiConnectionInner {
                 comproc,
                 connect_addr: None,
+                request_sender: None,
                 disconnector: None,
-                server: None,
-                server_settings: None,
                 disconnect_requested: false,
-                cancel_eventual: Eventual::new(),
+                reply_channels: HashMap::new(),
+                next_req_id: 0,
             })),
         }
     }
 
-    pub fn cancel(&self) {
-        let eventual = {
-            let inner = self.inner.borrow();
-            inner.cancel_eventual.clone()
-        };
-        eventual.resolve(); // don't need to await this
-    }
-
-    async fn process_veilid_state<'a>(
-        &'a mut self,
-        veilid_state: VeilidState,
-    ) -> Result<(), String> {
-        let mut inner = self.inner.borrow_mut();
-        inner.comproc.update_attachment(veilid_state.attachment);
-        inner.comproc.update_network_status(veilid_state.network);
-        inner.comproc.update_config(veilid_state.config);
-        Ok(())
-    }
-
-    async fn spawn_rpc_system(
-        &mut self,
-        connect_addr: SocketAddr,
-        mut rpc_system: RpcSystem<rpc_twoparty_capnp::Side>,
-    ) -> Result<(), String> {
-        let mut request;
-        {
-            let mut inner = self.inner.borrow_mut();
-
-            // Get the bootstrap server connection object
-            inner.server = Some(Rc::new(RefCell::new(
-                rpc_system.bootstrap(rpc_twoparty_capnp::Side::Server),
-            )));
-
-            // Store our disconnector future for later (must happen after bootstrap, contrary to documentation)
-            inner.disconnector = Some(rpc_system.get_disconnector());
-
-            // Get a client object to pass to the server for status update callbacks
-            let client = capnp_rpc::new_client(VeilidClientImpl::new(inner.comproc.clone()));
-
-            // Register our client and get a registration object back
-            request = inner
-                .server
-                .as_ref()
-                .unwrap()
-                .borrow_mut()
-                .register_request();
-            request.get().set_veilid_client(client);
-
-            inner
-                .comproc
-                .set_connection_state(ConnectionState::Connected(
-                    connect_addr,
-                    std::time::SystemTime::now(),
-                ));
-        }
-
-        let rpc_jh = spawn_local(rpc_system);
-
-        let reg_res: Result<registration::Client, String> = (async {
-            // Send the request and get the state object and the registration object
-            let response = request
-                .send()
-                .promise
-                .await
-                .map_err(|e| format!("failed to send register request: {}", e))?;
-            let response = response
-                .get()
-                .map_err(|e| format!("failed to get register response: {}", e))?;
-
-            // Get the registration object, which drops our connection when it is dropped
-            let registration = response
-                .get_registration()
-                .map_err(|e| format!("failed to get registration object: {}", e))?;
-
-            // Get the initial veilid state
-            let veilid_state = response
-                .get_state()
-                .map_err(|e| format!("failed to get initial veilid state: {}", e))?;
-
-            // Set up our state for the first time
-            let veilid_state: VeilidState = deserialize_json(veilid_state)
-                .map_err(|e| format!("failed to get deserialize veilid state: {}", e))?;
-            self.process_veilid_state(veilid_state).await?;
-
-            // Save server settings
-            let server_settings = response
-                .get_settings()
-                .map_err(|e| format!("failed to get initial veilid server settings: {}", e))?
-                .to_owned();
-            self.inner.borrow_mut().server_settings = Some(server_settings.clone());
-
-            // Don't drop the registration, doing so will remove the client
-            // object mapping from the server which we need for the update backchannel
-            Ok(registration)
-        })
-        .await;
-
-        let _registration = match reg_res {
-            Ok(v) => v,
-            Err(e) => {
-                rpc_jh.abort().await;
-                return Err(e);
-            }
-        };
-
-        // Wait until rpc system completion or disconnect was requested
-        let res = rpc_jh.await;
-        res.map_err(|e| format!("client RPC system error: {}", e))
-    }
+    pub fn cancel_all(&self) {
+        let mut inner = self.inner.lock();
+        inner.reply_channels.clear();
+    }
+
+    async fn process_veilid_state<'a>(&self, state: &json::JsonValue) {
+        let comproc = self.inner.lock().comproc.clone();
+        comproc.update_attachment(&state["attachment"]);
+        comproc.update_network_status(&state["network"]);
+        comproc.update_config(&state["config"]);
+    }
+
+    async fn process_response(&self, response: json::JsonValue) {
+        // find the operation id and send the response to the channel for it
+        let Some(id) = response["id"].as_u32() else {
+            error!("invalid id: {}", response);
+            return;
+        };
+        let reply_channel = {
+            let mut inner = self.inner.lock();
+            inner.reply_channels.remove(&id)
+        };
+        let Some(reply_channel) = reply_channel else {
+            warn!("received cancelled reply: {}", response);
+            return;
+        };
+        if let Err(e) = reply_channel.send_async(response).await {
+            error!("failed to process reply: {}", e);
+            return;
+        }
+    }
-    async fn handle_connection(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
+    async fn process_veilid_update(&self, update: json::JsonValue) {
+        let comproc = self.inner.lock().comproc.clone();
+        let Some(kind) = update["kind"].as_str() else {
+            comproc.log_message(format!("missing update kind: {}", update));
+            return;
+        };
+        match kind {
+            "Log" => {
+                comproc.update_log(&update);
+            }
+            "AppMessage" => {
+                comproc.update_app_message(&update);
+            }
+            "AppCall" => {
+                comproc.update_app_call(&update);
+            }
+            "Attachment" => {
+                comproc.update_attachment(&update);
+            }
+            "Network" => {
+                comproc.update_network_status(&update);
+            }
+            "Config" => {
+                comproc.update_config(&update);
+            }
+            "RouteChange" => {
+                comproc.update_route(&update);
+            }
+            "Shutdown" => comproc.update_shutdown(),
+            "ValueChange" => {
+                comproc.update_value_change(&update);
+            }
+            _ => {
+                comproc.log_message(format!("unknown update kind: {}", update));
+            }
+        }
+    }
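For orientation, the dispatcher above keys only off the "type" and "kind" fields of each incoming line; the remaining fields of every update kind come from veilid-core's JSON serialization and are not part of this diff. A minimal illustration (only "type", "kind", "log_level" and "message" are taken from this change; other fields are not guaranteed here):

// Illustration only: one newline-delimited update as the read loop below would see it.
let line = r#"{"type":"Update","kind":"Log","log_level":"Info","message":"hello"}"#;
let update = json::parse(line).expect("valid json");
assert_eq!(update["type"], "Update");
assert_eq!(update["kind"].as_str(), Some("Log"));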
+    async fn handle_connection(&self, connect_addr: SocketAddr) -> Result<(), String> {
         trace!("ClientApiConnection::handle_connection");
-
-        self.inner.borrow_mut().connect_addr = Some(connect_addr);
         // Connect the TCP socket
         let stream = TcpStream::connect(connect_addr)
             .await
             .map_err(map_to_string)?;
 
         // If it succeed, disable nagle algorithm
         stream.set_nodelay(true).map_err(map_to_string)?;
 
-        // Create the VAT network
+        // State we connected
+        let comproc = self.inner.lock().comproc.clone();
+        comproc.set_connection_state(ConnectionState::Connected(connect_addr, SystemTime::now()));
+
+        // Split the stream
         cfg_if! {
             if #[cfg(feature="rt-async-std")] {
                 use futures::AsyncReadExt;
-                let (reader, writer) = stream.split();
+                let (reader, mut writer) = stream.split();
+                let mut reader = BufReader::new(reader);
             } else if #[cfg(feature="rt-tokio")] {
-                pub use tokio_util::compat::*;
-                let (reader, writer) = stream.into_split();
-                let reader = reader.compat();
-                let writer = writer.compat_write();
+                let (reader, mut writer) = stream.into_split();
+                let mut reader = BufReader::new(reader);
             }
         }
 
-        let rpc_network = Box::new(twoparty::VatNetwork::new(
-            reader,
-            writer,
-            rpc_twoparty_capnp::Side::Client,
-            Default::default(),
-        ));
-
-        // Create the rpc system
-        let rpc_system = RpcSystem::new(rpc_network, None);
-
-        // Process the rpc system until we decide we're done
-        match self.spawn_rpc_system(connect_addr, rpc_system).await {
-            Ok(()) => {}
-            Err(e) => {
-                error!("Failed to spawn client RPC system: {}", e);
-            }
-        }
-
-        // Drop the server and disconnector too (if we still have it)
-        let mut inner = self.inner.borrow_mut();
+        // Requests to send
+        let (requests_tx, requests_rx) = flume::unbounded();
+
+        // Create disconnection mechanism
+        let stop_token = {
+            let stop_source = StopSource::new();
+            let token = stop_source.token();
+            let mut inner = self.inner.lock();
+            inner.connect_addr = Some(connect_addr);
+            inner.disconnector = Some(stop_source);
+            inner.request_sender = Some(requests_tx);
+            token
+        };
+
+        // Futures to process unordered
+        let mut unord = FuturesUnordered::new();
+
+        // Process lines
+        let this = self.clone();
+        let recv_messages_future = async move {
+            let mut linebuf = String::new();
+            while let Ok(size) = reader.read_line(&mut linebuf).await {
+                // Exit on EOF
+                if size == 0 {
+                    // Disconnected
+                    break;
+                }
+
+                let line = linebuf.trim().to_owned();
+                linebuf.clear();
+
+                // Unmarshal json
+                let j = match json::parse(&line) {
+                    Ok(v) => v,
+                    Err(e) => {
+                        error!("failed to parse server response: {}", e);
+                        continue;
+                    }
+                };
+
+                if j["type"] == "Update" {
+                    this.process_veilid_update(j).await;
+                } else if j["type"] == "Response" {
+                    this.process_response(j).await;
+                }
+            }
+            //
+            let mut inner = this.inner.lock();
+            inner.request_sender = None;
+        };
+        unord.push(system_boxed(recv_messages_future));
+
+        // Requests send processor
+        let send_requests_future = async move {
+            while let Ok(req) = requests_rx.recv_async().await {
+                if let Err(e) = writer.write_all(req.as_bytes()).await {
+                    error!("failed to write request: {}", e)
+                }
+            }
+        };
+        unord.push(system_boxed(send_requests_future));
+
+        // Request initial server state
+        let capi = self.clone();
+        spawn_detached_local(async move {
+            let mut req = json::JsonValue::new_object();
+            req["op"] = "GetState".into();
+            let Some(resp) = capi.perform_request(req).await else {
+                error!("failed to get state");
+                return;
+            };
+            if resp.has_key("error") {
+                error!("failed to get state: {}", resp["error"]);
+                return;
+            }
+            capi.process_veilid_state(&resp["value"]).await;
+        });
+
+        // Send and receive until we're done or a stop is requested
+        while let Ok(Some(())) = unord.next().timeout_at(stop_token.clone()).await {}
+
+        // // Drop the server and disconnector too (if we still have it)
+        let mut inner = self.inner.lock();
         let disconnect_requested = inner.disconnect_requested;
-        inner.server_settings = None;
-        inner.server = None;
+        inner.request_sender = None;
         inner.disconnector = None;
         inner.disconnect_requested = false;
         inner.connect_addr = None;
 
-        if !disconnect_requested {
-            // Connection lost
-            Err("Connection lost".to_owned())
-        } else {
-            // Connection finished
-            Ok(())
-        }
-    }
+        // Connection finished
+        if disconnect_requested {
+            Ok(())
+        } else {
+            Err("Connection lost".to_owned())
+        }
+    }
+    async fn perform_request(&self, mut req: json::JsonValue) -> Option<json::JsonValue> {
+        let (sender, reply_rx) = {
+            let mut inner = self.inner.lock();
+
+            // Get the request sender
+            let Some(sender) = inner.request_sender.clone() else {
+                error!("dropping request, not connected");
+                return None;
+            };
+
+            // Get next id
+            let id = inner.next_req_id;
+            inner.next_req_id += 1;
+
+            // Add the id
+            req["id"] = id.into();
+
+            // Make a reply receiver
+            let (reply_tx, reply_rx) = flume::bounded(1);
+            inner.reply_channels.insert(id, reply_tx);
+            (sender, reply_rx)
+        };
+
+        // Send the request
+        let req_ndjson = req.dump() + "\n";
+        if let Err(e) = sender.send_async(req_ndjson).await {
+            error!("failed to send request: {}", e);
+            return None;
+        }
+
+        // Wait for the reply
+        let Ok(r) = reply_rx.recv_async().await else {
+            // Cancelled
+            return None;
+        };
+
+        Some(r)
+    }
+
+    pub async fn server_attach(&self) -> Result<(), String> {
+        trace!("ClientApiConnection::server_attach");
+
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Attach".into();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
+    }
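The remaining server_* helpers below all follow the same shape as server_attach() above: build a request object with an "op", run it through perform_request(), then check the reply for an "error" key. A hypothetical extra operation written against this pattern would look like the sketch below; "Ping" is not an op defined by this merge request and only illustrates the calling convention:

// Hypothetical sketch, not part of this change: a new request built on perform_request().
pub async fn server_ping(&self) -> Result<(), String> {
    let mut req = json::JsonValue::new_object();
    req["op"] = "Ping".into();
    let Some(resp) = self.perform_request(req).await else {
        return Err("Cancelled".to_owned());
    };
    if resp.has_key("error") {
        return Err(resp["error"].to_string());
    }
    Ok(())
}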
-    pub fn cancellable<T>(&mut self, p: Promise<T, capnp::Error>) -> Promise<T, capnp::Error>
-    where
-        T: 'static,
-    {
-        let (mut cancel_instance, cancel_eventual) = {
-            let inner = self.inner.borrow();
-            (
-                inner.cancel_eventual.instance_empty().fuse(),
-                inner.cancel_eventual.clone(),
-            )
-        };
-        let mut p = p.fuse();
-
-        Promise::from_future(async move {
-            let out = select! {
-                a = p => {
-                    a
-                },
-                _ = cancel_instance => {
-                    Err(capnp::Error::failed("cancelled".into()))
-                }
-            };
-            drop(cancel_instance);
-            cancel_eventual.reset();
-            out
-        })
-    }
-
-    pub async fn server_attach(&mut self) -> Result<(), String> {
-        trace!("ClientApiConnection::server_attach");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring attach request".to_owned())?
-                .clone()
-        };
-        let request = server.borrow().attach_request();
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        let reader = response
-            .get()
-            .map_err(map_to_string)?
-            .get_result()
-            .map_err(map_to_string)?;
-        let res: VeilidAPIResult<()> = decode_api_result(&reader);
-        res.map_err(map_to_string)
-    }
-
-    pub async fn server_detach(&mut self) -> Result<(), String> {
+    pub async fn server_detach(&self) -> Result<(), String> {
         trace!("ClientApiConnection::server_detach");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring detach request".to_owned())?
-                .clone()
-        };
-        let request = server.borrow().detach_request();
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        let reader = response
-            .get()
-            .map_err(map_to_string)?
-            .get_result()
-            .map_err(map_to_string)?;
-        let res: VeilidAPIResult<()> = decode_api_result(&reader);
-        res.map_err(map_to_string)
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Detach".into();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
     }
 
-    pub async fn server_shutdown(&mut self) -> Result<(), String> {
+    pub async fn server_shutdown(&self) -> Result<(), String> {
         trace!("ClientApiConnection::server_shutdown");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring attach request".to_owned())?
-                .clone()
-        };
-        let request = server.borrow().shutdown_request();
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        response.get().map(drop).map_err(map_to_string)
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Control".into();
+        req["args"] = json::JsonValue::new_array();
+        req["args"].push("Shutdown").unwrap();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
     }
 
-    pub async fn server_debug(&mut self, what: String) -> Result<String, String> {
+    pub async fn server_debug(&self, what: String) -> Result<String, String> {
         trace!("ClientApiConnection::server_debug");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring debug request".to_owned())?
-                .clone()
-        };
-        let mut request = server.borrow().debug_request();
-        request.get().set_command(&what);
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        let reader = response
-            .get()
-            .map_err(map_to_string)?
-            .get_result()
-            .map_err(map_to_string)?;
-        let res: VeilidAPIResult<String> = decode_api_result(&reader);
-        res.map_err(map_to_string)
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Debug".into();
+        req["command"] = what.into();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(resp["value"].to_string())
     }
 
     pub async fn server_change_log_level(
-        &mut self,
+        &self,
         layer: String,
-        log_level: VeilidConfigLogLevel,
+        log_level: String,
     ) -> Result<(), String> {
         trace!("ClientApiConnection::change_log_level");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring change_log_level request".to_owned())?
-                .clone()
-        };
-        let mut request = server.borrow().change_log_level_request();
-        request.get().set_layer(&layer);
-        let log_level_json = veilid_core::serialize_json(&log_level);
-        request.get().set_log_level(&log_level_json);
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        let reader = response
-            .get()
-            .map_err(map_to_string)?
-            .get_result()
-            .map_err(map_to_string)?;
-        let res: VeilidAPIResult<()> = decode_api_result(&reader);
-        res.map_err(map_to_string)
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Control".into();
+        req["args"] = json::JsonValue::new_array();
+        req["args"].push("ChangeLogLevel").unwrap();
+        req["args"].push(layer).unwrap();
+        req["args"].push(log_level).unwrap();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
     }
 
-    pub async fn server_appcall_reply(
-        &mut self,
-        id: OperationId,
-        msg: Vec<u8>,
-    ) -> Result<(), String> {
+    pub async fn server_appcall_reply(&self, id: u64, msg: Vec<u8>) -> Result<(), String> {
         trace!("ClientApiConnection::appcall_reply");
-        let server = {
-            let inner = self.inner.borrow();
-            inner
-                .server
-                .as_ref()
-                .ok_or_else(|| "Not connected, ignoring change_log_level request".to_owned())?
-                .clone()
-        };
-        let mut request = server.borrow().app_call_reply_request();
-        request.get().set_id(id.as_u64());
-        request.get().set_message(&msg);
-        let response = self
-            .cancellable(request.send().promise)
-            .await
-            .map_err(map_to_string)?;
-        let reader = response
-            .get()
-            .map_err(map_to_string)?
-            .get_result()
-            .map_err(map_to_string)?;
-        let res: VeilidAPIResult<()> = decode_api_result(&reader);
-        res.map_err(map_to_string)
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "AppCallReply".into();
+        req["call_id"] = id.to_string().into();
+        req["message"] = data_encoding::BASE64URL_NOPAD.encode(&msg).into();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
     }
 
     // Start Client API connection
-    pub async fn connect(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
+    pub async fn connect(&self, connect_addr: SocketAddr) -> Result<(), String> {
         trace!("ClientApiConnection::connect");
         // Save the address to connect to
         self.handle_connection(connect_addr).await
     }
 
     // End Client API connection
-    pub async fn disconnect(&mut self) {
+    pub async fn disconnect(&self) {
         trace!("ClientApiConnection::disconnect");
-        let disconnector = self.inner.borrow_mut().disconnector.take();
-        match disconnector {
-            Some(d) => {
-                self.inner.borrow_mut().disconnect_requested = true;
-                d.await.unwrap();
-            }
-            None => {
-                debug!("disconnector doesn't exist");
-            }
-        }
+        let mut inner = self.inner.lock();
+        if inner.disconnector.is_some() {
+            inner.disconnector = None;
+            inner.disconnect_requested = true;
+        }
     }
 }
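Taken together, the rewritten client above replaces the Cap'n Proto RPC session with a line-oriented JSON exchange: each request is a single JSON object carrying an "op" and a client-assigned numeric "id", terminated by a newline, and replies come back on the same stream as objects with "type":"Response" and the matching "id", while unsolicited updates arrive with "type":"Update". A rough sketch of one round trip as perform_request() performs it; only the "op", "id", "type", "error" and "value" fields are taken from this diff, any other response fields are defined by veilid-server and not shown here:

// Sketch of the wire exchange implemented above, using the same `json` crate calls.
let mut req = json::JsonValue::new_object();
req["op"] = "Attach".into();
req["id"] = 0u32.into();
let wire_line = req.dump() + "\n"; // -> {"op":"Attach","id":0} followed by a newline
// An (illustrative) success reply on the same stream:
//   {"type":"Response","id":0}
// and an (illustrative) failure reply:
//   {"type":"Response","id":0,"error":"..."}
let resp = json::parse(r#"{"type":"Response","id":0}"#).unwrap();
assert_eq!(resp["id"].as_u32(), Some(0));
assert!(!resp.has_key("error"));
let _ = wire_line;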
Command processor module

@@ -1,21 +1,19 @@
 use crate::client_api_connection::*;
 use crate::settings::Settings;
+use crate::tools::*;
 use crate::ui::*;
-use std::cell::*;
 use std::net::SocketAddr;
-use std::rc::Rc;
 use std::time::SystemTime;
-use veilid_core::tools::*;
-use veilid_core::*;
+use veilid_tools::*;
 
-pub fn convert_loglevel(s: &str) -> Result<VeilidConfigLogLevel, String> {
+pub fn convert_loglevel(s: &str) -> Result<String, String> {
     match s.to_ascii_lowercase().as_str() {
-        "off" => Ok(VeilidConfigLogLevel::Off),
-        "error" => Ok(VeilidConfigLogLevel::Error),
-        "warn" => Ok(VeilidConfigLogLevel::Warn),
-        "info" => Ok(VeilidConfigLogLevel::Info),
-        "debug" => Ok(VeilidConfigLogLevel::Debug),
-        "trace" => Ok(VeilidConfigLogLevel::Trace),
+        "off" => Ok("Off".to_owned()),
+        "error" => Ok("Error".to_owned()),
+        "warn" => Ok("Warn".to_owned()),
+        "info" => Ok("Info".to_owned()),
+        "debug" => Ok("Debug".to_owned()),
+        "trace" => Ok("Trace".to_owned()),
         _ => Err(format!("Invalid log level: {}", s)),
     }
 }
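convert_loglevel() now returns the capitalized level strings that are pushed into the "args" of the ChangeLogLevel control request rather than a veilid-core enum value, for example:

// Behavior of the function shown above.
assert_eq!(convert_loglevel("debug"), Ok("Debug".to_owned()));
assert!(convert_loglevel("verbose").is_err());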
@@ -39,7 +37,7 @@ impl ConnectionState {
 }
 
 struct CommandProcessorInner {
-    ui: UI,
+    ui_sender: UISender,
     capi: Option<ClientApiConnection>,
     reconnect: bool,
     finished: bool,
@@ -47,21 +45,19 @@ struct CommandProcessorInner {
     autoreconnect: bool,
     server_addr: Option<SocketAddr>,
     connection_waker: Eventual,
-    last_call_id: Option<OperationId>,
+    last_call_id: Option<u64>,
 }
 
-type Handle<T> = Rc<RefCell<T>>;
-
 #[derive(Clone)]
 pub struct CommandProcessor {
-    inner: Handle<CommandProcessorInner>,
+    inner: Arc<Mutex<CommandProcessorInner>>,
 }
 
 impl CommandProcessor {
-    pub fn new(ui: UI, settings: &Settings) -> Self {
+    pub fn new(ui_sender: UISender, settings: &Settings) -> Self {
         Self {
-            inner: Rc::new(RefCell::new(CommandProcessorInner {
-                ui,
+            inner: Arc::new(Mutex::new(CommandProcessorInner {
+                ui_sender,
                 capi: None,
                 reconnect: settings.autoreconnect,
                 finished: false,
@@ -73,20 +69,20 @@ impl CommandProcessor {
             })),
         }
     }
-    pub fn set_client_api_connection(&mut self, capi: ClientApiConnection) {
-        self.inner.borrow_mut().capi = Some(capi);
+    pub fn set_client_api_connection(&self, capi: ClientApiConnection) {
+        self.inner.lock().capi = Some(capi);
     }
-    fn inner(&self) -> Ref<CommandProcessorInner> {
-        self.inner.borrow()
+    fn inner(&self) -> MutexGuard<CommandProcessorInner> {
+        self.inner.lock()
     }
-    fn inner_mut(&self) -> RefMut<CommandProcessorInner> {
-        self.inner.borrow_mut()
+    fn inner_mut(&self) -> MutexGuard<CommandProcessorInner> {
+        self.inner.lock()
    }
-    fn ui(&self) -> UI {
-        self.inner.borrow().ui.clone()
+    fn ui_sender(&self) -> UISender {
+        self.inner.lock().ui_sender.clone()
    }
     fn capi(&self) -> ClientApiConnection {
-        self.inner.borrow().capi.as_ref().unwrap().clone()
+        self.inner.lock().capi.as_ref().unwrap().clone()
     }
 
     fn word_split(line: &str) -> (String, Option<String>) {
@@ -103,12 +99,12 @@ impl CommandProcessor {
     pub fn cancel_command(&self) {
         trace!("CommandProcessor::cancel_command");
         let capi = self.capi();
-        capi.cancel();
+        capi.cancel_all();
     }
 
     pub fn cmd_help(&self, _rest: Option<String>, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_help");
-        self.ui().add_node_event(
+        self.ui_sender().add_node_event(
             r#"Commands:
 exit/quit - exit the client
 disconnect - disconnect the client from the Veilid node
@@ -121,14 +117,14 @@ reply - reply to an AppCall not handled directly by the server
 "#
             .to_owned(),
         );
-        let ui = self.ui();
+        let ui = self.ui_sender();
         ui.send_callback(callback);
         Ok(())
     }
 
     pub fn cmd_exit(&self, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_exit");
-        let ui = self.ui();
+        let ui = self.ui_sender();
         ui.send_callback(callback);
         ui.quit();
         Ok(())
@@ -136,8 +132,8 @@ reply - reply to an AppCall not handled directly by the server
 
     pub fn cmd_shutdown(&self, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_shutdown");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             if let Err(e) = capi.server_shutdown().await {
                 error!("Server command 'shutdown' failed to execute: {}", e);
@@ -149,8 +145,8 @@ reply - reply to an AppCall not handled directly by the server
 
     pub fn cmd_attach(&self, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_attach");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             if let Err(e) = capi.server_attach().await {
                 error!("Server command 'attach' failed: {}", e);
@@ -162,8 +158,8 @@ reply - reply to an AppCall not handled directly by the server
 
     pub fn cmd_detach(&self, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_detach");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             if let Err(e) = capi.server_detach().await {
                 error!("Server command 'detach' failed: {}", e);
@@ -175,8 +171,8 @@ reply - reply to an AppCall not handled directly by the server
 
     pub fn cmd_disconnect(&self, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_disconnect");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             capi.disconnect().await;
             ui.send_callback(callback);
@@ -186,8 +182,8 @@ reply - reply to an AppCall not handled directly by the server
 
     pub fn cmd_debug(&self, rest: Option<String>, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_debug");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             match capi.server_debug(rest.unwrap_or_default()).await {
                 Ok(output) => ui.display_string_dialog("Debug Output", output, callback),
@@ -203,8 +199,8 @@ reply - reply to an AppCall not handled directly by the server
         callback: UICallback,
     ) -> Result<(), String> {
         trace!("CommandProcessor::cmd_change_log_level");
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         spawn_detached_local(async move {
             let (layer, rest) = Self::word_split(&rest.unwrap_or_default());
             let log_level = match convert_loglevel(&rest.unwrap_or_default()) {
@@ -235,8 +231,8 @@ reply - reply to an AppCall not handled directly by the server
     pub fn cmd_reply(&self, rest: Option<String>, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_reply");
 
-        let mut capi = self.capi();
-        let ui = self.ui();
+        let capi = self.capi();
+        let ui = self.ui_sender();
         let some_last_id = self.inner_mut().last_call_id.take();
         spawn_detached_local(async move {
             let (first, second) = Self::word_split(&rest.clone().unwrap_or_default());
@@ -249,7 +245,7 @@ reply - reply to an AppCall not handled directly by the server
                 }
                 Ok(v) => v,
             };
-            (OperationId::new(id), second)
+            (id, second)
         } else {
             let id = match some_last_id {
                 None => {
@@ -307,14 +303,14 @@ reply - reply to an AppCall not handled directly by the server
             "change_log_level" => self.cmd_change_log_level(rest, callback),
             "reply" => self.cmd_reply(rest, callback),
             _ => {
-                let ui = self.ui();
+                let ui = self.ui_sender();
                 ui.send_callback(callback);
                 Err(format!("Invalid command: {}", cmd))
             }
         }
     }
 
-    pub async fn connection_manager(&mut self) {
+    pub async fn connection_manager(&self) {
         // Connect until we're done
         while !self.inner_mut().finished {
             // Wait for connection request
@@ -342,7 +338,7 @@ reply - reply to an AppCall not handled directly by the server
             } else {
                 debug!("Retrying connection to {}", server_addr);
             }
-            let mut capi = self.capi();
+            let capi = self.capi();
             let res = capi.connect(server_addr).await;
             if res.is_ok() {
                 info!(
@@ -377,7 +373,7 @@ reply - reply to an AppCall not handled directly by the server
 
     // called by ui
     ////////////////////////////////////////////
-    pub fn set_server_address(&mut self, server_addr: Option<SocketAddr>) {
+    pub fn set_server_address(&self, server_addr: Option<SocketAddr>) {
         self.inner_mut().server_addr = server_addr;
     }
     pub fn get_server_address(&self) -> Option<SocketAddr> {
@ -387,54 +383,65 @@ reply - reply to an AppCall not handled directly by the server
|
|||||||
// calls into ui
|
// calls into ui
|
||||||
////////////////////////////////////////////
|
////////////////////////////////////////////
|
||||||
|
|
||||||
-    pub fn update_attachment(&mut self, attachment: veilid_core::VeilidStateAttachment) {
-        self.inner_mut().ui.set_attachment_state(
-            attachment.state,
-            attachment.public_internet_ready,
-            attachment.local_network_ready,
+    pub fn log_message(&self, message: String) {
+        self.inner().ui_sender.add_node_event(message);
+    }
+
+    pub fn update_attachment(&self, attachment: &json::JsonValue) {
+        self.inner_mut().ui_sender.set_attachment_state(
+            attachment["state"].as_str().unwrap_or_default().to_owned(),
+            attachment["public_internet_ready"]
+                .as_bool()
+                .unwrap_or_default(),
+            attachment["local_network_ready"]
+                .as_bool()
+                .unwrap_or_default(),
        );
    }

-    pub fn update_network_status(&mut self, network: veilid_core::VeilidStateNetwork) {
-        self.inner_mut().ui.set_network_status(
-            network.started,
-            network.bps_down.as_u64(),
-            network.bps_up.as_u64(),
-            network.peers,
+    pub fn update_network_status(&self, network: &json::JsonValue) {
+        self.inner_mut().ui_sender.set_network_status(
+            network["started"].as_bool().unwrap_or_default(),
+            json_str_u64(&network["bps_down"]),
+            json_str_u64(&network["bps_up"]),
+            network["peers"]
+                .members()
+                .cloned()
+                .collect::<Vec<json::JsonValue>>(),
        );
    }
-    pub fn update_config(&mut self, config: veilid_core::VeilidStateConfig) {
-        self.inner_mut().ui.set_config(config.config)
+    pub fn update_config(&self, config: &json::JsonValue) {
+        self.inner_mut().ui_sender.set_config(&config["config"])
    }
-    pub fn update_route(&mut self, route: veilid_core::VeilidRouteChange) {
+    pub fn update_route(&self, route: &json::JsonValue) {
        let mut out = String::new();
-        if !route.dead_routes.is_empty() {
-            out.push_str(&format!("Dead routes: {:?}", route.dead_routes));
+        if route["dead_routes"].len() != 0 {
+            out.push_str(&format!("Dead routes: {:?}", route["dead_routes"]));
        }
-        if !route.dead_remote_routes.is_empty() {
+        if route["dead_routes"].len() != 0 {
            if !out.is_empty() {
                out.push_str("\n");
            }
            out.push_str(&format!(
                "Dead remote routes: {:?}",
-                route.dead_remote_routes
+                route["dead_remote_routes"]
            ));
        }
        if !out.is_empty() {
-            self.inner().ui.add_node_event(out);
+            self.inner().ui_sender.add_node_event(out);
        }
    }
-    pub fn update_value_change(&mut self, value_change: veilid_core::VeilidValueChange) {
-        let out = format!("Value change: {:?}", value_change);
-        self.inner().ui.add_node_event(out);
+    pub fn update_value_change(&self, value_change: &json::JsonValue) {
+        let out = format!("Value change: {:?}", value_change.as_str().unwrap_or("???"));
+        self.inner().ui_sender.add_node_event(out);
    }

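These handlers now read plain JSON instead of typed veilid_core structs. A minimal sketch of how such an attachment update could be parsed with the json crate; the field names are taken from the code above, but the sample values and the standalone program around them are invented:

// The `json` crate (json-rust) is assumed as a dependency, as elsewhere in this change.
fn main() {
    let attachment = json::parse(
        r#"{
            "state": "AttachedGood",
            "public_internet_ready": true,
            "local_network_ready": false
        }"#,
    )
    .unwrap();

    // Same extraction pattern as update_attachment() above
    let state = attachment["state"].as_str().unwrap_or_default().to_owned();
    let public_internet_ready = attachment["public_internet_ready"]
        .as_bool()
        .unwrap_or_default();
    let local_network_ready = attachment["local_network_ready"]
        .as_bool()
        .unwrap_or_default();

    assert_eq!(state, "AttachedGood");
    assert!(public_internet_ready && !local_network_ready);
}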
-    pub fn update_log(&mut self, log: veilid_core::VeilidLog) {
-        self.inner().ui.add_node_event(format!(
+    pub fn update_log(&self, log: &json::JsonValue) {
+        self.inner().ui_sender.add_node_event(format!(
            "{}: {}{}",
-            log.log_level,
-            log.message,
-            if let Some(bt) = log.backtrace {
+            log["log_level"].as_str().unwrap_or("???"),
+            log["message"].as_str().unwrap_or("???"),
+            if let Some(bt) = log["backtrace"].as_str() {
                format!("\nBacktrace:\n{}", bt)
            } else {
                "".to_owned()
@@ -442,79 +449,83 @@ reply - reply to an AppCall not handled directly by the server
        ));
    }

-    pub fn update_app_message(&mut self, msg: veilid_core::VeilidAppMessage) {
+    pub fn update_app_message(&self, msg: &json::JsonValue) {
+        let message = json_str_vec_u8(&msg["message"]);
+
        // check is message body is ascii printable
        let mut printable = true;
-        for c in msg.message() {
+        for c in &message {
            if *c < 32 || *c > 126 {
                printable = false;
            }
        }

        let strmsg = if printable {
-            String::from_utf8_lossy(msg.message()).to_string()
+            String::from_utf8_lossy(&message).to_string()
        } else {
-            hex::encode(msg.message())
+            hex::encode(message)
        };

        self.inner()
-            .ui
-            .add_node_event(format!("AppMessage ({:?}): {}", msg.sender(), strmsg));
+            .ui_sender
+            .add_node_event(format!("AppMessage ({:?}): {}", msg["sender"], strmsg));
    }

-    pub fn update_app_call(&mut self, call: veilid_core::VeilidAppCall) {
+    pub fn update_app_call(&self, call: &json::JsonValue) {
+        let message = json_str_vec_u8(&call["message"]);
+
        // check is message body is ascii printable
        let mut printable = true;
-        for c in call.message() {
+        for c in &message {
            if *c < 32 || *c > 126 {
                printable = false;
            }
        }

        let strmsg = if printable {
-            String::from_utf8_lossy(call.message()).to_string()
+            String::from_utf8_lossy(&message).to_string()
        } else {
-            format!("#{}", hex::encode(call.message()))
+            format!("#{}", hex::encode(&message))
        };

-        self.inner().ui.add_node_event(format!(
+        let id = json_str_u64(&call["call_id"]);
+
+        self.inner().ui_sender.add_node_event(format!(
            "AppCall ({:?}) id = {:016x} : {}",
-            call.sender(),
-            call.id().as_u64(),
-            strmsg
+            call["sender"], id, strmsg
        ));

-        self.inner_mut().last_call_id = Some(call.id());
+        self.inner_mut().last_call_id = Some(id);
    }

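For reference, the printable-ASCII check used by update_app_message and update_app_call above, pulled out as a tiny free function; the sample payloads are invented and the hex crate is assumed, as in the code above:

// Same check as in the handlers above; `hex` is already a dependency there.
fn render_message(message: &[u8]) -> String {
    let mut printable = true;
    for c in message {
        if *c < 32 || *c > 126 {
            printable = false;
        }
    }
    if printable {
        String::from_utf8_lossy(message).to_string()
    } else {
        hex::encode(message)
    }
}

fn main() {
    assert_eq!(render_message(b"ping"), "ping");
    assert_eq!(render_message(&[0xde, 0xad, 0xbe, 0xef]), "deadbeef");
}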
-    pub fn update_shutdown(&mut self) {
+    pub fn update_shutdown(&self) {
        // Do nothing with this, we'll process shutdown when rpc connection closes
    }

    // called by client_api_connection
    // calls into ui
    ////////////////////////////////////////////
-    pub fn set_connection_state(&mut self, state: ConnectionState) {
-        self.inner_mut().ui.set_connection_state(state);
+    pub fn set_connection_state(&self, state: ConnectionState) {
+        self.inner_mut().ui_sender.set_connection_state(state);
    }
    // called by ui
    ////////////////////////////////////////////
-    pub fn start_connection(&mut self) {
+    pub fn start_connection(&self) {
        self.inner_mut().reconnect = true;
        self.inner_mut().connection_waker.resolve();
    }
-    // pub fn stop_connection(&mut self) {
+    // pub fn stop_connection(&self) {
    //     self.inner_mut().reconnect = false;
    //     let mut capi = self.capi().clone();
    //     spawn_detached(async move {
    //         capi.disconnect().await;
    //     });
    // }
-    pub fn cancel_reconnect(&mut self) {
+    pub fn cancel_reconnect(&self) {
        self.inner_mut().reconnect = false;
        self.inner_mut().connection_waker.resolve();
    }
-    pub fn quit(&mut self) {
+    pub fn quit(&self) {
        self.inner_mut().finished = true;
        self.inner_mut().reconnect = false;
        self.inner_mut().connection_waker.resolve();
@@ -523,8 +534,8 @@ reply - reply to an AppCall not handled directly by the server
    // called by ui
    // calls into client_api_connection
    ////////////////////////////////////////////
-    pub fn attach(&mut self) {
-        let mut capi = self.capi();
+    pub fn attach(&self) {
+        let capi = self.capi();

        spawn_detached_local(async move {
            if let Err(e) = capi.server_attach().await {
@@ -533,8 +544,8 @@ reply - reply to an AppCall not handled directly by the server
        });
    }

-    pub fn detach(&mut self) {
-        let mut capi = self.capi();
+    pub fn detach(&self) {
+        let capi = self.capi();

        spawn_detached_local(async move {
            if let Err(e) = capi.server_detach().await {
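Most CommandProcessor methods in this hunk changed from &mut self to &self. A minimal sketch (toy types, not the real CommandProcessor) of the Arc<Mutex<...>> interior-mutability pattern that lets cloned handles mutate shared state through &self:

use std::sync::{Arc, Mutex, MutexGuard};

#[derive(Default)]
struct Inner {
    reconnect: bool,
    finished: bool,
}

#[derive(Clone)]
struct Processor {
    inner: Arc<Mutex<Inner>>,
}

impl Processor {
    fn inner_mut(&self) -> MutexGuard<'_, Inner> {
        self.inner.lock().unwrap()
    }

    // Note: &self, not &mut self -- any clone of the handle can drive state.
    fn quit(&self) {
        self.inner_mut().finished = true;
        self.inner_mut().reconnect = false;
    }
}

fn main() {
    let cp = Processor {
        inner: Arc::new(Mutex::new(Inner::default())),
    };
    let handle = cp.clone();
    handle.quit();
    let inner = cp.inner_mut();
    assert!(inner.finished && !inner.reconnect);
}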
|
@ -3,7 +3,6 @@
|
|||||||
#![recursion_limit = "256"]
|
#![recursion_limit = "256"]
|
||||||
|
|
||||||
use crate::tools::*;
|
use crate::tools::*;
|
||||||
use veilid_core::tools::*;
|
|
||||||
|
|
||||||
use clap::{Arg, ColorChoice, Command};
|
use clap::{Arg, ColorChoice, Command};
|
||||||
use flexi_logger::*;
|
use flexi_logger::*;
|
||||||
@ -18,11 +17,6 @@ mod settings;
|
|||||||
mod tools;
|
mod tools;
|
||||||
mod ui;
|
mod ui;
|
||||||
|
|
||||||
#[allow(clippy::all)]
|
|
||||||
pub mod veilid_client_capnp {
|
|
||||||
include!(concat!(env!("OUT_DIR"), "/proto/veilid_client_capnp.rs"));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_command_line(default_config_path: &OsStr) -> Result<clap::ArgMatches, String> {
|
fn parse_command_line(default_config_path: &OsStr) -> Result<clap::ArgMatches, String> {
|
||||||
let matches = Command::new("veilid-cli")
|
let matches = Command::new("veilid-cli")
|
||||||
.version("0.1")
|
.version("0.1")
|
||||||
@ -97,7 +91,7 @@ fn main() -> Result<(), String> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create UI object
|
// Create UI object
|
||||||
let mut sivui = ui::UI::new(settings.interface.node_log.scrollback, &settings);
|
let (mut sivui, uisender) = ui::UI::new(settings.interface.node_log.scrollback, &settings);
|
||||||
|
|
||||||
// Set up loggers
|
// Set up loggers
|
||||||
{
|
{
|
||||||
@ -160,19 +154,19 @@ fn main() -> Result<(), String> {
|
|||||||
|
|
||||||
// Create command processor
|
// Create command processor
|
||||||
debug!("Creating Command Processor ");
|
debug!("Creating Command Processor ");
|
||||||
let mut comproc = command_processor::CommandProcessor::new(sivui.clone(), &settings);
|
let comproc = command_processor::CommandProcessor::new(uisender, &settings);
|
||||||
sivui.set_command_processor(comproc.clone());
|
sivui.set_command_processor(comproc.clone());
|
||||||
|
|
||||||
// Create client api client side
|
// Create client api client side
|
||||||
info!("Starting API connection");
|
info!("Starting API connection");
|
||||||
let mut capi = client_api_connection::ClientApiConnection::new(comproc.clone());
|
let capi = client_api_connection::ClientApiConnection::new(comproc.clone());
|
||||||
|
|
||||||
// Save client api in command processor
|
// Save client api in command processor
|
||||||
comproc.set_client_api_connection(capi.clone());
|
comproc.set_client_api_connection(capi.clone());
|
||||||
|
|
||||||
// Keep a connection to the server
|
// Keep a connection to the server
|
||||||
comproc.set_server_address(server_addr);
|
comproc.set_server_address(server_addr);
|
||||||
let mut comproc2 = comproc.clone();
|
let comproc2 = comproc.clone();
|
||||||
let connection_future = comproc.connection_manager();
|
let connection_future = comproc.connection_manager();
|
||||||
|
|
||||||
// Start async
|
// Start async
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
use cursive_table_view::*;
|
use cursive_table_view::*;
|
||||||
use std::cmp::Ordering;
|
use std::cmp::Ordering;
|
||||||
use veilid_core::*;
|
|
||||||
|
|
||||||
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
|
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
|
||||||
pub enum PeerTableColumn {
|
pub enum PeerTableColumn {
|
||||||
@ -24,8 +23,11 @@ pub enum PeerTableColumn {
|
|||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
|
|
||||||
-fn format_ts(ts: Timestamp) -> String {
-    let ts = ts.as_u64();
+fn format_ts(ts: &json::JsonValue) -> String {
+    if ts.is_null() {
+        return "---".to_owned();
+    }
+    let ts = json_str_u64(ts);
    let secs = timestamp_to_secs(ts);
    if secs >= 1.0 {
        format!("{:.2}s", timestamp_to_secs(ts))
@@ -34,8 +36,11 @@ fn format_ts(ts: Timestamp) -> String {
    }
}

-fn format_bps(bps: ByteCount) -> String {
-    let bps = bps.as_u64();
+fn format_bps(bps: &json::JsonValue) -> String {
+    if bps.is_null() {
+        return "---".to_owned();
+    }
+    let bps = json_str_u64(bps);
    if bps >= 1024u64 * 1024u64 * 1024u64 {
        format!("{:.2}GB/s", (bps / (1024u64 * 1024u64)) as f64 / 1024.0)
    } else if bps >= 1024u64 * 1024u64 {
@@ -47,25 +52,20 @@ fn format_bps(bps: ByteCount) -> String {
    }
}

-impl TableViewItem<PeerTableColumn> for PeerTableData {
+impl TableViewItem<PeerTableColumn> for json::JsonValue {
    fn to_column(&self, column: PeerTableColumn) -> String {
        match column {
-            PeerTableColumn::NodeId => self
-                .node_ids
-                .first()
-                .map(|n| n.to_string())
-                .unwrap_or_else(|| "???".to_owned()),
-            PeerTableColumn::Address => self.peer_address.clone(),
-            PeerTableColumn::LatencyAvg => format!(
-                "{}",
-                self.peer_stats
-                    .latency
-                    .as_ref()
-                    .map(|l| format_ts(l.average))
-                    .unwrap_or("---".to_owned())
-            ),
-            PeerTableColumn::TransferDownAvg => format_bps(self.peer_stats.transfer.down.average),
-            PeerTableColumn::TransferUpAvg => format_bps(self.peer_stats.transfer.up.average),
+            PeerTableColumn::NodeId => self["node_ids"][0].to_string(),
+            PeerTableColumn::Address => self["peer_address"].to_string(),
+            PeerTableColumn::LatencyAvg => {
+                format!("{}", format_ts(&self["peer_stats"]["latency"]["average"]))
+            }
+            PeerTableColumn::TransferDownAvg => {
+                format_bps(&self["peer_stats"]["transfer"]["down"]["average"])
+            }
+            PeerTableColumn::TransferUpAvg => {
+                format_bps(&self["peer_stats"]["transfer"]["up"]["average"])
+            }
        }
    }

@ -76,26 +76,20 @@ impl TableViewItem<PeerTableColumn> for PeerTableData {
|
|||||||
match column {
|
match column {
|
||||||
PeerTableColumn::NodeId => self.to_column(column).cmp(&other.to_column(column)),
|
PeerTableColumn::NodeId => self.to_column(column).cmp(&other.to_column(column)),
|
||||||
PeerTableColumn::Address => self.to_column(column).cmp(&other.to_column(column)),
|
PeerTableColumn::Address => self.to_column(column).cmp(&other.to_column(column)),
|
||||||
PeerTableColumn::LatencyAvg => self
|
PeerTableColumn::LatencyAvg => json_str_u64(&self["peer_stats"]["latency"]["average"])
|
||||||
.peer_stats
|
.cmp(&json_str_u64(&other["peer_stats"]["latency"]["average"])),
|
||||||
.latency
|
PeerTableColumn::TransferDownAvg => {
|
||||||
.as_ref()
|
json_str_u64(&self["peer_stats"]["transfer"]["down"]["average"]).cmp(&json_str_u64(
|
||||||
.map(|l| l.average)
|
&other["peer_stats"]["transfer"]["down"]["average"],
|
||||||
.cmp(&other.peer_stats.latency.as_ref().map(|l| l.average)),
|
))
|
||||||
PeerTableColumn::TransferDownAvg => self
|
}
|
||||||
.peer_stats
|
PeerTableColumn::TransferUpAvg => {
|
||||||
.transfer
|
json_str_u64(&self["peer_stats"]["transfer"]["up"]["average"]).cmp(&json_str_u64(
|
||||||
.down
|
&other["peer_stats"]["transfer"]["up"]["average"],
|
||||||
.average
|
))
|
||||||
.cmp(&other.peer_stats.transfer.down.average),
|
}
|
||||||
PeerTableColumn::TransferUpAvg => self
|
|
||||||
.peer_stats
|
|
||||||
.transfer
|
|
||||||
.up
|
|
||||||
.average
|
|
||||||
.cmp(&other.peer_stats.transfer.up.average),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type PeersTableView = TableView<PeerTableData, PeerTableColumn>;
|
pub type PeersTableView = TableView<json::JsonValue, PeerTableColumn>;
|
||||||
|
@@ -1,5 +1,10 @@
-use cfg_if::*;
+pub use cfg_if::*;
+pub use log::*;
+pub use parking_lot::*;
+pub use veilid_tools::*;
+
 use core::future::Future;
+use core::str::FromStr;

 cfg_if! {
     if #[cfg(feature="rt-async-std")] {
@@ -17,3 +22,13 @@ cfg_if! {

    }
}
+
+pub fn json_str_u64(value: &json::JsonValue) -> u64 {
+    u64::from_str(value.as_str().unwrap_or_default()).unwrap_or_default()
+}
+
+pub fn json_str_vec_u8(value: &json::JsonValue) -> Vec<u8> {
+    data_encoding::BASE64URL_NOPAD
+        .decode(value.as_str().unwrap_or_default().as_bytes())
+        .unwrap_or_default()
+}
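A small usage sketch for the two helpers added above: 64-bit values arrive as decimal strings and byte arrays as base64url-without-padding strings, so both helpers fall back to a default on missing or malformed input. The sample JSON is invented:

// Usage sketch only: relies on the helpers defined above and the `json` crate.
fn main() {
    let update =
        json::parse(r#"{ "timestamp": "1685454963000000", "message": "aGVsbG8" }"#).unwrap();

    // Decimal-string u64; missing or non-numeric fields become 0
    assert_eq!(json_str_u64(&update["timestamp"]), 1_685_454_963_000_000u64);
    assert_eq!(json_str_u64(&update["missing"]), 0);

    // base64url (no padding) -> raw bytes
    assert_eq!(json_str_vec_u8(&update["message"]), b"hello".to_vec());
}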
|
@ -1,6 +1,7 @@
|
|||||||
use crate::command_processor::*;
|
use crate::command_processor::*;
|
||||||
use crate::peers_table_view::*;
|
use crate::peers_table_view::*;
|
||||||
use crate::settings::Settings;
|
use crate::settings::Settings;
|
||||||
|
use crate::tools::*;
|
||||||
use crossbeam_channel::Sender;
|
use crossbeam_channel::Sender;
|
||||||
use cursive::align::*;
|
use cursive::align::*;
|
||||||
use cursive::event::*;
|
use cursive::event::*;
|
||||||
@ -12,12 +13,8 @@ use cursive::Cursive;
|
|||||||
use cursive::CursiveRunnable;
|
use cursive::CursiveRunnable;
|
||||||
use cursive_flexi_logger_view::{CursiveLogWriter, FlexiLoggerView};
|
use cursive_flexi_logger_view::{CursiveLogWriter, FlexiLoggerView};
|
||||||
//use cursive_multiplex::*;
|
//use cursive_multiplex::*;
|
||||||
use log::*;
|
|
||||||
use std::cell::RefCell;
|
|
||||||
use std::collections::{HashMap, VecDeque};
|
use std::collections::{HashMap, VecDeque};
|
||||||
use std::rc::Rc;
|
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use veilid_core::*;
|
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////
|
||||||
///
|
///
|
||||||
@ -50,20 +47,20 @@ impl<T> Dirty<T> {
|
|||||||
pub type UICallback = Box<dyn Fn(&mut Cursive) + Send>;
|
pub type UICallback = Box<dyn Fn(&mut Cursive) + Send>;
|
||||||
|
|
||||||
struct UIState {
|
struct UIState {
|
||||||
attachment_state: Dirty<AttachmentState>,
|
attachment_state: Dirty<String>,
|
||||||
public_internet_ready: Dirty<bool>,
|
public_internet_ready: Dirty<bool>,
|
||||||
local_network_ready: Dirty<bool>,
|
local_network_ready: Dirty<bool>,
|
||||||
network_started: Dirty<bool>,
|
network_started: Dirty<bool>,
|
||||||
network_down_up: Dirty<(f32, f32)>,
|
network_down_up: Dirty<(f32, f32)>,
|
||||||
connection_state: Dirty<ConnectionState>,
|
connection_state: Dirty<ConnectionState>,
|
||||||
peers_state: Dirty<Vec<PeerTableData>>,
|
peers_state: Dirty<Vec<json::JsonValue>>,
|
||||||
node_id: Dirty<String>,
|
node_id: Dirty<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl UIState {
|
impl UIState {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
attachment_state: Dirty::new(AttachmentState::Detached),
|
attachment_state: Dirty::new("Detached".to_owned()),
|
||||||
public_internet_ready: Dirty::new(false),
|
public_internet_ready: Dirty::new(false),
|
||||||
local_network_ready: Dirty::new(false),
|
local_network_ready: Dirty::new(false),
|
||||||
network_started: Dirty::new(false),
|
network_started: Dirty::new(false),
|
||||||
@ -83,19 +80,15 @@ pub struct UIInner {
|
|||||||
ui_state: UIState,
|
ui_state: UIState,
|
||||||
log_colors: HashMap<Level, cursive::theme::Color>,
|
log_colors: HashMap<Level, cursive::theme::Color>,
|
||||||
cmdproc: Option<CommandProcessor>,
|
cmdproc: Option<CommandProcessor>,
|
||||||
cb_sink: Sender<Box<dyn FnOnce(&mut Cursive) + 'static + Send>>,
|
|
||||||
cmd_history: VecDeque<String>,
|
cmd_history: VecDeque<String>,
|
||||||
cmd_history_position: usize,
|
cmd_history_position: usize,
|
||||||
cmd_history_max_size: usize,
|
cmd_history_max_size: usize,
|
||||||
connection_dialog_state: Option<ConnectionState>,
|
connection_dialog_state: Option<ConnectionState>,
|
||||||
}
|
}
|
||||||
|
|
||||||
type Handle<T> = Rc<RefCell<T>>;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct UI {
|
pub struct UI {
|
||||||
siv: Handle<CursiveRunnable>,
|
siv: CursiveRunnable,
|
||||||
inner: Handle<UIInner>,
|
inner: Arc<Mutex<UIInner>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
#[derive(Error, Debug)]
|
||||||
@ -113,11 +106,11 @@ impl UI {
|
|||||||
inner.cmdproc.as_ref().unwrap().clone()
|
inner.cmdproc.as_ref().unwrap().clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inner(s: &mut Cursive) -> std::cell::Ref<'_, UIInner> {
|
fn inner(s: &mut Cursive) -> MutexGuard<'_, UIInner> {
|
||||||
s.user_data::<Handle<UIInner>>().unwrap().borrow()
|
s.user_data::<Arc<Mutex<UIInner>>>().unwrap().lock()
|
||||||
}
|
}
|
||||||
fn inner_mut(s: &mut Cursive) -> std::cell::RefMut<'_, UIInner> {
|
fn inner_mut(s: &mut Cursive) -> MutexGuard<'_, UIInner> {
|
||||||
s.user_data::<Handle<UIInner>>().unwrap().borrow_mut()
|
s.user_data::<Arc<Mutex<UIInner>>>().unwrap().lock()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn setup_colors(siv: &mut CursiveRunnable, inner: &mut UIInner, settings: &Settings) {
|
fn setup_colors(siv: &mut CursiveRunnable, inner: &mut UIInner, settings: &Settings) {
|
||||||
@ -239,15 +232,16 @@ impl UI {
|
|||||||
s.find_name("peers").unwrap()
|
s.find_name("peers").unwrap()
|
||||||
}
|
}
|
||||||
fn render_attachment_state(inner: &mut UIInner) -> String {
|
fn render_attachment_state(inner: &mut UIInner) -> String {
|
||||||
let att = match inner.ui_state.attachment_state.get() {
|
let att = match inner.ui_state.attachment_state.get().as_str() {
|
||||||
AttachmentState::Detached => "[----]",
|
"Detached" => "[----]",
|
||||||
AttachmentState::Attaching => "[/ ]",
|
"Attaching" => "[/ ]",
|
||||||
AttachmentState::AttachedWeak => "[| ]",
|
"AttachedWeak" => "[| ]",
|
||||||
AttachmentState::AttachedGood => "[|| ]",
|
"AttachedGood" => "[|| ]",
|
||||||
AttachmentState::AttachedStrong => "[||| ]",
|
"AttachedStrong" => "[||| ]",
|
||||||
AttachmentState::FullyAttached => "[||||]",
|
"FullyAttached" => "[||||]",
|
||||||
AttachmentState::OverAttached => "[++++]",
|
"OverAttached" => "[++++]",
|
||||||
AttachmentState::Detaching => "[////]",
|
"Detaching" => "[////]",
|
||||||
|
_ => "[????]",
|
||||||
};
|
};
|
||||||
let pi = if *inner.ui_state.public_internet_ready.get() {
|
let pi = if *inner.ui_state.public_internet_ready.get() {
|
||||||
"+P"
|
"+P"
|
||||||
@ -272,15 +266,16 @@ impl UI {
|
|||||||
}
|
}
|
||||||
fn render_button_attach<'a>(inner: &mut UIInner) -> (&'a str, bool) {
|
fn render_button_attach<'a>(inner: &mut UIInner) -> (&'a str, bool) {
|
||||||
if let ConnectionState::Connected(_, _) = inner.ui_state.connection_state.get() {
|
if let ConnectionState::Connected(_, _) = inner.ui_state.connection_state.get() {
|
||||||
match inner.ui_state.attachment_state.get() {
|
match inner.ui_state.attachment_state.get().as_str() {
|
||||||
AttachmentState::Detached => ("Attach", true),
|
"Detached" => ("Attach", true),
|
||||||
AttachmentState::Attaching => ("Detach", true),
|
"Attaching" => ("Detach", true),
|
||||||
AttachmentState::AttachedWeak => ("Detach", true),
|
"AttachedWeak" => ("Detach", true),
|
||||||
AttachmentState::AttachedGood => ("Detach", true),
|
"AttachedGood" => ("Detach", true),
|
||||||
AttachmentState::AttachedStrong => ("Detach", true),
|
"AttachedStrong" => ("Detach", true),
|
||||||
AttachmentState::FullyAttached => ("Detach", true),
|
"FullyAttached" => ("Detach", true),
|
||||||
AttachmentState::OverAttached => ("Detach", true),
|
"OverAttached" => ("Detach", true),
|
||||||
AttachmentState::Detaching => ("Detach", false),
|
"Detaching" => ("Detach", false),
|
||||||
|
_ => ("???", false),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
(" ---- ", false)
|
(" ---- ", false)
|
||||||
@ -412,17 +407,19 @@ impl UI {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn on_button_attach_pressed(s: &mut Cursive) {
|
fn on_button_attach_pressed(s: &mut Cursive) {
|
||||||
let action: Option<bool> = match Self::inner_mut(s).ui_state.attachment_state.get() {
|
let action: Option<bool> = match Self::inner_mut(s).ui_state.attachment_state.get().as_str()
|
||||||
AttachmentState::Detached => Some(true),
|
{
|
||||||
AttachmentState::Attaching => Some(false),
|
"Detached" => Some(true),
|
||||||
AttachmentState::AttachedWeak => Some(false),
|
"Attaching" => Some(false),
|
||||||
AttachmentState::AttachedGood => Some(false),
|
"AttachedWeak" => Some(false),
|
||||||
AttachmentState::AttachedStrong => Some(false),
|
"AttachedGood" => Some(false),
|
||||||
AttachmentState::FullyAttached => Some(false),
|
"AttachedStrong" => Some(false),
|
||||||
AttachmentState::OverAttached => Some(false),
|
"FullyAttached" => Some(false),
|
||||||
AttachmentState::Detaching => None,
|
"OverAttached" => Some(false),
|
||||||
|
"Detaching" => None,
|
||||||
|
_ => None,
|
||||||
};
|
};
|
||||||
let mut cmdproc = Self::command_processor(s);
|
let cmdproc = Self::command_processor(s);
|
||||||
if let Some(a) = action {
|
if let Some(a) = action {
|
||||||
if a {
|
if a {
|
||||||
cmdproc.attach();
|
cmdproc.attach();
|
||||||
@ -704,7 +701,7 @@ impl UI {
|
|||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
// Public functions
|
// Public functions
|
||||||
|
|
||||||
pub fn new(node_log_scrollback: usize, settings: &Settings) -> Self {
|
pub fn new(node_log_scrollback: usize, settings: &Settings) -> (Self, UISender) {
|
||||||
cursive_flexi_logger_view::resize(node_log_scrollback);
|
cursive_flexi_logger_view::resize(node_log_scrollback);
|
||||||
|
|
||||||
// Instantiate the cursive runnable
|
// Instantiate the cursive runnable
|
||||||
@ -723,9 +720,9 @@ impl UI {
|
|||||||
let cb_sink = runnable.cb_sink().clone();
|
let cb_sink = runnable.cb_sink().clone();
|
||||||
|
|
||||||
// Create the UI object
|
// Create the UI object
|
||||||
let this = Self {
|
let mut this = Self {
|
||||||
siv: Rc::new(RefCell::new(runnable)),
|
siv: runnable,
|
||||||
inner: Rc::new(RefCell::new(UIInner {
|
inner: Arc::new(Mutex::new(UIInner {
|
||||||
ui_state: UIState::new(),
|
ui_state: UIState::new(),
|
||||||
log_colors: Default::default(),
|
log_colors: Default::default(),
|
||||||
cmdproc: None,
|
cmdproc: None,
|
||||||
@ -737,15 +734,13 @@ impl UI {
|
|||||||
cmd_history_position: 0,
|
cmd_history_position: 0,
|
||||||
cmd_history_max_size: settings.interface.command_line.history_size,
|
cmd_history_max_size: settings.interface.command_line.history_size,
|
||||||
connection_dialog_state: None,
|
connection_dialog_state: None,
|
||||||
cb_sink,
|
|
||||||
})),
|
})),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut siv = this.siv.borrow_mut();
|
let mut inner = this.inner.lock();
|
||||||
let mut inner = this.inner.borrow_mut();
|
|
||||||
|
|
||||||
// Make the inner object accessible in callbacks easily
|
// Make the inner object accessible in callbacks easily
|
||||||
siv.set_user_data(this.inner.clone());
|
this.siv.set_user_data(this.inner.clone());
|
||||||
|
|
||||||
// Create layouts
|
// Create layouts
|
||||||
|
|
||||||
@ -828,87 +823,44 @@ impl UI {
|
|||||||
.child(TextView::new(version)),
|
.child(TextView::new(version)),
|
||||||
);
|
);
|
||||||
|
|
||||||
siv.add_fullscreen_layer(mainlayout);
|
this.siv.add_fullscreen_layer(mainlayout);
|
||||||
|
|
||||||
UI::setup_colors(&mut siv, &mut inner, settings);
|
UI::setup_colors(&mut this.siv, &mut inner, settings);
|
||||||
UI::setup_quit_handler(&mut siv);
|
UI::setup_quit_handler(&mut this.siv);
|
||||||
siv.set_global_callback(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
|
this.siv
|
||||||
|
.set_global_callback(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
|
||||||
|
|
||||||
drop(inner);
|
drop(inner);
|
||||||
drop(siv);
|
|
||||||
|
|
||||||
this
|
let inner = this.inner.clone();
|
||||||
|
(this, UISender { inner, cb_sink })
|
||||||
}
|
}
|
||||||
pub fn cursive_flexi_logger(&self) -> Box<CursiveLogWriter> {
|
pub fn cursive_flexi_logger(&self) -> Box<CursiveLogWriter> {
|
||||||
let mut flv =
|
let mut flv = cursive_flexi_logger_view::cursive_flexi_logger(self.siv.cb_sink().clone());
|
||||||
cursive_flexi_logger_view::cursive_flexi_logger(self.siv.borrow().cb_sink().clone());
|
flv.set_colors(self.inner.lock().log_colors.clone());
|
||||||
flv.set_colors(self.inner.borrow().log_colors.clone());
|
|
||||||
flv
|
flv
|
||||||
}
|
}
|
||||||
pub fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
|
pub fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
|
||||||
let mut inner = self.inner.borrow_mut();
|
let mut inner = self.inner.lock();
|
||||||
inner.cmdproc = Some(cmdproc);
|
inner.cmdproc = Some(cmdproc);
|
||||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
|
||||||
}
|
|
||||||
pub fn set_attachment_state(
|
|
||||||
&mut self,
|
|
||||||
state: AttachmentState,
|
|
||||||
public_internet_ready: bool,
|
|
||||||
local_network_ready: bool,
|
|
||||||
) {
|
|
||||||
let mut inner = self.inner.borrow_mut();
|
|
||||||
inner.ui_state.attachment_state.set(state);
|
|
||||||
inner
|
|
||||||
.ui_state
|
|
||||||
.public_internet_ready
|
|
||||||
.set(public_internet_ready);
|
|
||||||
inner.ui_state.local_network_ready.set(local_network_ready);
|
|
||||||
|
|
||||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
|
||||||
}
|
|
||||||
pub fn set_network_status(
|
|
||||||
&mut self,
|
|
||||||
started: bool,
|
|
||||||
bps_down: u64,
|
|
||||||
bps_up: u64,
|
|
||||||
peers: Vec<PeerTableData>,
|
|
||||||
) {
|
|
||||||
let mut inner = self.inner.borrow_mut();
|
|
||||||
inner.ui_state.network_started.set(started);
|
|
||||||
inner.ui_state.network_down_up.set((
|
|
||||||
((bps_down as f64) / 1000.0f64) as f32,
|
|
||||||
((bps_up as f64) / 1000.0f64) as f32,
|
|
||||||
));
|
|
||||||
inner.ui_state.peers_state.set(peers);
|
|
||||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
|
||||||
}
|
|
||||||
pub fn set_config(&mut self, config: VeilidConfigInner) {
|
|
||||||
let mut inner = self.inner.borrow_mut();
|
|
||||||
|
|
||||||
inner
|
|
||||||
.ui_state
|
|
||||||
.node_id
|
|
||||||
.set(config.network.routing_table.node_id.to_string());
|
|
||||||
}
|
|
||||||
pub fn set_connection_state(&mut self, state: ConnectionState) {
|
|
||||||
let mut inner = self.inner.borrow_mut();
|
|
||||||
inner.ui_state.connection_state.set(state);
|
|
||||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add_node_event(&self, event: String) {
|
// Note: Cursive is not re-entrant, can't borrow_mut self.siv again after this
|
||||||
let inner = self.inner.borrow();
|
pub async fn run_async(&mut self) {
|
||||||
let color = *inner.log_colors.get(&Level::Info).unwrap();
|
self.siv.run_async().await;
|
||||||
let mut starting_style: Style = color.into();
|
|
||||||
for line in event.lines() {
|
|
||||||
let (spanned_string, end_style) =
|
|
||||||
cursive::utils::markup::ansi::parse_with_starting_style(starting_style, line);
|
|
||||||
cursive_flexi_logger_view::push_to_log(spanned_string);
|
|
||||||
starting_style = end_style;
|
|
||||||
}
|
|
||||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
|
||||||
}
|
}
|
||||||
|
// pub fn run(&mut self) {
|
||||||
|
// self.siv.run();
|
||||||
|
// }
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct UISender {
|
||||||
|
inner: Arc<Mutex<UIInner>>,
|
||||||
|
cb_sink: Sender<Box<dyn FnOnce(&mut Cursive) + 'static + Send>>,
|
||||||
|
}
|
||||||
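UISender pairs the shared inner state with cursive's cb_sink so non-UI threads can schedule work on the UI loop. A minimal, veilid-independent sketch of that pattern; the cursive API usage here reflects roughly the 0.20-era interface and is an assumption, not part of this diff:

use std::thread;
use std::time::Duration;

use cursive::{Cursive, CursiveExt};

fn main() {
    let mut siv = Cursive::new();

    // The same kind of channel UISender carries: boxed closures run on the UI thread.
    let cb_sink = siv.cb_sink().clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_secs(1));
        // Any thread can push UI work; here we just quit after a second.
        let _ = cb_sink.send(Box::new(|s: &mut Cursive| s.quit()));
    });

    siv.run();
}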
|
|
||||||
|
impl UISender {
|
||||||
pub fn display_string_dialog<T: ToString, S: ToString>(
|
pub fn display_string_dialog<T: ToString, S: ToString>(
|
||||||
&self,
|
&self,
|
||||||
title: T,
|
title: T,
|
||||||
@ -917,31 +869,84 @@ impl UI {
|
|||||||
) {
|
) {
|
||||||
let title = title.to_string();
|
let title = title.to_string();
|
||||||
let text = text.to_string();
|
let text = text.to_string();
|
||||||
let inner = self.inner.borrow();
|
let _ = self.cb_sink.send(Box::new(move |s| {
|
||||||
let _ = inner.cb_sink.send(Box::new(move |s| {
|
|
||||||
UI::display_string_dialog_cb(s, title, text, close_cb)
|
UI::display_string_dialog_cb(s, title, text, close_cb)
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn quit(&self) {
|
pub fn quit(&self) {
|
||||||
let inner = self.inner.borrow();
|
let _ = self.cb_sink.send(Box::new(|s| {
|
||||||
let _ = inner.cb_sink.send(Box::new(|s| {
|
|
||||||
s.quit();
|
s.quit();
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn send_callback(&self, callback: UICallback) {
|
pub fn send_callback(&self, callback: UICallback) {
|
||||||
let inner = self.inner.borrow();
|
let _ = self.cb_sink.send(Box::new(move |s| callback(s)));
|
||||||
let _ = inner.cb_sink.send(Box::new(move |s| callback(s)));
|
}
|
||||||
|
pub fn set_attachment_state(
|
||||||
|
&mut self,
|
||||||
|
state: String,
|
||||||
|
public_internet_ready: bool,
|
||||||
|
local_network_ready: bool,
|
||||||
|
) {
|
||||||
|
{
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
inner.ui_state.attachment_state.set(state);
|
||||||
|
inner
|
||||||
|
.ui_state
|
||||||
|
.public_internet_ready
|
||||||
|
.set(public_internet_ready);
|
||||||
|
inner.ui_state.local_network_ready.set(local_network_ready);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: Cursive is not re-entrant, can't borrow_mut self.siv again after this
|
let _ = self.cb_sink.send(Box::new(UI::update_cb));
|
||||||
pub async fn run_async(&mut self) {
|
}
|
||||||
let mut siv = self.siv.borrow_mut();
|
pub fn set_network_status(
|
||||||
siv.run_async().await;
|
&mut self,
|
||||||
|
started: bool,
|
||||||
|
bps_down: u64,
|
||||||
|
bps_up: u64,
|
||||||
|
peers: Vec<json::JsonValue>,
|
||||||
|
) {
|
||||||
|
{
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
inner.ui_state.network_started.set(started);
|
||||||
|
inner.ui_state.network_down_up.set((
|
||||||
|
((bps_down as f64) / 1000.0f64) as f32,
|
||||||
|
((bps_up as f64) / 1000.0f64) as f32,
|
||||||
|
));
|
||||||
|
inner.ui_state.peers_state.set(peers);
|
||||||
|
}
|
||||||
|
let _ = self.cb_sink.send(Box::new(UI::update_cb));
|
||||||
|
}
|
||||||
|
pub fn set_config(&mut self, config: &json::JsonValue) {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
|
||||||
|
inner
|
||||||
|
.ui_state
|
||||||
|
.node_id
|
||||||
|
.set(config["network"]["routing_table"]["node_id"].to_string());
|
||||||
|
}
|
||||||
|
pub fn set_connection_state(&mut self, state: ConnectionState) {
|
||||||
|
{
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
inner.ui_state.connection_state.set(state);
|
||||||
|
}
|
||||||
|
let _ = self.cb_sink.send(Box::new(UI::update_cb));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn add_node_event(&self, event: String) {
|
||||||
|
{
|
||||||
|
let inner = self.inner.lock();
|
||||||
|
let color = *inner.log_colors.get(&Level::Info).unwrap();
|
||||||
|
let mut starting_style: Style = color.into();
|
||||||
|
for line in event.lines() {
|
||||||
|
let (spanned_string, end_style) =
|
||||||
|
cursive::utils::markup::ansi::parse_with_starting_style(starting_style, line);
|
||||||
|
cursive_flexi_logger_view::push_to_log(spanned_string);
|
||||||
|
starting_style = end_style;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let _ = self.cb_sink.send(Box::new(UI::update_cb));
|
||||||
}
|
}
|
||||||
// pub fn run(&mut self) {
|
|
||||||
// let mut siv = self.siv.borrow_mut();
|
|
||||||
// siv.run();
|
|
||||||
// }
|
|
||||||
}
|
}
|
||||||
|
@ -73,6 +73,7 @@ weak-table = "0.3.2"
|
|||||||
range-set-blaze = "0.1.5"
|
range-set-blaze = "0.1.5"
|
||||||
argon2 = "0.5.0"
|
argon2 = "0.5.0"
|
||||||
paste = "1.0.12"
|
paste = "1.0.12"
|
||||||
|
schemars = "0.8.12"
|
||||||
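The new schemars dependency suggests JSON Schemas are generated for the API types. A rough illustration with a hypothetical struct, not one of veilid's actual types; serde and serde_json are assumed to be available as they already are in this workspace:

use schemars::{schema_for, JsonSchema};
use serde::{Deserialize, Serialize};

// Hypothetical request type for illustration only.
#[derive(Serialize, Deserialize, JsonSchema)]
struct ExampleRequest {
    op: String,
    value: Option<u64>,
}

fn main() {
    // Derive a JSON Schema for the type and print it
    let schema = schema_for!(ExampleRequest);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}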
|
|
||||||
# Dependencies for native builds only
|
# Dependencies for native builds only
|
||||||
# Linux, Windows, Mac, iOS, Android
|
# Linux, Windows, Mac, iOS, Android
|
||||||
|
@ -496,13 +496,15 @@ struct Question @0xd8510bc33492ef70 {
|
|||||||
getValueQ @5 :OperationGetValueQ;
|
getValueQ @5 :OperationGetValueQ;
|
||||||
setValueQ @6 :OperationSetValueQ;
|
setValueQ @6 :OperationSetValueQ;
|
||||||
watchValueQ @7 :OperationWatchValueQ;
|
watchValueQ @7 :OperationWatchValueQ;
|
||||||
supplyBlockQ @8 :OperationSupplyBlockQ;
|
# #[cfg(feature="unstable-blockstore")]
|
||||||
findBlockQ @9 :OperationFindBlockQ;
|
# supplyBlockQ @8 :OperationSupplyBlockQ;
|
||||||
|
# findBlockQ @9 :OperationFindBlockQ;
|
||||||
|
|
||||||
# Tunnel operations
|
# Tunnel operations
|
||||||
startTunnelQ @10 :OperationStartTunnelQ;
|
# #[cfg(feature="unstable-tunnels")]
|
||||||
completeTunnelQ @11 :OperationCompleteTunnelQ;
|
# startTunnelQ @10 :OperationStartTunnelQ;
|
||||||
cancelTunnelQ @12 :OperationCancelTunnelQ;
|
# completeTunnelQ @11 :OperationCompleteTunnelQ;
|
||||||
|
# cancelTunnelQ @12 :OperationCancelTunnelQ;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -533,13 +535,16 @@ struct Answer @0xacacb8b6988c1058 {
|
|||||||
getValueA @3 :OperationGetValueA;
|
getValueA @3 :OperationGetValueA;
|
||||||
setValueA @4 :OperationSetValueA;
|
setValueA @4 :OperationSetValueA;
|
||||||
watchValueA @5 :OperationWatchValueA;
|
watchValueA @5 :OperationWatchValueA;
|
||||||
supplyBlockA @6 :OperationSupplyBlockA;
|
|
||||||
findBlockA @7 :OperationFindBlockA;
|
# #[cfg(feature="unstable-blockstore")]
|
||||||
|
#supplyBlockA @6 :OperationSupplyBlockA;
|
||||||
|
#findBlockA @7 :OperationFindBlockA;
|
||||||
|
|
||||||
# Tunnel operations
|
# Tunnel operations
|
||||||
startTunnelA @8 :OperationStartTunnelA;
|
# #[cfg(feature="unstable-tunnels")]
|
||||||
completeTunnelA @9 :OperationCompleteTunnelA;
|
# startTunnelA @8 :OperationStartTunnelA;
|
||||||
cancelTunnelA @10 :OperationCancelTunnelA;
|
# completeTunnelA @9 :OperationCompleteTunnelA;
|
||||||
|
# cancelTunnelA @10 :OperationCancelTunnelA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,7 +30,7 @@ impl AttachmentManager {
|
|||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
block_store: BlockStore,
|
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
) -> AttachmentManagerUnlockedInner {
|
) -> AttachmentManagerUnlockedInner {
|
||||||
AttachmentManagerUnlockedInner {
|
AttachmentManagerUnlockedInner {
|
||||||
@ -40,6 +40,7 @@ impl AttachmentManager {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store,
|
protected_store,
|
||||||
table_store,
|
table_store,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store,
|
block_store,
|
||||||
crypto,
|
crypto,
|
||||||
),
|
),
|
||||||
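The block store now only exists when the unstable-blockstore feature is enabled, so its field and constructor parameters are cfg-gated as shown above. A toy example of the same pattern; the struct and values are illustrative only, the feature name mirrors the diff:

// Toy illustration of gating a field behind a cargo feature.
struct Stores {
    table_store: String,
    #[cfg(feature = "unstable-blockstore")]
    block_store: String,
}

impl Stores {
    fn new(table_store: String) -> Self {
        Self {
            table_store,
            #[cfg(feature = "unstable-blockstore")]
            block_store: String::new(),
        }
    }
}

fn main() {
    let stores = Stores::new("tables".to_owned());
    println!("{}", stores.table_store);

    // Only compiled when the feature is enabled in Cargo.toml
    #[cfg(feature = "unstable-blockstore")]
    println!("{}", stores.block_store);
}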
@ -60,7 +61,7 @@ impl AttachmentManager {
|
|||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
block_store: BlockStore,
|
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
@ -70,6 +71,7 @@ impl AttachmentManager {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store,
|
protected_store,
|
||||||
table_store,
|
table_store,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store,
|
block_store,
|
||||||
crypto,
|
crypto,
|
||||||
)),
|
)),
|
||||||
|
@ -17,6 +17,7 @@ struct ServicesContext {
|
|||||||
|
|
||||||
pub protected_store: Option<ProtectedStore>,
|
pub protected_store: Option<ProtectedStore>,
|
||||||
pub table_store: Option<TableStore>,
|
pub table_store: Option<TableStore>,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub block_store: Option<BlockStore>,
|
pub block_store: Option<BlockStore>,
|
||||||
pub crypto: Option<Crypto>,
|
pub crypto: Option<Crypto>,
|
||||||
pub attachment_manager: Option<AttachmentManager>,
|
pub attachment_manager: Option<AttachmentManager>,
|
||||||
@ -30,6 +31,7 @@ impl ServicesContext {
|
|||||||
update_callback,
|
update_callback,
|
||||||
protected_store: None,
|
protected_store: None,
|
||||||
table_store: None,
|
table_store: None,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store: None,
|
block_store: None,
|
||||||
crypto: None,
|
crypto: None,
|
||||||
attachment_manager: None,
|
attachment_manager: None,
|
||||||
@ -42,7 +44,7 @@ impl ServicesContext {
|
|||||||
update_callback: UpdateCallback,
|
update_callback: UpdateCallback,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
block_store: BlockStore,
|
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
attachment_manager: AttachmentManager,
|
attachment_manager: AttachmentManager,
|
||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
@ -52,6 +54,7 @@ impl ServicesContext {
|
|||||||
update_callback,
|
update_callback,
|
||||||
protected_store: Some(protected_store),
|
protected_store: Some(protected_store),
|
||||||
table_store: Some(table_store),
|
table_store: Some(table_store),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store: Some(block_store),
|
block_store: Some(block_store),
|
||||||
crypto: Some(crypto),
|
crypto: Some(crypto),
|
||||||
attachment_manager: Some(attachment_manager),
|
attachment_manager: Some(attachment_manager),
|
||||||
@ -103,6 +106,8 @@ impl ServicesContext {
|
|||||||
self.crypto = Some(crypto.clone());
|
self.crypto = Some(crypto.clone());
|
||||||
|
|
||||||
// Set up block store
|
// Set up block store
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
|
{
|
||||||
trace!("init block store");
|
trace!("init block store");
|
||||||
let block_store = BlockStore::new(self.config.clone());
|
let block_store = BlockStore::new(self.config.clone());
|
||||||
if let Err(e) = block_store.init().await {
|
if let Err(e) = block_store.init().await {
|
||||||
@ -111,14 +116,15 @@ impl ServicesContext {
|
|||||||
return Err(e);
|
return Err(e);
|
||||||
}
|
}
|
||||||
self.block_store = Some(block_store.clone());
|
self.block_store = Some(block_store.clone());
|
||||||
|
}
|
||||||
|
|
||||||
// Set up storage manager
|
// Set up storage manager
|
||||||
trace!("init storage manager");
|
trace!("init storage manager");
|
||||||
let storage_manager = StorageManager::new(
|
let storage_manager = StorageManager::new(
|
||||||
self.config.clone(),
|
self.config.clone(),
|
||||||
self.crypto.clone().unwrap(),
|
self.crypto.clone().unwrap(),
|
||||||
self.protected_store.clone().unwrap(),
|
|
||||||
self.table_store.clone().unwrap(),
|
self.table_store.clone().unwrap(),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
self.block_store.clone().unwrap(),
|
self.block_store.clone().unwrap(),
|
||||||
);
|
);
|
||||||
if let Err(e) = storage_manager.init().await {
|
if let Err(e) = storage_manager.init().await {
|
||||||
@ -136,6 +142,7 @@ impl ServicesContext {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store,
|
protected_store,
|
||||||
table_store,
|
table_store,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store,
|
block_store,
|
||||||
crypto,
|
crypto,
|
||||||
);
|
);
|
||||||
@ -162,6 +169,7 @@ impl ServicesContext {
|
|||||||
trace!("terminate storage manager");
|
trace!("terminate storage manager");
|
||||||
storage_manager.terminate().await;
|
storage_manager.terminate().await;
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
if let Some(block_store) = &mut self.block_store {
|
if let Some(block_store) = &mut self.block_store {
|
||||||
trace!("terminate block store");
|
trace!("terminate block store");
|
||||||
block_store.terminate().await;
|
block_store.terminate().await;
|
||||||
@ -198,6 +206,7 @@ pub struct VeilidCoreContext {
|
|||||||
pub storage_manager: StorageManager,
|
pub storage_manager: StorageManager,
|
||||||
pub protected_store: ProtectedStore,
|
pub protected_store: ProtectedStore,
|
||||||
pub table_store: TableStore,
|
pub table_store: TableStore,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub block_store: BlockStore,
|
pub block_store: BlockStore,
|
||||||
pub crypto: Crypto,
|
pub crypto: Crypto,
|
||||||
pub attachment_manager: AttachmentManager,
|
pub attachment_manager: AttachmentManager,
|
||||||
@ -251,6 +260,7 @@ impl VeilidCoreContext {
|
|||||||
storage_manager: sc.storage_manager.unwrap(),
|
storage_manager: sc.storage_manager.unwrap(),
|
||||||
protected_store: sc.protected_store.unwrap(),
|
protected_store: sc.protected_store.unwrap(),
|
||||||
table_store: sc.table_store.unwrap(),
|
table_store: sc.table_store.unwrap(),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store: sc.block_store.unwrap(),
|
block_store: sc.block_store.unwrap(),
|
||||||
crypto: sc.crypto.unwrap(),
|
crypto: sc.crypto.unwrap(),
|
||||||
attachment_manager: sc.attachment_manager.unwrap(),
|
attachment_manager: sc.attachment_manager.unwrap(),
|
||||||
@ -264,6 +274,7 @@ impl VeilidCoreContext {
|
|||||||
self.update_callback.clone(),
|
self.update_callback.clone(),
|
||||||
self.protected_store,
|
self.protected_store,
|
||||||
self.table_store,
|
self.table_store,
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
self.block_store,
|
self.block_store,
|
||||||
self.crypto,
|
self.crypto,
|
||||||
self.attachment_manager,
|
self.attachment_manager,
|
||||||
|
@ -25,11 +25,10 @@ pub use none::*;
|
|||||||
#[cfg(feature = "enable-crypto-vld0")]
|
#[cfg(feature = "enable-crypto-vld0")]
|
||||||
pub use vld0::*;
|
pub use vld0::*;
|
||||||
|
|
||||||
use crate::*;
|
use super::*;
|
||||||
use core::convert::TryInto;
|
use core::convert::TryInto;
|
||||||
use hashlink::linked_hash_map::Entry;
|
use hashlink::linked_hash_map::Entry;
|
||||||
use hashlink::LruCache;
|
use hashlink::LruCache;
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Handle to a particular cryptosystem
|
/// Handle to a particular cryptosystem
|
||||||
pub type CryptoSystemVersion = Arc<dyn CryptoSystem + Send + Sync>;
|
pub type CryptoSystemVersion = Arc<dyn CryptoSystem + Send + Sync>;
|
||||||
|
@ -1,8 +1,12 @@
|
|||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
mod block_store;
|
mod block_store;
|
||||||
|
|
||||||
mod protected_store;
|
mod protected_store;
|
||||||
mod system;
|
mod system;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub use block_store::*;
|
pub use block_store::*;
|
||||||
|
|
||||||
pub use protected_store::*;
|
pub use protected_store::*;
|
||||||
pub use system::*;
|
pub use system::*;
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ impl ProtectedStore {
|
|||||||
));
|
));
|
||||||
|
|
||||||
// Ensure permissions are correct
|
// Ensure permissions are correct
|
||||||
ensure_file_private_owner(&insecure_keyring_file)?;
|
ensure_file_private_owner(&insecure_keyring_file).map_err(|e| eyre!("{}", e))?;
|
||||||
|
|
||||||
// Open the insecure keyring
|
// Open the insecure keyring
|
||||||
inner.keyring_manager = Some(
|
inner.keyring_manager = Some(
|
||||||
|
@ -1,8 +1,12 @@
|
|||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
mod block_store;
|
mod block_store;
|
||||||
|
|
||||||
mod protected_store;
|
mod protected_store;
|
||||||
mod system;
|
mod system;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub use block_store::*;
|
pub use block_store::*;
|
||||||
|
|
||||||
pub use protected_store::*;
|
pub use protected_store::*;
|
||||||
pub use system::*;
|
pub use system::*;
|
||||||
|
|
||||||
|
@ -41,14 +41,6 @@ pub use self::veilid_config::*;
|
|||||||
pub use self::veilid_layer_filter::*;
|
pub use self::veilid_layer_filter::*;
|
||||||
pub use veilid_tools as tools;
|
pub use veilid_tools as tools;
|
||||||
|
|
||||||
use enumset::*;
|
|
||||||
use rkyv::{
|
|
||||||
bytecheck, bytecheck::CheckBytes, de::deserializers::SharedDeserializeMap, with::Skip,
|
|
||||||
Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
|
|
||||||
};
|
|
||||||
type RkyvDefaultValidator<'t> = rkyv::validation::validators::DefaultValidator<'t>;
|
|
||||||
use serde::*;
|
|
||||||
|
|
||||||
pub mod veilid_capnp {
|
pub mod veilid_capnp {
|
||||||
include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs"));
|
include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs"));
|
||||||
}
|
}
|
||||||
@ -93,4 +85,20 @@ pub static DEFAULT_LOG_IGNORE_LIST: [&str; 21] = [
|
|||||||
"attohttpc",
|
"attohttpc",
|
||||||
];
|
];
|
||||||
|
|
||||||
|
use cfg_if::*;
|
||||||
|
use enumset::*;
|
||||||
|
use eyre::{bail, eyre, Report as EyreReport, Result as EyreResult, WrapErr};
|
||||||
|
use parking_lot::*;
|
||||||
|
use rkyv::{
|
||||||
|
bytecheck, bytecheck::CheckBytes, de::deserializers::SharedDeserializeMap, with::Skip,
|
||||||
|
Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
|
||||||
|
};
|
||||||
|
use tracing::*;
|
||||||
use veilid_tools::*;
|
use veilid_tools::*;
|
||||||
|
type RkyvDefaultValidator<'t> = rkyv::validation::validators::DefaultValidator<'t>;
|
||||||
|
use futures_util::stream::FuturesUnordered;
|
||||||
|
use owo_colors::OwoColorize;
|
||||||
|
use schemars::{schema_for, JsonSchema};
|
||||||
|
use serde::*;
|
||||||
|
use stop_token::*;
|
||||||
|
use thiserror::Error as ThisError;
|
||||||
|
@ -150,6 +150,7 @@ struct NetworkManagerUnlockedInner {
|
|||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
block_store: BlockStore,
|
block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
// Accessors
|
// Accessors
|
||||||
@ -181,6 +182,7 @@ impl NetworkManager {
|
|||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
block_store: BlockStore,
|
block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
) -> NetworkManagerUnlockedInner {
|
) -> NetworkManagerUnlockedInner {
|
||||||
@ -189,6 +191,7 @@ impl NetworkManager {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store,
|
protected_store,
|
||||||
table_store,
|
table_store,
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
block_store,
|
block_store,
|
||||||
crypto,
|
crypto,
|
||||||
routing_table: RwLock::new(None),
|
routing_table: RwLock::new(None),
|
||||||
@ -204,6 +207,7 @@ impl NetworkManager {
|
|||||||
storage_manager: StorageManager,
|
storage_manager: StorageManager,
|
||||||
protected_store: ProtectedStore,
|
protected_store: ProtectedStore,
|
||||||
table_store: TableStore,
|
table_store: TableStore,
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
block_store: BlockStore,
|
block_store: BlockStore,
|
||||||
crypto: Crypto,
|
crypto: Crypto,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
@ -214,6 +218,7 @@ impl NetworkManager {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store,
|
protected_store,
|
||||||
table_store,
|
table_store,
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
block_store,
|
block_store,
|
||||||
crypto,
|
crypto,
|
||||||
)),
|
)),
|
||||||
@ -241,6 +246,7 @@ impl NetworkManager {
|
|||||||
pub fn table_store(&self) -> TableStore {
|
pub fn table_store(&self) -> TableStore {
|
||||||
self.unlocked_inner.table_store.clone()
|
self.unlocked_inner.table_store.clone()
|
||||||
}
|
}
|
||||||
|
#[cfg(feature="unstable-blockstore")]
|
||||||
pub fn block_store(&self) -> BlockStore {
|
pub fn block_store(&self) -> BlockStore {
|
||||||
self.unlocked_inner.block_store.clone()
|
self.unlocked_inner.block_store.clone()
|
||||||
}
|
}
|
||||||
@ -679,12 +685,12 @@ impl NetworkManager {
|
|||||||
peer_info,
|
peer_info,
|
||||||
false,
|
false,
|
||||||
) {
|
) {
|
||||||
None => {
|
Ok(nr) => nr,
|
||||||
|
Err(e) => {
|
||||||
return Ok(NetworkResult::invalid_message(
|
return Ok(NetworkResult::invalid_message(
|
||||||
"unable to register reverse connect peerinfo",
|
format!("unable to register reverse connect peerinfo: {}", e)
|
||||||
))
|
));
|
||||||
}
|
}
|
||||||
Some(nr) => nr,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Make a reverse connection to the peer and send the receipt to it
|
// Make a reverse connection to the peer and send the receipt to it
|
||||||
@ -702,13 +708,12 @@ impl NetworkManager {
|
|||||||
peer_info,
|
peer_info,
|
||||||
false,
|
false,
|
||||||
) {
|
) {
|
||||||
None => {
|
Ok(nr) => nr,
|
||||||
|
Err(e) => {
|
||||||
return Ok(NetworkResult::invalid_message(
|
return Ok(NetworkResult::invalid_message(
|
||||||
//sender_id,
|
format!("unable to register hole punch connect peerinfo: {}", e)
|
||||||
"unable to register hole punch connect peerinfo",
|
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
Some(nr) => nr,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Get the udp direct dialinfo for the hole punch
|
// Get the udp direct dialinfo for the hole punch
|
||||||
@@ -1071,8 +1076,17 @@

        // Dial info filter comes from the target node ref
        let dial_info_filter = target_node_ref.dial_info_filter();
-        let sequencing = target_node_ref.sequencing();
+        let mut sequencing = target_node_ref.sequencing();
+
+        // If the node has had lost questions or failures to send, prefer sequencing
+        // to improve reliability. The node may be experiencing UDP fragmentation drops
+        // or other firewalling issues and may perform better with TCP.
+        let unreliable = target_node_ref.peer_stats().rpc_stats.failed_to_send > 0 || target_node_ref.peer_stats().rpc_stats.recent_lost_answers > 0;
+        if unreliable && sequencing < Sequencing::PreferOrdered {
+            sequencing = Sequencing::PreferOrdered;
+        }

+        // Get the best contact method with these parameters from the routing domain
        let cm = routing_table.get_contact_method(
            routing_domain,
            &peer_a,
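A standalone restatement of the fallback rule introduced above, with an assumed Sequencing ordering (NoPreference < PreferOrdered < EnsureOrdered) that mirrors how the comparison in the diff is used; treat the enum here as illustrative rather than veilid_core's actual definition:

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Sequencing {
    NoPreference,
    PreferOrdered,
    EnsureOrdered,
}

/// If a peer has recently failed sends or lost answers, bump the preference
/// up to at least PreferOrdered so ordered transports are tried before raw UDP.
fn effective_sequencing(
    requested: Sequencing,
    failed_to_send: u32,
    recent_lost_answers: u32,
) -> Sequencing {
    let unreliable = failed_to_send > 0 || recent_lost_answers > 0;
    if unreliable && requested < Sequencing::PreferOrdered {
        Sequencing::PreferOrdered
    } else {
        requested
    }
}

fn main() {
    // An unreliable peer gets upgraded from NoPreference...
    assert_eq!(
        effective_sequencing(Sequencing::NoPreference, 1, 0),
        Sequencing::PreferOrdered
    );
    // ...but a stronger request is left alone.
    assert_eq!(
        effective_sequencing(Sequencing::EnsureOrdered, 3, 2),
        Sequencing::EnsureOrdered
    );
}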
@@ -1088,7 +1102,7 @@ impl NetworkManager {
            ContactMethod::Direct(di) => NodeContactMethod::Direct(di),
            ContactMethod::SignalReverse(relay_key, target_key) => {
                let relay_nr = routing_table
-                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)
+                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
                    .ok_or_else(|| eyre!("couldn't look up relay"))?;
                if !target_node_ref.node_ids().contains(&target_key) {
                    bail!("target noderef didn't match target key");
@@ -1097,7 +1111,7 @@ impl NetworkManager {
                }
            ContactMethod::SignalHolePunch(relay_key, target_key) => {
                let relay_nr = routing_table
-                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)
+                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
                    .ok_or_else(|| eyre!("couldn't look up relay"))?;
                if target_node_ref.node_ids().contains(&target_key) {
                    bail!("target noderef didn't match target key");
@@ -1106,13 +1120,13 @@ impl NetworkManager {
                }
            ContactMethod::InboundRelay(relay_key) => {
                let relay_nr = routing_table
-                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)
+                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
                    .ok_or_else(|| eyre!("couldn't look up relay"))?;
                NodeContactMethod::InboundRelay(relay_nr)
            }
            ContactMethod::OutboundRelay(relay_key) => {
                let relay_nr = routing_table
-                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)
+                    .lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
                    .ok_or_else(|| eyre!("couldn't look up relay"))?;
                NodeContactMethod::OutboundRelay(relay_nr)
            }
@@ -1415,7 +1429,13 @@ impl NetworkManager {
            // We should, because relays are chosen by nodes that have established connectivity and
            // should be mutually in each others routing tables. The node needing the relay will be
            // pinging this node regularly to keep itself in the routing table
-            routing_table.lookup_node_ref(recipient_id)
+            match routing_table.lookup_node_ref(recipient_id) {
+                Ok(v) => v,
+                Err(e) => {
+                    log_net!(debug "failed to look up recipient node for relay, dropping outbound relayed packet: {}" ,e);
+                    return Ok(false);
+                }
+            }
        };

        if let Some(relay_nr) = some_relay_nr {
@@ -1457,12 +1477,12 @@ impl NetworkManager {
                connection_descriptor,
                ts,
            ) {
-                None => {
+                Ok(v) => v,
+                Err(e) => {
                    // If the node couldn't be registered just skip this envelope,
-                    // the error will have already been logged
+                    log_net!(debug "failed to register node with existing connection: {}", e);
                    return Ok(false);
                }
-                Some(v) => v,
            };
        source_noderef.add_envelope_version(envelope.get_version());

|
|||||||
peers: {
|
peers: {
|
||||||
let mut out = Vec::new();
|
let mut out = Vec::new();
|
||||||
for (k, v) in routing_table.get_recent_peers() {
|
for (k, v) in routing_table.get_recent_peers() {
|
||||||
if let Some(nr) = routing_table.lookup_node_ref(k) {
|
if let Ok(Some(nr)) = routing_table.lookup_node_ref(k) {
|
||||||
let peer_stats = nr.peer_stats();
|
let peer_stats = nr.peer_stats();
|
||||||
let peer = PeerTableData {
|
let peer = PeerTableData {
|
||||||
node_ids: nr.node_ids().iter().copied().collect(),
|
node_ids: nr.node_ids().iter().copied().collect(),
|
||||||
|
@@ -212,7 +212,8 @@ impl Network {
        } else {
            // If no address is specified, but the port is, use ipv4 and ipv6 unspecified
            // If the address is specified, only use the specified port and fail otherwise
-            let sockaddrs = listen_address_to_socket_addrs(&listen_address)?;
+            let sockaddrs =
+                listen_address_to_socket_addrs(&listen_address).map_err(|e| eyre!("{}", e))?;
            if sockaddrs.is_empty() {
                bail!("No valid listen address: {}", listen_address);
            }
@@ -236,7 +237,8 @@ impl Network {
        } else {
            // If no address is specified, but the port is, use ipv4 and ipv6 unspecified
            // If the address is specified, only use the specified port and fail otherwise
-            let sockaddrs = listen_address_to_socket_addrs(&listen_address)?;
+            let sockaddrs =
+                listen_address_to_socket_addrs(&listen_address).map_err(|e| eyre!("{}", e))?;
            if sockaddrs.is_empty() {
                bail!("No valid listen address: {}", listen_address);
            }
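The two hunks above only change the error mapping, but the comments describe the intended fallback: bind the unspecified IPv4 and IPv6 addresses when only a port is configured, and otherwise use exactly what was specified. A rough illustration with std types only, assuming a ":port" spelling for the port-only case; the real listen_address_to_socket_addrs handles more formats than this hypothetical helper:

    use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs};

    fn listen_addrs(listen_address: &str) -> std::io::Result<Vec<SocketAddr>> {
        // Port-only form like ":5150" binds the unspecified address on both families
        if let Some(port_str) = listen_address.strip_prefix(':') {
            let port: u16 = port_str
                .parse()
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
            return Ok(vec![
                SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), port),
                SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port),
            ]);
        }
        // Otherwise resolve exactly the specified address:port and fail if nothing matches
        Ok(listen_address.to_socket_addrs()?.collect())
    }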
@@ -603,7 +603,7 @@ impl BucketEntryInner {
        }
    }
    pub(super) fn check_dead(&self, cur_ts: Timestamp) -> bool {
-        // If we have failured to send NEVER_REACHED_PING_COUNT times in a row, the node is dead
+        // If we have failed to send NEVER_REACHED_PING_COUNT times in a row, the node is dead
        if self.peer_stats.rpc_stats.failed_to_send >= NEVER_REACHED_PING_COUNT {
            return true;
        }
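A compact sketch of the dead-node check that the corrected comment describes; NEVER_REACHED_PING_COUNT here is an arbitrary placeholder value, not the project's actual constant, and the stats struct is a stand-in for the bucket entry state:

    const NEVER_REACHED_PING_COUNT: u32 = 4; // placeholder value for illustration

    struct RpcStats {
        failed_to_send: u32,
    }

    // A peer that failed this many consecutive sends is treated as dead and
    // becomes a candidate for eviction from its bucket.
    fn check_dead(stats: &RpcStats) -> bool {
        stats.failed_to_send >= NEVER_REACHED_PING_COUNT
    }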
@@ -628,14 +628,14 @@ impl RoutingTable {
    }

    /// Resolve an existing routing table entry using any crypto kind and return a reference to it
-    pub fn lookup_any_node_ref(&self, node_id_key: PublicKey) -> Option<NodeRef> {
+    pub fn lookup_any_node_ref(&self, node_id_key: PublicKey) -> EyreResult<Option<NodeRef>> {
        self.inner
            .read()
            .lookup_any_node_ref(self.clone(), node_id_key)
    }

    /// Resolve an existing routing table entry and return a reference to it
-    pub fn lookup_node_ref(&self, node_id: TypedKey) -> Option<NodeRef> {
+    pub fn lookup_node_ref(&self, node_id: TypedKey) -> EyreResult<Option<NodeRef>> {
        self.inner.read().lookup_node_ref(self.clone(), node_id)
    }

@@ -645,7 +645,7 @@ impl RoutingTable {
        node_id: TypedKey,
        routing_domain_set: RoutingDomainSet,
        dial_info_filter: DialInfoFilter,
-    ) -> Option<NodeRef> {
+    ) -> EyreResult<Option<NodeRef>> {
        self.inner.read().lookup_and_filter_noderef(
            self.clone(),
            node_id,
@@ -662,7 +662,7 @@ impl RoutingTable {
        routing_domain: RoutingDomain,
        peer_info: PeerInfo,
        allow_invalid: bool,
-    ) -> Option<NodeRef> {
+    ) -> EyreResult<NodeRef> {
        self.inner.write().register_node_with_peer_info(
            self.clone(),
            routing_domain,
@@ -678,7 +678,7 @@ impl RoutingTable {
        node_id: TypedKey,
        descriptor: ConnectionDescriptor,
        timestamp: Timestamp,
-    ) -> Option<NodeRef> {
+    ) -> EyreResult<NodeRef> {
        self.inner.write().register_node_with_existing_connection(
            self.clone(),
            node_id,
@@ -711,7 +711,7 @@ impl RoutingTable {
        // (uses same logic as send_data, ensuring last_connection works for UDP)
        for e in &recent_peers {
            let mut dead = true;
-            if let Some(nr) = self.lookup_node_ref(*e) {
+            if let Ok(Some(nr)) = self.lookup_node_ref(*e) {
                if let Some(last_connection) = nr.last_connection() {
                    out.push((*e, RecentPeersEntry { last_connection }));
                    dead = false;
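These signature changes move the routing table wrappers from Option<NodeRef> to EyreResult, so callers can tell an invalid lookup (an error) apart from a node that simply is not in the table (None). A hedged sketch of the two call-site patterns this commit uses, with placeholder types and assuming the eyre crate:

    use eyre::Result as EyreResult;

    struct NodeRef;
    struct RoutingTable;

    impl RoutingTable {
        fn lookup_node_ref(&self, _node_id: u64) -> EyreResult<Option<NodeRef>> {
            Ok(None)
        }
    }

    fn examples(rt: &RoutingTable) -> EyreResult<()> {
        // Best-effort paths fold the error and the "not found" case into one pattern:
        if let Ok(Some(_nr)) = rt.lookup_node_ref(1) {
            // use the node reference
        }
        // Paths that should surface bad input propagate the error with `?`
        // and handle only the "not found" case themselves:
        let _maybe_nr: Option<NodeRef> = rt.lookup_node_ref(2)?;
        Ok(())
    }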
@@ -1017,10 +1017,11 @@ impl RoutingTable {
        }

        // Register the node if it's new
-        if let Some(nr) =
-            self.register_node_with_peer_info(RoutingDomain::PublicInternet, p, false)
-        {
-            out.push(nr);
+        match self.register_node_with_peer_info(RoutingDomain::PublicInternet, p, false) {
+            Ok(nr) => out.push(nr),
+            Err(e) => {
+                log_rtab!(debug "failed to register node with peer info from find node answer: {}", e);
+            }
        }
    }
    out
@@ -192,25 +192,24 @@ pub trait NodeRefBase: Sized {
            }
            dif
        }
-    fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
+    fn relay(&self, routing_domain: RoutingDomain) -> EyreResult<Option<NodeRef>> {
        self.operate_mut(|rti, e| {
-            e.signed_node_info(routing_domain)
-                .and_then(|n| n.relay_peer_info())
-                .and_then(|rpi| {
+            let Some(sni) = e.signed_node_info(routing_domain) else {
+                return Ok(None);
+            };
+            let Some(rpi) = sni.relay_peer_info() else {
+                return Ok(None);
+            };
            // If relay is ourselves, then return None, because we can't relay through ourselves
            // and to contact this node we should have had an existing inbound connection
            if rti.unlocked_inner.matches_own_node_id(rpi.node_ids()) {
-                return None;
+                bail!("Can't relay though ourselves");
            }

            // Register relay node and return noderef
-            rti.register_node_with_peer_info(
-                self.routing_table(),
-                routing_domain,
-                rpi,
-                false,
-            )
-        })
+            let nr =
+                rti.register_node_with_peer_info(self.routing_table(), routing_domain, rpi, false)?;
+            Ok(Some(nr))
        })
    }

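The relay() rewrite above swaps an and_then chain for let-else early returns so that `?` and `bail!` can be used inside the closure: a missing relay is a normal None, while a relay that points back at ourselves is a reportable error. A standalone sketch of that shape with invented types, assuming the eyre crate:

    use eyre::{bail, Result as EyreResult};

    struct PeerInfo {
        is_self: bool,
    }
    struct NodeRef;

    fn relay_of(relay_peer_info: Option<&PeerInfo>) -> EyreResult<Option<NodeRef>> {
        // Absent relay info is a normal "no relay" answer, not an error
        let Some(rpi) = relay_peer_info else {
            return Ok(None);
        };
        // A relay that points back at ourselves is a real error worth reporting
        if rpi.is_self {
            bail!("can't relay through ourselves");
        }
        Ok(Some(NodeRef))
    }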
@@ -37,15 +37,27 @@ impl RouteNode {
        match self {
            RouteNode::NodeId(id) => {
                //
-                routing_table.lookup_node_ref(TypedKey::new(crypto_kind, *id))
+                match routing_table.lookup_node_ref(TypedKey::new(crypto_kind, *id)) {
+                    Ok(nr) => nr,
+                    Err(e) => {
+                        log_rtab!(debug "failed to look up route node: {}", e);
+                        return None;
+                    }
+                }
            }
            RouteNode::PeerInfo(pi) => {
                //
-                routing_table.register_node_with_peer_info(
+                match routing_table.register_node_with_peer_info(
                    RoutingDomain::PublicInternet,
                    pi.clone(),
                    false,
-                )
+                ) {
+                    Ok(nr) => Some(nr),
+                    Err(e) => {
+                        log_rtab!(debug "failed to register route node: {}", e);
+                        return None;
+                    }
+                }
            }
        }
    }
@@ -378,7 +378,14 @@ impl RouteSpecStore {
                // Already seen this node, should not be in the route twice
                return None;
            }
-            if let Some(relay) = node.locked_mut(rti).relay(RoutingDomain::PublicInternet) {
+            let opt_relay = match node.locked_mut(rti).relay(RoutingDomain::PublicInternet) {
+                Ok(r) => r,
+                Err(e) => {
+                    log_rtab!(error "failed to get relay for route node: {}", e);
+                    return None;
+                }
+            };
+            if let Some(relay) = opt_relay {
                let relay_id = relay.locked(rti).best_node_id();
                if !seen_nodes.insert(relay_id) {
                    // Already seen this node, should not be in the route twice
|
|||||||
};
|
};
|
||||||
|
|
||||||
let opt_first_hop = match pr_first_hop_node {
|
let opt_first_hop = match pr_first_hop_node {
|
||||||
RouteNode::NodeId(id) => rti.lookup_node_ref(routing_table.clone(), TypedKey::new(crypto_kind, id)),
|
RouteNode::NodeId(id) => rti.lookup_node_ref(routing_table.clone(), TypedKey::new(crypto_kind, id))?,
|
||||||
RouteNode::PeerInfo(pi) => rti.register_node_with_peer_info(
|
RouteNode::PeerInfo(pi) => {
|
||||||
|
Some(rti.register_node_with_peer_info(
|
||||||
routing_table.clone(),
|
routing_table.clone(),
|
||||||
RoutingDomain::PublicInternet,
|
RoutingDomain::PublicInternet,
|
||||||
pi,
|
pi,
|
||||||
false,
|
false,
|
||||||
),
|
)?)
|
||||||
|
}
|
||||||
};
|
};
|
||||||
if opt_first_hop.is_none() {
|
if opt_first_hop.is_none() {
|
||||||
// Can't reach this private route any more
|
// Can't reach this private route any more
|
||||||
@ -1322,9 +1331,9 @@ impl RouteSpecStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ensure this isn't also an allocated route
|
// ensure this isn't also an allocated route
|
||||||
if inner.content.get_id_by_key(&private_route.public_key.value).is_some() {
|
// if inner.content.get_id_by_key(&private_route.public_key.value).is_some() {
|
||||||
bail!("should not import allocated route");
|
// bail!("should not import allocated route");
|
||||||
}
|
// }
|
||||||
}
|
}
|
||||||
|
|
||||||
inner.cache.cache_remote_private_route(cur_ts, id, private_routes);
|
inner.cache.cache_remote_private_route(cur_ts, id, private_routes);
|
||||||
|
@ -40,7 +40,7 @@ impl RouteSpecStoreContent {
|
|||||||
// Go through best route and resolve noderefs
|
// Go through best route and resolve noderefs
|
||||||
let mut hop_node_refs = Vec::with_capacity(rsd.hops.len());
|
let mut hop_node_refs = Vec::with_capacity(rsd.hops.len());
|
||||||
for h in &rsd.hops {
|
for h in &rsd.hops {
|
||||||
let Some(nr) = routing_table.lookup_node_ref(TypedKey::new(rsd.crypto_kind, *h)) else {
|
let Ok(Some(nr)) = routing_table.lookup_node_ref(TypedKey::new(rsd.crypto_kind, *h)) else {
|
||||||
dead_ids.push(rsid.clone());
|
dead_ids.push(rsid.clone());
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
|
@ -651,14 +651,13 @@ impl RoutingTableInner {
|
|||||||
outer_self: RoutingTable,
|
outer_self: RoutingTable,
|
||||||
node_ids: &TypedKeySet,
|
node_ids: &TypedKeySet,
|
||||||
update_func: F,
|
update_func: F,
|
||||||
) -> Option<NodeRef>
|
) -> EyreResult<NodeRef>
|
||||||
where
|
where
|
||||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
|
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
|
||||||
{
|
{
|
||||||
// Ensure someone isn't trying register this node itself
|
// Ensure someone isn't trying register this node itself
|
||||||
if self.unlocked_inner.matches_own_node_id(node_ids) {
|
if self.unlocked_inner.matches_own_node_id(node_ids) {
|
||||||
log_rtab!(debug "can't register own node");
|
bail!("can't register own node");
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Look up all bucket entries and make sure we only have zero or one
|
// Look up all bucket entries and make sure we only have zero or one
|
||||||
@ -688,8 +687,7 @@ impl RoutingTableInner {
|
|||||||
if let Some(best_entry) = best_entry {
|
if let Some(best_entry) = best_entry {
|
||||||
// Update the entry with all of the node ids
|
// Update the entry with all of the node ids
|
||||||
if let Err(e) = self.update_bucket_entries(best_entry.clone(), node_ids) {
|
if let Err(e) = self.update_bucket_entries(best_entry.clone(), node_ids) {
|
||||||
log_rtab!(debug "Not registering new ids for existing node: {}", e);
|
bail!("Not registering new ids for existing node: {}", e);
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make a noderef to return
|
// Make a noderef to return
|
||||||
@ -699,7 +697,7 @@ impl RoutingTableInner {
|
|||||||
best_entry.with_mut_inner(|e| update_func(self, e));
|
best_entry.with_mut_inner(|e| update_func(self, e));
|
||||||
|
|
||||||
// Return the noderef
|
// Return the noderef
|
||||||
return Some(nr);
|
return Ok(nr);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member
|
// If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member
|
||||||
@ -712,8 +710,7 @@ impl RoutingTableInner {
|
|||||||
|
|
||||||
// Update the other bucket entries with the remaining node ids
|
// Update the other bucket entries with the remaining node ids
|
||||||
if let Err(e) = self.update_bucket_entries(new_entry.clone(), node_ids) {
|
if let Err(e) = self.update_bucket_entries(new_entry.clone(), node_ids) {
|
||||||
log_rtab!(debug "Not registering new node: {}", e);
|
bail!("Not registering new node: {}", e);
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make node ref to return
|
// Make node ref to return
|
||||||
@ -725,7 +722,7 @@ impl RoutingTableInner {
|
|||||||
// Kick the bucket
|
// Kick the bucket
|
||||||
log_rtab!(debug "Routing table now has {} nodes, {} live", self.bucket_entry_count(), self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable, &VALID_CRYPTO_KINDS));
|
log_rtab!(debug "Routing table now has {} nodes, {} live", self.bucket_entry_count(), self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable, &VALID_CRYPTO_KINDS));
|
||||||
|
|
||||||
Some(nr)
|
Ok(nr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve an existing routing table entry using any crypto kind and return a reference to it
|
/// Resolve an existing routing table entry using any crypto kind and return a reference to it
|
||||||
@ -733,28 +730,35 @@ impl RoutingTableInner {
|
|||||||
&self,
|
&self,
|
||||||
outer_self: RoutingTable,
|
outer_self: RoutingTable,
|
||||||
node_id_key: PublicKey,
|
node_id_key: PublicKey,
|
||||||
) -> Option<NodeRef> {
|
) -> EyreResult<Option<NodeRef>> {
|
||||||
VALID_CRYPTO_KINDS.iter().find_map(|ck| {
|
for ck in VALID_CRYPTO_KINDS {
|
||||||
self.lookup_node_ref(outer_self.clone(), TypedKey::new(*ck, node_id_key))
|
if let Some(nr) =
|
||||||
})
|
self.lookup_node_ref(outer_self.clone(), TypedKey::new(ck, node_id_key))?
|
||||||
|
{
|
||||||
|
return Ok(Some(nr));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve an existing routing table entry and return a reference to it
|
/// Resolve an existing routing table entry and return a reference to it
|
||||||
pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: TypedKey) -> Option<NodeRef> {
|
pub fn lookup_node_ref(
|
||||||
|
&self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
node_id: TypedKey,
|
||||||
|
) -> EyreResult<Option<NodeRef>> {
|
||||||
if self.unlocked_inner.matches_own_node_id(&[node_id]) {
|
if self.unlocked_inner.matches_own_node_id(&[node_id]) {
|
||||||
log_rtab!(error "can't look up own node id in routing table");
|
bail!("can't look up own node id in routing table");
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
|
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
|
||||||
log_rtab!(error "can't look up node id with invalid crypto kind");
|
bail!("can't look up node id with invalid crypto kind");
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let bucket_index = self.unlocked_inner.calculate_bucket_index(&node_id);
|
let bucket_index = self.unlocked_inner.calculate_bucket_index(&node_id);
|
||||||
let bucket = self.get_bucket(bucket_index);
|
let bucket = self.get_bucket(bucket_index);
|
||||||
bucket
|
Ok(bucket
|
||||||
.entry(&node_id.value)
|
.entry(&node_id.value)
|
||||||
.map(|e| NodeRef::new(outer_self, e, None))
|
.map(|e| NodeRef::new(outer_self, e, None)))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve an existing routing table entry and return a filtered reference to it
|
/// Resolve an existing routing table entry and return a filtered reference to it
|
||||||
@ -764,15 +768,15 @@ impl RoutingTableInner {
|
|||||||
node_id: TypedKey,
|
node_id: TypedKey,
|
||||||
routing_domain_set: RoutingDomainSet,
|
routing_domain_set: RoutingDomainSet,
|
||||||
dial_info_filter: DialInfoFilter,
|
dial_info_filter: DialInfoFilter,
|
||||||
) -> Option<NodeRef> {
|
) -> EyreResult<Option<NodeRef>> {
|
||||||
let nr = self.lookup_node_ref(outer_self, node_id)?;
|
let nr = self.lookup_node_ref(outer_self, node_id)?;
|
||||||
Some(
|
Ok(nr.map(|nr| {
|
||||||
nr.filtered_clone(
|
nr.filtered_clone(
|
||||||
NodeRefFilter::new()
|
NodeRefFilter::new()
|
||||||
.with_dial_info_filter(dial_info_filter)
|
.with_dial_info_filter(dial_info_filter)
|
||||||
.with_routing_domain_set(routing_domain_set),
|
.with_routing_domain_set(routing_domain_set),
|
||||||
),
|
|
||||||
)
|
)
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve an existing routing table entry and call a function on its entry without using a noderef
|
/// Resolve an existing routing table entry and call a function on its entry without using a noderef
|
||||||
@ -802,50 +806,53 @@ impl RoutingTableInner {
|
|||||||
routing_domain: RoutingDomain,
|
routing_domain: RoutingDomain,
|
||||||
peer_info: PeerInfo,
|
peer_info: PeerInfo,
|
||||||
allow_invalid: bool,
|
allow_invalid: bool,
|
||||||
) -> Option<NodeRef> {
|
) -> EyreResult<NodeRef> {
|
||||||
// if our own node is in the list, then ignore it as we don't add ourselves to our own routing table
|
// if our own node is in the list, then ignore it as we don't add ourselves to our own routing table
|
||||||
if self
|
if self
|
||||||
.unlocked_inner
|
.unlocked_inner
|
||||||
.matches_own_node_id(peer_info.node_ids())
|
.matches_own_node_id(peer_info.node_ids())
|
||||||
{
|
{
|
||||||
log_rtab!(debug "can't register own node id in routing table");
|
bail!("can't register own node id in routing table");
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// node can not be its own relay
|
// node can not be its own relay
|
||||||
let rids = peer_info.signed_node_info().relay_ids();
|
let rids = peer_info.signed_node_info().relay_ids();
|
||||||
let nids = peer_info.node_ids();
|
let nids = peer_info.node_ids();
|
||||||
if nids.contains_any(&rids) {
|
if nids.contains_any(&rids) {
|
||||||
log_rtab!(debug "node can not be its own relay");
|
bail!("node can not be its own relay");
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if !allow_invalid {
|
if !allow_invalid {
|
||||||
// verify signature
|
// verify signature
|
||||||
if !peer_info.signed_node_info().has_any_signature() {
|
if !peer_info.signed_node_info().has_any_signature() {
|
||||||
log_rtab!(debug "signed node info for {:?} has no valid signature", peer_info.node_ids());
|
bail!(
|
||||||
return None;
|
"signed node info for {:?} has no valid signature",
|
||||||
|
peer_info.node_ids()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
// verify signed node info is valid in this routing domain
|
// verify signed node info is valid in this routing domain
|
||||||
if !self.signed_node_info_is_valid_in_routing_domain(
|
if !self.signed_node_info_is_valid_in_routing_domain(
|
||||||
routing_domain,
|
routing_domain,
|
||||||
peer_info.signed_node_info(),
|
peer_info.signed_node_info(),
|
||||||
) {
|
) {
|
||||||
log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids(), routing_domain);
|
bail!(
|
||||||
return None;
|
"signed node info for {:?} not valid in the {:?} routing domain",
|
||||||
|
peer_info.node_ids(),
|
||||||
|
routing_domain
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let (node_ids, signed_node_info) = peer_info.destructure();
|
let (node_ids, signed_node_info) = peer_info.destructure();
|
||||||
self.create_node_ref(outer_self, &node_ids, |_rti, e| {
|
let mut nr = self.create_node_ref(outer_self, &node_ids, |_rti, e| {
|
||||||
e.update_signed_node_info(routing_domain, signed_node_info);
|
e.update_signed_node_info(routing_domain, signed_node_info);
|
||||||
})
|
})?;
|
||||||
.map(|mut nr| {
|
|
||||||
nr.set_filter(Some(
|
nr.set_filter(Some(
|
||||||
NodeRefFilter::new().with_routing_domain(routing_domain),
|
NodeRefFilter::new().with_routing_domain(routing_domain),
|
||||||
));
|
));
|
||||||
nr
|
|
||||||
})
|
Ok(nr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Shortcut function to add a node to our routing table if it doesn't exist
|
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||||
@ -856,17 +863,15 @@ impl RoutingTableInner {
|
|||||||
node_id: TypedKey,
|
node_id: TypedKey,
|
||||||
descriptor: ConnectionDescriptor,
|
descriptor: ConnectionDescriptor,
|
||||||
timestamp: Timestamp,
|
timestamp: Timestamp,
|
||||||
) -> Option<NodeRef> {
|
) -> EyreResult<NodeRef> {
|
||||||
let out = self.create_node_ref(outer_self, &TypedKeySet::from(node_id), |_rti, e| {
|
let nr = self.create_node_ref(outer_self, &TypedKeySet::from(node_id), |_rti, e| {
|
||||||
// this node is live because it literally just connected to us
|
// this node is live because it literally just connected to us
|
||||||
e.touch_last_seen(timestamp);
|
e.touch_last_seen(timestamp);
|
||||||
});
|
})?;
|
||||||
if let Some(nr) = &out {
|
|
||||||
// set the most recent node address for connection finding and udp replies
|
// set the most recent node address for connection finding and udp replies
|
||||||
nr.locked_mut(self)
|
nr.locked_mut(self)
|
||||||
.set_last_connection(descriptor, timestamp);
|
.set_last_connection(descriptor, timestamp);
|
||||||
}
|
Ok(nr)
|
||||||
out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////
|
||||||
|
@ -259,9 +259,18 @@ impl RoutingTable {
|
|||||||
// Got peer info, let's add it to the routing table
|
// Got peer info, let's add it to the routing table
|
||||||
for pi in peer_info {
|
for pi in peer_info {
|
||||||
// Register the node
|
// Register the node
|
||||||
if let Some(nr) =
|
let nr = match self.register_node_with_peer_info(
|
||||||
self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, false)
|
RoutingDomain::PublicInternet,
|
||||||
{
|
pi,
|
||||||
|
false,
|
||||||
|
) {
|
||||||
|
Ok(nr) => nr,
|
||||||
|
Err(e) => {
|
||||||
|
log_rtab!(error "failed to register direct bootstrap peer info: {}", e);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// Add this our futures to process in parallel
|
// Add this our futures to process in parallel
|
||||||
for crypto_kind in VALID_CRYPTO_KINDS {
|
for crypto_kind in VALID_CRYPTO_KINDS {
|
||||||
let routing_table = self.clone();
|
let routing_table = self.clone();
|
||||||
@ -274,7 +283,6 @@ impl RoutingTable {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for all bootstrap operations to complete before we complete the singlefuture
|
// Wait for all bootstrap operations to complete before we complete the singlefuture
|
||||||
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
||||||
@ -341,9 +349,14 @@ impl RoutingTable {
|
|||||||
|
|
||||||
let pi = PeerInfo::new(bsrec.node_ids, sni);
|
let pi = PeerInfo::new(bsrec.node_ids, sni);
|
||||||
|
|
||||||
if let Some(nr) =
|
let nr =
|
||||||
self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true)
|
match self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) {
|
||||||
{
|
Ok(nr) => nr,
|
||||||
|
Err(e) => {
|
||||||
|
log_rtab!(error "failed to register bootstrap peer info: {}", e);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
// Add this our futures to process in parallel
|
// Add this our futures to process in parallel
|
||||||
for crypto_kind in VALID_CRYPTO_KINDS {
|
for crypto_kind in VALID_CRYPTO_KINDS {
|
||||||
// Do we need to bootstrap this crypto kind?
|
// Do we need to bootstrap this crypto kind?
|
||||||
@ -363,9 +376,7 @@ impl RoutingTable {
|
|||||||
let _ = routing_table.find_target(crypto_kind, nr.clone()).await;
|
let _ = routing_table.find_target(crypto_kind, nr.clone()).await;
|
||||||
|
|
||||||
// Ensure we got the signed peer info
|
// Ensure we got the signed peer info
|
||||||
if !nr
|
if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) {
|
||||||
.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet)
|
|
||||||
{
|
|
||||||
log_rtab!(warn
|
log_rtab!(warn
|
||||||
"bootstrap at {:?} did not return valid signed node info",
|
"bootstrap at {:?} did not return valid signed node info",
|
||||||
nr
|
nr
|
||||||
@ -380,7 +391,6 @@ impl RoutingTable {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for all bootstrap operations to complete before we complete the singlefuture
|
// Wait for all bootstrap operations to complete before we complete the singlefuture
|
||||||
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
||||||
|
@ -51,15 +51,20 @@ impl RoutingTable {
|
|||||||
// The outbound relay is the host of the PWA
|
// The outbound relay is the host of the PWA
|
||||||
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
|
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
|
||||||
// Register new outbound relay
|
// Register new outbound relay
|
||||||
if let Some(nr) = self.register_node_with_peer_info(
|
match self.register_node_with_peer_info(
|
||||||
RoutingDomain::PublicInternet,
|
RoutingDomain::PublicInternet,
|
||||||
outbound_relay_peerinfo,
|
outbound_relay_peerinfo,
|
||||||
false,
|
false,
|
||||||
) {
|
) {
|
||||||
info!("Outbound relay node selected: {}", nr);
|
Ok(nr) => {
|
||||||
|
log_rtab!("Outbound relay node selected: {}", nr);
|
||||||
editor.set_relay_node(nr);
|
editor.set_relay_node(nr);
|
||||||
got_outbound_relay = true;
|
got_outbound_relay = true;
|
||||||
}
|
}
|
||||||
|
Err(e) => {
|
||||||
|
log_rtab!(error "failed to register node with peer info: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !got_outbound_relay {
|
if !got_outbound_relay {
|
||||||
|
@ -1,9 +1,9 @@
|
|||||||
use crate::*;
|
use crate::*;
|
||||||
|
use routing_table::*;
|
||||||
const SERIALIZED_PEERINFO: &str = r###"{"node_ids":["FAKE:eFOfgm_FNZBsTRi7KAESNwYFAUGgX2uDrTRWAL8ucjM"],"signed_node_info":{"Direct":{"node_info":{"network_class":"InboundCapable","outbound_protocols":1,"address_types":3,"envelope_support":[0],"crypto_support":[[86,76,68,48]],"dial_info_detail_list":[{"class":"Direct","dial_info":{"kind":"UDP","socket_address":{"address":{"IPV4":"1.2.3.4"},"port":5150}}},{"class":"Direct","dial_info":{"kind":"UDP","socket_address":{"address":{"IPV6":"bad:cafe::1"},"port":5150}}},{"class":"Direct","dial_info":{"kind":"TCP","socket_address":{"address":{"IPV4":"5.6.7.8"},"port":5150}}},{"class":"Direct","dial_info":{"kind":"TCP","socket_address":{"address":{"IPV6":"bad:cafe::1"},"port":5150}}},{"class":"Direct","dial_info":{"kind":"WS","socket_address":{"address":{"IPV4":"9.10.11.12"},"port":5150},"request":"bootstrap-1.dev.veilid.net:5150/ws"}},{"class":"Direct","dial_info":{"kind":"WS","socket_address":{"address":{"IPV6":"bad:cafe::1"},"port":5150},"request":"bootstrap-1.dev.veilid.net:5150/ws"}}]},"timestamp":1685058646770389,"signatures":[]}}}"###;
|
|
||||||
|
|
||||||
fn fake_routing_table() -> routing_table::RoutingTable {
|
fn fake_routing_table() -> routing_table::RoutingTable {
|
||||||
let veilid_config = VeilidConfig::new();
|
let veilid_config = VeilidConfig::new();
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
let block_store = BlockStore::new(veilid_config.clone());
|
let block_store = BlockStore::new(veilid_config.clone());
|
||||||
let protected_store = ProtectedStore::new(veilid_config.clone());
|
let protected_store = ProtectedStore::new(veilid_config.clone());
|
||||||
let table_store = TableStore::new(veilid_config.clone(), protected_store.clone());
|
let table_store = TableStore::new(veilid_config.clone(), protected_store.clone());
|
||||||
@ -11,8 +11,8 @@ fn fake_routing_table() -> routing_table::RoutingTable {
|
|||||||
let storage_manager = storage_manager::StorageManager::new(
|
let storage_manager = storage_manager::StorageManager::new(
|
||||||
veilid_config.clone(),
|
veilid_config.clone(),
|
||||||
crypto.clone(),
|
crypto.clone(),
|
||||||
protected_store.clone(),
|
|
||||||
table_store.clone(),
|
table_store.clone(),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store.clone(),
|
block_store.clone(),
|
||||||
);
|
);
|
||||||
let network_manager = network_manager::NetworkManager::new(
|
let network_manager = network_manager::NetworkManager::new(
|
||||||
@ -20,10 +20,11 @@ fn fake_routing_table() -> routing_table::RoutingTable {
|
|||||||
storage_manager,
|
storage_manager,
|
||||||
protected_store.clone(),
|
protected_store.clone(),
|
||||||
table_store.clone(),
|
table_store.clone(),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
block_store.clone(),
|
block_store.clone(),
|
||||||
crypto.clone(),
|
crypto.clone(),
|
||||||
);
|
);
|
||||||
routing_table::RoutingTable::new(network_manager)
|
RoutingTable::new(network_manager)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn test_routingtable_buckets_round_trip() {
|
pub async fn test_routingtable_buckets_round_trip() {
|
||||||
@@ -82,11 +83,35 @@ pub async fn test_routingtable_buckets_round_trip() {
}

pub async fn test_round_trip_peerinfo() {
-    let pi: routing_table::PeerInfo = deserialize_json(SERIALIZED_PEERINFO).unwrap();
+    let mut tks = TypedKeySet::new();
+    tks.add(TypedKey::new(
+        CRYPTO_KIND_VLD0,
+        CryptoKey::new([
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0,
+        ]),
+    ));
+    let pi: PeerInfo = PeerInfo::new(
+        tks,
+        SignedNodeInfo::Direct(SignedDirectNodeInfo::new(
+            NodeInfo::new(
+                NetworkClass::OutboundOnly,
+                ProtocolTypeSet::new(),
+                AddressTypeSet::new(),
+                vec![0],
+                vec![CRYPTO_KIND_VLD0],
+                vec![],
+            ),
+            Timestamp::new(0),
+            Vec::new(),
+        )),
+    );
+    let s = serialize_json(&pi);
+    let pi2 = deserialize_json(&s).expect("Should deserialize");
+    let s2 = serialize_json(&pi2);

-    let back = serialize_json(pi);
-
-    assert_eq!(SERIALIZED_PEERINFO, back);
+    assert_eq!(pi, pi2);
+    assert_eq!(s, s2);
}

pub async fn test_all() {
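The rewritten test now asserts both value equality and serialized-form stability instead of comparing against a fixed string constant. A hedged sketch of the same round-trip pattern using plain serde_json and a stand-in type; the real test goes through veilid's serialize_json/deserialize_json helpers and the PeerInfo type, and relies on the PartialEq derives added later in this commit:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
    struct Sample {
        node_count: u32,
        name: String,
    }

    fn round_trip() {
        let a = Sample { node_count: 3, name: "bootstrap".to_string() };

        // serialize -> deserialize -> serialize again
        let s = serde_json::to_string(&a).expect("serialize");
        let b: Sample = serde_json::from_str(&s).expect("deserialize");
        let s2 = serde_json::to_string(&b).expect("serialize again");

        // Equality of both the values and the serialized forms shows the
        // Serialize/Deserialize impls and the equality derives agree.
        assert_eq!(a, b);
        assert_eq!(s, s2);
    }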
@ -1,7 +1,16 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
#[derive(
|
#[derive(
|
||||||
Clone, Default, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
Clone,
|
||||||
|
Default,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
Debug,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
RkyvArchive,
|
||||||
|
RkyvSerialize,
|
||||||
|
RkyvDeserialize,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct NodeInfo {
|
pub struct NodeInfo {
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, Serialize, Deserialize, PartialEq, Eq, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
||||||
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct PeerInfo {
|
pub struct PeerInfo {
|
||||||
node_ids: TypedKeySet,
|
node_ids: TypedKeySet,
|
||||||
|
@ -1,7 +1,9 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
/// Signed NodeInfo that can be passed around amongst peers and verifiable
|
/// Signed NodeInfo that can be passed around amongst peers and verifiable
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
||||||
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct SignedDirectNodeInfo {
|
pub struct SignedDirectNodeInfo {
|
||||||
node_info: NodeInfo,
|
node_info: NodeInfo,
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
||||||
|
)]
|
||||||
#[archive_attr(repr(u8), derive(CheckBytes))]
|
#[archive_attr(repr(u8), derive(CheckBytes))]
|
||||||
pub enum SignedNodeInfo {
|
pub enum SignedNodeInfo {
|
||||||
Direct(SignedDirectNodeInfo),
|
Direct(SignedDirectNodeInfo),
|
||||||
|
@ -1,7 +1,9 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
/// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable
|
/// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
||||||
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct SignedRelayedNodeInfo {
|
pub struct SignedRelayedNodeInfo {
|
||||||
node_info: NodeInfo,
|
node_info: NodeInfo,
|
||||||
|
@ -22,6 +22,7 @@ mod signed_relayed_node_info;
|
|||||||
mod signed_value_data;
|
mod signed_value_data;
|
||||||
mod signed_value_descriptor;
|
mod signed_value_descriptor;
|
||||||
mod socket_address;
|
mod socket_address;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
mod tunnel;
|
mod tunnel;
|
||||||
mod typed_key;
|
mod typed_key;
|
||||||
mod typed_signature;
|
mod typed_signature;
|
||||||
@ -50,6 +51,7 @@ pub use signed_relayed_node_info::*;
|
|||||||
pub use signed_value_data::*;
|
pub use signed_value_data::*;
|
||||||
pub use signed_value_descriptor::*;
|
pub use signed_value_descriptor::*;
|
||||||
pub use socket_address::*;
|
pub use socket_address::*;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub use tunnel::*;
|
pub use tunnel::*;
|
||||||
pub use typed_key::*;
|
pub use typed_key::*;
|
||||||
pub use typed_signature::*;
|
pub use typed_signature::*;
|
||||||
|
@ -37,10 +37,15 @@ pub enum RPCAnswerDetail {
|
|||||||
GetValueA(RPCOperationGetValueA),
|
GetValueA(RPCOperationGetValueA),
|
||||||
SetValueA(RPCOperationSetValueA),
|
SetValueA(RPCOperationSetValueA),
|
||||||
WatchValueA(RPCOperationWatchValueA),
|
WatchValueA(RPCOperationWatchValueA),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
SupplyBlockA(RPCOperationSupplyBlockA),
|
SupplyBlockA(RPCOperationSupplyBlockA),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
FindBlockA(RPCOperationFindBlockA),
|
FindBlockA(RPCOperationFindBlockA),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
StartTunnelA(RPCOperationStartTunnelA),
|
StartTunnelA(RPCOperationStartTunnelA),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
CompleteTunnelA(RPCOperationCompleteTunnelA),
|
CompleteTunnelA(RPCOperationCompleteTunnelA),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
CancelTunnelA(RPCOperationCancelTunnelA),
|
CancelTunnelA(RPCOperationCancelTunnelA),
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -53,10 +58,15 @@ impl RPCAnswerDetail {
|
|||||||
RPCAnswerDetail::GetValueA(_) => "GetValueA",
|
RPCAnswerDetail::GetValueA(_) => "GetValueA",
|
||||||
RPCAnswerDetail::SetValueA(_) => "SetValueA",
|
RPCAnswerDetail::SetValueA(_) => "SetValueA",
|
||||||
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
|
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::SupplyBlockA(_) => "SupplyBlockA",
|
RPCAnswerDetail::SupplyBlockA(_) => "SupplyBlockA",
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::FindBlockA(_) => "FindBlockA",
|
RPCAnswerDetail::FindBlockA(_) => "FindBlockA",
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::StartTunnelA(_) => "StartTunnelA",
|
RPCAnswerDetail::StartTunnelA(_) => "StartTunnelA",
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CompleteTunnelA(_) => "CompleteTunnelA",
|
RPCAnswerDetail::CompleteTunnelA(_) => "CompleteTunnelA",
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CancelTunnelA(_) => "CancelTunnelA",
|
RPCAnswerDetail::CancelTunnelA(_) => "CancelTunnelA",
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -68,10 +78,15 @@ impl RPCAnswerDetail {
|
|||||||
RPCAnswerDetail::GetValueA(r) => r.validate(validate_context),
|
RPCAnswerDetail::GetValueA(r) => r.validate(validate_context),
|
||||||
RPCAnswerDetail::SetValueA(r) => r.validate(validate_context),
|
RPCAnswerDetail::SetValueA(r) => r.validate(validate_context),
|
||||||
RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context),
|
RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context),
|
RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context),
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::FindBlockA(r) => r.validate(validate_context),
|
RPCAnswerDetail::FindBlockA(r) => r.validate(validate_context),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::StartTunnelA(r) => r.validate(validate_context),
|
RPCAnswerDetail::StartTunnelA(r) => r.validate(validate_context),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CompleteTunnelA(r) => r.validate(validate_context),
|
RPCAnswerDetail::CompleteTunnelA(r) => r.validate(validate_context),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CancelTunnelA(r) => r.validate(validate_context),
|
RPCAnswerDetail::CancelTunnelA(r) => r.validate(validate_context),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -110,26 +125,31 @@ impl RPCAnswerDetail {
|
|||||||
let out = RPCOperationWatchValueA::decode(&op_reader)?;
|
let out = RPCOperationWatchValueA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::WatchValueA(out)
|
RPCAnswerDetail::WatchValueA(out)
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
veilid_capnp::answer::detail::SupplyBlockA(r) => {
|
veilid_capnp::answer::detail::SupplyBlockA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationSupplyBlockA::decode(&op_reader)?;
|
let out = RPCOperationSupplyBlockA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::SupplyBlockA(out)
|
RPCAnswerDetail::SupplyBlockA(out)
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
veilid_capnp::answer::detail::FindBlockA(r) => {
|
veilid_capnp::answer::detail::FindBlockA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationFindBlockA::decode(&op_reader)?;
|
let out = RPCOperationFindBlockA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::FindBlockA(out)
|
RPCAnswerDetail::FindBlockA(out)
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
veilid_capnp::answer::detail::StartTunnelA(r) => {
|
veilid_capnp::answer::detail::StartTunnelA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationStartTunnelA::decode(&op_reader)?;
|
let out = RPCOperationStartTunnelA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::StartTunnelA(out)
|
RPCAnswerDetail::StartTunnelA(out)
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
veilid_capnp::answer::detail::CompleteTunnelA(r) => {
|
veilid_capnp::answer::detail::CompleteTunnelA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationCompleteTunnelA::decode(&op_reader)?;
|
let out = RPCOperationCompleteTunnelA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::CompleteTunnelA(out)
|
RPCAnswerDetail::CompleteTunnelA(out)
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
veilid_capnp::answer::detail::CancelTunnelA(r) => {
|
veilid_capnp::answer::detail::CancelTunnelA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationCancelTunnelA::decode(&op_reader)?;
|
let out = RPCOperationCancelTunnelA::decode(&op_reader)?;
|
||||||
@ -151,16 +171,21 @@ impl RPCAnswerDetail {
|
|||||||
RPCAnswerDetail::WatchValueA(d) => {
|
RPCAnswerDetail::WatchValueA(d) => {
|
||||||
d.encode(&mut builder.reborrow().init_watch_value_a())
|
d.encode(&mut builder.reborrow().init_watch_value_a())
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::SupplyBlockA(d) => {
|
RPCAnswerDetail::SupplyBlockA(d) => {
|
||||||
d.encode(&mut builder.reborrow().init_supply_block_a())
|
d.encode(&mut builder.reborrow().init_supply_block_a())
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
RPCAnswerDetail::FindBlockA(d) => d.encode(&mut builder.reborrow().init_find_block_a()),
|
RPCAnswerDetail::FindBlockA(d) => d.encode(&mut builder.reborrow().init_find_block_a()),
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::StartTunnelA(d) => {
|
RPCAnswerDetail::StartTunnelA(d) => {
|
||||||
d.encode(&mut builder.reborrow().init_start_tunnel_a())
|
d.encode(&mut builder.reborrow().init_start_tunnel_a())
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CompleteTunnelA(d) => {
|
RPCAnswerDetail::CompleteTunnelA(d) => {
|
||||||
d.encode(&mut builder.reborrow().init_complete_tunnel_a())
|
d.encode(&mut builder.reborrow().init_complete_tunnel_a())
|
||||||
}
|
}
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
RPCAnswerDetail::CancelTunnelA(d) => {
|
RPCAnswerDetail::CancelTunnelA(d) => {
|
||||||
d.encode(&mut builder.reborrow().init_cancel_tunnel_a())
|
d.encode(&mut builder.reborrow().init_cancel_tunnel_a())
|
||||||
}
|
}
|
||||||
|
@ -2,18 +2,14 @@ mod answer;
|
|||||||
mod operation;
|
mod operation;
|
||||||
mod operation_app_call;
|
mod operation_app_call;
|
||||||
mod operation_app_message;
|
mod operation_app_message;
|
||||||
mod operation_cancel_tunnel;
|
|
||||||
mod operation_complete_tunnel;
|
|
||||||
mod operation_find_block;
|
|
||||||
mod operation_find_node;
|
mod operation_find_node;
|
||||||
mod operation_get_value;
|
mod operation_get_value;
|
||||||
mod operation_return_receipt;
|
mod operation_return_receipt;
|
||||||
mod operation_route;
|
mod operation_route;
|
||||||
mod operation_set_value;
|
mod operation_set_value;
|
||||||
mod operation_signal;
|
mod operation_signal;
|
||||||
mod operation_start_tunnel;
|
|
||||||
mod operation_status;
|
mod operation_status;
|
||||||
mod operation_supply_block;
|
|
||||||
mod operation_validate_dial_info;
|
mod operation_validate_dial_info;
|
||||||
mod operation_value_changed;
|
mod operation_value_changed;
|
||||||
mod operation_watch_value;
|
mod operation_watch_value;
|
||||||
@ -21,22 +17,29 @@ mod question;
|
|||||||
mod respond_to;
|
mod respond_to;
|
||||||
mod statement;
|
mod statement;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
|
mod operation_find_block;
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
|
mod operation_supply_block;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
mod operation_cancel_tunnel;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
mod operation_complete_tunnel;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
mod operation_start_tunnel;
|
||||||
|
|
||||||
pub use answer::*;
|
pub use answer::*;
|
||||||
pub use operation::*;
|
pub use operation::*;
|
||||||
pub use operation_app_call::*;
|
pub use operation_app_call::*;
|
||||||
pub use operation_app_message::*;
|
pub use operation_app_message::*;
|
||||||
pub use operation_cancel_tunnel::*;
|
|
||||||
pub use operation_complete_tunnel::*;
|
|
||||||
pub use operation_find_block::*;
|
|
||||||
pub use operation_find_node::*;
|
pub use operation_find_node::*;
|
||||||
pub use operation_get_value::*;
|
pub use operation_get_value::*;
|
||||||
pub use operation_return_receipt::*;
|
pub use operation_return_receipt::*;
|
||||||
pub use operation_route::*;
|
pub use operation_route::*;
|
||||||
pub use operation_set_value::*;
|
pub use operation_set_value::*;
|
||||||
pub use operation_signal::*;
|
pub use operation_signal::*;
|
||||||
pub use operation_start_tunnel::*;
|
|
||||||
pub use operation_status::*;
|
pub use operation_status::*;
|
||||||
pub use operation_supply_block::*;
|
|
||||||
pub use operation_validate_dial_info::*;
|
pub use operation_validate_dial_info::*;
|
||||||
pub use operation_value_changed::*;
|
pub use operation_value_changed::*;
|
||||||
pub use operation_watch_value::*;
|
pub use operation_watch_value::*;
|
||||||
@ -44,4 +47,16 @@ pub use question::*;
|
|||||||
pub use respond_to::*;
|
pub use respond_to::*;
|
||||||
pub use statement::*;
|
pub use statement::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
|
pub use operation_find_block::*;
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
|
pub use operation_supply_block::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
pub use operation_cancel_tunnel::*;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
pub use operation_complete_tunnel::*;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
|
pub use operation_start_tunnel::*;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct RPCOperationCancelTunnelQ {
|
pub struct RPCOperationCancelTunnelQ {
|
||||||
id: TunnelId,
|
id: TunnelId,
|
||||||
@ -37,6 +38,7 @@ impl RPCOperationCancelTunnelQ {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum RPCOperationCancelTunnelA {
|
pub enum RPCOperationCancelTunnelA {
|
||||||
Tunnel(TunnelId),
|
Tunnel(TunnelId),
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct RPCOperationCompleteTunnelQ {
|
pub struct RPCOperationCompleteTunnelQ {
|
||||||
id: TunnelId,
|
id: TunnelId,
|
||||||
@ -74,6 +75,7 @@ impl RPCOperationCompleteTunnelQ {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum RPCOperationCompleteTunnelA {
|
pub enum RPCOperationCompleteTunnelA {
|
||||||
Tunnel(FullTunnel),
|
Tunnel(FullTunnel),
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct RPCOperationStartTunnelQ {
|
pub struct RPCOperationStartTunnelQ {
|
||||||
id: TunnelId,
|
id: TunnelId,
|
||||||
@ -64,6 +65,7 @@ impl RPCOperationStartTunnelQ {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum RPCOperationStartTunnelA {
|
pub enum RPCOperationStartTunnelA {
|
||||||
Partial(PartialTunnel),
|
Partial(PartialTunnel),
|
||||||
|
@ -49,10 +49,15 @@ pub enum RPCQuestionDetail {
 GetValueQ(RPCOperationGetValueQ),
 SetValueQ(RPCOperationSetValueQ),
 WatchValueQ(RPCOperationWatchValueQ),
+#[cfg(feature = "unstable-blockstore")]
 SupplyBlockQ(RPCOperationSupplyBlockQ),
+#[cfg(feature = "unstable-blockstore")]
 FindBlockQ(RPCOperationFindBlockQ),
+#[cfg(feature = "unstable-tunnels")]
 StartTunnelQ(RPCOperationStartTunnelQ),
+#[cfg(feature = "unstable-tunnels")]
 CompleteTunnelQ(RPCOperationCompleteTunnelQ),
+#[cfg(feature = "unstable-tunnels")]
 CancelTunnelQ(RPCOperationCancelTunnelQ),
 }

@ -65,10 +70,15 @@ impl RPCQuestionDetail {
 RPCQuestionDetail::GetValueQ(_) => "GetValueQ",
 RPCQuestionDetail::SetValueQ(_) => "SetValueQ",
 RPCQuestionDetail::WatchValueQ(_) => "WatchValueQ",
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::SupplyBlockQ(_) => "SupplyBlockQ",
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::FindBlockQ(_) => "FindBlockQ",
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::StartTunnelQ(_) => "StartTunnelQ",
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CompleteTunnelQ(_) => "CompleteTunnelQ",
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CancelTunnelQ(_) => "CancelTunnelQ",
 }
 }
@ -80,10 +90,15 @@ impl RPCQuestionDetail {
 RPCQuestionDetail::GetValueQ(r) => r.validate(validate_context),
 RPCQuestionDetail::SetValueQ(r) => r.validate(validate_context),
 RPCQuestionDetail::WatchValueQ(r) => r.validate(validate_context),
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::SupplyBlockQ(r) => r.validate(validate_context),
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::FindBlockQ(r) => r.validate(validate_context),
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::StartTunnelQ(r) => r.validate(validate_context),
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CompleteTunnelQ(r) => r.validate(validate_context),
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CancelTunnelQ(r) => r.validate(validate_context),
 }
 }
@ -123,26 +138,31 @@ impl RPCQuestionDetail {
 let out = RPCOperationWatchValueQ::decode(&op_reader)?;
 RPCQuestionDetail::WatchValueQ(out)
 }
+#[cfg(feature = "unstable-blockstore")]
 veilid_capnp::question::detail::SupplyBlockQ(r) => {
 let op_reader = r.map_err(RPCError::protocol)?;
 let out = RPCOperationSupplyBlockQ::decode(&op_reader)?;
 RPCQuestionDetail::SupplyBlockQ(out)
 }
+#[cfg(feature = "unstable-blockstore")]
 veilid_capnp::question::detail::FindBlockQ(r) => {
 let op_reader = r.map_err(RPCError::protocol)?;
 let out = RPCOperationFindBlockQ::decode(&op_reader)?;
 RPCQuestionDetail::FindBlockQ(out)
 }
+#[cfg(feature = "unstable-tunnels")]
 veilid_capnp::question::detail::StartTunnelQ(r) => {
 let op_reader = r.map_err(RPCError::protocol)?;
 let out = RPCOperationStartTunnelQ::decode(&op_reader)?;
 RPCQuestionDetail::StartTunnelQ(out)
 }
+#[cfg(feature = "unstable-tunnels")]
 veilid_capnp::question::detail::CompleteTunnelQ(r) => {
 let op_reader = r.map_err(RPCError::protocol)?;
 let out = RPCOperationCompleteTunnelQ::decode(&op_reader)?;
 RPCQuestionDetail::CompleteTunnelQ(out)
 }
+#[cfg(feature = "unstable-tunnels")]
 veilid_capnp::question::detail::CancelTunnelQ(r) => {
 let op_reader = r.map_err(RPCError::protocol)?;
 let out = RPCOperationCancelTunnelQ::decode(&op_reader)?;
@ -164,18 +184,23 @@ impl RPCQuestionDetail {
 RPCQuestionDetail::WatchValueQ(d) => {
 d.encode(&mut builder.reborrow().init_watch_value_q())
 }
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::SupplyBlockQ(d) => {
 d.encode(&mut builder.reborrow().init_supply_block_q())
 }
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::FindBlockQ(d) => {
 d.encode(&mut builder.reborrow().init_find_block_q())
 }
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::StartTunnelQ(d) => {
 d.encode(&mut builder.reborrow().init_start_tunnel_q())
 }
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CompleteTunnelQ(d) => {
 d.encode(&mut builder.reborrow().init_complete_tunnel_q())
 }
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CancelTunnelQ(d) => {
 d.encode(&mut builder.reborrow().init_cancel_tunnel_q())
 }
@ -312,7 +312,12 @@ impl RPCProcessor {
 NetworkResult::value(Destination::direct(peer_noderef))
 } else {
 // Look up the sender node, we should have added it via senderNodeInfo before getting here.
-if let Some(sender_noderef) = self.routing_table.lookup_node_ref(sender_node_id) {
+let res = match self.routing_table.lookup_node_ref(sender_node_id) {
+Ok(v) => v,
+Err(e) => return NetworkResult::invalid_message(
+format!("failed to look up node info for respond to: {}", e)
+)};
+if let Some(sender_noderef) = res {
 NetworkResult::value(Destination::relay(peer_noderef, sender_noderef))
 } else {
 return NetworkResult::invalid_message(
@ -4,23 +4,30 @@ mod fanout_call;
 mod operation_waiter;
 mod rpc_app_call;
 mod rpc_app_message;
-mod rpc_cancel_tunnel;
-mod rpc_complete_tunnel;
 mod rpc_error;
-mod rpc_find_block;
 mod rpc_find_node;
 mod rpc_get_value;
 mod rpc_return_receipt;
 mod rpc_route;
 mod rpc_set_value;
 mod rpc_signal;
-mod rpc_start_tunnel;
 mod rpc_status;
-mod rpc_supply_block;
 mod rpc_validate_dial_info;
 mod rpc_value_changed;
 mod rpc_watch_value;

+#[cfg(feature = "unstable-blockstore")]
+mod rpc_find_block;
+#[cfg(feature = "unstable-blockstore")]
+mod rpc_supply_block;
+
+#[cfg(feature = "unstable-tunnels")]
+mod rpc_cancel_tunnel;
+#[cfg(feature = "unstable-tunnels")]
+mod rpc_complete_tunnel;
+#[cfg(feature = "unstable-tunnels")]
+mod rpc_start_tunnel;
+
 pub use coders::*;
 pub use destination::*;
 pub use fanout_call::*;
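Aside: the relocation above follows the standard pattern of gating both a module declaration and every use of it behind the same Cargo feature. A minimal, self-contained sketch of that pattern (hypothetical `tunnel` module and `open` function, not code from this repository):

// Illustrative sketch only: a feature-gated module plus an equally gated call site,
// so the crate builds whether or not the feature is enabled.
#[cfg(feature = "unstable-tunnels")]
mod tunnel {
    // Hypothetical helper, compiled only when the feature is enabled.
    pub fn open() -> u64 {
        42
    }
}

fn main() {
    #[cfg(feature = "unstable-tunnels")]
    {
        let _id = tunnel::open();
    }
}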
@ -473,7 +480,10 @@ impl RPCProcessor {
 let routing_table = this.routing_table();

 // First see if we have the node in our routing table already
-if let Some(nr) = routing_table.lookup_node_ref(node_id) {
+if let Some(nr) = routing_table
+.lookup_node_ref(node_id)
+.map_err(RPCError::internal)?
+{
 // ensure we have some dial info for the entry already,
 // if not, we should do the find_node anyway
 if nr.has_any_dial_info() {
@ -837,6 +847,10 @@ impl RPCProcessor {
 // Record for node if this was not sent via a route
 if safety_route.is_none() && remote_private_route.is_none() {
 node_ref.stats_failed_to_send(send_ts, wants_answer);
+
+// Also clear the last_connections for the entry so we make a new connection next time
+node_ref.clear_last_connections();
+
 return;
 }

@ -865,6 +879,10 @@ impl RPCProcessor {
 // Record for node if this was not sent via a route
 if safety_route.is_none() && remote_private_route.is_none() {
 node_ref.stats_question_lost();
+
+// Also clear the last_connections for the entry so we make a new connection next time
+node_ref.clear_last_connections();
+
 return;
 }
 // Get route spec store
@ -1331,20 +1349,26 @@ impl RPCProcessor {

 // Sender PeerInfo was specified, update our routing table with it
 if !self.filter_node_info(routing_domain, sender_peer_info.signed_node_info()) {
-return Err(RPCError::invalid_format(
+return Ok(NetworkResult::invalid_message(
 "sender peerinfo has invalid peer scope",
 ));
 }
-opt_sender_nr = self.routing_table().register_node_with_peer_info(
+opt_sender_nr = match self.routing_table().register_node_with_peer_info(
 routing_domain,
 sender_peer_info.clone(),
 false,
-);
+) {
+Ok(v) => Some(v),
+Err(e) => return Ok(NetworkResult::invalid_message(e)),
+}
 }

 // look up sender node, in case it's different than our peer due to relaying
 if opt_sender_nr.is_none() {
-opt_sender_nr = self.routing_table().lookup_node_ref(sender_node_id)
+opt_sender_nr = match self.routing_table().lookup_node_ref(sender_node_id) {
+Ok(v) => v,
+Err(e) => return Ok(NetworkResult::invalid_message(e)),
+}
 }

 // Update the 'seen our node info' timestamp to determine if this node needs a
@ -1408,10 +1432,15 @@ impl RPCProcessor {
 RPCQuestionDetail::GetValueQ(_) => self.process_get_value_q(msg).await,
 RPCQuestionDetail::SetValueQ(_) => self.process_set_value_q(msg).await,
 RPCQuestionDetail::WatchValueQ(_) => self.process_watch_value_q(msg).await,
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::SupplyBlockQ(_) => self.process_supply_block_q(msg).await,
+#[cfg(feature = "unstable-blockstore")]
 RPCQuestionDetail::FindBlockQ(_) => self.process_find_block_q(msg).await,
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::StartTunnelQ(_) => self.process_start_tunnel_q(msg).await,
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CompleteTunnelQ(_) => self.process_complete_tunnel_q(msg).await,
+#[cfg(feature = "unstable-tunnels")]
 RPCQuestionDetail::CancelTunnelQ(_) => self.process_cancel_tunnel_q(msg).await,
 },
 RPCOperationKind::Statement(s) => match s.detail() {
@ -99,10 +99,14 @@ impl RPCProcessor {
 }

 /// Exposed to API for apps to return app call answers
-pub async fn app_call_reply(&self, id: OperationId, message: Vec<u8>) -> Result<(), RPCError> {
+pub async fn app_call_reply(
+&self,
+call_id: OperationId,
+message: Vec<u8>,
+) -> Result<(), RPCError> {
 self.unlocked_inner
 .waiting_app_call_table
-.complete_op_waiter(id, message)
+.complete_op_waiter(call_id, message)
 .await
 }
 }
@ -30,9 +30,19 @@ impl RPCProcessor {
 let routing_domain = match target.best_routing_domain() {
 Some(rd) => rd,
 None => {
+// Because this exits before calling 'question()',
+// a failure to find a routing domain constitutes a send failure
+let send_ts = get_aligned_timestamp();
+self.record_send_failure(
+RPCKind::Question,
+send_ts,
+target.clone(),
+None,
+None,
+);
 return Ok(NetworkResult::no_connection_other(
 "no routing domain for target",
-))
+));
 }
 };
 (Some(target.clone()), routing_domain)
@ -45,9 +55,26 @@ impl RPCProcessor {
 let routing_domain = match relay.best_routing_domain() {
 Some(rd) => rd,
 None => {
+// Because this exits before calling 'question()',
+// a failure to find a routing domain constitutes a send failure for both the target and its relay
+let send_ts = get_aligned_timestamp();
+self.record_send_failure(
+RPCKind::Question,
+send_ts,
+relay.clone(),
+None,
+None,
+);
+self.record_send_failure(
+RPCKind::Question,
+send_ts,
+target.clone(),
+None,
+None,
+);
 return Ok(NetworkResult::no_connection_other(
 "no routing domain for peer",
-))
+));
 }
 };
 (Some(target.clone()), routing_domain)
@ -28,8 +28,8 @@ const FLUSH_RECORD_STORES_INTERVAL_SECS: u32 = 1;
 struct StorageManagerUnlockedInner {
 config: VeilidConfig,
 crypto: Crypto,
-protected_store: ProtectedStore,
 table_store: TableStore,
+#[cfg(feature = "unstable-blockstore")]
 block_store: BlockStore,

 // Background processes
@ -46,15 +46,14 @@ impl StorageManager {
 fn new_unlocked_inner(
 config: VeilidConfig,
 crypto: Crypto,
-protected_store: ProtectedStore,
 table_store: TableStore,
-block_store: BlockStore,
+#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
 ) -> StorageManagerUnlockedInner {
 StorageManagerUnlockedInner {
 config,
 crypto,
-protected_store,
 table_store,
+#[cfg(feature = "unstable-blockstore")]
 block_store,
 flush_record_stores_task: TickTask::new(FLUSH_RECORD_STORES_INTERVAL_SECS),
 }
@ -66,15 +65,14 @@ impl StorageManager {
 pub fn new(
 config: VeilidConfig,
 crypto: Crypto,
-protected_store: ProtectedStore,
 table_store: TableStore,
-block_store: BlockStore,
+#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
 ) -> StorageManager {
 let unlocked_inner = Arc::new(Self::new_unlocked_inner(
 config,
 crypto,
-protected_store,
 table_store,
+#[cfg(feature = "unstable-blockstore")]
 block_store,
 ));
 let this = StorageManager {
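Aside: `#[cfg(...)]` on a single function parameter, as used for `block_store` above, strips the parameter entirely when the feature is disabled, so call sites have to be gated the same way. A small standalone sketch of the idea (hypothetical names, not Veilid code):

// Illustrative sketch of a cfg-gated function parameter.
struct Store;

fn new_manager(
    config: u32,
    #[cfg(feature = "unstable-blockstore")] block_store: Store,
) -> u32 {
    // Only touch the gated parameter under the same gate.
    #[cfg(feature = "unstable-blockstore")]
    let _ = block_store;
    config
}

fn main() {
    // With the feature off the call passes only `config`; with it on, callers add the extra argument.
    #[cfg(not(feature = "unstable-blockstore"))]
    let _m = new_manager(1);
    #[cfg(feature = "unstable-blockstore")]
    let _m = new_manager(1, Store);
}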
@ -70,6 +70,7 @@ impl VeilidAPI {
 }
 Err(VeilidAPIError::not_initialized())
 }
+#[cfg(feature = "unstable-blockstore")]
 pub fn block_store(&self) -> VeilidAPIResult<BlockStore> {
 let inner = self.inner.lock();
 if let Some(context) = &inner.context {
@ -257,10 +258,14 @@ impl VeilidAPI {
 // App Calls

 #[instrument(level = "debug", skip(self))]
-pub async fn app_call_reply(&self, id: OperationId, message: Vec<u8>) -> VeilidAPIResult<()> {
+pub async fn app_call_reply(
+&self,
+call_id: OperationId,
+message: Vec<u8>,
+) -> VeilidAPIResult<()> {
 let rpc_processor = self.rpc_processor()?;
 rpc_processor
-.app_call_reply(id, message)
+.app_call_reply(call_id, message)
 .await
 .map_err(|e| e.into())
 }
@ -268,6 +273,7 @@ impl VeilidAPI {
 ////////////////////////////////////////////////////////////////
 // Tunnel Building

+#[cfg(feature = "unstable-tunnels")]
 #[instrument(level = "debug", err, skip(self))]
 pub async fn start_tunnel(
 &self,
@ -277,6 +283,7 @@ impl VeilidAPI {
 panic!("unimplemented");
 }

+#[cfg(feature = "unstable-tunnels")]
 #[instrument(level = "debug", err, skip(self))]
 pub async fn complete_tunnel(
 &self,
@ -287,6 +294,7 @@ impl VeilidAPI {
 panic!("unimplemented");
 }

+#[cfg(feature = "unstable-tunnels")]
 #[instrument(level = "debug", err, skip(self))]
 pub async fn cancel_tunnel(&self, _tunnel_id: TunnelId) -> VeilidAPIResult<bool> {
 panic!("unimplemented");
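Aside: with the parameter renamed to `call_id`, an application answers an incoming app call by echoing that id back through `app_call_reply`. A hedged sketch of the flow; the `VeilidUpdate::AppCall` variant and its `id()`/`message()` accessors are assumptions here, not taken from this diff:

// Sketch only; exact update and accessor names are assumed.
async fn handle_update(api: veilid_core::VeilidAPI, update: veilid_core::VeilidUpdate) {
    if let veilid_core::VeilidUpdate::AppCall(call) = update {
        // Reply by echoing the caller's payload back to the same call id.
        let reply = call.message().to_vec();
        if let Err(e) = api.app_call_reply(call.id(), reply).await {
            eprintln!("app_call_reply failed: {}", e);
        }
    }
}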
@ -31,17 +31,23 @@ fn get_string(text: &str) -> Option<String> {
 Some(text.to_owned())
 }

-fn get_route_id(rss: RouteSpecStore, allow_remote: bool) -> impl Fn(&str) -> Option<RouteId> {
+fn get_route_id(
+rss: RouteSpecStore,
+allow_allocated: bool,
+allow_remote: bool,
+) -> impl Fn(&str) -> Option<RouteId> {
 return move |text: &str| {
 if text.is_empty() {
 return None;
 }
 match RouteId::from_str(text).ok() {
 Some(key) => {
+if allow_allocated {
 let routes = rss.list_allocated_routes(|k, _| Some(*k));
 if routes.contains(&key) {
 return Some(key);
 }
+}
 if allow_remote {
 let rroutes = rss.list_remote_routes(|k, _| Some(*k));
 if rroutes.contains(&key) {
@ -50,6 +56,7 @@ fn get_route_id(rss: RouteSpecStore, allow_remote: bool) -> impl Fn(&str) -> Opt
 }
 }
 None => {
+if allow_allocated {
 let routes = rss.list_allocated_routes(|k, _| Some(*k));
 for r in routes {
 let rkey = r.encode();
@ -57,6 +64,7 @@ fn get_route_id(rss: RouteSpecStore, allow_remote: bool) -> impl Fn(&str) -> Opt
 return Some(r);
 }
 }
+}
 if allow_remote {
 let routes = rss.list_remote_routes(|k, _| Some(*k));
 for r in routes {
@ -90,7 +98,7 @@ fn get_safety_selection(text: &str, routing_table: RoutingTable) -> Option<Safet
 let mut sequencing = Sequencing::default();
 for x in text.split(",") {
 let x = x.trim();
-if let Some(pr) = get_route_id(rss.clone(), false)(x) {
+if let Some(pr) = get_route_id(rss.clone(), true, false)(x) {
 preferred_route = Some(pr)
 }
 if let Some(n) = get_number(x) {
@ -143,19 +151,29 @@ fn get_destination(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<D
 return None;
 }
 if &text[0..1] == "#" {
+let rss = routing_table.route_spec_store();
+
 // Private route
 let text = &text[1..];
-let n = get_number(text)?;
-let mut dc = DEBUG_CACHE.lock();
-let private_route_id = dc.imported_routes.get(n)?.clone();

-let rss = routing_table.route_spec_store();
+let private_route = if let Some(prid) = get_route_id(rss.clone(), false, true)(text) {
-let Some(private_route) = rss.best_remote_private_route(&private_route_id) else {
+let Some(private_route) = rss.best_remote_private_route(&prid) else {
+return None;
+};
+private_route
+} else {
+let mut dc = DEBUG_CACHE.lock();
+let n = get_number(text)?;
+let prid = dc.imported_routes.get(n)?.clone();
+let Some(private_route) = rss.best_remote_private_route(&prid) else {
 // Remove imported route
 dc.imported_routes.remove(n);
 info!("removed dead imported route {}", n);
 return None;
 };
+private_route
+};
+
 Some(Destination::private_route(
 private_route,
 ss.unwrap_or(SafetySelection::Unsafe(Sequencing::default())),
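Aside: `get_route_id` still returns a parser closure; the two booleans just choose which route tables a string may match. A minimal usage sketch based only on the signature shown above (the id string is a hypothetical placeholder):

// Assumes `rss: RouteSpecStore` is in scope, as in the surrounding debug helpers.
let parse_any_route = get_route_id(rss.clone(), true, true);  // allocated or remote routes
let parse_allocated = get_route_id(rss.clone(), true, false); // allocated routes only
if let Some(route_id) = parse_any_route("<route id string>") {
    info!("parsed route {:?}", route_id);
}
let _ = parse_allocated;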
@ -217,9 +235,9 @@ fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<Node
 .unwrap_or((text, None));

 let mut nr = if let Some(key) = get_public_key(text) {
-routing_table.lookup_any_node_ref(key)?
+routing_table.lookup_any_node_ref(key).ok().flatten()?
 } else if let Some(key) = get_typed_key(text) {
-routing_table.lookup_node_ref(key)?
+routing_table.lookup_node_ref(key).ok().flatten()?
 } else {
 return None;
 };
@ -663,7 +681,7 @@ impl VeilidAPI {
 1,
 "debug_route",
 "route_id",
-get_route_id(rss.clone(), true),
+get_route_id(rss.clone(), true, true),
 )?;

 // Release route
@ -695,7 +713,7 @@ impl VeilidAPI {
 1,
 "debug_route",
 "route_id",
-get_route_id(rss.clone(), false),
+get_route_id(rss.clone(), true, false),
 )?;
 let full = {
 if args.len() > 2 {
@ -747,7 +765,7 @@ impl VeilidAPI {
 1,
 "debug_route",
 "route_id",
-get_route_id(rss.clone(), false),
+get_route_id(rss.clone(), true, false),
 )?;

 // Unpublish route
@ -769,7 +787,7 @@ impl VeilidAPI {
 1,
 "debug_route",
 "route_id",
-get_route_id(rss.clone(), true),
+get_route_id(rss.clone(), true, true),
 )?;

 match rss.debug_route(&route_id) {
@ -831,7 +849,7 @@ impl VeilidAPI {
 1,
 "debug_route",
 "route_id",
-get_route_id(rss.clone(), true),
+get_route_id(rss.clone(), true, true),
 )?;

 let success = rss
@ -916,7 +934,7 @@ impl VeilidAPI {
 entry <node>
 nodeinfo
 config [key [new value]]
-purge <buckets|connections>
+purge <buckets|connections|routes>
 attach
 detach
 restart network
@ -117,6 +117,7 @@ macro_rules! apibail_already_initialized {
 RkyvArchive,
 RkyvSerialize,
 RkyvDeserialize,
+JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 #[serde(tag = "kind")]
@ -136,7 +137,10 @@ pub enum VeilidAPIError {
 #[error("No connection: {message}")]
 NoConnection { message: String },
 #[error("Key not found: {key}")]
-KeyNotFound { key: TypedKey },
+KeyNotFound {
+#[schemars(with="String")]
+key: TypedKey
+},
 #[error("Internal: {message}")]
 Internal { message: String },
 #[error("Unimplemented: {message}")]
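Aside: the `JsonSchema` derive plus `#[schemars(with = "String")]` is what lets key types that carry no schema of their own show up as plain strings in the generated JSON schema. A standalone sketch of the same pattern (hypothetical `DemoError` type, assuming the `schemars`, `serde`, and `serde_json` crates):

use schemars::{schema_for, JsonSchema};
use serde::{Deserialize, Serialize};

// Stand-in for a key type that does not implement JsonSchema itself.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DemoKey(String);

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "kind")]
enum DemoError {
    KeyNotFound {
        // Described as a plain string in the schema, mirroring the change above.
        #[schemars(with = "String")]
        key: DemoKey,
    },
}

fn main() {
    let schema = schema_for!(DemoError);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}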
228
veilid-core/src/veilid_api/json_api/crypto_system.rs
Normal file
@ -0,0 +1,228 @@
use super::*;

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct CryptoSystemRequest {
    pub cs_id: u32,
    #[serde(flatten)]
    pub cs_op: CryptoSystemRequestOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct CryptoSystemResponse {
    pub cs_id: u32,
    #[serde(flatten)]
    pub cs_op: CryptoSystemResponseOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "cs_op")]
pub enum CryptoSystemRequestOp {
    Release,
    CachedDh {
        #[schemars(with = "String")]
        key: PublicKey,
        #[schemars(with = "String")]
        secret: SecretKey,
    },
    ComputeDh {
        #[schemars(with = "String")]
        key: PublicKey,
        #[schemars(with = "String")]
        secret: SecretKey,
    },
    RandomBytes {
        len: u32,
    },
    DefaultSaltLength,
    HashPassword {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        password: Vec<u8>,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        salt: Vec<u8>,
    },
    VerifyPassword {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        password: Vec<u8>,
        password_hash: String,
    },
    DeriveSharedSecret {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        password: Vec<u8>,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        salt: Vec<u8>,
    },
    RandomNonce,
    RandomSharedSecret,
    GenerateKeyPair,
    GenerateHash {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
    },
    ValidateKeyPair {
        #[schemars(with = "String")]
        key: PublicKey,
        #[schemars(with = "String")]
        secret: SecretKey,
    },
    ValidateHash {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
        #[schemars(with = "String")]
        hash_digest: HashDigest,
    },
    Distance {
        #[schemars(with = "String")]
        key1: CryptoKey,
        #[schemars(with = "String")]
        key2: CryptoKey,
    },
    Sign {
        #[schemars(with = "String")]
        key: PublicKey,
        #[schemars(with = "String")]
        secret: SecretKey,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
    },
    Verify {
        #[schemars(with = "String")]
        key: PublicKey,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
        #[schemars(with = "String")]
        secret: Signature,
    },
    AeadOverhead,
    DecryptAead {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        body: Vec<u8>,
        #[schemars(with = "String")]
        nonce: Nonce,
        #[schemars(with = "String")]
        shared_secret: SharedSecret,
        #[serde(with = "opt_json_as_base64")]
        #[schemars(with = "Option<String>")]
        associated_data: Option<Vec<u8>>,
    },
    EncryptAead {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        body: Vec<u8>,
        #[schemars(with = "String")]
        nonce: Nonce,
        #[schemars(with = "String")]
        shared_secret: SharedSecret,
        #[serde(with = "opt_json_as_base64")]
        #[schemars(with = "Option<String>")]
        associated_data: Option<Vec<u8>>,
    },
    CryptNoAuth {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        body: Vec<u8>,
        #[schemars(with = "String")]
        nonce: Nonce,
        #[schemars(with = "String")]
        shared_secret: SharedSecret,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "cs_op")]
pub enum CryptoSystemResponseOp {
    InvalidId,
    Release,
    CachedDh {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<SharedSecret>,
    },
    ComputeDh {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<SharedSecret>,
    },
    RandomBytes {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        value: Vec<u8>,
    },
    DefaultSaltLength {
        value: u32,
    },
    HashPassword {
        #[serde(flatten)]
        result: ApiResult<String>,
    },
    VerifyPassword {
        #[serde(flatten)]
        result: ApiResult<bool>,
    },
    DeriveSharedSecret {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<SharedSecret>,
    },
    RandomNonce {
        #[schemars(with = "String")]
        value: Nonce,
    },
    RandomSharedSecret {
        #[schemars(with = "String")]
        value: SharedSecret,
    },
    GenerateKeyPair {
        #[schemars(with = "String")]
        value: KeyPair,
    },
    GenerateHash {
        #[schemars(with = "String")]
        value: HashDigest,
    },
    ValidateKeyPair {
        value: bool,
    },
    ValidateHash {
        value: bool,
    },
    Distance {
        #[schemars(with = "String")]
        value: CryptoKeyDistance,
    },
    Sign {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<Signature>,
    },
    Verify {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    AeadOverhead {
        value: u32,
    },
    DecryptAead {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithVecU8,
    },
    EncryptAead {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithVecU8,
    },
    CryptNoAuth {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        value: Vec<u8>,
    },
}
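Aside: because `cs_op` is both flattened and used as the serde tag, each crypto-system request travels as one flat JSON object. A small sketch of round-tripping one of the simpler variants, assuming the types above are in scope along with `serde_json`:

fn demo_crypto_request() {
    // {"cs_id":1,"cs_op":"GenerateKeyPair"} selects the GenerateKeyPair variant.
    let json = r#"{ "cs_id": 1, "cs_op": "GenerateKeyPair" }"#;
    let req: CryptoSystemRequest = serde_json::from_str(json).unwrap();
    assert_eq!(req.cs_id, 1);
    assert!(matches!(req.cs_op, CryptoSystemRequestOp::GenerateKeyPair));
}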
319
veilid-core/src/veilid_api/json_api/mod.rs
Normal file
@ -0,0 +1,319 @@
use super::*;

mod routing_context;
pub use routing_context::*;

mod table_db;
pub use table_db::*;

mod crypto_system;
pub use crypto_system::*;

mod process;
pub use process::*;

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct Request {
    /// Operation Id (pairs with Response, or empty if unidirectional)
    #[serde(default)]
    pub id: u32,
    /// The request operation variant
    #[serde(flatten)]
    pub op: RequestOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type")]
pub enum RecvMessage {
    Response(Response),
    Update(VeilidUpdate),
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct Response {
    /// Operation Id (pairs with Request, or empty if unidirectional)
    #[serde(default)]
    pub id: u32,
    /// The response operation variant
    #[serde(flatten)]
    pub op: ResponseOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "op")]
pub enum RequestOp {
    Control {
        args: Vec<String>,
    },
    GetState,
    Attach,
    Detach,
    NewPrivateRoute,
    NewCustomPrivateRoute {
        #[schemars(with = "Vec<String>")]
        kinds: Vec<CryptoKind>,
        #[serde(default)]
        stability: Stability,
        #[serde(default)]
        sequencing: Sequencing,
    },
    ImportRemotePrivateRoute {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        blob: Vec<u8>,
    },
    ReleasePrivateRoute {
        #[schemars(with = "String")]
        route_id: RouteId,
    },
    AppCallReply {
        #[schemars(with = "String")]
        call_id: OperationId,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        message: Vec<u8>,
    },
    // Routing Context
    NewRoutingContext,
    RoutingContext(RoutingContextRequest),
    // TableDb
    OpenTableDb {
        name: String,
        column_count: u32,
    },
    DeleteTableDb {
        name: String,
    },
    TableDb(TableDbRequest),
    TableDbTransaction(TableDbTransactionRequest),
    // Crypto
    GetCryptoSystem {
        #[schemars(with = "String")]
        kind: CryptoKind,
    },
    BestCryptoSystem,
    CryptoSystem(CryptoSystemRequest),
    VerifySignatures {
        #[schemars(with = "Vec<String>")]
        node_ids: Vec<TypedKey>,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
        #[schemars(with = "Vec<String>")]
        signatures: Vec<TypedSignature>,
    },
    GenerateSignatures {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
        #[schemars(with = "Vec<String>")]
        key_pairs: Vec<TypedKeyPair>,
    },
    GenerateKeyPair {
        #[schemars(with = "String")]
        kind: CryptoKind,
    },
    // Misc
    Now,
    Debug {
        command: String,
    },
    VeilidVersionString,
    VeilidVersion,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct NewPrivateRouteResult {
    #[schemars(with = "String")]
    route_id: RouteId,
    #[serde(with = "json_as_base64")]
    #[schemars(with = "String")]
    blob: Vec<u8>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "op")]
pub enum ResponseOp {
    Control {
        #[serde(flatten)]
        result: ApiResult<String>,
    },
    GetState {
        #[serde(flatten)]
        result: ApiResult<VeilidState>,
    },
    Attach {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    Detach {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    NewPrivateRoute {
        #[serde(flatten)]
        result: ApiResult<NewPrivateRouteResult>,
    },
    NewCustomPrivateRoute {
        #[serde(flatten)]
        result: ApiResult<NewPrivateRouteResult>,
    },
    ImportRemotePrivateRoute {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<RouteId>,
    },
    ReleasePrivateRoute {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    AppCallReply {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    // Routing Context
    NewRoutingContext {
        value: u32,
    },
    RoutingContext(RoutingContextResponse),
    // TableDb
    OpenTableDb {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    DeleteTableDb {
        #[serde(flatten)]
        result: ApiResult<bool>,
    },
    TableDb(TableDbResponse),
    TableDbTransaction(TableDbTransactionResponse),
    // Crypto
    GetCryptoSystem {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    BestCryptoSystem {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    CryptoSystem(CryptoSystemResponse),
    VerifySignatures {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<Vec<String>>")]
        result: ApiResultWithVecString<TypedKeySet>,
    },
    GenerateSignatures {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<Vec<String>>")]
        result: ApiResultWithVecString<Vec<TypedSignature>>,
    },
    GenerateKeyPair {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithString<TypedKeyPair>,
    },
    // Misc
    Now {
        #[schemars(with = "String")]
        value: Timestamp,
    },
    Debug {
        #[serde(flatten)]
        result: ApiResult<String>,
    },
    VeilidVersionString {
        value: String,
    },
    VeilidVersion {
        major: u32,
        minor: u32,
        patch: u32,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum ApiResult<T>
where
    T: Clone + fmt::Debug + JsonSchema,
{
    Ok { value: T },
    Err { error: VeilidAPIError },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum ApiResultWithString<T>
where
    T: Clone + fmt::Debug,
{
    Ok {
        #[schemars(with = "String")]
        value: T,
    },
    Err {
        error: VeilidAPIError,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum ApiResultWithVecU8 {
    Ok {
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        value: Vec<u8>,
    },
    Err {
        error: VeilidAPIError,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(transparent)]
pub struct VecU8 {
    #[serde(with = "json_as_base64")]
    #[schemars(with = "String")]
    value: Vec<u8>,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum ApiResultWithVecVecU8 {
    Ok {
        #[schemars(with = "Vec<String>")]
        value: Vec<VecU8>,
    },
    Err {
        error: VeilidAPIError,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum ApiResultWithVecString<T>
where
    T: Clone + fmt::Debug,
{
    Ok {
        #[schemars(with = "Vec<String>")]
        value: T,
    },
    Err {
        error: VeilidAPIError,
    },
}

pub fn emit_schemas(out: &mut HashMap<String, String>) {
    let schema_request = schema_for!(Request);
    let schema_recv_message = schema_for!(RecvMessage);

    out.insert(
        "Request".to_owned(),
        serde_json::to_string_pretty(&schema_request).unwrap(),
    );

    out.insert(
        "RecvMessage".to_owned(),
        serde_json::to_string_pretty(&schema_recv_message).unwrap(),
    );
}
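Aside: the envelope pairs a numeric `id` with a flattened, `op`-tagged operation, so requests and responses stay flat on the wire. A sketch of what one exchange could look like, grounded in the definitions above (the version numbers are illustrative values only):

fn demo_json_api_envelope() {
    // A request with no payload serializes to something like {"id":1,"op":"GetState"}.
    let req = Request { id: 1, op: RequestOp::GetState };
    println!("{}", serde_json::to_string(&req).unwrap());

    // A response carries the same id plus the flattened operation result.
    let resp: Response = serde_json::from_str(
        r#"{"id":1,"op":"VeilidVersion","major":0,"minor":1,"patch":0}"#,
    )
    .unwrap();
    assert!(matches!(resp.op, ResponseOp::VeilidVersion { .. }));
}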
774
veilid-core/src/veilid_api/json_api/process.rs
Normal file
774
veilid-core/src/veilid_api/json_api/process.rs
Normal file
@ -0,0 +1,774 @@
|
|||||||
|
use super::*;
|
||||||
|
use futures_util::FutureExt;
|
||||||
|
|
||||||
|
pub fn to_json_api_result<T: Clone + fmt::Debug + JsonSchema>(
|
||||||
|
r: VeilidAPIResult<T>,
|
||||||
|
) -> json_api::ApiResult<T> {
|
||||||
|
match r {
|
||||||
|
Err(e) => json_api::ApiResult::Err { error: e },
|
||||||
|
Ok(v) => json_api::ApiResult::Ok { value: v },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_json_api_result_with_string<T: Clone + fmt::Debug>(
|
||||||
|
r: VeilidAPIResult<T>,
|
||||||
|
) -> json_api::ApiResultWithString<T> {
|
||||||
|
match r {
|
||||||
|
Err(e) => json_api::ApiResultWithString::Err { error: e },
|
||||||
|
Ok(v) => json_api::ApiResultWithString::Ok { value: v },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_json_api_result_with_vec_string<T: Clone + fmt::Debug>(
|
||||||
|
r: VeilidAPIResult<T>,
|
||||||
|
) -> json_api::ApiResultWithVecString<T> {
|
||||||
|
match r {
|
||||||
|
Err(e) => json_api::ApiResultWithVecString::Err { error: e },
|
||||||
|
Ok(v) => json_api::ApiResultWithVecString::Ok { value: v },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_json_api_result_with_vec_u8(r: VeilidAPIResult<Vec<u8>>) -> json_api::ApiResultWithVecU8 {
|
||||||
|
match r {
|
||||||
|
Err(e) => json_api::ApiResultWithVecU8::Err { error: e },
|
||||||
|
Ok(v) => json_api::ApiResultWithVecU8::Ok { value: v },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_json_api_result_with_vec_vec_u8(
|
||||||
|
r: VeilidAPIResult<Vec<Vec<u8>>>,
|
||||||
|
) -> json_api::ApiResultWithVecVecU8 {
|
||||||
|
match r {
|
||||||
|
Err(e) => json_api::ApiResultWithVecVecU8::Err { error: e },
|
||||||
|
Ok(v) => json_api::ApiResultWithVecVecU8::Ok {
|
||||||
|
value: v.into_iter().map(|v| VecU8 { value: v }).collect(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct JsonRequestProcessorInner {
|
||||||
|
routing_contexts: BTreeMap<u32, RoutingContext>,
|
||||||
|
table_dbs: BTreeMap<u32, TableDB>,
|
||||||
|
table_db_transactions: BTreeMap<u32, TableDBTransaction>,
|
||||||
|
crypto_systems: BTreeMap<u32, CryptoSystemVersion>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct JsonRequestProcessor {
|
||||||
|
api: VeilidAPI,
|
||||||
|
inner: Arc<Mutex<JsonRequestProcessorInner>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl JsonRequestProcessor {
|
||||||
|
pub fn new(api: VeilidAPI) -> Self {
|
||||||
|
Self {
|
||||||
|
api,
|
||||||
|
inner: Arc::new(Mutex::new(JsonRequestProcessorInner {
|
||||||
|
routing_contexts: Default::default(),
|
||||||
|
table_dbs: Default::default(),
|
||||||
|
table_db_transactions: Default::default(),
|
||||||
|
crypto_systems: Default::default(),
|
||||||
|
})),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Routing Context
|
||||||
|
fn add_routing_context(&self, routing_context: RoutingContext) -> u32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
let mut next_id: u32 = 1;
|
||||||
|
while inner.routing_contexts.contains_key(&next_id) {
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
inner.routing_contexts.insert(next_id, routing_context);
|
||||||
|
next_id
|
||||||
|
}
|
||||||
|
fn lookup_routing_context(&self, id: u32, rc_id: u32) -> Result<RoutingContext, Response> {
|
||||||
|
let inner = self.inner.lock();
|
||||||
|
let Some(routing_context) = inner.routing_contexts.get(&rc_id).cloned() else {
|
||||||
|
return Err(Response {
|
||||||
|
id,
|
||||||
|
op: ResponseOp::RoutingContext(RoutingContextResponse {
|
||||||
|
rc_id,
|
||||||
|
rc_op: RoutingContextResponseOp::InvalidId
|
||||||
|
})
|
||||||
|
});
|
||||||
|
};
|
||||||
|
Ok(routing_context)
|
||||||
|
}
|
||||||
|
fn release_routing_context(&self, id: u32) -> i32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
if inner.routing_contexts.remove(&id).is_none() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableDB
|
||||||
|
fn add_table_db(&self, table_db: TableDB) -> u32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
let mut next_id: u32 = 1;
|
||||||
|
while inner.table_dbs.contains_key(&next_id) {
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
inner.table_dbs.insert(next_id, table_db);
|
||||||
|
next_id
|
||||||
|
}
|
||||||
|
fn lookup_table_db(&self, id: u32, db_id: u32) -> Result<TableDB, Response> {
|
||||||
|
let inner = self.inner.lock();
|
||||||
|
let Some(table_db) = inner.table_dbs.get(&db_id).cloned() else {
|
||||||
|
return Err(Response {
|
||||||
|
id,
|
||||||
|
op: ResponseOp::TableDb(TableDbResponse {
|
||||||
|
db_id,
|
||||||
|
db_op: TableDbResponseOp::InvalidId
|
||||||
|
})
|
||||||
|
});
|
||||||
|
};
|
||||||
|
Ok(table_db)
|
||||||
|
}
|
||||||
|
fn release_table_db(&self, id: u32) -> i32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
if inner.table_dbs.remove(&id).is_none() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableDBTransaction
|
||||||
|
fn add_table_db_transaction(&self, tdbt: TableDBTransaction) -> u32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
let mut next_id: u32 = 1;
|
||||||
|
while inner.table_db_transactions.contains_key(&next_id) {
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
inner.table_db_transactions.insert(next_id, tdbt);
|
||||||
|
next_id
|
||||||
|
}
|
||||||
|
fn lookup_table_db_transaction(
|
||||||
|
&self,
|
||||||
|
id: u32,
|
||||||
|
tx_id: u32,
|
||||||
|
) -> Result<TableDBTransaction, Response> {
|
||||||
|
let inner = self.inner.lock();
|
||||||
|
let Some(table_db_transaction) = inner.table_db_transactions.get(&tx_id).cloned() else {
|
||||||
|
return Err(Response {
|
||||||
|
id,
|
||||||
|
op: ResponseOp::TableDbTransaction(TableDbTransactionResponse {
|
||||||
|
tx_id,
|
||||||
|
tx_op: TableDbTransactionResponseOp::InvalidId
|
||||||
|
})
|
||||||
|
});
|
||||||
|
};
|
||||||
|
Ok(table_db_transaction)
|
||||||
|
}
|
||||||
|
fn release_table_db_transaction(&self, id: u32) -> i32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
if inner.table_db_transactions.remove(&id).is_none() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// CryptoSystem
|
||||||
|
fn add_crypto_system(&self, csv: CryptoSystemVersion) -> u32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
let mut next_id: u32 = 1;
|
||||||
|
while inner.crypto_systems.contains_key(&next_id) {
|
||||||
|
next_id += 1;
|
||||||
|
}
|
||||||
|
inner.crypto_systems.insert(next_id, csv);
|
||||||
|
next_id
|
||||||
|
}
|
||||||
|
fn lookup_crypto_system(&self, id: u32, cs_id: u32) -> Result<CryptoSystemVersion, Response> {
|
||||||
|
let inner = self.inner.lock();
|
||||||
|
let Some(crypto_system) = inner.crypto_systems.get(&cs_id).cloned() else {
|
||||||
|
return Err(Response {
|
||||||
|
id,
|
||||||
|
op: ResponseOp::CryptoSystem(CryptoSystemResponse {
|
||||||
|
cs_id,
|
||||||
|
cs_op: CryptoSystemResponseOp::InvalidId
|
||||||
|
})
|
||||||
|
});
|
||||||
|
};
|
||||||
|
Ok(crypto_system)
|
||||||
|
}
|
||||||
|
fn release_crypto_system(&self, id: u32) -> i32 {
|
||||||
|
let mut inner = self.inner.lock();
|
||||||
|
if inner.crypto_systems.remove(&id).is_none() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Target
|
||||||
|
|
||||||
|
// Parse target
|
||||||
|
async fn parse_target(&self, s: String) -> VeilidAPIResult<Target> {
|
||||||
|
// Is this a route id?
|
||||||
|
if let Ok(rrid) = RouteId::from_str(&s) {
|
||||||
|
let routing_table = self.api.routing_table()?;
|
||||||
|
let rss = routing_table.route_spec_store();
|
||||||
|
|
||||||
|
// Is this a valid remote route id? (can't target allocated routes)
|
||||||
|
if rss.is_route_id_remote(&rrid) {
|
||||||
|
return Ok(Target::PrivateRoute(rrid));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is this a node id?
|
||||||
|
if let Ok(nid) = TypedKey::from_str(&s) {
|
||||||
|
return Ok(Target::NodeId(nid));
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(VeilidAPIError::invalid_target())
|
||||||
|
}

    //////////////////////////////////////////////////////////////////////////////////////

    pub async fn process_routing_context_request(
        &self,
        routing_context: RoutingContext,
        rcr: RoutingContextRequest,
    ) -> RoutingContextResponse {
        let rc_op = match rcr.rc_op {
            RoutingContextRequestOp::Release => {
                self.release_routing_context(rcr.rc_id);
                RoutingContextResponseOp::Release {}
            }
            RoutingContextRequestOp::WithPrivacy => RoutingContextResponseOp::WithPrivacy {
                result: to_json_api_result(
                    routing_context
                        .clone()
                        .with_privacy()
                        .map(|new_rc| self.add_routing_context(new_rc)),
                ),
            },
            RoutingContextRequestOp::WithCustomPrivacy { stability } => {
                RoutingContextResponseOp::WithCustomPrivacy {
                    result: to_json_api_result(
                        routing_context
                            .clone()
                            .with_custom_privacy(stability)
                            .map(|new_rc| self.add_routing_context(new_rc)),
                    ),
                }
            }
            RoutingContextRequestOp::WithSequencing { sequencing } => {
                RoutingContextResponseOp::WithSequencing {
                    value: self
                        .add_routing_context(routing_context.clone().with_sequencing(sequencing)),
                }
            }
            RoutingContextRequestOp::AppCall { target, request } => {
                RoutingContextResponseOp::AppCall {
                    result: to_json_api_result_with_vec_u8(
                        self.parse_target(target)
                            .then(|tr| async { routing_context.app_call(tr?, request).await })
                            .await,
                    ),
                }
            }
            RoutingContextRequestOp::AppMessage { target, message } => {
                RoutingContextResponseOp::AppMessage {
                    result: to_json_api_result(
                        self.parse_target(target)
                            .then(|tr| async { routing_context.app_message(tr?, message).await })
                            .await,
                    ),
                }
            }
            RoutingContextRequestOp::CreateDhtRecord { kind, schema } => {
                RoutingContextResponseOp::CreateDhtRecord {
                    result: to_json_api_result(
                        routing_context.create_dht_record(kind, schema).await,
                    ),
                }
            }
            RoutingContextRequestOp::OpenDhtRecord { key, writer } => {
                RoutingContextResponseOp::OpenDhtRecord {
                    result: to_json_api_result(routing_context.open_dht_record(key, writer).await),
                }
            }
            RoutingContextRequestOp::CloseDhtRecord { key } => {
                RoutingContextResponseOp::CloseDhtRecord {
                    result: to_json_api_result(routing_context.close_dht_record(key).await),
                }
            }
            RoutingContextRequestOp::DeleteDhtRecord { key } => {
                RoutingContextResponseOp::DeleteDhtRecord {
                    result: to_json_api_result(routing_context.delete_dht_record(key).await),
                }
            }
            RoutingContextRequestOp::GetDhtValue {
                key,
                subkey,
                force_refresh,
            } => RoutingContextResponseOp::GetDhtValue {
                result: to_json_api_result(
                    routing_context
                        .get_dht_value(key, subkey, force_refresh)
                        .await,
                ),
            },
            RoutingContextRequestOp::SetDhtValue { key, subkey, data } => {
                RoutingContextResponseOp::SetDhtValue {
                    result: to_json_api_result(
                        routing_context.set_dht_value(key, subkey, data).await,
                    ),
                }
            }
            RoutingContextRequestOp::WatchDhtValues {
                key,
                subkeys,
                expiration,
                count,
            } => RoutingContextResponseOp::WatchDhtValues {
                result: to_json_api_result(
                    routing_context
                        .watch_dht_values(key, subkeys, expiration, count)
                        .await,
                ),
            },
            RoutingContextRequestOp::CancelDhtWatch { key, subkeys } => {
                RoutingContextResponseOp::CancelDhtWatch {
                    result: to_json_api_result(
                        routing_context.cancel_dht_watch(key, subkeys).await,
                    ),
                }
            }
        };
        RoutingContextResponse {
            rc_id: rcr.rc_id,
            rc_op,
        }
    }
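
    // Editorial sketch (not part of the diff), assuming serde_json is available:
    // the wire shape of one RoutingContextRequest handled above. `rc_id` is the
    // u32 routing-context handle, the op is selected by the internally tagged
    // `rc_op` field, and binary payloads travel as BASE64URL_NOPAD strings via
    // `json_as_base64`. The target value here is a placeholder, not a real id.
    fn example_app_message_request_json() {
        let req = RoutingContextRequest {
            rc_id: 1,
            rc_op: RoutingContextRequestOp::AppMessage {
                target: "VLD0:....".to_string(), // node id or imported route id string
                message: b"hello".to_vec(),
            },
        };
        println!("{}", serde_json::to_string(&req).unwrap());
        // Expected shape (key order may vary):
        //   {"rc_id":1,"rc_op":"AppMessage","target":"VLD0:....","message":"aGVsbG8"}
    }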

    pub async fn process_table_db_request(
        &self,
        table_db: TableDB,
        tdr: TableDbRequest,
    ) -> TableDbResponse {
        let db_op = match tdr.db_op {
            TableDbRequestOp::Release => {
                self.release_table_db(tdr.db_id);
                TableDbResponseOp::Release {}
            }
            TableDbRequestOp::GetColumnCount => TableDbResponseOp::GetColumnCount {
                result: to_json_api_result(table_db.get_column_count()),
            },
            TableDbRequestOp::GetKeys { col } => TableDbResponseOp::GetKeys {
                result: to_json_api_result_with_vec_vec_u8(table_db.get_keys(col).await),
            },
            TableDbRequestOp::Transact => TableDbResponseOp::Transact {
                value: self.add_table_db_transaction(table_db.transact()),
            },
            TableDbRequestOp::Store { col, key, value } => TableDbResponseOp::Store {
                result: to_json_api_result(table_db.store(col, &key, &value).await),
            },
            TableDbRequestOp::Load { col, key } => TableDbResponseOp::Load {
                result: to_json_api_result(
                    table_db
                        .load(col, &key)
                        .await
                        .map(|vopt| vopt.map(|v| VecU8 { value: v })),
                ),
            },
            TableDbRequestOp::Delete { col, key } => TableDbResponseOp::Delete {
                result: to_json_api_result(
                    table_db
                        .delete(col, &key)
                        .await
                        .map(|vopt| vopt.map(|v| VecU8 { value: v })),
                ),
            },
        };
        TableDbResponse {
            db_id: tdr.db_id,
            db_op,
        }
    }

    pub async fn process_table_db_transaction_request(
        &self,
        table_db_transaction: TableDBTransaction,
        tdtr: TableDbTransactionRequest,
    ) -> TableDbTransactionResponse {
        let tx_op = match tdtr.tx_op {
            TableDbTransactionRequestOp::Commit => TableDbTransactionResponseOp::Commit {
                result: to_json_api_result(table_db_transaction.commit().await.map(|_| {
                    self.release_table_db_transaction(tdtr.tx_id);
                })),
            },
            TableDbTransactionRequestOp::Rollback => {
                table_db_transaction.rollback();
                self.release_table_db_transaction(tdtr.tx_id);
                TableDbTransactionResponseOp::Rollback {}
            }
            TableDbTransactionRequestOp::Store { col, key, value } => {
                table_db_transaction.store(col, &key, &value);
                TableDbTransactionResponseOp::Store {}
            }
            TableDbTransactionRequestOp::Delete { col, key } => {
                table_db_transaction.delete(col, &key);
                TableDbTransactionResponseOp::Delete {}
            }
        };
        TableDbTransactionResponse {
            tx_id: tdtr.tx_id,
            tx_op,
        }
    }
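
    // Editorial sketch (not part of the diff), assuming serde_json is available:
    // the wire shape of a transaction commit against transaction handle 2 (the
    // u32 handles come from the TableDb Transact op above).
    fn example_commit_request_json() {
        let commit = TableDbTransactionRequest {
            tx_id: 2,
            tx_op: TableDbTransactionRequestOp::Commit,
        };
        // Expected shape (key order may vary): {"tx_id":2,"tx_op":"Commit"}
        println!("{}", serde_json::to_string(&commit).unwrap());
    }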

    pub async fn process_crypto_system_request(
        &self,
        csv: CryptoSystemVersion,
        csr: CryptoSystemRequest,
    ) -> CryptoSystemResponse {
        let cs_op = match csr.cs_op {
            CryptoSystemRequestOp::Release => {
                self.release_crypto_system(csr.cs_id);
                CryptoSystemResponseOp::Release {}
            }
            CryptoSystemRequestOp::CachedDh { key, secret } => CryptoSystemResponseOp::CachedDh {
                result: to_json_api_result_with_string(csv.cached_dh(&key, &secret)),
            },
            CryptoSystemRequestOp::ComputeDh { key, secret } => CryptoSystemResponseOp::ComputeDh {
                result: to_json_api_result_with_string(csv.compute_dh(&key, &secret)),
            },
            CryptoSystemRequestOp::RandomBytes { len } => CryptoSystemResponseOp::RandomBytes {
                value: csv.random_bytes(len),
            },
            CryptoSystemRequestOp::DefaultSaltLength => CryptoSystemResponseOp::DefaultSaltLength {
                value: csv.default_salt_length(),
            },
            CryptoSystemRequestOp::HashPassword { password, salt } => {
                CryptoSystemResponseOp::HashPassword {
                    result: to_json_api_result(csv.hash_password(&password, &salt)),
                }
            }
            CryptoSystemRequestOp::VerifyPassword {
                password,
                password_hash,
            } => CryptoSystemResponseOp::VerifyPassword {
                result: to_json_api_result(csv.verify_password(&password, &password_hash)),
            },
            CryptoSystemRequestOp::DeriveSharedSecret { password, salt } => {
                CryptoSystemResponseOp::DeriveSharedSecret {
                    result: to_json_api_result_with_string(
                        csv.derive_shared_secret(&password, &salt),
                    ),
                }
            }
            CryptoSystemRequestOp::RandomNonce => CryptoSystemResponseOp::RandomNonce {
                value: csv.random_nonce(),
            },
            CryptoSystemRequestOp::RandomSharedSecret => {
                CryptoSystemResponseOp::RandomSharedSecret {
                    value: csv.random_shared_secret(),
                }
            }
            CryptoSystemRequestOp::GenerateKeyPair => CryptoSystemResponseOp::GenerateKeyPair {
                value: csv.generate_keypair(),
            },
            CryptoSystemRequestOp::GenerateHash { data } => CryptoSystemResponseOp::GenerateHash {
                value: csv.generate_hash(&data),
            },
            CryptoSystemRequestOp::ValidateKeyPair { key, secret } => {
                CryptoSystemResponseOp::ValidateKeyPair {
                    value: csv.validate_keypair(&key, &secret),
                }
            }
            CryptoSystemRequestOp::ValidateHash { data, hash_digest } => {
                CryptoSystemResponseOp::ValidateHash {
                    value: csv.validate_hash(&data, &hash_digest),
                }
            }
            CryptoSystemRequestOp::Distance { key1, key2 } => CryptoSystemResponseOp::Distance {
                value: csv.distance(&key1, &key2),
            },
            CryptoSystemRequestOp::Sign { key, secret, data } => CryptoSystemResponseOp::Sign {
                result: to_json_api_result_with_string(csv.sign(&key, &secret, &data)),
            },
            CryptoSystemRequestOp::Verify { key, data, secret } => CryptoSystemResponseOp::Verify {
                result: to_json_api_result(csv.verify(&key, &data, &secret)),
            },
            CryptoSystemRequestOp::AeadOverhead => CryptoSystemResponseOp::AeadOverhead {
                value: csv.aead_overhead() as u32,
            },
            CryptoSystemRequestOp::DecryptAead {
                body,
                nonce,
                shared_secret,
                associated_data,
            } => CryptoSystemResponseOp::DecryptAead {
                result: to_json_api_result_with_vec_u8(csv.decrypt_aead(
                    &body,
                    &nonce,
                    &shared_secret,
                    associated_data.as_ref().map(|ad| ad.as_slice()),
                )),
            },
            CryptoSystemRequestOp::EncryptAead {
                body,
                nonce,
                shared_secret,
                associated_data,
            } => CryptoSystemResponseOp::EncryptAead {
                result: to_json_api_result_with_vec_u8(csv.encrypt_aead(
                    &body,
                    &nonce,
                    &shared_secret,
                    associated_data.as_ref().map(|ad| ad.as_slice()),
                )),
            },
            CryptoSystemRequestOp::CryptNoAuth {
                body,
                nonce,
                shared_secret,
            } => CryptoSystemResponseOp::CryptNoAuth {
                value: csv.crypt_no_auth_unaligned(&body, &nonce, &shared_secret),
            },
        };
        CryptoSystemResponse {
            cs_id: csr.cs_id,
            cs_op,
        }
    }

    pub async fn process_request(self, request: Request) -> Response {
        let id = request.id;

        let op = match request.op {
            RequestOp::Control { args: _args } => ResponseOp::Control {
                result: to_json_api_result(VeilidAPIResult::Err(VeilidAPIError::unimplemented(
                    "control should be handled by veilid-core host application",
                ))),
            },
            RequestOp::GetState => ResponseOp::GetState {
                result: to_json_api_result(self.api.get_state().await),
            },
            RequestOp::Attach => ResponseOp::Attach {
                result: to_json_api_result(self.api.attach().await),
            },
            RequestOp::Detach => ResponseOp::Detach {
                result: to_json_api_result(self.api.detach().await),
            },
            RequestOp::NewPrivateRoute => ResponseOp::NewPrivateRoute {
                result: to_json_api_result(self.api.new_private_route().await.map(|r| {
                    NewPrivateRouteResult {
                        route_id: r.0,
                        blob: r.1,
                    }
                })),
            },
            RequestOp::NewCustomPrivateRoute {
                kinds,
                stability,
                sequencing,
            } => ResponseOp::NewCustomPrivateRoute {
                result: to_json_api_result(
                    self.api
                        .new_custom_private_route(&kinds, stability, sequencing)
                        .await
                        .map(|r| NewPrivateRouteResult {
                            route_id: r.0,
                            blob: r.1,
                        }),
                ),
            },
            RequestOp::ImportRemotePrivateRoute { blob } => ResponseOp::ImportRemotePrivateRoute {
                result: to_json_api_result_with_string(self.api.import_remote_private_route(blob)),
            },
            RequestOp::ReleasePrivateRoute { route_id } => ResponseOp::ReleasePrivateRoute {
                result: to_json_api_result(self.api.release_private_route(route_id)),
            },
            RequestOp::AppCallReply { call_id, message } => ResponseOp::AppCallReply {
                result: to_json_api_result(self.api.app_call_reply(call_id, message).await),
            },
            RequestOp::NewRoutingContext => ResponseOp::NewRoutingContext {
                value: self.add_routing_context(self.api.routing_context()),
            },
            RequestOp::RoutingContext(rcr) => {
                let routing_context = match self.lookup_routing_context(id, rcr.rc_id) {
                    Ok(v) => v,
                    Err(e) => return e,
                };
                ResponseOp::RoutingContext(
                    self.process_routing_context_request(routing_context, rcr)
                        .await,
                )
            }
            RequestOp::OpenTableDb { name, column_count } => {
                let table_store = match self.api.table_store() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::OpenTableDb {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::OpenTableDb {
                    result: to_json_api_result(
                        table_store
                            .open(&name, column_count)
                            .await
                            .map(|table_db| self.add_table_db(table_db)),
                    ),
                }
            }
            RequestOp::DeleteTableDb { name } => {
                let table_store = match self.api.table_store() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::OpenTableDb {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::DeleteTableDb {
                    result: to_json_api_result(table_store.delete(&name).await),
                }
            }
            RequestOp::TableDb(tdr) => {
                let table_db = match self.lookup_table_db(id, tdr.db_id) {
                    Ok(v) => v,
                    Err(e) => return e,
                };
                ResponseOp::TableDb(self.process_table_db_request(table_db, tdr).await)
            }
            RequestOp::TableDbTransaction(tdtr) => {
                let table_db_transaction = match self.lookup_table_db_transaction(id, tdtr.tx_id) {
                    Ok(v) => v,
                    Err(e) => return e,
                };
                ResponseOp::TableDbTransaction(
                    self.process_table_db_transaction_request(table_db_transaction, tdtr)
                        .await,
                )
            }
            RequestOp::GetCryptoSystem { kind } => {
                let crypto = match self.api.crypto() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::GetCryptoSystem {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::GetCryptoSystem {
                    result: to_json_api_result(
                        crypto
                            .get(kind)
                            .ok_or_else(|| {
                                VeilidAPIError::invalid_argument(
                                    "unsupported cryptosystem",
                                    "kind",
                                    kind,
                                )
                            })
                            .map(|csv| self.add_crypto_system(csv)),
                    ),
                }
            }
            RequestOp::BestCryptoSystem => {
                let crypto = match self.api.crypto() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::GetCryptoSystem {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::BestCryptoSystem {
                    result: to_json_api_result(Ok(self.add_crypto_system(crypto.best()))),
                }
            }
            RequestOp::CryptoSystem(csr) => {
                let csv = match self.lookup_crypto_system(id, csr.cs_id) {
                    Ok(v) => v,
                    Err(e) => return e,
                };
                ResponseOp::CryptoSystem(self.process_crypto_system_request(csv, csr).await)
            }
            RequestOp::VerifySignatures {
                node_ids,
                data,
                signatures,
            } => {
                let crypto = match self.api.crypto() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::GetCryptoSystem {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::VerifySignatures {
                    result: to_json_api_result_with_vec_string(crypto.verify_signatures(
                        &node_ids,
                        &data,
                        &signatures,
                    )),
                }
            }
            RequestOp::GenerateSignatures { data, key_pairs } => {
                let crypto = match self.api.crypto() {
                    Ok(v) => v,
                    Err(e) => {
                        return Response {
                            id,
                            op: ResponseOp::GetCryptoSystem {
                                result: to_json_api_result(Err(e)),
                            },
                        }
                    }
                };
                ResponseOp::GenerateSignatures {
                    result: to_json_api_result_with_vec_string(crypto.generate_signatures(
                        &data,
                        &key_pairs,
                        |k, s| TypedSignature::new(k.kind, s),
                    )),
                }
            }
            RequestOp::GenerateKeyPair { kind } => ResponseOp::GenerateKeyPair {
                result: to_json_api_result_with_string(Crypto::generate_keypair(kind)),
            },
            RequestOp::Now => ResponseOp::Now {
                value: get_aligned_timestamp(),
            },
            RequestOp::Debug { command } => ResponseOp::Debug {
                result: to_json_api_result(self.api.debug(command).await),
            },
            RequestOp::VeilidVersionString => ResponseOp::VeilidVersionString {
                value: veilid_version_string(),
            },
            RequestOp::VeilidVersion => {
                let (major, minor, patch) = veilid_version();

                ResponseOp::VeilidVersion {
                    major,
                    minor,
                    patch,
                }
            }
        };

        Response { id, op }
    }
}
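
// Editorial sketch (not part of the diff): the u32 handles used throughout the
// processor above (routing contexts, table dbs, transactions, crypto systems)
// imply some id-to-object bookkeeping behind the add_* / lookup_* / release_*
// helpers. That implementation is not shown in this hunk; this is only a
// minimal, hypothetical illustration of the pattern.
use std::collections::BTreeMap;
use std::sync::Mutex;

struct HandleRegistry<T> {
    next_id: Mutex<u32>,
    items: Mutex<BTreeMap<u32, T>>,
}

impl<T: Clone> HandleRegistry<T> {
    fn new() -> Self {
        Self {
            next_id: Mutex::new(0),
            items: Mutex::new(BTreeMap::new()),
        }
    }
    fn add(&self, item: T) -> u32 {
        let mut next_id = self.next_id.lock().unwrap();
        *next_id += 1;
        self.items.lock().unwrap().insert(*next_id, item);
        *next_id
    }
    fn lookup(&self, id: u32) -> Option<T> {
        self.items.lock().unwrap().get(&id).cloned()
    }
    fn release(&self, id: u32) -> bool {
        self.items.lock().unwrap().remove(&id).is_some()
    }
}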
143
veilid-core/src/veilid_api/json_api/routing_context.rs
Normal file
@ -0,0 +1,143 @@
use super::*;

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RoutingContextRequest {
    pub rc_id: u32,
    #[serde(flatten)]
    pub rc_op: RoutingContextRequestOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct RoutingContextResponse {
    pub rc_id: u32,
    #[serde(flatten)]
    pub rc_op: RoutingContextResponseOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "rc_op")]
pub enum RoutingContextRequestOp {
    Release,
    WithPrivacy,
    WithCustomPrivacy {
        stability: Stability,
    },
    WithSequencing {
        sequencing: Sequencing,
    },
    AppCall {
        target: String,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        request: Vec<u8>,
    },
    AppMessage {
        target: String,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        message: Vec<u8>,
    },
    CreateDhtRecord {
        #[schemars(with = "String")]
        kind: CryptoKind,
        schema: DHTSchema,
    },
    OpenDhtRecord {
        #[schemars(with = "String")]
        key: TypedKey,
        #[schemars(with = "Option<String>")]
        writer: Option<KeyPair>,
    },
    CloseDhtRecord {
        #[schemars(with = "String")]
        key: TypedKey,
    },
    DeleteDhtRecord {
        #[schemars(with = "String")]
        key: TypedKey,
    },
    GetDhtValue {
        #[schemars(with = "String")]
        key: TypedKey,
        subkey: ValueSubkey,
        force_refresh: bool,
    },
    SetDhtValue {
        #[schemars(with = "String")]
        key: TypedKey,
        subkey: ValueSubkey,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        data: Vec<u8>,
    },
    WatchDhtValues {
        #[schemars(with = "String")]
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        expiration: Timestamp,
        count: u32,
    },
    CancelDhtWatch {
        #[schemars(with = "String")]
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
    },
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "rc_op")]
pub enum RoutingContextResponseOp {
    InvalidId,
    Release,
    WithPrivacy {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    WithCustomPrivacy {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    WithSequencing {
        value: u32,
    },
    AppCall {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<String>")]
        result: ApiResultWithVecU8,
    },
    AppMessage {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    CreateDhtRecord {
        #[serde(flatten)]
        result: ApiResult<DHTRecordDescriptor>,
    },
    OpenDhtRecord {
        #[serde(flatten)]
        result: ApiResult<DHTRecordDescriptor>,
    },
    CloseDhtRecord {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    DeleteDhtRecord {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    GetDhtValue {
        #[serde(flatten)]
        result: ApiResult<Option<ValueData>>,
    },
    SetDhtValue {
        #[serde(flatten)]
        result: ApiResult<Option<ValueData>>,
    },
    WatchDhtValues {
        #[serde(flatten)]
        result: ApiResult<Timestamp>,
    },
    CancelDhtWatch {
        #[serde(flatten)]
        result: ApiResult<bool>,
    },
}
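
// Editorial sketch (not part of the diff), assuming the schemars crate that
// provides the JsonSchema derives above: emitting the machine-readable schema
// for the request type, for example to generate or validate client bindings.
fn example_dump_schema() {
    let schema = schemars::schema_for!(RoutingContextRequest);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}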
129
veilid-core/src/veilid_api/json_api/table_db.rs
Normal file
@ -0,0 +1,129 @@
use super::*;

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct TableDbRequest {
    pub db_id: u32,
    #[serde(flatten)]
    pub db_op: TableDbRequestOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct TableDbResponse {
    pub db_id: u32,
    #[serde(flatten)]
    pub db_op: TableDbResponseOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "db_op")]
pub enum TableDbRequestOp {
    Release,
    GetColumnCount,
    GetKeys {
        col: u32,
    },
    Transact,
    Store {
        col: u32,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        key: Vec<u8>,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        value: Vec<u8>,
    },
    Load {
        col: u32,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        key: Vec<u8>,
    },
    Delete {
        col: u32,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        key: Vec<u8>,
    },
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "db_op")]
pub enum TableDbResponseOp {
    InvalidId,
    Release,
    GetColumnCount {
        #[serde(flatten)]
        result: ApiResult<u32>,
    },
    GetKeys {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<Vec<String>>")]
        result: ApiResultWithVecVecU8,
    },
    Transact {
        value: u32,
    },
    Store {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    Load {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<Option<String>>")]
        result: ApiResult<Option<VecU8>>,
    },
    Delete {
        #[serde(flatten)]
        #[schemars(with = "ApiResult<Option<String>>")]
        result: ApiResult<Option<VecU8>>,
    },
}

//////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct TableDbTransactionRequest {
    pub tx_id: u32,
    #[serde(flatten)]
    pub tx_op: TableDbTransactionRequestOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
pub struct TableDbTransactionResponse {
    pub tx_id: u32,
    #[serde(flatten)]
    pub tx_op: TableDbTransactionResponseOp,
}

#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "tx_op")]
pub enum TableDbTransactionRequestOp {
    Commit,
    Rollback,
    Store {
        col: u32,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        key: Vec<u8>,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        value: Vec<u8>,
    },
    Delete {
        col: u32,
        #[serde(with = "json_as_base64")]
        #[schemars(with = "String")]
        key: Vec<u8>,
    },
}
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "tx_op")]
pub enum TableDbTransactionResponseOp {
    InvalidId,
    Commit {
        #[serde(flatten)]
        result: ApiResult<()>,
    },
    Rollback {},
    Store {},
    Delete {},
}
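
// Editorial sketch (not part of the diff): TableDb keys and values above are
// declared as Vec<u8> but travel as BASE64URL_NOPAD strings through the
// `json_as_base64` helper; this assumes the data_encoding crate that helper uses.
use data_encoding::BASE64URL_NOPAD;

fn example_encode_table_key(key: &[u8]) -> String {
    BASE64URL_NOPAD.encode(key)
}
// example_encode_table_key(b"my-key") == "bXkta2V5"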
|
@ -7,6 +7,7 @@ mod routing_context;
|
|||||||
mod serialize_helpers;
|
mod serialize_helpers;
|
||||||
mod types;
|
mod types;
|
||||||
|
|
||||||
|
pub mod json_api;
|
||||||
pub mod tests;
|
pub mod tests;
|
||||||
|
|
||||||
pub use api::*;
|
pub use api::*;
|
||||||
@ -20,6 +21,7 @@ pub use alloc::string::ToString;
|
|||||||
pub use attachment_manager::AttachmentManager;
|
pub use attachment_manager::AttachmentManager;
|
||||||
pub use core::str::FromStr;
|
pub use core::str::FromStr;
|
||||||
pub use crypto::*;
|
pub use crypto::*;
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub use intf::BlockStore;
|
pub use intf::BlockStore;
|
||||||
pub use intf::ProtectedStore;
|
pub use intf::ProtectedStore;
|
||||||
pub use network_manager::NetworkManager;
|
pub use network_manager::NetworkManager;
|
||||||
|
@ -290,10 +290,12 @@ impl RoutingContext {
|
|||||||
///////////////////////////////////
|
///////////////////////////////////
|
||||||
/// Block Store
|
/// Block Store
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub async fn find_block(&self, _block_id: PublicKey) -> VeilidAPIResult<Vec<u8>> {
|
pub async fn find_block(&self, _block_id: PublicKey) -> VeilidAPIResult<Vec<u8>> {
|
||||||
panic!("unimplemented");
|
panic!("unimplemented");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-blockstore")]
|
||||||
pub async fn supply_block(&self, _block_id: PublicKey) -> VeilidAPIResult<bool> {
|
pub async fn supply_block(&self, _block_id: PublicKey) -> VeilidAPIResult<bool> {
|
||||||
panic!("unimplemented");
|
panic!("unimplemented");
|
||||||
}
|
}
|
||||||
|
@ -55,6 +55,27 @@ pub mod json_as_base64 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub mod opt_json_as_base64 {
|
||||||
|
use data_encoding::BASE64URL_NOPAD;
|
||||||
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
|
|
||||||
|
pub fn serialize<S: Serializer>(v: &Option<Vec<u8>>, s: S) -> Result<S::Ok, S::Error> {
|
||||||
|
let base64 = v.as_ref().map(|x| BASE64URL_NOPAD.encode(&x));
|
||||||
|
Option::<String>::serialize(&base64, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Vec<u8>>, D::Error> {
|
||||||
|
let base64 = Option::<String>::deserialize(d)?;
|
||||||
|
base64
|
||||||
|
.map(|x| {
|
||||||
|
BASE64URL_NOPAD
|
||||||
|
.decode(x.as_bytes())
|
||||||
|
.map_err(|e| serde::de::Error::custom(e))
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub mod json_as_string {
|
pub mod json_as_string {
|
||||||
use std::fmt::Display;
|
use std::fmt::Display;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
@ -9,11 +9,10 @@ pub fn serialize<T: Integer + Serialize, S: Serializer>(
|
|||||||
v: &RangeSetBlaze<T>,
|
v: &RangeSetBlaze<T>,
|
||||||
s: S,
|
s: S,
|
||||||
) -> Result<S::Ok, S::Error> {
|
) -> Result<S::Ok, S::Error> {
|
||||||
let cnt = v.ranges_len() * 2;
|
let cnt = v.ranges_len();
|
||||||
let mut seq = s.serialize_seq(Some(cnt))?;
|
let mut seq = s.serialize_seq(Some(cnt))?;
|
||||||
for range in v.ranges() {
|
for range in v.ranges() {
|
||||||
seq.serialize_element(range.start())?;
|
seq.serialize_element(&(range.start(), range.end()))?;
|
||||||
seq.serialize_element(range.end())?;
|
|
||||||
}
|
}
|
||||||
seq.end()
|
seq.end()
|
||||||
}
|
}
|
||||||
@ -41,10 +40,7 @@ pub fn deserialize<'de, T: Integer + Deserialize<'de>, D: Deserializer<'de>>(
|
|||||||
{
|
{
|
||||||
let mut values = RangeSetBlaze::<T>::new();
|
let mut values = RangeSetBlaze::<T>::new();
|
||||||
|
|
||||||
while let Some(start) = seq.next_element()? {
|
while let Some((start, end)) = seq.next_element()? {
|
||||||
let Some(end) = seq.next_element()? else {
|
|
||||||
break;
|
|
||||||
};
|
|
||||||
values.ranges_insert(start..=end);
|
values.ranges_insert(start..=end);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
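// Editorial sketch (not part of the diff), assuming range_set_blaze and
// serde_json as used above: the subkey-range wire format changed from a flat
// sequence of scalars to one (start, end) pair per inclusive range.
use range_set_blaze::RangeSetBlaze;

fn example_ranges_as_pairs(set: &RangeSetBlaze<u32>) -> Vec<(u32, u32)> {
    set.ranges().map(|r| (*r.start(), *r.end())).collect()
}
// example_ranges_as_pairs(&RangeSetBlaze::from_iter([20..=30])) == vec![(20, 30)]
// serde_json::to_string(&vec![(20u32, 30u32)]).unwrap() == "[[20,30]]"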
@ -212,10 +212,6 @@ pub fn fix_veilidvaluechange() -> VeilidValueChange {
|
|||||||
key: fix_typedkey(),
|
key: fix_typedkey(),
|
||||||
subkeys: vec![1, 2, 3, 4],
|
subkeys: vec![1, 2, 3, 4],
|
||||||
count: 5,
|
count: 5,
|
||||||
value: ValueData {
|
value: ValueData::new_with_seq(23, b"ValueData".to_vec(), fix_cryptokey()),
|
||||||
seq: 23,
|
|
||||||
data: b"ValueData".to_vec(),
|
|
||||||
writer: fix_cryptokey(),
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -17,10 +17,15 @@ pub async fn test_all() {
|
|||||||
test_transferstatsdownup().await;
|
test_transferstatsdownup().await;
|
||||||
test_rpcstats().await;
|
test_rpcstats().await;
|
||||||
test_peerstats().await;
|
test_peerstats().await;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
test_tunnelmode().await;
|
test_tunnelmode().await;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
test_tunnelerror().await;
|
test_tunnelerror().await;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
test_tunnelendpoint().await;
|
test_tunnelendpoint().await;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
test_fulltunnel().await;
|
test_fulltunnel().await;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
test_partialtunnel().await;
|
test_partialtunnel().await;
|
||||||
test_veilidloglevel().await;
|
test_veilidloglevel().await;
|
||||||
test_veilidlog().await;
|
test_veilidlog().await;
|
||||||
|
@ -13,21 +13,18 @@ pub async fn test_alignedu64() {
|
|||||||
// app_messsage_call
|
// app_messsage_call
|
||||||
|
|
||||||
pub async fn test_veilidappmessage() {
|
pub async fn test_veilidappmessage() {
|
||||||
let orig = VeilidAppMessage {
|
let orig = VeilidAppMessage::new(Some(fix_typedkey()), b"Hi there!".to_vec());
|
||||||
sender: Some(fix_typedkey()),
|
|
||||||
message: b"Hi there!".to_vec(),
|
|
||||||
};
|
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn test_veilidappcall() {
|
pub async fn test_veilidappcall() {
|
||||||
let orig = VeilidAppCall {
|
let orig = VeilidAppCall::new(
|
||||||
sender: Some(fix_typedkey()),
|
Some(fix_typedkey()),
|
||||||
message: b"Well, hello!".to_vec(),
|
b"Well, hello!".to_vec(),
|
||||||
id: AlignedU64::from(123),
|
AlignedU64::from(123),
|
||||||
};
|
);
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
@ -116,12 +113,15 @@ pub async fn test_peerstats() {
|
|||||||
|
|
||||||
// tunnel
|
// tunnel
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub async fn test_tunnelmode() {
|
pub async fn test_tunnelmode() {
|
||||||
let orig = TunnelMode::Raw;
|
let orig = TunnelMode::Raw;
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub async fn test_tunnelerror() {
|
pub async fn test_tunnelerror() {
|
||||||
let orig = TunnelError::NoCapacity;
|
let orig = TunnelError::NoCapacity;
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
@ -129,6 +129,7 @@ pub async fn test_tunnelerror() {
|
|||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub async fn test_tunnelendpoint() {
|
pub async fn test_tunnelendpoint() {
|
||||||
let orig = TunnelEndpoint {
|
let orig = TunnelEndpoint {
|
||||||
mode: TunnelMode::Raw,
|
mode: TunnelMode::Raw,
|
||||||
@ -139,6 +140,7 @@ pub async fn test_tunnelendpoint() {
|
|||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub async fn test_fulltunnel() {
|
pub async fn test_fulltunnel() {
|
||||||
let orig = FullTunnel {
|
let orig = FullTunnel {
|
||||||
id: AlignedU64::from(42),
|
id: AlignedU64::from(42),
|
||||||
@ -157,6 +159,7 @@ pub async fn test_fulltunnel() {
|
|||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub async fn test_partialtunnel() {
|
pub async fn test_partialtunnel() {
|
||||||
let orig = PartialTunnel {
|
let orig = PartialTunnel {
|
||||||
id: AlignedU64::from(42),
|
id: AlignedU64::from(42),
|
||||||
|
@ -5,12 +5,12 @@ use range_set_blaze::*;
|
|||||||
// dht_record_descriptors
|
// dht_record_descriptors
|
||||||
|
|
||||||
pub async fn test_dhtrecorddescriptor() {
|
pub async fn test_dhtrecorddescriptor() {
|
||||||
let orig = DHTRecordDescriptor {
|
let orig = DHTRecordDescriptor::new(
|
||||||
key: fix_typedkey(),
|
fix_typedkey(),
|
||||||
owner: fix_cryptokey(),
|
fix_cryptokey(),
|
||||||
owner_secret: Some(fix_cryptokey()),
|
Some(fix_cryptokey()),
|
||||||
schema: DHTSchema::DFLT(DHTSchemaDFLT { o_cnt: 4321 }),
|
DHTSchema::DFLT(DHTSchemaDFLT { o_cnt: 4321 }),
|
||||||
};
|
);
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
@ -19,11 +19,7 @@ pub async fn test_dhtrecorddescriptor() {
|
|||||||
// value_data
|
// value_data
|
||||||
|
|
||||||
pub async fn test_valuedata() {
|
pub async fn test_valuedata() {
|
||||||
let orig = ValueData {
|
let orig = ValueData::new_with_seq(42, b"Brent Spiner".to_vec(), fix_cryptokey());
|
||||||
seq: 42,
|
|
||||||
data: b"Brent Spiner".to_vec(),
|
|
||||||
writer: fix_cryptokey(),
|
|
||||||
};
|
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
@ -32,9 +28,7 @@ pub async fn test_valuedata() {
|
|||||||
// value_subkey_range_set
|
// value_subkey_range_set
|
||||||
|
|
||||||
pub async fn test_valuesubkeyrangeset() {
|
pub async fn test_valuesubkeyrangeset() {
|
||||||
let orig = ValueSubkeyRangeSet {
|
let orig = ValueSubkeyRangeSet::new_with_data(RangeSetBlaze::from_iter([20..=30]));
|
||||||
data: RangeSetBlaze::from_iter([20..=30]),
|
|
||||||
};
|
|
||||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||||
|
|
||||||
assert_eq!(orig, copy);
|
assert_eq!(orig, copy);
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
use super::fixtures::*;
|
use super::fixtures::*;
|
||||||
use crate::*;
|
use crate::*;
|
||||||
use range_set_blaze::*;
|
|
||||||
|
|
||||||
// dlft
|
// dlft
|
||||||
|
|
||||||
|
@ -18,10 +18,16 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[repr(C, align(8))]
|
#[repr(C, align(8))]
|
||||||
#[archive_attr(repr(C, align(8)), derive(CheckBytes))]
|
#[archive_attr(repr(C, align(8)), derive(CheckBytes))]
|
||||||
pub struct AlignedU64(u64);
|
#[serde(transparent)]
|
||||||
|
pub struct AlignedU64(
|
||||||
|
#[serde(with = "json_as_string")]
|
||||||
|
#[schemars(with = "String")]
|
||||||
|
u64,
|
||||||
|
);
|
||||||
|
|
||||||
impl From<u64> for AlignedU64 {
|
impl From<u64> for AlignedU64 {
|
||||||
fn from(v: u64) -> Self {
|
fn from(v: u64) -> Self {
|
||||||
|
@ -2,15 +2,27 @@ use super::*;
|
|||||||
|
|
||||||
/// Direct statement blob passed to hosting application for processing
|
/// Direct statement blob passed to hosting application for processing
|
||||||
#[derive(
|
#[derive(
|
||||||
Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
Debug,
|
||||||
|
Clone,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
RkyvArchive,
|
||||||
|
RkyvSerialize,
|
||||||
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct VeilidAppMessage {
|
pub struct VeilidAppMessage {
|
||||||
/// Some(sender) if the message was sent directly, None if received via a private/safety route
|
/// Some(sender) if the message was sent directly, None if received via a private/safety route
|
||||||
#[serde(with = "opt_json_as_string")]
|
#[serde(with = "opt_json_as_string")]
|
||||||
|
#[schemars(with = "Option<String>")]
|
||||||
pub sender: Option<TypedKey>,
|
pub sender: Option<TypedKey>,
|
||||||
|
|
||||||
/// The content of the message to deliver to the application
|
/// The content of the message to deliver to the application
|
||||||
#[serde(with = "json_as_base64")]
|
#[serde(with = "json_as_base64")]
|
||||||
|
#[schemars(with = "String")]
|
||||||
pub message: Vec<u8>,
|
pub message: Vec<u8>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -29,27 +41,41 @@ impl VeilidAppMessage {
|
|||||||
|
|
||||||
/// Direct question blob passed to hosting application for processing to send an eventual AppReply
|
/// Direct question blob passed to hosting application for processing to send an eventual AppReply
|
||||||
#[derive(
|
#[derive(
|
||||||
Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
|
Debug,
|
||||||
|
Clone,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
RkyvArchive,
|
||||||
|
RkyvSerialize,
|
||||||
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct VeilidAppCall {
|
pub struct VeilidAppCall {
|
||||||
/// Some(sender) if the request was sent directly, None if received via a private/safety route
|
/// Some(sender) if the request was sent directly, None if received via a private/safety route
|
||||||
#[serde(with = "opt_json_as_string")]
|
#[serde(with = "opt_json_as_string")]
|
||||||
pub sender: Option<TypedKey>,
|
#[schemars(with = "Option<String>")]
|
||||||
|
sender: Option<TypedKey>,
|
||||||
|
|
||||||
/// The content of the request to deliver to the application
|
/// The content of the request to deliver to the application
|
||||||
#[serde(with = "json_as_base64")]
|
#[serde(with = "json_as_base64")]
|
||||||
pub message: Vec<u8>,
|
#[schemars(with = "String")]
|
||||||
|
message: Vec<u8>,
|
||||||
|
|
||||||
/// The id to reply to
|
/// The id to reply to
|
||||||
#[serde(with = "json_as_string")]
|
#[serde(with = "json_as_string")]
|
||||||
pub id: OperationId,
|
#[schemars(with = "String")]
|
||||||
|
call_id: OperationId,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VeilidAppCall {
|
impl VeilidAppCall {
|
||||||
pub fn new(sender: Option<TypedKey>, message: Vec<u8>, id: OperationId) -> Self {
|
pub fn new(sender: Option<TypedKey>, message: Vec<u8>, call_id: OperationId) -> Self {
|
||||||
Self {
|
Self {
|
||||||
sender,
|
sender,
|
||||||
message,
|
message,
|
||||||
id,
|
call_id,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -60,6 +86,6 @@ impl VeilidAppCall {
|
|||||||
&self.message
|
&self.message
|
||||||
}
|
}
|
||||||
pub fn id(&self) -> OperationId {
|
pub fn id(&self) -> OperationId {
|
||||||
self.id
|
self.call_id
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -13,18 +13,22 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct DHTRecordDescriptor {
|
pub struct DHTRecordDescriptor {
|
||||||
/// DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
|
/// DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
|
||||||
pub key: TypedKey,
|
#[schemars(with = "String")]
|
||||||
|
key: TypedKey,
|
||||||
/// The public key of the owner
|
/// The public key of the owner
|
||||||
pub owner: PublicKey,
|
#[schemars(with = "String")]
|
||||||
|
owner: PublicKey,
|
||||||
/// If this key is being created: Some(the secret key of the owner)
|
/// If this key is being created: Some(the secret key of the owner)
|
||||||
/// If this key is just being opened: None
|
/// If this key is just being opened: None
|
||||||
pub owner_secret: Option<SecretKey>,
|
#[schemars(with = "Option<String>")]
|
||||||
|
owner_secret: Option<SecretKey>,
|
||||||
/// The schema in use associated with the key
|
/// The schema in use associated with the key
|
||||||
pub schema: DHTSchema,
|
schema: DHTSchema,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DHTRecordDescriptor {
|
impl DHTRecordDescriptor {
|
||||||
|
@ -13,6 +13,7 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct DHTSchemaDFLT {
|
pub struct DHTSchemaDFLT {
|
||||||
|
@ -19,6 +19,7 @@ pub use smpl::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(u8), derive(CheckBytes))]
|
#[archive_attr(repr(u8), derive(CheckBytes))]
|
||||||
#[serde(tag = "kind")]
|
#[serde(tag = "kind")]
|
||||||
|
@ -13,10 +13,12 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct DHTSchemaSMPLMember {
|
pub struct DHTSchemaSMPLMember {
|
||||||
/// Member key
|
/// Member key
|
||||||
|
#[schemars(with = "String")]
|
||||||
pub m_key: PublicKey,
|
pub m_key: PublicKey,
|
||||||
/// Member subkey count
|
/// Member subkey count
|
||||||
pub m_cnt: u16,
|
pub m_cnt: u16,
|
||||||
@ -35,6 +37,7 @@ pub struct DHTSchemaSMPLMember {
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct DHTSchemaSMPL {
|
pub struct DHTSchemaSMPL {
|
||||||
|
@ -13,12 +13,21 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct ValueData {
|
pub struct ValueData {
|
||||||
pub seq: ValueSeqNum,
|
/// An increasing sequence number to time-order the DHT record changes
|
||||||
pub data: Vec<u8>,
|
seq: ValueSeqNum,
|
||||||
pub writer: PublicKey,
|
|
||||||
|
/// The contents of a DHT Record
|
||||||
|
#[serde(with = "json_as_base64")]
|
||||||
|
#[schemars(with = "String")]
|
||||||
|
data: Vec<u8>,
|
||||||
|
|
||||||
|
/// The public identity key of the writer of the data
|
||||||
|
#[schemars(with = "String")]
|
||||||
|
writer: PublicKey,
|
||||||
}
|
}
|
||||||
impl ValueData {
|
impl ValueData {
|
||||||
pub const MAX_LEN: usize = 32768;
|
pub const MAX_LEN: usize = 32768;
|
||||||
|
@ -15,12 +15,15 @@ use range_set_blaze::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
|
#[serde(transparent)]
|
||||||
pub struct ValueSubkeyRangeSet {
|
pub struct ValueSubkeyRangeSet {
|
||||||
#[with(RkyvRangeSetBlaze)]
|
#[with(RkyvRangeSetBlaze)]
|
||||||
#[serde(with = "serialize_range_set_blaze")]
|
#[serde(with = "serialize_range_set_blaze")]
|
||||||
pub data: RangeSetBlaze<ValueSubkey>,
|
#[schemars(with = "Vec<(u32,u32)>")]
|
||||||
|
data: RangeSetBlaze<ValueSubkey>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ValueSubkeyRangeSet {
|
impl ValueSubkeyRangeSet {
|
||||||
@ -29,6 +32,9 @@ impl ValueSubkeyRangeSet {
|
|||||||
data: Default::default(),
|
data: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
pub fn new_with_data(data: RangeSetBlaze<ValueSubkey>) -> Self {
|
||||||
|
Self { data }
|
||||||
|
}
|
||||||
pub fn single(value: ValueSubkey) -> Self {
|
pub fn single(value: ValueSubkey) -> Self {
|
||||||
let mut data = RangeSetBlaze::new();
|
let mut data = RangeSetBlaze::new();
|
||||||
data.insert(value);
|
data.insert(value);
|
||||||
|
@ -15,8 +15,11 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq, Hash))]
|
#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq, Hash))]
|
||||||
|
#[serde(try_from = "String")]
|
||||||
|
#[serde(into = "String")]
|
||||||
pub struct FourCC(pub [u8; 4]);
|
pub struct FourCC(pub [u8; 4]);
|
||||||
|
|
||||||
impl From<[u8; 4]> for FourCC {
|
impl From<[u8; 4]> for FourCC {
|
||||||
@ -37,6 +40,12 @@ impl From<FourCC> for u32 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<FourCC> for String {
|
||||||
|
fn from(u: FourCC) -> Self {
|
||||||
|
String::from_utf8_lossy(&u.0).to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl TryFrom<&[u8]> for FourCC {
|
impl TryFrom<&[u8]> for FourCC {
|
||||||
type Error = VeilidAPIError;
|
type Error = VeilidAPIError;
|
||||||
fn try_from(b: &[u8]) -> Result<Self, Self::Error> {
|
fn try_from(b: &[u8]) -> Result<Self, Self::Error> {
|
||||||
@ -44,6 +53,13 @@ impl TryFrom<&[u8]> for FourCC {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for FourCC {
|
||||||
|
type Error = VeilidAPIError;
|
||||||
|
fn try_from(s: String) -> Result<Self, Self::Error> {
|
||||||
|
Self::from_str(s.as_str())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl fmt::Display for FourCC {
|
impl fmt::Display for FourCC {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
write!(f, "{}", String::from_utf8_lossy(&self.0))
|
write!(f, "{}", String::from_utf8_lossy(&self.0))
|
||||||
|
@ -4,6 +4,7 @@ mod dht;
|
|||||||
mod fourcc;
|
mod fourcc;
|
||||||
mod safety;
|
mod safety;
|
||||||
mod stats;
|
mod stats;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
mod tunnel;
|
mod tunnel;
|
||||||
mod veilid_log;
|
mod veilid_log;
|
||||||
mod veilid_state;
|
mod veilid_state;
|
||||||
@ -16,6 +17,7 @@ pub use dht::*;
|
|||||||
pub use fourcc::*;
|
pub use fourcc::*;
|
||||||
pub use safety::*;
|
pub use safety::*;
|
||||||
pub use stats::*;
|
pub use stats::*;
|
||||||
|
#[cfg(feature = "unstable-tunnels")]
|
||||||
pub use tunnel::*;
|
pub use tunnel::*;
|
||||||
pub use veilid_log::*;
|
pub use veilid_log::*;
|
||||||
pub use veilid_state::*;
|
pub use veilid_state::*;
|
||||||
|
@ -15,6 +15,7 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(u8), derive(CheckBytes))]
|
#[archive_attr(repr(u8), derive(CheckBytes))]
|
||||||
pub enum Sequencing {
|
pub enum Sequencing {
|
||||||
@ -44,6 +45,7 @@ impl Default for Sequencing {
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(u8), derive(CheckBytes))]
|
#[archive_attr(repr(u8), derive(CheckBytes))]
|
||||||
pub enum Stability {
|
pub enum Stability {
|
||||||
@ -72,6 +74,7 @@ impl Default for Stability {
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(u8), derive(CheckBytes))]
|
#[archive_attr(repr(u8), derive(CheckBytes))]
|
||||||
pub enum SafetySelection {
|
pub enum SafetySelection {
|
||||||
@ -111,10 +114,12 @@ impl Default for SafetySelection {
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct SafetySpec {
|
pub struct SafetySpec {
|
||||||
/// preferred safety route set id if it still exists
|
/// preferred safety route set id if it still exists
|
||||||
|
#[schemars(with = "Option<String>")]
|
||||||
pub preferred_route: Option<RouteId>,
|
pub preferred_route: Option<RouteId>,
|
||||||
/// must be greater than 0
|
/// must be greater than 0
|
||||||
pub hop_count: usize,
|
pub hop_count: usize,
|
||||||
|
@ -11,14 +11,12 @@ use super::*;
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct LatencyStats {
|
pub struct LatencyStats {
|
||||||
#[serde(with = "json_as_string")]
|
|
||||||
pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies
|
pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies
|
||||||
#[serde(with = "json_as_string")]
|
|
||||||
pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies
|
pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies
|
||||||
#[serde(with = "json_as_string")]
|
|
||||||
pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies
|
pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -33,16 +31,13 @@ pub struct LatencyStats {
|
|||||||
RkyvArchive,
|
RkyvArchive,
|
||||||
RkyvSerialize,
|
RkyvSerialize,
|
||||||
RkyvDeserialize,
|
RkyvDeserialize,
|
||||||
|
JsonSchema,
|
||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct TransferStats {
|
pub struct TransferStats {
|
||||||
#[serde(with = "json_as_string")]
|
|
||||||
pub total: ByteCount, // total amount transferred ever
|
pub total: ByteCount, // total amount transferred ever
|
||||||
#[serde(with = "json_as_string")]
|
|
||||||
pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts
|
pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts
|
||||||
-    #[serde(with = "json_as_string")]
     pub average: ByteCount, // average rate over the ROLLING_TRANSFERS_SIZE last amounts
-    #[serde(with = "json_as_string")]
     pub minimum: ByteCount, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts
 }
 
@@ -57,6 +52,7 @@ pub struct TransferStats
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct TransferStatsDownUp {
@@ -75,17 +71,15 @@ pub struct TransferStatsDownUp {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct RPCStats {
     pub messages_sent: u32, // number of rpcs that have been sent in the total_time range
     pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range
     pub questions_in_flight: u32, // number of questions issued that have yet to be answered
-    #[serde(with = "opt_json_as_string")]
     pub last_question_ts: Option<Timestamp>, // when the peer was last questioned (either successfully or not) and we wanted an answer
-    #[serde(with = "opt_json_as_string")]
     pub last_seen_ts: Option<Timestamp>, // when the peer was last seen for any reason, including when we first attempted to reach out to it
-    #[serde(with = "opt_json_as_string")]
     pub first_consecutive_seen_ts: Option<Timestamp>, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question)
     pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability
     pub failed_to_send: u32, // number of messages that have failed to send since we last successfully sent one
@@ -102,10 +96,10 @@ pub struct RPCStats
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct PeerStats {
-    #[serde(with = "json_as_string")]
     pub time_added: Timestamp, // when the peer was added to the routing table
     pub rpc_stats: RPCStats, // information about RPCs
     pub latency: Option<LatencyStats>, // latencies for communications with the peer
@@ -1,8 +1,11 @@
+#[cfg(feature = "unstable-tunnels")]
 use super::*;
 
 /// Tunnel identifier
+#[cfg(feature = "unstable-tunnels")]
 pub type TunnelId = AlignedU64;
 
+#[cfg(feature = "unstable-tunnels")]
 #[derive(
     Copy,
     Clone,
@@ -16,6 +19,7 @@ pub type TunnelId = AlignedU64;
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 pub enum TunnelMode {
@@ -23,6 +27,7 @@ pub enum TunnelMode {
     Turn,
 }
 
+#[cfg(feature = "unstable-tunnels")]
 #[derive(
     Copy,
     Clone,
@@ -36,6 +41,7 @@ pub enum TunnelMode {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 pub enum TunnelError {
@@ -45,8 +51,18 @@ pub enum TunnelError {
     NoCapacity, // Endpoint is full
 }
 
+#[cfg(feature = "unstable-tunnels")]
 #[derive(
-    Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Clone,
+    Debug,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct TunnelEndpoint {
@@ -54,6 +70,7 @@ pub struct TunnelEndpoint {
     pub description: String, // XXX: TODO
 }
 
+#[cfg(feature = "unstable-tunnels")]
 impl Default for TunnelEndpoint {
     fn default() -> Self {
         Self {
@@ -63,6 +80,7 @@ impl Default for TunnelEndpoint {
     }
 }
 
+#[cfg(feature = "unstable-tunnels")]
 #[derive(
     Clone,
     Debug,
@@ -74,6 +92,7 @@ impl Default for TunnelEndpoint {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct FullTunnel {
@@ -83,6 +102,7 @@ pub struct FullTunnel {
     pub remote: TunnelEndpoint,
 }
 
+#[cfg(feature = "unstable-tunnels")]
 #[derive(
     Clone,
     Debug,
@@ -94,6 +114,7 @@ pub struct FullTunnel {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct PartialTunnel {
@@ -14,6 +14,7 @@ use super::*;
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 pub enum VeilidLogLevel {
@@ -63,22 +64,45 @@ impl VeilidLogLevel {
     }
 }
 
+impl FromStr for VeilidLogLevel {
+    type Err = VeilidAPIError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            "Error" => Self::Error,
+            "Warn" => Self::Warn,
+            "Info" => Self::Info,
+            "Debug" => Self::Debug,
+            "Trace" => Self::Trace,
+            _ => {
+                apibail_invalid_argument!("Can't convert str", "s", s);
+            }
+        })
+    }
+}
 impl fmt::Display for VeilidLogLevel {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         let text = match self {
-            Self::Error => "ERROR",
-            Self::Warn => "WARN",
-            Self::Info => "INFO",
-            Self::Debug => "DEBUG",
-            Self::Trace => "TRACE",
+            Self::Error => "Error",
+            Self::Warn => "Warn",
+            Self::Info => "Info",
+            Self::Debug => "Debug",
+            Self::Trace => "Trace",
         };
         write!(f, "{}", text)
     }
 }
 
 /// A VeilidCore log message with optional backtrace
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidLog {
@@ -12,6 +12,7 @@ use super::*;
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 pub enum AttachmentState {
@@ -60,7 +61,16 @@ impl TryFrom<String> for AttachmentState {
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidStateAttachment {
@@ -70,39 +80,76 @@ pub struct VeilidStateAttachment {
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct PeerTableData {
+    #[schemars(with = "Vec<String>")]
     pub node_ids: Vec<TypedKey>,
     pub peer_address: String,
     pub peer_stats: PeerStats,
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidStateNetwork {
     pub started: bool,
-    #[serde(with = "json_as_string")]
     pub bps_down: ByteCount,
-    #[serde(with = "json_as_string")]
     pub bps_up: ByteCount,
     pub peers: Vec<PeerTableData>,
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidRouteChange {
+    #[schemars(with = "Vec<String>")]
     pub dead_routes: Vec<RouteId>,
+    #[schemars(with = "Vec<String>")]
     pub dead_remote_routes: Vec<RouteId>,
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidStateConfig {
@@ -110,10 +157,20 @@ pub struct VeilidStateConfig {
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidValueChange {
+    #[schemars(with = "String")]
     pub key: TypedKey,
     pub subkeys: Vec<ValueSubkey>,
     pub count: u32,
@@ -121,7 +178,16 @@ pub struct VeilidValueChange {
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(u8), derive(CheckBytes))]
 #[serde(tag = "kind")]
@@ -138,7 +204,16 @@ pub enum VeilidUpdate {
 }
 
 #[derive(
-    Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+    Debug,
+    Clone,
+    PartialEq,
+    Eq,
+    Serialize,
+    Deserialize,
+    RkyvArchive,
+    RkyvSerialize,
+    RkyvDeserialize,
+    JsonSchema,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct VeilidState {
@@ -25,6 +25,7 @@ pub type ConfigCallback = Arc<dyn Fn(String) -> ConfigCallbackReturn + Send + Sy
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigHTTPS {
     pub enabled: bool,
@@ -54,6 +55,7 @@ pub struct VeilidConfigHTTPS {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigHTTP {
     pub enabled: bool,
@@ -79,6 +81,7 @@ pub struct VeilidConfigHTTP {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigApplication {
     pub https: VeilidConfigHTTPS,
@@ -106,6 +109,7 @@ pub struct VeilidConfigApplication {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigUDP {
     pub enabled: bool,
@@ -135,6 +139,7 @@ pub struct VeilidConfigUDP {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigTCP {
     pub connect: bool,
@@ -166,6 +171,7 @@ pub struct VeilidConfigTCP {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigWS {
     pub connect: bool,
@@ -198,6 +204,7 @@ pub struct VeilidConfigWS {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigWSS {
     pub connect: bool,
@@ -226,6 +233,7 @@ pub struct VeilidConfigWSS {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigProtocol {
     pub udp: VeilidConfigUDP,
@@ -253,6 +261,7 @@ pub struct VeilidConfigProtocol {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigTLS {
     pub certificate_path: String,
@@ -273,6 +282,7 @@ pub struct VeilidConfigTLS {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigDHT {
     pub max_find_node_count: u32,
@@ -309,6 +319,7 @@ pub struct VeilidConfigDHT {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigRPC {
     pub concurrency: u32,
@@ -333,9 +344,12 @@ pub struct VeilidConfigRPC {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigRoutingTable {
+    #[schemars(with = "Vec<String>")]
     pub node_id: TypedKeySet,
+    #[schemars(with = "Vec<String>")]
     pub node_id_secret: TypedSecretSet,
     pub bootstrap: Vec<String>,
     pub limit_over_attached: u32,
@@ -358,6 +372,7 @@ pub struct VeilidConfigRoutingTable {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigNetwork {
     pub connection_initial_timeout_ms: u32,
@@ -391,6 +406,7 @@ pub struct VeilidConfigNetwork {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigTableStore {
     pub directory: String,
@@ -408,6 +424,7 @@ pub struct VeilidConfigTableStore {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigBlockStore {
     pub directory: String,
@@ -425,6 +442,7 @@ pub struct VeilidConfigBlockStore {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigProtectedStore {
     pub allow_insecure_fallback: bool,
@@ -446,6 +464,7 @@ pub struct VeilidConfigProtectedStore {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigCapabilities {
     pub protocol_udp: bool,
@@ -468,6 +487,7 @@ pub struct VeilidConfigCapabilities {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub enum VeilidConfigLogLevel {
     Off,
@@ -525,6 +545,35 @@ impl Default for VeilidConfigLogLevel {
         Self::Off
     }
 }
+impl FromStr for VeilidConfigLogLevel {
+    type Err = VeilidAPIError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            "Off" => Self::Off,
+            "Error" => Self::Error,
+            "Warn" => Self::Warn,
+            "Info" => Self::Info,
+            "Debug" => Self::Debug,
+            "Trace" => Self::Trace,
+            _ => {
+                apibail_invalid_argument!("Can't convert str", "s", s);
+            }
+        })
+    }
+}
+impl fmt::Display for VeilidConfigLogLevel {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        let text = match self {
+            Self::Off => "Off",
+            Self::Error => "Error",
+            Self::Warn => "Warn",
+            Self::Info => "Info",
+            Self::Debug => "Debug",
+            Self::Trace => "Trace",
+        };
+        write!(f, "{}", text)
+    }
+}
 
 #[derive(
     Default,
@@ -537,6 +586,7 @@ impl Default for VeilidConfigLogLevel {
     RkyvArchive,
     RkyvSerialize,
     RkyvDeserialize,
+    JsonSchema,
 )]
 pub struct VeilidConfigInner {
     pub program_name: String,
@@ -729,7 +779,7 @@ impl VeilidConfig {
         self.inner.read()
     }
 
-    fn safe_config(&self) -> VeilidConfigInner {
+    pub fn safe_config(&self) -> VeilidConfigInner {
         let mut safe_cfg = self.inner.read().clone();
 
         // Remove secrets
@@ -737,7 +787,6 @@ impl VeilidConfig {
         safe_cfg.protected_store.device_encryption_key_password = "".to_owned();
         safe_cfg.protected_store.new_device_encryption_key_password = None;
-
 
         safe_cfg
     }
 
@@ -753,6 +802,11 @@ impl VeilidConfig {
         let out = f(&mut editedinner)?;
         // Validate
         Self::validate(&mut editedinner)?;
+        // See if things have changed
+        if *inner == editedinner {
+            // No changes, return early
+            return Ok(out);
+        }
         // Commit changes
         *inner = editedinner.clone();
         out
@@ -856,7 +856,8 @@ class VeilidConfigProtectedStore {
         directory = json['directory'],
         delete = json['delete'],
         deviceEncryptionKeyPassword = json['device_encryption_key_password'],
-        newDeviceEncryptionKeyPassword = json['new_device_encryption_key_password'];
+        newDeviceEncryptionKeyPassword =
+            json['new_device_encryption_key_password'];
 }
 
 ////////////
@@ -797,7 +797,7 @@ class VeilidTableDBTransactionFFI extends VeilidTableDBTransaction {
   }
 
   @override
-  Future<bool> delete(int col, Uint8List key) {
+  Future<void> delete(int col, Uint8List key) {
     final nativeEncodedKey = base64UrlNoPadEncode(key).toNativeUtf8();
 
     final recvPort = ReceivePort("veilid_table_db_transaction_delete");
@@ -888,7 +888,7 @@ class VeilidTableDBFFI extends VeilidTableDB {
   }
 
   @override
-  Future<Uint8List?> delete(int col, Uint8List key) {
+  Future<Uint8List?> delete(int col, Uint8List key) async {
     final nativeEncodedKey = base64UrlNoPadEncode(key).toNativeUtf8();
 
     final recvPort = ReceivePort("veilid_table_db_delete");
@@ -899,7 +899,11 @@ class VeilidTableDBFFI extends VeilidTableDB {
       col,
       nativeEncodedKey,
     );
-    return processFuturePlain(recvPort.first);
+    String? out = await processFuturePlain(recvPort.first);
+    if (out == null) {
+      return null;
+    }
+    return base64UrlNoPadDecode(out);
   }
 }
 
@@ -1556,12 +1560,12 @@ class VeilidFFI implements Veilid {
   }
 
   @override
-  Future<void> appCallReply(String id, Uint8List message) {
-    final nativeId = id.toNativeUtf8();
+  Future<void> appCallReply(String call_id, Uint8List message) {
+    final nativeCallId = call_id.toNativeUtf8();
     final nativeEncodedMessage = base64UrlNoPadEncode(message).toNativeUtf8();
     final recvPort = ReceivePort("app_call_reply");
     final sendPort = recvPort.sendPort;
-    _appCallReply(sendPort.nativePort, nativeId, nativeEncodedMessage);
+    _appCallReply(sendPort.nativePort, nativeCallId, nativeEncodedMessage);
     return processFutureVoid(recvPort.first);
   }
 
@@ -368,7 +368,7 @@ class VeilidTableDBTransactionJS extends VeilidTableDBTransaction {
   }
 
   @override
-  Future<bool> delete(int col, Uint8List key) {
+  Future<void> delete(int col, Uint8List key) {
     final encodedKey = base64UrlNoPadEncode(key);
 
     return _wrapApiPromise(js_util.callMethod(
@@ -580,10 +580,10 @@ class VeilidJS implements Veilid {
   }
 
   @override
-  Future<void> appCallReply(String id, Uint8List message) {
+  Future<void> appCallReply(String callId, Uint8List message) {
     var encodedMessage = base64UrlNoPadEncode(message);
     return _wrapApiPromise(
-        js_util.callMethod(wasm, "app_call_reply", [id, encodedMessage]));
+        js_util.callMethod(wasm, "app_call_reply", [callId, encodedMessage]));
   }
 
   @override
@@ -262,7 +262,9 @@ abstract class VeilidUpdate {
     case "AppCall":
       {
         return VeilidAppCall(
-            sender: json["sender"], message: json["message"], id: json["id"]);
+            sender: json["sender"],
+            message: json["message"],
+            callId: json["call_id"]);
       }
     case "Attachment":
       {
@@ -348,22 +350,22 @@ class VeilidAppMessage implements VeilidUpdate {
 class VeilidAppCall implements VeilidUpdate {
   final String? sender;
   final Uint8List message;
-  final String id;
+  final String callId;
 
   //
   VeilidAppCall({
     required this.sender,
     required this.message,
-    required this.id,
+    required this.callId,
   });
 
   @override
   Map<String, dynamic> toJson() {
     return {
-      'kind': "AppMessage",
+      'kind': "AppCall",
       'sender': sender,
       'message': base64UrlNoPadEncode(message),
-      'id': id,
+      'call_id': callId,
     };
   }
 }
@@ -511,16 +513,17 @@ class VeilidStateNetwork {
 /// VeilidStateConfig
 
 class VeilidStateConfig {
-  final Map<String, dynamic> config;
+  final VeilidConfig config;
 
   VeilidStateConfig({
     required this.config,
   });
 
-  VeilidStateConfig.fromJson(dynamic json) : config = json['config'];
+  VeilidStateConfig.fromJson(dynamic json)
+      : config = VeilidConfig.fromJson(json['config']);
 
   Map<String, dynamic> toJson() {
-    return {'config': config};
+    return {'config': config.toJson()};
   }
 }
 
@@ -8,7 +8,7 @@ abstract class VeilidTableDBTransaction {
   Future<void> commit();
   Future<void> rollback();
   Future<void> store(int col, Uint8List key, Uint8List value);
-  Future<bool> delete(int col, Uint8List key);
+  Future<void> delete(int col, Uint8List key);
 
   Future<void> storeJson(int col, Uint8List key, Object? object,
       {Object? Function(Object? nonEncodable)? toEncodable}) async {
@@ -707,21 +707,21 @@ pub extern "C" fn release_private_route(port: i64, route_id: FfiStr) {
 }
 
 #[no_mangle]
-pub extern "C" fn app_call_reply(port: i64, id: FfiStr, message: FfiStr) {
-    let id = id.into_opt_string().unwrap_or_default();
+pub extern "C" fn app_call_reply(port: i64, call_id: FfiStr, message: FfiStr) {
+    let call_id = call_id.into_opt_string().unwrap_or_default();
     let message = message.into_opt_string().unwrap_or_default();
     DartIsolateWrapper::new(port).spawn_result(async move {
-        let id = match id.parse() {
+        let call_id = match call_id.parse() {
             Ok(v) => v,
             Err(e) => {
-                return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument(e, "id", id))
+                return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument(e, "call_id", call_id))
             }
         };
         let message = data_encoding::BASE64URL_NOPAD
            .decode(message.as_bytes())
            .map_err(|e| veilid_core::VeilidAPIError::invalid_argument(e, "message", message))?;
         let veilid_api = get_veilid_api().await?;
-        veilid_api.app_call_reply(id, message).await?;
+        veilid_api.app_call_reply(call_id, message).await?;
         APIRESULT_VOID
     });
 }
@@ -908,8 +908,8 @@ pub extern "C" fn table_db_transaction_delete(port: i64, id: u32, col: u32, key:
             tdbt.clone()
         };
 
-        let out = tdbt.delete(col, &key);
-        APIResult::Ok(out)
+        tdbt.delete(col, &key);
+        APIRESULT_VOID
     });
 }
 
160  veilid-python/.gitignore  vendored  Normal file
@ -0,0 +1,160 @@
|
|||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# C extensions
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
build/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
lib/
|
||||||
|
lib64/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
wheels/
|
||||||
|
share/python-wheels/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
MANIFEST
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.nox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*.cover
|
||||||
|
*.py,cover
|
||||||
|
.hypothesis/
|
||||||
|
.pytest_cache/
|
||||||
|
cover/
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
db.sqlite3
|
||||||
|
db.sqlite3-journal
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
.pybuilder/
|
||||||
|
target/
|
||||||
|
|
||||||
|
# Jupyter Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# IPython
|
||||||
|
profile_default/
|
||||||
|
ipython_config.py
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
# For a library or package, you might want to ignore these files since the code is
|
||||||
|
# intended to run in multiple environments; otherwise, check them in:
|
||||||
|
# .python-version
|
||||||
|
|
||||||
|
# pipenv
|
||||||
|
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||||
|
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||||
|
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||||
|
# install all needed dependencies.
|
||||||
|
#Pipfile.lock
|
||||||
|
|
||||||
|
# poetry
|
||||||
|
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||||
|
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||||
|
# commonly ignored for libraries.
|
||||||
|
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||||
|
#poetry.lock
|
||||||
|
|
||||||
|
# pdm
|
||||||
|
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||||
|
#pdm.lock
|
||||||
|
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||||
|
# in version control.
|
||||||
|
# https://pdm.fming.dev/#use-with-ide
|
||||||
|
.pdm.toml
|
||||||
|
|
||||||
|
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||||
|
__pypackages__/
|
||||||
|
|
||||||
|
# Celery stuff
|
||||||
|
celerybeat-schedule
|
||||||
|
celerybeat.pid
|
||||||
|
|
||||||
|
# SageMath parsed files
|
||||||
|
*.sage.py
|
||||||
|
|
||||||
|
# Environments
|
||||||
|
.env
|
||||||
|
.venv
|
||||||
|
env/
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
env.bak/
|
||||||
|
venv.bak/
|
||||||
|
|
||||||
|
# Spyder project settings
|
||||||
|
.spyderproject
|
||||||
|
.spyproject
|
||||||
|
|
||||||
|
# Rope project settings
|
||||||
|
.ropeproject
|
||||||
|
|
||||||
|
# mkdocs documentation
|
||||||
|
/site
|
||||||
|
|
||||||
|
# mypy
|
||||||
|
.mypy_cache/
|
||||||
|
.dmypy.json
|
||||||
|
dmypy.json
|
||||||
|
|
||||||
|
# Pyre type checker
|
||||||
|
.pyre/
|
||||||
|
|
||||||
|
# pytype static type analyzer
|
||||||
|
.pytype/
|
||||||
|
|
||||||
|
# Cython debug symbols
|
||||||
|
cython_debug/
|
||||||
|
|
||||||
|
# PyCharm
|
||||||
|
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||||
|
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||||
|
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||||
|
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||||
|
#.idea/
|
25  veilid-python/README.md  Normal file
@@ -0,0 +1,25 @@
+# Veilid Bindings for Python
+
+## Usage
+
+To use:
+```
+poetry add veilid_python
+```
+or
+```
+pip3 install veilid_python
+```
+
+
+## Development
+
+To run tests:
+```
+poetry run pytest
+```
+
+To update schema for validation with the latest copy from a running `veilid-server`:
+```
+./update_schema.sh
+```
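For orientation, here is a minimal sketch of what driving the new JSON API from Python looks like, pieced together from the `json_api_connect` helper and the tests added later in this merge request; the `localhost`/`5959` connection values are assumptions for a locally running `veilid-server`, matching the fallbacks the test helpers use:

```
import asyncio

import veilid


async def update_callback(update: veilid.VeilidUpdate):
    # Every VeilidUpdate (AppMessage, AppCall, Attachment, ...) arrives here.
    print("VeilidUpdate: {}".format(update))


async def main():
    # Connect to the JSON API of a running veilid-server (assumed host/port).
    api = await veilid.json_api_connect("localhost", 5959, update_callback)
    async with api:
        # Ask the node for its current state over the JSON API.
        state = await api.get_state()
        print(state.config)


if __name__ == "__main__":
    asyncio.run(main())
```

The same pattern (connect, work inside `async with`, react to node events through the update callback) is what the `simple_connect_and_run` test helper below wraps.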
165  veilid-python/poetry.lock  generated  Normal file
@ -0,0 +1,165 @@
|
|||||||
|
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "attrs"
|
||||||
|
version = "23.1.0"
|
||||||
|
description = "Classes Without Boilerplate"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
|
||||||
|
{file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
|
||||||
|
dev = ["attrs[docs,tests]", "pre-commit"]
|
||||||
|
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
|
||||||
|
tests = ["attrs[tests-no-zope]", "zope-interface"]
|
||||||
|
tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "colorama"
|
||||||
|
version = "0.4.6"
|
||||||
|
description = "Cross-platform colored terminal text."
|
||||||
|
optional = false
|
||||||
|
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
|
||||||
|
files = [
|
||||||
|
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
|
||||||
|
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "iniconfig"
|
||||||
|
version = "2.0.0"
|
||||||
|
description = "brain-dead simple config-ini parsing"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
|
||||||
|
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "jsonschema"
|
||||||
|
version = "4.17.3"
|
||||||
|
description = "An implementation of JSON Schema validation for Python"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
|
||||||
|
{file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
attrs = ">=17.4.0"
|
||||||
|
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
|
||||||
|
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "packaging"
|
||||||
|
version = "23.1"
|
||||||
|
description = "Core utilities for Python packages"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
|
||||||
|
{file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pluggy"
|
||||||
|
version = "1.0.0"
|
||||||
|
description = "plugin and hook calling mechanisms for python"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
files = [
|
||||||
|
{file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
|
||||||
|
{file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
dev = ["pre-commit", "tox"]
|
||||||
|
testing = ["pytest", "pytest-benchmark"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyrsistent"
|
||||||
|
version = "0.19.3"
|
||||||
|
description = "Persistent/Functional/Immutable data structures"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"},
|
||||||
|
{file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"},
|
||||||
|
{file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"},
|
||||||
|
{file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pytest"
|
||||||
|
version = "7.3.2"
|
||||||
|
description = "pytest: simple powerful testing with Python"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"},
|
||||||
|
{file = "pytest-7.3.2.tar.gz", hash = "sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||||
|
iniconfig = "*"
|
||||||
|
packaging = "*"
|
||||||
|
pluggy = ">=0.12,<2.0"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pytest-asyncio"
|
||||||
|
version = "0.21.0"
|
||||||
|
description = "Pytest support for asyncio"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.7"
|
||||||
|
files = [
|
||||||
|
{file = "pytest-asyncio-0.21.0.tar.gz", hash = "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b"},
|
||||||
|
{file = "pytest_asyncio-0.21.0-py3-none-any.whl", hash = "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"},
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
pytest = ">=7.0.0"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
|
||||||
|
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
|
||||||
|
|
||||||
|
[metadata]
|
||||||
|
lock-version = "2.0"
|
||||||
|
python-versions = "^3.11"
|
||||||
|
content-hash = "03a349f63b3d28e64191b6dd845333914827806332b52f5c52ccbd2863c93b4b"
|
19
veilid-python/pyproject.toml
Normal file
19
veilid-python/pyproject.toml
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
[tool.poetry]
|
||||||
|
name = "veilid-python"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = ""
|
||||||
|
authors = ["Christien Rioux <chris@veilid.org>"]
|
||||||
|
readme = "README.md"
|
||||||
|
packages = [{include = "veilid"}]
|
||||||
|
|
||||||
|
[tool.poetry.dependencies]
|
||||||
|
python = "^3.11"
|
||||||
|
jsonschema = "^4.17.3"
|
||||||
|
|
||||||
|
[tool.poetry.group.dev.dependencies]
|
||||||
|
pytest = "^7.3.2"
|
||||||
|
pytest-asyncio = "^0.21.0"
|
||||||
|
|
||||||
|
[build-system]
|
||||||
|
requires = ["poetry-core"]
|
||||||
|
build-backend = "poetry.core.masonry.api"
|
34
veilid-python/tests/__init__.py
Normal file
34
veilid-python/tests/__init__.py
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
from typing import Callable, Awaitable
|
||||||
|
import os
|
||||||
|
import pytest
|
||||||
|
pytest_plugins = ('pytest_asyncio',)
|
||||||
|
|
||||||
|
import veilid
|
||||||
|
|
||||||
|
|
||||||
|
##################################################################
|
||||||
|
VEILID_SERVER = os.getenv("VEILID_SERVER")
|
||||||
|
if VEILID_SERVER is not None:
|
||||||
|
vsparts = VEILID_SERVER.split(":")
|
||||||
|
VEILID_SERVER = vsparts[0]
|
||||||
|
if len(vsparts) == 2:
|
||||||
|
VEILID_SERVER_PORT = int(vsparts[1])
|
||||||
|
else:
|
||||||
|
VEILID_SERVER_PORT = 5959
|
||||||
|
else:
|
||||||
|
VEILID_SERVER = "localhost"
|
||||||
|
VEILID_SERVER_PORT = 5959
|
||||||
|
|
||||||
|
##################################################################
|
||||||
|
|
||||||
|
async def simple_connect_and_run(func: Callable[[veilid.VeilidAPI], Awaitable]):
|
||||||
|
api = await veilid.json_api_connect(VEILID_SERVER, VEILID_SERVER_PORT, simple_update_callback)
|
||||||
|
async with api:
|
||||||
|
|
||||||
|
# purge routes to ensure we start fresh
|
||||||
|
await api.debug("purge routes")
|
||||||
|
|
||||||
|
await func(api)
|
||||||
|
|
||||||
|
async def simple_update_callback(update: veilid.VeilidUpdate):
|
||||||
|
print("VeilidUpdate: {}".format(update))
|
39
veilid-python/tests/test_basic.py
Normal file
39
veilid-python/tests/test_basic.py
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
# Basic veilid tests
|
||||||
|
|
||||||
|
import veilid
|
||||||
|
import pytest
|
||||||
|
from . import *
|
||||||
|
|
||||||
|
##################################################################
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_connect():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
pass
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_get_node_id():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
# get our own node id
|
||||||
|
state = await api.get_state()
|
||||||
|
node_id = state.config.config.network.routing_table.node_id.pop()
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_fail_connect():
|
||||||
|
with pytest.raises(Exception):
|
||||||
|
api = await veilid.json_api_connect("fuahwelifuh32luhwafluehawea", 1, simple_update_callback)
|
||||||
|
async with api:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_version():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
v = await api.veilid_version()
|
||||||
|
print("veilid_version: {}".format(v.__dict__))
|
||||||
|
vstr = await api.veilid_version_string()
|
||||||
|
print("veilid_version_string: {}".format(vstr))
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
42
veilid-python/tests/test_crypto.py
Normal file
42
veilid-python/tests/test_crypto.py
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
# Crypto veilid tests
|
||||||
|
|
||||||
|
import veilid
|
||||||
|
import pytest
|
||||||
|
from . import *
|
||||||
|
|
||||||
|
##################################################################
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_best_crypto_system():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
bcs = await api.best_crypto_system()
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_get_crypto_system():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
cs = await api.get_crypto_system(veilid.CryptoKind.CRYPTO_KIND_VLD0)
|
||||||
|
# clean up handle early
|
||||||
|
del cs
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_get_crypto_system_invalid():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
with pytest.raises(veilid.VeilidAPIError):
|
||||||
|
cs = await api.get_crypto_system(veilid.CryptoKind.CRYPTO_KIND_NONE)
|
||||||
|
await simple_connect_and_run(func)
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_hash_and_verify_password():
|
||||||
|
async def func(api: veilid.VeilidAPI):
|
||||||
|
bcs = await api.best_crypto_system()
|
||||||
|
nonce = await bcs.random_nonce()
|
||||||
|
salt = nonce.to_bytes()
|
||||||
|
# Password match
|
||||||
|
phash = await bcs.hash_password(b"abc123", salt)
|
||||||
|
assert await bcs.verify_password(b"abc123", phash)
|
||||||
|
# Password mismatch
|
||||||
|
phash2 = await bcs.hash_password(b"abc1234", salt)
|
||||||
|
assert not await bcs.verify_password(b"abc12345", phash)
|
||||||
|
await simple_connect_and_run(func)
|
93
veilid-python/tests/test_routing_context.py
Normal file
@ -0,0 +1,93 @@
# Routing context veilid tests

import veilid
import pytest
import asyncio
import json
from . import *

##################################################################


@pytest.mark.asyncio
async def test_routing_contexts():
    async def func(api: veilid.VeilidAPI):
        rc = await api.new_routing_context()
        rcp = await rc.with_privacy()
        rcps = await rcp.with_sequencing(veilid.Sequencing.ENSURE_ORDERED)
        rcpsr = await rcps.with_custom_privacy(veilid.Stability.RELIABLE)

    await simple_connect_and_run(func)


@pytest.mark.asyncio
async def test_routing_context_app_message_loopback():

    app_message_queue = asyncio.Queue()

    async def app_message_queue_update_callback(update: veilid.VeilidUpdate):
        if update.kind == veilid.VeilidUpdateKind.APP_MESSAGE:
            await app_message_queue.put(update)

    api = await veilid.json_api_connect(VEILID_SERVER, VEILID_SERVER_PORT, app_message_queue_update_callback)
    async with api:

        # purge routes to ensure we start fresh
        await api.debug("purge routes")

        # make a routing context that uses a safety route
        rc = await (await api.new_routing_context()).with_privacy()

        # make a new local private route
        prl, blob = await api.new_private_route()

        # import it as a remote route as well so we can send to it
        prr = await api.import_remote_private_route(blob)

        # send an app message to our own private route
        message = b"abcd1234"
        await rc.app_message(prr, message)

        # we should get the same message back
        update: veilid.VeilidUpdate = await asyncio.wait_for(app_message_queue.get(), timeout=10)
        appmsg: veilid.VeilidAppMessage = update.detail
        assert appmsg.message == message


@pytest.mark.asyncio
async def test_routing_context_app_call_loopback():

    app_call_queue = asyncio.Queue()

    async def app_call_queue_update_callback(update: veilid.VeilidUpdate):
        if update.kind == veilid.VeilidUpdateKind.APP_CALL:
            await app_call_queue.put(update)

    api = await veilid.json_api_connect(VEILID_SERVER, VEILID_SERVER_PORT, app_call_queue_update_callback)
    async with api:

        # purge routes to ensure we start fresh
        await api.debug("purge routes")

        # make a routing context that uses a safety route
        rc = await (await api.new_routing_context()).with_privacy()

        # make a new local private route
        prl, blob = await api.new_private_route()

        # import it as a remote route as well so we can send to it
        prr = await api.import_remote_private_route(blob)

        # send an app call to our own private route
        request = b"abcd1234"
        app_call_task = asyncio.create_task(rc.app_call(prr, request), name="app call task")

        # we should get the same request back
        update: veilid.VeilidUpdate = await asyncio.wait_for(app_call_queue.get(), timeout=10)
        appcall: veilid.VeilidAppCall = update.detail
        assert appcall.message == request

        # now we reply to the request
        reply = b"qwer5678"
        await api.app_call_reply(appcall.call_id, reply)

        # now we should get the reply from the call
        result = await app_call_task
        assert result == reply
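Outside of the loopback tests, an application answering app_call requests would typically drain the update queue and respond with api.app_call_reply. A minimal responder sketch using only the calls exercised above (the echo logic is an illustration, not part of this diff):

# Hypothetical responder loop for incoming app calls.
async def serve_app_calls(api: veilid.VeilidAPI, app_call_queue: asyncio.Queue):
    while True:
        update: veilid.VeilidUpdate = await app_call_queue.get()
        appcall: veilid.VeilidAppCall = update.detail
        # Build a reply for the incoming request bytes, then answer by call id.
        reply = b"echo:" + appcall.message
        await api.app_call_reply(appcall.call_id, reply)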
17
veilid-python/update_schema.sh
Executable file
@ -0,0 +1,17 @@
#!/bin/bash
set -eo pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

VEILID_SERVER=$SCRIPTDIR/../target/debug/veilid-server

# Ensure executable exists
if [ ! -f "$VEILID_SERVER" ]; then
    echo "$VEILID_SERVER does not exist. Build with cargo build."
    exit 1
fi

# Produce schema from veilid-server
$VEILID_SERVER --emit-schema Request > $SCRIPTDIR/veilid/schema/Request.json
$VEILID_SERVER --emit-schema RecvMessage > $SCRIPTDIR/veilid/schema/RecvMessage.json
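Typical usage, per the paths the script itself checks: build a debug veilid-server first (e.g. cargo build), then run veilid-python/update_schema.sh; it regenerates veilid-python/veilid/schema/Request.json and RecvMessage.json from the server's --emit-schema output.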
6
veilid-python/veilid/__init__.py
Normal file
@ -0,0 +1,6 @@
from .api import *
from .config import *
from .error import *
from .json_api import *
from .error import *
from .types import *
211
veilid-python/veilid/api.py
Normal file
@ -0,0 +1,211 @@
from abc import ABC, abstractmethod
from typing import Optional, Self, Tuple

from .state import *
from .config import *
from .error import *
from .types import *


class RoutingContext(ABC):
    @abstractmethod
    async def with_privacy(self) -> Self:
        pass
    @abstractmethod
    async def with_custom_privacy(self, stability: Stability) -> Self:
        pass
    @abstractmethod
    async def with_sequencing(self, sequencing: Sequencing) -> Self:
        pass
    @abstractmethod
    async def app_call(self, target: TypedKey | RouteId, request: bytes) -> bytes:
        pass
    @abstractmethod
    async def app_message(self, target: TypedKey | RouteId, message: bytes):
        pass
    @abstractmethod
    async def create_dht_record(self, kind: CryptoKind, schema: DHTSchema) -> DHTRecordDescriptor:
        pass
    @abstractmethod
    async def open_dht_record(self, key: TypedKey, writer: Optional[KeyPair]) -> DHTRecordDescriptor:
        pass
    @abstractmethod
    async def close_dht_record(self, key: TypedKey):
        pass
    @abstractmethod
    async def delete_dht_record(self, key: TypedKey):
        pass
    @abstractmethod
    async def get_dht_value(self, key: TypedKey, subkey: ValueSubkey, force_refresh: bool) -> Optional[ValueData]:
        pass
    @abstractmethod
    async def set_dht_value(self, key: TypedKey, subkey: ValueSubkey, data: bytes) -> Optional[ValueData]:
        pass
    @abstractmethod
    async def watch_dht_values(self, key: TypedKey, subkeys: list[(ValueSubkey, ValueSubkey)], expiration: Timestamp, count: int) -> Timestamp:
        pass
    @abstractmethod
    async def cancel_dht_watch(self, key: TypedKey, subkeys: list[(ValueSubkey, ValueSubkey)]) -> bool:
        pass


class TableDbTransaction(ABC):
    @abstractmethod
    async def commit(self):
        pass
    @abstractmethod
    async def rollback(self):
        pass
    @abstractmethod
    async def store(self, col: int, key: bytes, value: bytes):
        pass
    @abstractmethod
    async def delete(self, col: int, key: bytes):
        pass

class TableDb(ABC):
    @abstractmethod
    async def get_column_count(self) -> int:
        pass
    @abstractmethod
    async def get_keys(self, col: int) -> list[bytes]:
        pass
    @abstractmethod
    async def transact(self) -> TableDbTransaction:
        pass
    @abstractmethod
    async def store(self, col: int, key: bytes, value: bytes):
        pass
    @abstractmethod
    async def load(self, col: int, key: bytes) -> Optional[bytes]:
        pass
    @abstractmethod
    async def delete(self, col: int, key: bytes) -> Optional[bytes]:
        pass

class CryptoSystem(ABC):
    @abstractmethod
    async def cached_dh(self, key: PublicKey, secret: SecretKey) -> SharedSecret:
        pass
    @abstractmethod
    async def compute_dh(self, key: PublicKey, secret: SecretKey) -> SharedSecret:
        pass
    @abstractmethod
    async def random_bytes(self, len: int) -> bytes:
        pass
    @abstractmethod
    async def default_salt_length(self) -> int:
        pass
    @abstractmethod
    async def hash_password(self, password: bytes, salt: bytes) -> str:
        pass
    @abstractmethod
    async def verify_password(self, password: bytes, password_hash: str) -> bool:
        pass
    @abstractmethod
    async def derive_shared_secret(self, password: bytes, salt: bytes) -> SharedSecret:
        pass
    @abstractmethod
    async def random_nonce(self) -> Nonce:
        pass
    @abstractmethod
    async def random_shared_secret(self) -> SharedSecret:
        pass
    @abstractmethod
    async def generate_key_pair(self) -> KeyPair:
        pass
    @abstractmethod
    async def generate_hash(self, data: bytes) -> HashDigest:
        pass
    @abstractmethod
    async def validate_key_pair(self, key: PublicKey, secret: SecretKey) -> bool:
        pass
    @abstractmethod
    async def validate_hash(self, data: bytes, hash_digest: HashDigest) -> bool:
        pass
    @abstractmethod
    async def distance(self, key1: CryptoKey, key2: CryptoKey) -> CryptoKeyDistance:
        pass
    @abstractmethod
    async def sign(self, key: PublicKey, secret: SecretKey, data: bytes) -> Signature:
        pass
    @abstractmethod
    async def verify(self, key: PublicKey, data: bytes, signature: Signature):
        pass
    @abstractmethod
    async def aead_overhead(self) -> int:
        pass
    @abstractmethod
    async def decrypt_aead(self, body: bytes, nonce: Nonce, shared_secret: SharedSecret, associated_data: Optional[bytes]) -> bytes:
        pass
    @abstractmethod
    async def encrypt_aead(self, body: bytes, nonce: Nonce, shared_secret: SharedSecret, associated_data: Optional[bytes]) -> bytes:
        pass
    @abstractmethod
    async def crypt_no_auth(self, body: bytes, nonce: Nonce, shared_secret: SharedSecret) -> bytes:
        pass


class VeilidAPI(ABC):
    @abstractmethod
    async def control(self, args: list[str]) -> str:
        pass
    @abstractmethod
    async def get_state(self) -> VeilidState:
        pass
    @abstractmethod
    async def attach(self):
        pass
    @abstractmethod
    async def detach(self):
        pass
    @abstractmethod
    async def new_private_route(self) -> Tuple[RouteId, bytes]:
        pass
    @abstractmethod
    async def new_custom_private_route(self, kinds: list[CryptoKind], stability: Stability, sequencing: Sequencing) -> Tuple[RouteId, bytes]:
        pass
    @abstractmethod
    async def import_remote_private_route(self, blob: bytes) -> RouteId:
        pass
    @abstractmethod
    async def release_private_route(self, route_id: RouteId):
        pass
    @abstractmethod
    async def app_call_reply(self, call_id: OperationId, message: bytes):
        pass
    @abstractmethod
    async def new_routing_context(self) -> RoutingContext:
        pass
    @abstractmethod
    async def open_table_db(self, name: str, column_count: int) -> TableDb:
        pass
    @abstractmethod
    async def delete_table_db(self, name: str):
        pass
    @abstractmethod
    async def get_crypto_system(self, kind: CryptoKind) -> CryptoSystem:
        pass
    @abstractmethod
    async def best_crypto_system(self) -> CryptoSystem:
        pass
    @abstractmethod
    async def verify_signatures(self, node_ids: list[TypedKey], data: bytes, signatures: list[TypedSignature]) -> list[TypedKey]:
        pass
    @abstractmethod
    async def generate_signatures(self, data: bytes, key_pairs: list[TypedKeyPair]) -> list[TypedSignature]:
        pass
    @abstractmethod
    async def generate_key_pair(self, kind: CryptoKind) -> list[TypedKeyPair]:
        pass
    @abstractmethod
    async def now(self) -> Timestamp:
        pass
    @abstractmethod
    async def debug(self, command: str) -> str:
        pass
    @abstractmethod
    async def veilid_version_string(self) -> str:
        pass
    @abstractmethod
    async def veilid_version(self) -> VeilidVersion:
        pass
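These abstract classes define the client surface; the concrete implementation is presumably the JSON API client re-exported from json_api (see __init__.py above). A short caller-side sketch using only methods declared here, to show how the pieces relate (the function name and print call are illustrative, not part of this diff):

# Hypothetical caller sketch against the abstract surface above.
import veilid

async def example(api: veilid.VeilidAPI):
    # Node-level calls live on VeilidAPI...
    print(await api.veilid_version_string())
    # ...messaging goes through a RoutingContext, here with a safety route...
    rc = await (await api.new_routing_context()).with_privacy()
    # ...and per-kind cryptography through a CryptoSystem.
    bcs = await api.best_crypto_system()
    keypair = await bcs.generate_key_pair()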
553
veilid-python/veilid/config.py
Normal file
@ -0,0 +1,553 @@
from typing import Self, Optional
from enum import StrEnum
from json import dumps

from .types import *

class VeilidConfigLogLevel(StrEnum):
    OFF = 'Off'
    ERROR = 'Error'
    WARN = 'Warn'
    INFO = 'Info'
    DEBUG = 'Debug'
    TRACE = 'Trace'

class VeilidConfigCapabilities:
    protocol_udp: bool
    protocol_connect_tcp: bool
    protocol_accept_tcp: bool
    protocol_connect_ws: bool
    protocol_accept_ws: bool
    protocol_connect_wss: bool
    protocol_accept_wss: bool

    def __init__(self, protocol_udp: bool, protocol_connect_tcp: bool, protocol_accept_tcp: bool,
                 protocol_connect_ws: bool, protocol_accept_ws: bool, protocol_connect_wss: bool, protocol_accept_wss: bool):
        self.protocol_udp = protocol_udp
        self.protocol_connect_tcp = protocol_connect_tcp
        self.protocol_accept_tcp = protocol_accept_tcp
        self.protocol_connect_ws = protocol_connect_ws
        self.protocol_accept_ws = protocol_accept_ws
        self.protocol_connect_wss = protocol_connect_wss
        self.protocol_accept_wss = protocol_accept_wss

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigCapabilities(j['protocol_udp'],
            j['protocol_connect_tcp'],
            j['protocol_accept_tcp'],
            j['protocol_connect_ws'],
            j['protocol_accept_ws'],
            j['protocol_connect_wss'],
            j['protocol_accept_wss'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigProtectedStore:
    allow_insecure_fallback: bool
    always_use_insecure_storage: bool
    directory: str
    delete: bool
    device_encryption_key_password: str
    new_device_encryption_key_password: Optional[str]

    def __init__(self, allow_insecure_fallback: bool, always_use_insecure_storage: bool,
                 directory: str, delete: bool, device_encryption_key_password: str, new_device_encryption_key_password: Optional[str]):
        self.allow_insecure_fallback = allow_insecure_fallback
        self.always_use_insecure_storage = always_use_insecure_storage
        self.directory = directory
        self.delete = delete
        self.device_encryption_key_password = device_encryption_key_password
        self.new_device_encryption_key_password = new_device_encryption_key_password

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigProtectedStore(j['allow_insecure_fallback'], j['always_use_insecure_storage'],
            j['directory'], j['delete'], j['device_encryption_key_password'], j['new_device_encryption_key_password'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigTableStore:
    directory: str
    delete: bool

    def __init__(self, directory: str, delete: bool):
        self.directory = directory
        self.delete = delete

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigTableStore(j['directory'], j['delete'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigBlockStore:
    directory: str
    delete: bool

    def __init__(self, directory: str, delete: bool):
        self.directory = directory
        self.delete = delete

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigBlockStore(j['directory'], j['delete'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigRoutingTable:
    node_id: list[TypedKey]
    node_id_secret: list[TypedSecret]
    bootstrap: list[str]
    limit_over_attached: int
    limit_fully_attached: int
    limit_attached_strong: int
    limit_attached_good: int
    limit_attached_weak: int

    def __init__(self, node_id: list[TypedKey], node_id_secret: list[TypedSecret], bootstrap: list[str], limit_over_attached: int,
                 limit_fully_attached: int, limit_attached_strong: int, limit_attached_good: int, limit_attached_weak: int):
        self.node_id = node_id
        self.node_id_secret = node_id_secret
        self.bootstrap = bootstrap
        self.limit_over_attached = limit_over_attached
        self.limit_fully_attached = limit_fully_attached
        self.limit_attached_strong = limit_attached_strong
        self.limit_attached_good = limit_attached_good
        self.limit_attached_weak = limit_attached_weak

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigRoutingTable(
            list(map(lambda x: TypedKey(x), j['node_id'])),
            list(map(lambda x: TypedSecret(x), j['node_id_secret'])),
            j['bootstrap'],
            j['limit_over_attached'],
            j['limit_fully_attached'],
            j['limit_attached_strong'],
            j['limit_attached_good'],
            j['limit_attached_weak'])
    def to_json(self) -> dict:
        return self.__dict__


class VeilidConfigRPC:
    concurrency: int
    queue_size: int
    max_timestamp_behind_ms: Optional[int]
    max_timestamp_ahead_ms: Optional[int]
    timeout_ms: int
    max_route_hop_count: int
    default_route_hop_count: int

    def __init__(self, concurrency: int, queue_size: int, max_timestamp_behind_ms: Optional[int], max_timestamp_ahead_ms: Optional[int],
                 timeout_ms: int, max_route_hop_count: int, default_route_hop_count: int):
        self.concurrency = concurrency
        self.queue_size = queue_size
        self.max_timestamp_behind_ms = max_timestamp_behind_ms
        self.max_timestamp_ahead_ms = max_timestamp_ahead_ms
        self.timeout_ms = timeout_ms
        self.max_route_hop_count = max_route_hop_count
        self.default_route_hop_count = default_route_hop_count

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigRPC(
            j['concurrency'],
            j['queue_size'],
            j['max_timestamp_behind_ms'],
            j['max_timestamp_ahead_ms'],
            j['timeout_ms'],
            j['max_route_hop_count'],
            j['default_route_hop_count'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigDHT:
    max_find_node_count: int
    resolve_node_timeout_ms: int
    resolve_node_count: int
    resolve_node_fanout: int
    get_value_timeout_ms: int
    get_value_count: int
    get_value_fanout: int
    set_value_timeout_ms: int
    set_value_count: int
    set_value_fanout: int
    min_peer_count: int
    min_peer_refresh_time_ms: int
    validate_dial_info_receipt_time_ms: int
    local_subkey_cache_size: int
    local_max_subkey_cache_memory_mb: int
    remote_subkey_cache_size: int
    remote_max_records: int
    remote_max_subkey_cache_memory_mb: int
    remote_max_storage_space_mb: int

    def __init__(self, max_find_node_count: int, resolve_node_timeout_ms: int, resolve_node_count: int,
                 resolve_node_fanout: int, get_value_timeout_ms: int, get_value_count: int, get_value_fanout: int,
                 set_value_timeout_ms: int, set_value_count: int, set_value_fanout: int,
                 min_peer_count: int, min_peer_refresh_time_ms: int, validate_dial_info_receipt_time_ms: int,
                 local_subkey_cache_size: int, local_max_subkey_cache_memory_mb: int,
                 remote_subkey_cache_size: int, remote_max_records: int, remote_max_subkey_cache_memory_mb: int, remote_max_storage_space_mb: int):
        self.max_find_node_count = max_find_node_count
        self.resolve_node_timeout_ms = resolve_node_timeout_ms
        self.resolve_node_count = resolve_node_count
        self.resolve_node_fanout = resolve_node_fanout
        self.get_value_timeout_ms = get_value_timeout_ms
        self.get_value_count = get_value_count
        self.get_value_fanout = get_value_fanout
        self.set_value_timeout_ms = set_value_timeout_ms
        self.set_value_count = set_value_count
        self.set_value_fanout = set_value_fanout
        self.min_peer_count = min_peer_count
        self.min_peer_refresh_time_ms = min_peer_refresh_time_ms
        self.validate_dial_info_receipt_time_ms = validate_dial_info_receipt_time_ms
        self.local_subkey_cache_size = local_subkey_cache_size
        self.local_max_subkey_cache_memory_mb = local_max_subkey_cache_memory_mb
        self.remote_subkey_cache_size = remote_subkey_cache_size
        self.remote_max_records = remote_max_records
        self.remote_max_subkey_cache_memory_mb = remote_max_subkey_cache_memory_mb
        self.remote_max_storage_space_mb = remote_max_storage_space_mb

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigDHT(
            j['max_find_node_count'],
            j['resolve_node_timeout_ms'],
            j['resolve_node_count'],
            j['resolve_node_fanout'],
            j['get_value_timeout_ms'],
            j['get_value_count'],
            j['get_value_fanout'],
            j['set_value_timeout_ms'],
            j['set_value_count'],
            j['set_value_fanout'],
            j['min_peer_count'],
            j['min_peer_refresh_time_ms'],
            j['validate_dial_info_receipt_time_ms'],
            j['local_subkey_cache_size'],
            j['local_max_subkey_cache_memory_mb'],
            j['remote_subkey_cache_size'],
            j['remote_max_records'],
            j['remote_max_subkey_cache_memory_mb'],
            j['remote_max_storage_space_mb'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigTLS:
    certificate_path: str
    private_key_path: str
    connection_initial_timeout_ms: int

    def __init__(self, certificate_path: str, private_key_path: str, connection_initial_timeout_ms: int):
        self.certificate_path = certificate_path
        self.private_key_path = private_key_path
        self.connection_initial_timeout_ms = connection_initial_timeout_ms

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigTLS(
            j['certificate_path'],
            j['private_key_path'],
            j['connection_initial_timeout_ms'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigHTTPS:
    enabled: bool
    listen_address: str
    path: str
    url: Optional[str]

    def __init__(self, enabled: bool, listen_address: str, path: str, url: Optional[str]):
        self.enabled = enabled
        self.listen_address = listen_address
        self.path = path
        self.url = url

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigHTTPS(
            j['enabled'],
            j['listen_address'],
            j['path'],
            j['url'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigHTTP:
    enabled: bool
    listen_address: str
    path: str
    url: Optional[str]

    def __init__(self, enabled: bool, listen_address: str, path: str, url: Optional[str]):
        self.enabled = enabled
        self.listen_address = listen_address
        self.path = path
        self.url = url

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigHTTP(
            j['enabled'],
            j['listen_address'],
            j['path'],
            j['url'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigApplication:
    https: VeilidConfigHTTPS
    http: VeilidConfigHTTP

    def __init__(self, https: VeilidConfigHTTPS, http: VeilidConfigHTTP):
        self.https = https
        self.http = http

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigApplication(
            VeilidConfigHTTPS.from_json(j['https']),
            VeilidConfigHTTP.from_json(j['http']))
    def to_json(self) -> dict:
        return self.__dict__


class VeilidConfigUDP:
    enabled: bool
    socket_pool_size: int
    listen_address: str
    public_address: Optional[str]

    def __init__(self, enabled: bool, socket_pool_size: int, listen_address: str, public_address: Optional[str]):
        self.enabled = enabled
        self.socket_pool_size = socket_pool_size
        self.listen_address = listen_address
        self.public_address = public_address

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigUDP(
            j['enabled'],
            j['socket_pool_size'],
            j['listen_address'],
            j['public_address'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigTCP:
    connect: bool
    listen: bool
    max_connections: int
    listen_address: str
    public_address: Optional[str]

    def __init__(self, connect: bool, listen: bool, max_connections: int, listen_address: str, public_address: Optional[str]):
        self.connect = connect
        self.listen = listen
        self.max_connections = max_connections
        self.listen_address = listen_address
        self.public_address = public_address

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigTCP(
            j['connect'],
            j['listen'],
            j['max_connections'],
            j['listen_address'],
            j['public_address'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigWS:
    connect: bool
    listen: bool
    max_connections: int
    listen_address: str
    path: str
    url: Optional[str]

    def __init__(self, connect: bool, listen: bool, max_connections: int, listen_address: str, path: str, url: Optional[str]):
        self.connect = connect
        self.listen = listen
        self.max_connections = max_connections
        self.listen_address = listen_address
        self.path = path
        self.url = url

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigWS(
            j['connect'],
            j['listen'],
            j['max_connections'],
            j['listen_address'],
            j['path'],
            j['url'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigWSS:
    connect: bool
    listen: bool
    max_connections: int
    listen_address: str
    path: str
    url: Optional[str]

    def __init__(self, connect: bool, listen: bool, max_connections: int, listen_address: str, path: str, url: Optional[str]):
        self.connect = connect
        self.listen = listen
        self.max_connections = max_connections
        self.listen_address = listen_address
        self.path = path
        self.url = url

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigWSS(
            j['connect'],
            j['listen'],
            j['max_connections'],
            j['listen_address'],
            j['path'],
            j['url'])
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfigProtocol:
    udp: VeilidConfigUDP
    tcp: VeilidConfigTCP
    ws: VeilidConfigWS
    wss: VeilidConfigWSS

    def __init__(self, udp: VeilidConfigUDP, tcp: VeilidConfigTCP, ws: VeilidConfigWS, wss: VeilidConfigWSS):
        self.udp = udp
        self.tcp = tcp
        self.ws = ws
        self.wss = wss

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigProtocol(
            VeilidConfigUDP.from_json(j['udp']),
            VeilidConfigTCP.from_json(j['tcp']),
            VeilidConfigWS.from_json(j['ws']),
            VeilidConfigWSS.from_json(j['wss']))
    def to_json(self) -> dict:
        return self.__dict__


class VeilidConfigNetwork:
    connection_initial_timeout_ms: int
    connection_inactivity_timeout_ms: int
    max_connections_per_ip4: int
    max_connections_per_ip6_prefix: int
    max_connections_per_ip6_prefix_size: int
    max_connection_frequency_per_min: int
    client_whitelist_timeout_ms: int
    reverse_connection_receipt_time_ms: int
    hole_punch_receipt_time_ms: int
    routing_table: VeilidConfigRoutingTable
    rpc: VeilidConfigRPC
    dht: VeilidConfigDHT
    upnp: bool
    detect_address_changes: bool
    restricted_nat_retries: int
    tls: VeilidConfigTLS
    application: VeilidConfigApplication
    protocol: VeilidConfigProtocol

    def __init__(self, connection_initial_timeout_ms: int, connection_inactivity_timeout_ms: int,
                 max_connections_per_ip4: int, max_connections_per_ip6_prefix: int,
                 max_connections_per_ip6_prefix_size: int, max_connection_frequency_per_min: int,
                 client_whitelist_timeout_ms: int, reverse_connection_receipt_time_ms: int,
                 hole_punch_receipt_time_ms: int, routing_table: VeilidConfigRoutingTable,
                 rpc: VeilidConfigRPC, dht: VeilidConfigDHT, upnp: bool, detect_address_changes: bool,
                 restricted_nat_retries: int, tls: VeilidConfigTLS, application: VeilidConfigApplication, protocol: VeilidConfigProtocol):
        self.connection_initial_timeout_ms = connection_initial_timeout_ms
        self.connection_inactivity_timeout_ms = connection_inactivity_timeout_ms
        self.max_connections_per_ip4 = max_connections_per_ip4
        self.max_connections_per_ip6_prefix = max_connections_per_ip6_prefix
        self.max_connections_per_ip6_prefix_size = max_connections_per_ip6_prefix_size
        self.max_connection_frequency_per_min = max_connection_frequency_per_min
        self.client_whitelist_timeout_ms = client_whitelist_timeout_ms
        self.reverse_connection_receipt_time_ms = reverse_connection_receipt_time_ms
        self.hole_punch_receipt_time_ms = hole_punch_receipt_time_ms
        self.routing_table = routing_table
        self.rpc = rpc
        self.dht = dht
        self.upnp = upnp
        self.detect_address_changes = detect_address_changes
        self.restricted_nat_retries = restricted_nat_retries
        self.tls = tls
        self.application = application
        self.protocol = protocol

    @staticmethod
    def from_json(j: dict) -> Self:
        return VeilidConfigNetwork(
            j['connection_initial_timeout_ms'],
            j['connection_inactivity_timeout_ms'],
            j['max_connections_per_ip4'],
            j['max_connections_per_ip6_prefix'],
            j['max_connections_per_ip6_prefix_size'],
            j['max_connection_frequency_per_min'],
            j['client_whitelist_timeout_ms'],
            j['reverse_connection_receipt_time_ms'],
            j['hole_punch_receipt_time_ms'],
            VeilidConfigRoutingTable.from_json(j['routing_table']),
            VeilidConfigRPC.from_json(j['rpc']),
            VeilidConfigDHT.from_json(j['dht']),
            j['upnp'],
            j['detect_address_changes'],
            j['restricted_nat_retries'],
            VeilidConfigTLS.from_json(j['tls']),
            VeilidConfigApplication.from_json(j['application']),
            VeilidConfigProtocol.from_json(j['protocol']))
    def to_json(self) -> dict:
        return self.__dict__

class VeilidConfig:
    program_name: str
    namespace: str
    capabilities: VeilidConfigCapabilities
    protected_store: VeilidConfigProtectedStore
    table_store: VeilidConfigTableStore
    block_store: VeilidConfigBlockStore
    network: VeilidConfigNetwork

    def __init__(self, program_name: str, namespace: str, capabilities: VeilidConfigCapabilities,
                 protected_store: VeilidConfigProtectedStore, table_store: VeilidConfigTableStore,
                 block_store: VeilidConfigBlockStore, network: VeilidConfigNetwork):
        self.program_name = program_name
        self.namespace = namespace
        self.capabilities = capabilities
        self.protected_store = protected_store
        self.table_store = table_store
        self.block_store = block_store
        self.network = network

    @staticmethod
    def from_json(j: dict) -> Self:
        '''JSON object hook'''
        return VeilidConfig(j['program_name'], j['namespace'],
            VeilidConfigCapabilities.from_json(j['capabilities']),
            VeilidConfigProtectedStore.from_json(j['protected_store']),
            VeilidConfigTableStore.from_json(j['table_store']),
            VeilidConfigBlockStore.from_json(j['block_store']),
            VeilidConfigNetwork.from_json(j['network']))
    def to_json(self) -> dict:
        return self.__dict__
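Each config class converts to and from the plain-dict shape used on the JSON API. A small round-trip sketch for one leaf section, built only from the classes above (the field values are placeholders, not defaults from this diff):

# Hypothetical round trip for a single config section.
tcp_json = {"connect": True, "listen": True, "max_connections": 32,
            "listen_address": ":5150", "public_address": None}
tcp = VeilidConfigTCP.from_json(tcp_json)
assert tcp.to_json()["max_connections"] == 32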
142
veilid-python/veilid/error.py
Normal file
@ -0,0 +1,142 @@
from typing import Self, Any

class VeilidAPIError(Exception):
    """Veilid API error exception base class"""
    pass
    @staticmethod
    def from_json(j: dict) -> Self:
        match j['kind']:
            case 'NotInitialized':
                return VeilidAPIErrorNotInitialized()
            case 'AlreadyInitialized':
                return VeilidAPIErrorAlreadyInitialized()
            case 'Timeout':
                return VeilidAPIErrorTimeout()
            case 'TryAgain':
                return VeilidAPIErrorTryAgain()
            case 'Shutdown':
                return VeilidAPIErrorShutdown()
            case 'InvalidTarget':
                return VeilidAPIErrorInvalidTarget()
            case 'NoConnection':
                return VeilidAPIErrorNoConnection(j['message'])
            case 'KeyNotFound':
                return VeilidAPIErrorKeyNotFound(j['key'])
            case 'Internal':
                return VeilidAPIErrorInternal(j['message'])
            case 'Unimplemented':
                return VeilidAPIErrorUnimplemented(j['message'])
            case 'ParseError':
                return VeilidAPIErrorParseError(j['message'], j['value'])
            case 'InvalidArgument':
                return VeilidAPIErrorInvalidArgument(j['context'], j['argument'], j['value'])
            case 'MissingArgument':
                return VeilidAPIErrorMissingArgument(j['context'], j['argument'])
            case 'Generic':
                return VeilidAPIErrorGeneric(j['message'])
            case _:
                return VeilidAPIError("Unknown exception type: {}".format(j['kind']))


class VeilidAPIErrorNotInitialized(VeilidAPIError):
    """Veilid was not initialized"""
    def __init__(self):
        super().__init__("Not initialized")

class VeilidAPIErrorAlreadyInitialized(VeilidAPIError):
    """Veilid was already initialized"""
    def __init__(self):
        super().__init__("Already initialized")

class VeilidAPIErrorTimeout(VeilidAPIError):
    """Veilid operation timed out"""
    def __init__(self):
        super().__init__("Timeout")

class VeilidAPIErrorTryAgain(VeilidAPIError):
    """Operation could not be performed at this time, retry again later"""
    def __init__(self):
        super().__init__("Try again")

class VeilidAPIErrorShutdown(VeilidAPIError):
    """Veilid was already shut down"""
    def __init__(self):
        super().__init__("Shutdown")

class VeilidAPIErrorInvalidTarget(VeilidAPIError):
    """Target of operation is not valid"""
    def __init__(self):
        super().__init__("Invalid target")

class VeilidAPIErrorNoConnection(VeilidAPIError):
    """Connection could not be established"""
    message: str
    def __init__(self, message: str):
        super().__init__("No connection")
        self.message = message

class VeilidAPIErrorKeyNotFound(VeilidAPIError):
    """Key was not found"""
    key: str
    def __init__(self, key: str):
        super().__init__("Key not found")
        self.key = key

class VeilidAPIErrorInternal(VeilidAPIError):
    """Veilid experienced an internal failure"""
    message: str
    def __init__(self, message: str):
        super().__init__("Internal")
        self.message = message

class VeilidAPIErrorUnimplemented(VeilidAPIError):
    """Functionality is not yet implemented"""
    message: str
    def __init__(self, message: str):
        super().__init__("Unimplemented")
        self.message = message

class VeilidAPIErrorParseError(VeilidAPIError):
    """Value was not in a parseable format"""
    message: str
    value: str
    def __init__(self, message: str, value: str):
        super().__init__("Parse error")
        self.message = message
        self.value = value

class VeilidAPIErrorInvalidArgument(VeilidAPIError):
    """Argument is not valid in this context"""
    context: str
    argument: str
    value: str
    def __init__(self, context: str, argument: str, value: str):
        super().__init__("Invalid argument")
        self.context = context
        self.argument = argument
        self.value = value

class VeilidAPIErrorMissingArgument(VeilidAPIError):
    """Required argument was missing"""
    context: str
    argument: str
    def __init__(self, context: str, argument: str):
        super().__init__("Missing argument")
        self.context = context
        self.argument = argument

class VeilidAPIErrorGeneric(VeilidAPIError):
    """Generic error message"""
    message: str
    def __init__(self, message: str):
        super().__init__("Generic")
        self.message = message


def raise_api_result(api_result: dict) -> Any:
    if "value" in api_result:
        return api_result["value"]
    elif "error" in api_result:
        raise VeilidAPIError.from_json(api_result["error"])
    else:
        raise ValueError("Invalid format for ApiResult")
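raise_api_result is the common decode path for JSON API responses: given the {"value": ...} / {"error": ...} envelope, it either returns the value or raises the matching exception subclass produced by VeilidAPIError.from_json. A minimal sketch (the envelope literals here are illustrative):

# Hypothetical envelopes as they would come back from the JSON API.
ok = {"value": 42}
assert raise_api_result(ok) == 42

bad = {"error": {"kind": "Timeout"}}
try:
    raise_api_result(bad)
except VeilidAPIErrorTimeout:
    pass  # mapped from kind == 'Timeout' by VeilidAPIError.from_json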