Mirror of https://gitlab.com/veilid/veilid.git, synced 2024-10-01 01:26:08 -04:00

Merge branch 'dht-work' into 'main'

DHT WatchValue and InspectRecord Support

See merge request veilid/veilid!261

Commit ebd4c0070d

167 changed lines: .gitignore (vendored)
@@ -66,172 +66,7 @@ flamegraph.svg
perf.data
perf.data.old

##############################################################################
### Python

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

## Custom for veilid-python
veilid-python/demo/.demokeys
###################################

.vscode/
.idea/
1181 changed lines: Cargo.lock (generated)
File diff suppressed because it is too large
@@ -17,7 +17,7 @@ logging:
     enabled: false
 core:
   capabilities:
-    disable: ['TUNL','SGNL','RLAY','DIAL','DHTV','APPM','ROUT']
+    disable: ['TUNL','SGNL','RLAY','DIAL','DHTV','DHTW','APPM','ROUT']
   network:
     upnp: false
     dht:
@@ -22,7 +22,7 @@ logging:
     enabled: false
 core:
   capabilities:
-    disable: ['TUNL','SGNL','RLAY','DIAL','DHTV','APPM']
+    disable: ['TUNL','SGNL','RLAY','DIAL','DHTV','DHTW','APPM']
   network:
     upnp: false
     dht:
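The new 'DHTW' capability code, disabled in both bundled test configurations above alongside the existing 'DHTV' code, corresponds to the DHT WatchValue support this merge introduces. As an illustration only (the Capability type and is_disabled helper below are invented for the example, not Veilid's actual API), four-character capability codes like these can be modeled and checked as:

// Illustrative sketch, not part of this commit: model the four-character
// capability codes from `core.capabilities.disable` and test membership.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Capability([u8; 4]);

impl Capability {
    fn parse(s: &str) -> Option<Self> {
        let b = s.as_bytes();
        (b.len() == 4).then(|| Capability([b[0], b[1], b[2], b[3]]))
    }
}

fn is_disabled(disabled: &[Capability], cap: Capability) -> bool {
    disabled.contains(&cap)
}

fn main() {
    let disabled: Vec<Capability> = ["TUNL", "SGNL", "DHTV", "DHTW"]
        .iter()
        .filter_map(|s| Capability::parse(s))
        .collect();
    // With 'DHTW' in the disable list, watch functionality would be off.
    assert!(is_disabled(&disabled, Capability::parse("DHTW").unwrap()));
}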
@ -38,7 +38,7 @@ cursive = { git = "https://gitlab.com/veilid/cursive.git", default-features = fa
|
||||
cursive_buffered_backend = { git = "https://gitlab.com/veilid/cursive-buffered-backend.git" }
|
||||
# cursive-multiplex = "0.6.0"
|
||||
# cursive_tree_view = "0.6.0"
|
||||
cursive_table_view = "0.14.0"
|
||||
cursive_table_view = { git = "https://gitlab.com/veilid/cursive-table-view.git" }
|
||||
arboard = "3.3.0"
|
||||
# cursive-tabs = "0.5.0"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
@ -50,12 +50,12 @@ serde_derive = "^1"
|
||||
parking_lot = "^0"
|
||||
cfg-if = "^1"
|
||||
config = { version = "^0", features = ["yaml"] }
|
||||
bugsalot = { package = "veilid-bugsalot", version = "0.1.0" }
|
||||
bugsalot = { package = "veilid-bugsalot", version = "0.2.0" }
|
||||
flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
|
||||
thiserror = "^1"
|
||||
crossbeam-channel = "^0"
|
||||
hex = "^0"
|
||||
veilid-tools = { version = "0.2.5", path = "../veilid-tools", default-features = false}
|
||||
veilid-tools = { version = "0.2.5", path = "../veilid-tools", default-features = false }
|
||||
|
||||
json = "^0"
|
||||
stop-token = { version = "^0", default-features = false }
|
||||
@ -67,6 +67,8 @@ chrono = "0.4.31"
|
||||
owning_ref = "0.4.1"
|
||||
unicode-width = "0.1.11"
|
||||
lru = "0.10.1"
|
||||
rustyline-async = "0.4.2"
|
||||
console = "0.15.8"
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "^2"
|
||||
|
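The two new dependencies here, rustyline-async and console, back the interactive and log-viewer UI modes introduced by this merge. A minimal sketch of the rustyline-async read loop that interactive_ui.rs (below) builds on; not part of this commit, and it can be driven by any async executor, for example futures::executor::block_on(repl()):

use rustyline_async::{Readline, ReadlineEvent};
use std::io::Write;

async fn repl() {
    // Readline::new yields the prompt-aware reader plus a SharedWriter that
    // prints without clobbering the prompt line.
    let (mut readline, mut stdout) = Readline::new("> ".to_owned()).unwrap();
    loop {
        match readline.readline().await {
            Ok(ReadlineEvent::Line(line)) => {
                let _ = writeln!(stdout, "you typed: {}", line.trim());
                readline.add_history_entry(line);
            }
            Ok(ReadlineEvent::Interrupted) | Ok(ReadlineEvent::Eof) => break,
            Err(e) => {
                eprintln!("Error: {:?}", e);
                break;
            }
        }
    }
    let _ = readline.flush();
}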
veilid-cli/src/client_api_connection.rs

@@ -80,7 +80,7 @@ impl ClientApiConnection {
     async fn process_veilid_update(&self, update: json::JsonValue) {
         let comproc = self.inner.lock().comproc.clone();
         let Some(kind) = update["kind"].as_str() else {
-            comproc.log_message(Level::Error, format!("missing update kind: {}", update));
+            comproc.log_message(Level::Error, &format!("missing update kind: {}", update));
             return;
         };
         match kind {
@@ -110,7 +110,7 @@ impl ClientApiConnection {
                 comproc.update_value_change(&update);
             }
             _ => {
-                comproc.log_message(Level::Error, format!("unknown update kind: {}", update));
+                comproc.log_message(Level::Error, &format!("unknown update kind: {}", update));
             }
         }
     }
@@ -395,6 +395,27 @@ impl ClientApiConnection {
         Ok(())
     }

+    pub async fn server_change_log_ignore(
+        &self,
+        layer: String,
+        log_ignore: String,
+    ) -> Result<(), String> {
+        trace!("ClientApiConnection::change_log_ignore");
+        let mut req = json::JsonValue::new_object();
+        req["op"] = "Control".into();
+        req["args"] = json::JsonValue::new_array();
+        req["args"].push("ChangeLogIgnore").unwrap();
+        req["args"].push(layer).unwrap();
+        req["args"].push(log_ignore).unwrap();
+        let Some(resp) = self.perform_request(req).await else {
+            return Err("Cancelled".to_owned());
+        };
+        if resp.has_key("error") {
+            return Err(resp["error"].to_string());
+        }
+        Ok(())
+    }

     // Start Client API connection
     pub async fn ipc_connect(&self, ipc_path: PathBuf) -> Result<(), String> {
         trace!("ClientApiConnection::ipc_connect");
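The new control request follows the same op/args convention as the existing ChangeLogLevel operation. A stand-alone sketch of the wire shape that server_change_log_ignore builds, using the same json crate as above (the layer and ignore values are illustrative):

fn main() {
    let layer = "api".to_string();
    let log_ignore = "mio,polling,-hyper".to_string();

    let mut req = json::JsonValue::new_object();
    req["op"] = "Control".into();
    req["args"] = json::JsonValue::new_array();
    req["args"].push("ChangeLogIgnore").unwrap();
    req["args"].push(layer).unwrap();
    req["args"].push(log_ignore).unwrap();

    // Prints: {"op":"Control","args":["ChangeLogIgnore","api","mio,polling,-hyper"]}
    println!("{}", req.dump());
}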
veilid-cli/src/command_processor.rs

@@ -41,7 +41,7 @@ impl ConnectionState {
 }

 struct CommandProcessorInner {
-    ui_sender: UISender,
+    ui_sender: Box<dyn UISender>,
     capi: Option<ClientApiConnection>,
     reconnect: bool,
     finished: bool,
@@ -60,7 +60,7 @@ pub struct CommandProcessor {
 }

 impl CommandProcessor {
-    pub fn new(ui_sender: UISender, settings: &Settings) -> Self {
+    pub fn new(ui_sender: Box<dyn UISender>, settings: &Settings) -> Self {
         Self {
             inner: Arc::new(Mutex::new(CommandProcessorInner {
                 ui_sender,
@@ -86,8 +86,8 @@ impl CommandProcessor {
     fn inner_mut(&self) -> MutexGuard<CommandProcessorInner> {
         self.inner.lock()
     }
-    fn ui_sender(&self) -> UISender {
-        self.inner.lock().ui_sender.clone()
+    fn ui_sender(&self) -> Box<dyn UISender> {
+        self.inner.lock().ui_sender.clone_uisender()
     }
     fn capi(&self) -> ClientApiConnection {
         self.inner.lock().capi.as_ref().unwrap().clone()
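Switching ui_sender from the old concrete UISender type to Box<dyn UISender> is what lets a single CommandProcessor drive any of the UI backends added in this merge. Since Clone is not object-safe, the trait gains the explicit clone_uisender method used above. A minimal stand-alone illustration of the pattern (the trait is reduced to two methods and PrintlnSender is invented for the example):

trait UISender: Send {
    // Object-safe substitute for Clone: each implementation boxes a copy of itself.
    fn clone_uisender(&self) -> Box<dyn UISender>;
    fn add_node_event(&self, event: &str);
}

#[derive(Clone)]
struct PrintlnSender;

impl UISender for PrintlnSender {
    fn clone_uisender(&self) -> Box<dyn UISender> {
        Box::new(self.clone())
    }
    fn add_node_event(&self, event: &str) {
        println!("{}", event);
    }
}

fn main() {
    let a: Box<dyn UISender> = Box::new(PrintlnSender);
    let b = a.clone_uisender(); // a second, independent handle
    b.add_node_event("hello");
}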
@@ -126,7 +126,7 @@ impl CommandProcessor {

         ui.add_node_event(
             Level::Info,
-            format!(
+            &format!(
                 r#"Client Commands:
     exit/quit                           exit the client
     disconnect                          disconnect the client from the Veilid node
@@ -136,6 +136,9 @@ impl CommandProcessor {
             all, terminal, system, api, file, otlp
         levels include:
             error, warn, info, debug, trace
+    change_log_ignore <layer> <changes> change the log target ignore list for a tracing layer
+        targets to add to the ignore list can be separated by a comma.
+        to remove a target from the ignore list, prepend it with a minus.
     enable [flag]                       set a flag
     disable [flag]                      unset a flag
         valid flags in include:
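Per the new help text, <changes> is a comma-separated list of tracing targets, and a leading minus removes a target from the ignore list. A stand-alone sketch of that edit syntax (the apply_log_ignore_change helper is invented for illustration; the actual list manipulation happens on the server side):

fn apply_log_ignore_change(current: &mut Vec<String>, changes: &str) {
    for change in changes.split(',').map(str::trim).filter(|s| !s.is_empty()) {
        if let Some(target) = change.strip_prefix('-') {
            // A leading minus removes the target from the ignore list.
            current.retain(|t| t != target);
        } else if !current.iter().any(|t| t == change) {
            current.push(change.to_string());
        }
    }
}

fn main() {
    let mut ignore = vec!["hyper".to_string()];
    apply_log_ignore_change(&mut ignore, "mio,tokio_util,-hyper");
    assert_eq!(ignore, vec!["mio".to_string(), "tokio_util".to_string()]);
}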
@@ -190,11 +193,11 @@ Server Debug Commands:
         spawn_detached_local(async move {
             match capi.server_debug(command_line).await {
                 Ok(output) => {
-                    ui.add_node_event(Level::Info, output);
+                    ui.add_node_event(Level::Info, &output);
                     ui.send_callback(callback);
                 }
                 Err(e) => {
-                    ui.add_node_event(Level::Error, e.to_string());
+                    ui.add_node_event(Level::Error, &e);
                     ui.send_callback(callback);
                 }
             }
@@ -215,20 +218,59 @@ Server Debug Commands:
             let log_level = match convert_loglevel(&rest.unwrap_or_default()) {
                 Ok(v) => v,
                 Err(e) => {
-                    ui.add_node_event(Level::Error, format!("Failed to change log level: {}", e));
+                    ui.add_node_event(Level::Error, &format!("Failed to change log level: {}", e));
                     ui.send_callback(callback);
                     return;
                 }
             };

-            match capi.server_change_log_level(layer, log_level).await {
+            match capi.server_change_log_level(layer, log_level.clone()).await {
                 Ok(()) => {
-                    ui.display_string_dialog("Success", "Log level changed", callback);
+                    ui.display_string_dialog(
+                        "Log level changed",
+                        &format!("Log level set to '{}'", log_level),
+                        callback,
+                    );
                 }
                 Err(e) => {
                     ui.display_string_dialog(
                         "Server command 'change_log_level' failed",
-                        e.to_string(),
+                        &e,
                         callback,
                     );
                 }
             }
         });
         Ok(())
     }

+    pub fn cmd_change_log_ignore(
+        &self,
+        rest: Option<String>,
+        callback: UICallback,
+    ) -> Result<(), String> {
+        trace!("CommandProcessor::cmd_change_log_ignore");
+        let capi = self.capi();
+        let ui = self.ui_sender();
+        spawn_detached_local(async move {
+            let (layer, rest) = Self::word_split(&rest.unwrap_or_default());
+            let log_ignore = rest.unwrap_or_default();
+
+            match capi
+                .server_change_log_ignore(layer, log_ignore.clone())
+                .await
+            {
+                Ok(()) => {
+                    ui.display_string_dialog(
+                        "Log ignore changed",
+                        &format!("Log ignore changed '{}'", log_ignore),
+                        callback,
+                    );
+                }
+                Err(e) => {
+                    ui.display_string_dialog(
+                        "Server command 'change_log_ignore' failed",
+                        &e,
+                        callback,
+                    );
+                }
@@ -247,11 +289,11 @@ Server Debug Commands:
             match flag.as_str() {
                 "app_messages" => {
                     this.inner.lock().enable_app_messages = true;
-                    ui.add_node_event(Level::Info, format!("flag enabled: {}", flag));
+                    ui.add_node_event(Level::Info, &format!("flag enabled: {}", flag));
                     ui.send_callback(callback);
                 }
                 _ => {
-                    ui.add_node_event(Level::Error, format!("unknown flag: {}", flag));
+                    ui.add_node_event(Level::Error, &format!("unknown flag: {}", flag));
                     ui.send_callback(callback);
                 }
             }
@@ -269,11 +311,11 @@ Server Debug Commands:
             match flag.as_str() {
                 "app_messages" => {
                     this.inner.lock().enable_app_messages = false;
-                    ui.add_node_event(Level::Info, format!("flag disabled: {}", flag));
+                    ui.add_node_event(Level::Info, &format!("flag disabled: {}", flag));
                     ui.send_callback(callback);
                 }
                 _ => {
-                    ui.add_node_event(Level::Error, format!("unknown flag: {}", flag));
+                    ui.add_node_event(Level::Error, &format!("unknown flag: {}", flag));
                     ui.send_callback(callback);
                 }
             }
@@ -291,6 +333,7 @@ Server Debug Commands:
             "disconnect" => self.cmd_disconnect(callback),
             "shutdown" => self.cmd_shutdown(callback),
             "change_log_level" => self.cmd_change_log_level(rest, callback),
+            "change_log_ignore" => self.cmd_change_log_ignore(rest, callback),
             "enable" => self.cmd_enable(rest, callback),
             "disable" => self.cmd_disable(rest, callback),
             _ => self.cmd_debug(command_line.to_owned(), callback),
@@ -413,13 +456,13 @@ Server Debug Commands:
     // calls into ui
     ////////////////////////////////////////////

-    pub fn log_message(&self, log_level: Level, message: String) {
-        self.inner().ui_sender.add_node_event(log_level, message);
+    pub fn log_message(&self, log_level: Level, message: &str) {
+        self.inner().ui_sender.add_log_event(log_level, message);
     }

     pub fn update_attachment(&self, attachment: &json::JsonValue) {
         self.inner_mut().ui_sender.set_attachment_state(
-            attachment["state"].as_str().unwrap_or_default().to_owned(),
+            attachment["state"].as_str().unwrap_or_default(),
             attachment["public_internet_ready"]
                 .as_bool()
                 .unwrap_or_default(),
@@ -458,7 +501,7 @@ Server Debug Commands:
             ));
         }
         if !out.is_empty() {
-            self.inner().ui_sender.add_node_event(Level::Info, out);
+            self.inner().ui_sender.add_node_event(Level::Info, &out);
         }
     }
     pub fn update_value_change(&self, value_change: &json::JsonValue) {
@@ -475,15 +518,15 @@ Server Debug Commands:
             datastr,
             if truncated { "..." } else { "" }
         );
-        self.inner().ui_sender.add_node_event(Level::Info, out);
+        self.inner().ui_sender.add_node_event(Level::Info, &out);
     }

     pub fn update_log(&self, log: &json::JsonValue) {
         let log_level =
             Level::from_str(log["log_level"].as_str().unwrap_or("error")).unwrap_or(Level::Error);
-        self.inner().ui_sender.add_node_event(
+        self.inner().ui_sender.add_log_event(
             log_level,
-            format!(
+            &format!(
                 "{}: {}{}",
                 log["log_level"].as_str().unwrap_or("???"),
                 log["message"].as_str().unwrap_or("???"),
@@ -530,7 +573,7 @@ Server Debug Commands:

         self.inner().ui_sender.add_node_event(
             Level::Info,
-            format!(
+            &format!(
                 "AppMessage ({:?}): {}{}",
                 msg["sender"],
                 strmsg,
@@ -570,7 +613,7 @@ Server Debug Commands:

         self.inner().ui_sender.add_node_event(
             Level::Info,
-            format!(
+            &format!(
                 "AppCall ({:?}) id = {:016x} : {}{}",
                 call["sender"],
                 id,
1448 changed lines: veilid-cli/src/cursive_ui.rs (new file)
File diff suppressed because it is too large

350 changed lines: veilid-cli/src/interactive_ui.rs (new file)
@@ -0,0 +1,350 @@
use std::io::Write;

use crate::command_processor::*;
use crate::cursive_ui::CursiveUI;
use crate::settings::*;
use crate::tools::*;
use crate::ui::*;

use console::{style, Term};
use flexi_logger::writers::LogWriter;
use rustyline_async::SharedWriter;
use rustyline_async::{Readline, ReadlineError, ReadlineEvent};
use stop_token::future::FutureExt as StopTokenFutureExt;
use stop_token::*;

pub type InteractiveUICallback = Box<dyn FnMut() + Send>;

pub struct InteractiveUIInner {
    cmdproc: Option<CommandProcessor>,
    stdout: Option<SharedWriter>,
    error: Option<String>,
    done: Option<StopSource>,
    connection_state_receiver: flume::Receiver<ConnectionState>,
    log_enabled: bool,
    enable_color: bool,
}

#[derive(Clone)]
pub struct InteractiveUI {
    inner: Arc<Mutex<InteractiveUIInner>>,
}

impl InteractiveUI {
    pub fn new(_settings: &Settings) -> (Self, InteractiveUISender) {
        let (cssender, csreceiver) = flume::unbounded::<ConnectionState>();

        let term = Term::stdout();
        let enable_color = console::colors_enabled() && term.features().colors_supported();

        // Create the UI object
        let this = Self {
            inner: Arc::new(Mutex::new(InteractiveUIInner {
                cmdproc: None,
                stdout: None,
                error: None,
                done: Some(StopSource::new()),
                connection_state_receiver: csreceiver,
                log_enabled: false,
                enable_color,
            })),
        };

        let ui_sender = InteractiveUISender {
            inner: this.inner.clone(),
            connection_state_sender: cssender,
        };

        (this, ui_sender)
    }

    pub async fn command_loop(&self) {
        let (mut readline, mut stdout) =
            match Readline::new("> ".to_owned()).map_err(|e| e.to_string()) {
                Ok(v) => v,
                Err(e) => {
                    println!("Error: {:?}", e);
                    return;
                }
            };

        let (connection_state_receiver, done) = {
            let inner = self.inner.lock();
            (
                inner.connection_state_receiver.clone(),
                inner.done.as_ref().unwrap().token(),
            )
        };

        self.inner.lock().stdout = Some(stdout.clone());

        CursiveUI::set_start_time();

        // Wait for connection to be established
        loop {
            match connection_state_receiver.recv_async().await {
                Ok(ConnectionState::ConnectedTCP(_, _))
                | Ok(ConnectionState::ConnectedIPC(_, _)) => {
                    break;
                }
                Ok(ConnectionState::RetryingTCP(_, _)) | Ok(ConnectionState::RetryingIPC(_, _)) => {
                }
                Ok(ConnectionState::Disconnected) => {}
                Err(e) => {
                    eprintln!("Error: {:?}", e);
                    self.inner.lock().done.take();
                    break;
                }
            }
        }

        loop {
            if let Some(e) = self.inner.lock().error.clone() {
                println!("Error: {:?}", e);
                break;
            }
            match readline.readline().timeout_at(done.clone()).await {
                Ok(Ok(ReadlineEvent::Line(line))) => {
                    let line = line.trim();
                    if line == "clear" {
                        if let Err(e) = readline.clear() {
                            println!("Error: {:?}", e);
                        }
                    } else if line == "log error" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api error",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = true;
                        }
                    } else if line == "log warn" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api warn",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = true;
                        }
                    } else if line == "log info" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api info",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = true;
                        }
                    } else if line == "log debug" || line == "log" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api debug",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = true;
                        }
                    } else if line == "log trace" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api trace",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = true;
                        }
                    } else if line == "log off" {
                        let opt_cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = opt_cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                "change_log_level api off",
                                UICallback::Interactive(Box::new(|| {})),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                            }
                            self.inner.lock().log_enabled = false;
                        }
                    } else if !line.is_empty() {
                        readline.add_history_entry(line.to_string());
                        let cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = &cmdproc {
                            if let Err(e) = cmdproc.run_command(
                                line,
                                UICallback::Interactive(Box::new({
                                    //let mut stdout = stdout.clone();
                                    move || {
                                        // if let Err(e) = writeln!(stdout) {
                                        //     println!("Error: {:?}", e);
                                        // }
                                    }
                                })),
                            ) {
                                if let Err(e) = writeln!(stdout, "Error: {}", e) {
                                    println!("Error: {:?}", e);
                                    break;
                                }
                            }
                        }
                    }
                }
                Ok(Ok(ReadlineEvent::Interrupted)) => {
                    break;
                }
                Ok(Ok(ReadlineEvent::Eof)) => {
                    break;
                }
                Ok(Err(ReadlineError::Closed)) => {}
                Ok(Err(ReadlineError::IO(e))) => {
                    println!("IO Error: {:?}", e);
                    break;
                }
                Err(_) => {
                    break;
                }
            }
        }
        let _ = readline.flush();
    }
}

impl UI for InteractiveUI {
    fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
        let mut inner = self.inner.lock();
        inner.cmdproc = Some(cmdproc);
    }
    fn run_async(&mut self) -> Pin<Box<dyn core::future::Future<Output = ()>>> {
        let this = self.clone();
        Box::pin(async move {
            this.command_loop().await;
        })
    }
}

//////////////////////////////////////////////////////////////////////////////

#[derive(Clone)]
pub struct InteractiveUISender {
    inner: Arc<Mutex<InteractiveUIInner>>,
    connection_state_sender: flume::Sender<ConnectionState>,
}

impl UISender for InteractiveUISender {
    fn clone_uisender(&self) -> Box<dyn UISender> {
        Box::new(InteractiveUISender {
            inner: self.inner.clone(),
            connection_state_sender: self.connection_state_sender.clone(),
        })
    }
    fn as_logwriter(&self) -> Option<Box<dyn LogWriter>> {
        None
    }

    fn display_string_dialog(&self, title: &str, text: &str, close_cb: UICallback) {
        let Some(mut stdout) = self.inner.lock().stdout.clone() else {
            return;
        };
        if let Err(e) = writeln!(stdout, "{}: {}", title, text) {
            self.inner.lock().error = Some(e.to_string());
        }
        if let UICallback::Interactive(mut close_cb) = close_cb {
            close_cb()
        }
    }

    fn quit(&self) {
        self.inner.lock().done.take();
    }

    fn send_callback(&self, callback: UICallback) {
        if let UICallback::Interactive(mut callback) = callback {
            callback();
        }
    }
    fn set_attachment_state(
        &mut self,
        _state: &str,
        _public_internet_ready: bool,
        _local_network_ready: bool,
    ) {
        //
    }
    fn set_network_status(
        &mut self,
        _started: bool,
        _bps_down: u64,
        _bps_up: u64,
        mut _peers: Vec<json::JsonValue>,
    ) {
        //
    }
    fn set_config(&mut self, _config: &json::JsonValue) {
        //
    }
    fn set_connection_state(&mut self, state: ConnectionState) {
        if let Err(e) = self.connection_state_sender.send(state) {
            eprintln!("Error: {:?}", e);
            self.inner.lock().done.take();
        }
    }

    fn add_node_event(&self, _log_color: Level, event: &str) {
        let Some(mut stdout) = self.inner.lock().stdout.clone() else {
            return;
        };
        if let Err(e) = writeln!(stdout, "{}", event) {
            self.inner.lock().error = Some(e.to_string());
        }
    }
    fn add_log_event(&self, log_color: Level, event: &str) {
        let (enable_color, mut stdout) = {
            let inner = self.inner.lock();
            if !inner.log_enabled {
                return;
            }
            let Some(stdout) = inner.stdout.clone() else {
                return;
            };

            (inner.enable_color, stdout)
        };

        let log_line = format!(
            "{}: {}",
            CursiveUI::cli_ts(CursiveUI::get_start_time()),
            event
        );
        if enable_color {
            let log_line = match log_color {
                Level::Error => style(log_line).red().bright().to_string(),
                Level::Warn => style(log_line).yellow().bright().to_string(),
                Level::Info => log_line,
                Level::Debug => style(log_line).green().bright().to_string(),
                Level::Trace => style(log_line).blue().bright().to_string(),
            };
            if let Err(e) = writeln!(stdout, "{}", log_line) {
                eprintln!("Error: {:?}", e);
                self.inner.lock().done.take();
            }
        } else {
            println!("{}", log_line);
        }
    }
}
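The six log ... branches in command_loop above differ only in the level passed to change_log_level and the final log_enabled value. A hedged refactor sketch (not part of this commit) that tabulates the mapping:

// Illustrative only: collapse the repeated `log ...` branches into a table.
fn log_level_for(line: &str) -> Option<(&'static str, bool)> {
    match line {
        "log error" => Some(("error", true)),
        "log warn" => Some(("warn", true)),
        "log info" => Some(("info", true)),
        "log" | "log debug" => Some(("debug", true)),
        "log trace" => Some(("trace", true)),
        "log off" => Some(("off", false)),
        _ => None,
    }
}

fn main() {
    assert_eq!(log_level_for("log"), Some(("debug", true)));
    assert_eq!(log_level_for("log off"), Some(("off", false)));
    assert_eq!(log_level_for("quit"), None);
}

Each branch would then run the single command format!("change_log_level api {}", level) and set log_enabled from the table.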
268 changed lines: veilid-cli/src/io_read_write_ui.rs (new file)

@@ -0,0 +1,268 @@
use crate::command_processor::*;
use crate::settings::*;
use crate::tools::*;
use crate::ui::*;

use futures::io::{AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, BufWriter};
use stop_token::future::FutureExt as StopTokenFutureExt;
use stop_token::*;
use veilid_tools::AsyncMutex;

use flexi_logger::writers::LogWriter;

static FINISHED_LINE: &str = "\x7F ===FINISHED=== \x7F";

pub type IOReadWriteUICallback = Box<dyn FnMut() + Send>;

pub struct IOReadWriteUIInner<R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send> {
    cmdproc: Option<CommandProcessor>,
    in_io: Arc<AsyncMutex<BufReader<R>>>,
    out_io: Arc<AsyncMutex<BufWriter<W>>>,
    out_receiver: flume::Receiver<String>,
    out_sender: flume::Sender<String>,
    done: Option<StopSource>,
    connection_state_receiver: flume::Receiver<ConnectionState>,
}

pub struct IOReadWriteUI<R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send> {
    inner: Arc<Mutex<IOReadWriteUIInner<R, W>>>,
}
impl<R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send> Clone for IOReadWriteUI<R, W> {
    fn clone(&self) -> Self {
        IOReadWriteUI {
            inner: self.inner.clone(),
        }
    }
}

impl<R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send> IOReadWriteUI<R, W> {
    pub fn new(_settings: &Settings, in_io: R, out_io: W) -> (Self, IOReadWriteUISender<R, W>) {
        // Create the UI object
        let (sender, receiver) = flume::unbounded::<String>();
        let (cssender, csreceiver) = flume::unbounded::<ConnectionState>();
        let this = Self {
            inner: Arc::new(Mutex::new(IOReadWriteUIInner {
                cmdproc: None,
                in_io: Arc::new(AsyncMutex::new(BufReader::new(in_io))),
                out_io: Arc::new(AsyncMutex::new(BufWriter::new(out_io))),
                out_receiver: receiver,
                out_sender: sender.clone(),
                connection_state_receiver: csreceiver,
                done: Some(StopSource::new()),
            })),
        };

        let ui_sender = IOReadWriteUISender {
            inner: this.inner.clone(),
            out_sender: sender,
            connection_state_sender: cssender,
        };

        (this, ui_sender)
    }

    pub async fn output_loop(&self) {
        let out_receiver = self.inner.lock().out_receiver.clone();
        let out_io = self.inner.lock().out_io.clone();

        let mut out = out_io.lock().await;
        let done = self.inner.lock().done.as_ref().unwrap().token();

        while let Ok(Ok(line)) = out_receiver.recv_async().timeout_at(done.clone()).await {
            if line == FINISHED_LINE {
                break;
            }
            let line = format!("{}\n", line);
            if let Err(e) = out.write_all(line.as_bytes()).await {
                eprintln!("Error: {:?}", e);
                break;
            }
            if let Err(e) = out.flush().await {
                eprintln!("Error: {:?}", e);
                break;
            }
        }
    }

    pub async fn command_loop(&self) {
        let (in_io, out_sender, connection_state_receiver, done) = {
            let inner = self.inner.lock();
            (
                inner.in_io.clone(),
                inner.out_sender.clone(),
                inner.connection_state_receiver.clone(),
                inner.done.as_ref().unwrap().token(),
            )
        };
        let mut in_io = in_io.lock().await;

        let (exec_sender, exec_receiver) = flume::bounded(1);

        // Wait for connection to be established
        loop {
            match connection_state_receiver.recv_async().await {
                Ok(ConnectionState::ConnectedTCP(_, _))
                | Ok(ConnectionState::ConnectedIPC(_, _)) => {
                    break;
                }
                Ok(ConnectionState::RetryingTCP(_, _)) | Ok(ConnectionState::RetryingIPC(_, _)) => {
                }
                Ok(ConnectionState::Disconnected) => {}
                Err(e) => {
                    eprintln!("Error: {:?}", e);
                    self.inner.lock().done.take();
                    break;
                }
            }
        }

        // Process the input
        loop {
            let mut line = String::new();
            match in_io.read_line(&mut line).timeout_at(done.clone()).await {
                Ok(Ok(bytes)) => {
                    if bytes == 0 {
                        // Clean exit after everything else is sent
                        if let Err(e) = out_sender.send(FINISHED_LINE.to_string()) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                        break;
                    }
                    let line = line.trim();
                    if !line.is_empty() {
                        let cmdproc = self.inner.lock().cmdproc.clone();
                        if let Some(cmdproc) = &cmdproc {
                            // Run command
                            if let Err(e) = cmdproc.run_command(
                                line,
                                UICallback::IOReadWrite(Box::new({
                                    let exec_sender = exec_sender.clone();
                                    move || {
                                        // Let the next command execute
                                        if let Err(e) = exec_sender.send(()) {
                                            eprintln!("Error: {:?}", e);
                                        }
                                    }
                                })),
                            ) {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                                break;
                            }
                            // Wait until command is done executing before running the next line
                            if let Err(e) = exec_receiver.recv_async().await {
                                eprintln!("Error: {:?}", e);
                                self.inner.lock().done.take();
                                break;
                            }
                        }
                    }
                }
                Ok(Err(e)) => {
                    eprintln!("IO Error: {:?}", e);
                    self.inner.lock().done.take();
                    break;
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}

impl<R: AsyncRead + Unpin + Send + 'static, W: AsyncWrite + Unpin + Send + 'static> UI
    for IOReadWriteUI<R, W>
{
    fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
        let mut inner = self.inner.lock();
        inner.cmdproc = Some(cmdproc);
    }
    fn run_async(&mut self) -> Pin<Box<dyn core::future::Future<Output = ()>>> {
        let this = self.clone();
        Box::pin(async move {
            let out_fut = this.output_loop();
            let cmd_fut = this.command_loop();
            futures::join!(out_fut, cmd_fut);
        })
    }
}

//////////////////////////////////////////////////////////////////////////////

#[derive(Clone)]
pub struct IOReadWriteUISender<R: AsyncRead + Unpin + Send, W: AsyncWrite + Unpin + Send> {
    inner: Arc<Mutex<IOReadWriteUIInner<R, W>>>,
    out_sender: flume::Sender<String>,
    connection_state_sender: flume::Sender<ConnectionState>,
}

impl<R: AsyncRead + Unpin + Send + 'static, W: AsyncWrite + Unpin + Send + 'static> UISender
    for IOReadWriteUISender<R, W>
{
    fn clone_uisender(&self) -> Box<dyn UISender> {
        Box::new(IOReadWriteUISender {
            inner: self.inner.clone(),
            out_sender: self.out_sender.clone(),
            connection_state_sender: self.connection_state_sender.clone(),
        })
    }
    fn as_logwriter(&self) -> Option<Box<dyn LogWriter>> {
        None
    }

    fn display_string_dialog(&self, title: &str, text: &str, close_cb: UICallback) {
        if let Err(e) = self.out_sender.send(format!("{}: {}", title, text)) {
            eprintln!("Error: {:?}", e);
            self.inner.lock().done.take();
        }
        if let UICallback::IOReadWrite(mut close_cb) = close_cb {
            close_cb()
        }
    }

    fn quit(&self) {
        self.inner.lock().done.take();
    }

    fn send_callback(&self, callback: UICallback) {
        if let UICallback::IOReadWrite(mut callback) = callback {
            callback();
        }
    }
    fn set_attachment_state(
        &mut self,
        _state: &str,
        _public_internet_ready: bool,
        _local_network_ready: bool,
    ) {
        //
    }
    fn set_network_status(
        &mut self,
        _started: bool,
        _bps_down: u64,
        _bps_up: u64,
        mut _peers: Vec<json::JsonValue>,
    ) {
        //
    }
    fn set_config(&mut self, _config: &json::JsonValue) {
        //
    }
    fn set_connection_state(&mut self, state: ConnectionState) {
        if let Err(e) = self.connection_state_sender.send(state) {
            eprintln!("Error: {:?}", e);
            self.inner.lock().done.take();
        }
    }

    fn add_node_event(&self, _log_color: Level, event: &str) {
        if let Err(e) = self.out_sender.send(format!("{}\n", event)) {
            eprintln!("Error: {:?}", e);
            self.inner.lock().done.take();
        }
    }
    fn add_log_event(&self, _log_color: Level, _event: &str) {}
}
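Two details of this file are worth noting: the FINISHED_LINE sentinel delays shutdown until all queued output has been flushed, and the flume::bounded(1) exec channel serializes commands by making the reader wait for each command's completion callback before consuming the next line. A stand-alone sketch of that rendezvous, with threads standing in for the async tasks (not part of this commit):

fn main() {
    let (exec_sender, exec_receiver) = flume::bounded::<()>(1);

    for cmd in ["attach", "detach"] {
        // Simulate run_command: the completion callback signals when done.
        let done = exec_sender.clone();
        std::thread::spawn(move || {
            println!("running: {}", cmd);
            done.send(()).unwrap();
        });
        // Block until the command finishes before reading the next line.
        exec_receiver.recv().unwrap();
    }
}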
274 changed lines: veilid-cli/src/log_viewer_ui.rs (new file)

@@ -0,0 +1,274 @@
use crate::command_processor::*;
use crate::cursive_ui::CursiveUI;
use crate::settings::*;
use crate::tools::*;
use crate::ui::*;

use console::{style, Term};
use flexi_logger::writers::LogWriter;
use stop_token::future::FutureExt as StopTokenFutureExt;
use stop_token::*;

pub type LogViewerUICallback = Box<dyn FnMut() + Send>;

pub struct LogViewerUIInner {
    cmdproc: Option<CommandProcessor>,
    done: Option<StopSource>,
    term: Term,
    connection_state_receiver: flume::Receiver<ConnectionState>,
}

#[derive(Clone)]
pub struct LogViewerUI {
    inner: Arc<Mutex<LogViewerUIInner>>,
}

impl LogViewerUI {
    pub fn new(_settings: &Settings) -> (Self, LogViewerUISender) {
        let (cssender, csreceiver) = flume::unbounded::<ConnectionState>();

        let term = Term::stdout();
        let enable_color = console::colors_enabled() && term.features().colors_supported();

        // Create the UI object
        let this = Self {
            inner: Arc::new(Mutex::new(LogViewerUIInner {
                cmdproc: None,
                done: Some(StopSource::new()),
                term: term.clone(),
                connection_state_receiver: csreceiver,
            })),
        };

        let ui_sender = LogViewerUISender {
            inner: this.inner.clone(),
            connection_state_sender: cssender,
            term,
            enable_color,
        };

        (this, ui_sender)
    }

    pub async fn command_loop(&self) {
        let (connection_state_receiver, term, done) = {
            let inner = self.inner.lock();
            (
                inner.connection_state_receiver.clone(),
                inner.term.clone(),
                inner.done.as_ref().unwrap().token(),
            )
        };

        CursiveUI::set_start_time();

        // Wait for connection to be established
        loop {
            match connection_state_receiver.recv_async().await {
                Ok(ConnectionState::ConnectedTCP(_, _))
                | Ok(ConnectionState::ConnectedIPC(_, _)) => {
                    break;
                }
                Ok(ConnectionState::RetryingTCP(_, _)) | Ok(ConnectionState::RetryingIPC(_, _)) => {
                }
                Ok(ConnectionState::Disconnected) => {}
                Err(e) => {
                    eprintln!("Error: {:?}", e);
                    self.inner.lock().done.take();
                    break;
                }
            }
        }

        let cmdproc = self.inner.lock().cmdproc.clone().unwrap();

        if !term.features().is_attended() {
            done.await;
        } else {
            while let Ok(Ok(c)) = blocking_wrapper(
                {
                    let term = term.clone();
                    move || term.read_char()
                },
                Err(std::io::Error::other("failed")),
            )
            .timeout_at(done.clone())
            .await
            {
                match c {
                    'q' | 'Q' => {
                        self.inner.lock().done.take();
                        break;
                    }
                    'e' | 'E' => {
                        if let Err(e) = cmdproc.run_command(
                            "change_log_level api error",
                            UICallback::LogViewer(Box::new(|| {})),
                        ) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                    }
                    'w' | 'W' => {
                        if let Err(e) = cmdproc.run_command(
                            "change_log_level api warn",
                            UICallback::LogViewer(Box::new(|| {})),
                        ) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                    }
                    'i' | 'I' => {
                        if let Err(e) = cmdproc.run_command(
                            "change_log_level api info",
                            UICallback::LogViewer(Box::new(|| {})),
                        ) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                    }
                    'd' | 'D' => {
                        if let Err(e) = cmdproc.run_command(
                            "change_log_level api debug",
                            UICallback::LogViewer(Box::new(|| {})),
                        ) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                    }
                    't' | 'T' => {
                        if let Err(e) = cmdproc.run_command(
                            "change_log_level api trace",
                            UICallback::LogViewer(Box::new(|| {})),
                        ) {
                            eprintln!("Error: {:?}", e);
                            self.inner.lock().done.take();
                        }
                    }
                    'h' | 'H' => {
                        println!(
                            r"Help:
  h - This help
  e - Change log level to 'error'
  w - Change log level to 'warn'
  i - Change log level to 'info'
  d - Change log level to 'debug'
  t - Change log level to 'trace'
  q - Quit
"
                        );
                    }
                    _ => {
                        // ignore
                    }
                }
            }
        }
    }
}

impl UI for LogViewerUI {
    fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
        let mut inner = self.inner.lock();
        inner.cmdproc = Some(cmdproc);
    }
    fn run_async(&mut self) -> Pin<Box<dyn core::future::Future<Output = ()>>> {
        let this = self.clone();
        Box::pin(async move {
            this.command_loop().await;
        })
    }
}

//////////////////////////////////////////////////////////////////////////////

#[derive(Clone)]
pub struct LogViewerUISender {
    inner: Arc<Mutex<LogViewerUIInner>>,
    connection_state_sender: flume::Sender<ConnectionState>,
    term: Term,
    enable_color: bool,
}

impl UISender for LogViewerUISender {
    fn clone_uisender(&self) -> Box<dyn UISender> {
        Box::new(LogViewerUISender {
            inner: self.inner.clone(),
            connection_state_sender: self.connection_state_sender.clone(),
            term: self.term.clone(),
            enable_color: self.enable_color,
        })
    }
    fn as_logwriter(&self) -> Option<Box<dyn LogWriter>> {
        None
    }

    fn display_string_dialog(&self, title: &str, text: &str, close_cb: UICallback) {
        println!("{}: {}", title, text);
        if let UICallback::Interactive(mut close_cb) = close_cb {
            close_cb()
        }
    }

    fn quit(&self) {
        self.inner.lock().done.take();
    }

    fn send_callback(&self, callback: UICallback) {
        if let UICallback::Interactive(mut callback) = callback {
            callback();
        }
    }
    fn set_attachment_state(
        &mut self,
        _state: &str,
        _public_internet_ready: bool,
        _local_network_ready: bool,
    ) {
        //
    }
    fn set_network_status(
        &mut self,
        _started: bool,
        _bps_down: u64,
        _bps_up: u64,
        mut _peers: Vec<json::JsonValue>,
    ) {
        //
    }
    fn set_config(&mut self, _config: &json::JsonValue) {
        //
    }
    fn set_connection_state(&mut self, state: ConnectionState) {
        if let Err(e) = self.connection_state_sender.send(state) {
            eprintln!("Error: {:?}", e);
            self.inner.lock().done.take();
        }
    }

    fn add_node_event(&self, _log_color: Level, event: &str) {
        println!("{}", event);
    }
    fn add_log_event(&self, log_color: Level, event: &str) {
        let log_line = format!(
            "{}: {}",
            CursiveUI::cli_ts(CursiveUI::get_start_time()),
            event
        );
        if self.enable_color {
            let log_line = match log_color {
                Level::Error => style(log_line).red().bright().to_string(),
                Level::Warn => style(log_line).yellow().bright().to_string(),
                Level::Info => log_line,
                Level::Debug => style(log_line).green().bright().to_string(),
                Level::Trace => style(log_line).blue().bright().to_string(),
            };
            if let Err(e) = self.term.write_line(&log_line) {
                eprintln!("Error: {:?}", e);
                self.inner.lock().done.take();
            }
        } else {
            println!("{}", log_line);
        }
    }
}
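The level-to-color mapping in add_log_event is now duplicated between InteractiveUISender and LogViewerUISender. A hedged helper that both could share (not part of this commit; it assumes the log crate's Level, which matches the variants used here):

use console::style;
use log::Level;

// Map a log level to the same bright console styling used by both senders.
fn colorize(log_color: Level, log_line: String) -> String {
    match log_color {
        Level::Error => style(log_line).red().bright().to_string(),
        Level::Warn => style(log_line).yellow().bright().to_string(),
        Level::Info => log_line,
        Level::Debug => style(log_line).green().bright().to_string(),
        Level::Trace => style(log_line).blue().bright().to_string(),
    }
}

fn main() {
    println!("{}", colorize(Level::Error, "something failed".to_string()));
}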
veilid-cli/src/main.rs

@@ -3,14 +3,19 @@
 #![deny(unused_must_use)]
 #![recursion_limit = "256"]

-use crate::{settings::NamedSocketAddrs, tools::*};
+use crate::{settings::NamedSocketAddrs, tools::*, ui::*};

 use clap::{Parser, ValueEnum};
 use flexi_logger::*;
 use std::path::PathBuf;

 mod cached_text_view;
 mod client_api_connection;
 mod command_processor;
+mod cursive_ui;
+mod interactive_ui;
+mod io_read_write_ui;
+mod log_viewer_ui;
 mod peers_table_view;
 mod settings;
 mod tools;
@@ -31,7 +36,7 @@ struct CmdlineArgs {
     #[arg(long, short = 'p')]
     ipc_path: Option<PathBuf>,
     /// Subnode index to use when connecting
-    #[arg(long, short = 'i', default_value = "0")]
+    #[arg(long, default_value = "0")]
     subnode_index: usize,
     /// Address to connect to
     #[arg(long, short = 'a')]
@@ -45,183 +50,286 @@ struct CmdlineArgs {
     /// log level
     #[arg(value_enum)]
     log_level: Option<LogLevel>,
+    /// interactive
+    #[arg(long, short = 'i', group = "execution_mode")]
+    interactive: bool,
+    /// evaluate
+    #[arg(long, short = 'e', group = "execution_mode")]
+    evaluate: Option<String>,
+    /// show log only
+    #[arg(long, short = 'l', group = "execution_mode")]
+    log: bool,
+    /// read commands from file
+    #[arg(
+        long,
+        short = 'f',
+        group = "execution_mode",
+        value_name = "COMMAND_FILE"
+    )]
+    command_file: Option<PathBuf>,
 }
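All four new flags join the clap group "execution_mode", which makes them mutually exclusive: at most one of --interactive, --evaluate, --log, or --command-file can be given, and omitting them all leaves the default cursive UI. Note that the short flag -i moves from subnode_index to --interactive here. A minimal stand-alone illustration of the group mechanism (struct reduced to three of the flags):

use clap::Parser;

#[derive(Parser)]
struct Args {
    // Flags sharing a clap group conflict with each other by default.
    #[arg(long, short = 'i', group = "execution_mode")]
    interactive: bool,
    #[arg(long, short = 'e', group = "execution_mode")]
    evaluate: Option<String>,
    #[arg(long, short = 'l', group = "execution_mode")]
    log: bool,
}

fn main() {
    // Parses fine; "--interactive --log" would instead exit with a conflict error.
    let args = Args::parse_from(["prog", "--interactive"]);
    assert!(args.interactive && !args.log);
}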
fn main() -> Result<(), String> {
    // Get command line options
    let default_config_path = settings::Settings::get_default_config_path();
    let args = CmdlineArgs::parse();
    // Start async
    block_on(async move {
        // Get command line options
        let default_config_path = settings::Settings::get_default_config_path();
        let args = CmdlineArgs::parse();

        if args.wait_for_debug {
            use bugsalot::debugger;
            debugger::wait_until_attached(None).expect("state() not implemented on this platform");
        }

        // Attempt to load configuration
        let settings_path = args.config_file.unwrap_or(default_config_path);
        let settings_path = if settings_path.exists() {
            Some(settings_path.into_os_string())
        } else {
            None
        };

        let mut settings = settings::Settings::new(settings_path.as_deref())
            .map_err(|e| format!("configuration is invalid: {}", e))?;

        // Set config from command line
        if let Some(LogLevel::Debug) = args.log_level {
            settings.logging.level = settings::LogLevel::Debug;
            settings.logging.terminal.enabled = true;
        }
        if let Some(LogLevel::Trace) = args.log_level {
            settings.logging.level = settings::LogLevel::Trace;
            settings.logging.terminal.enabled = true;
        }

        // Create UI object
        let (mut sivui, uisender) = ui::UI::new(settings.interface.node_log.scrollback, &settings);

        // Set up loggers
        {
            let mut specbuilder = LogSpecBuilder::new();
            specbuilder.default(settings::convert_loglevel(settings.logging.level));
            specbuilder.module("cursive", LevelFilter::Off);
            specbuilder.module("cursive_core", LevelFilter::Off);
            specbuilder.module("cursive_buffered_backend", LevelFilter::Off);
            specbuilder.module("tokio_util", LevelFilter::Off);
            specbuilder.module("mio", LevelFilter::Off);
            specbuilder.module("async_std", LevelFilter::Off);
            specbuilder.module("async_io", LevelFilter::Off);
            specbuilder.module("polling", LevelFilter::Off);

            let logger = Logger::with(specbuilder.build());

            if settings.logging.terminal.enabled {
                if settings.logging.file.enabled {
                    std::fs::create_dir_all(settings.logging.file.directory.clone())
                        .map_err(map_to_string)?;
                    logger
                        .log_to_file_and_writer(
                            FileSpec::default()
                                .directory(settings.logging.file.directory.clone())
                                .suppress_timestamp(),
                            Box::new(uisender.clone()),
                        )
                        .start()
                        .expect("failed to initialize logger!");
                } else {
                    logger
                        .log_to_writer(Box::new(uisender.clone()))
                        .start()
                        .expect("failed to initialize logger!");
                }
            } else if settings.logging.file.enabled {
                std::fs::create_dir_all(settings.logging.file.directory.clone())
                    .map_err(map_to_string)?;
                logger
                    .log_to_file(
                        FileSpec::default()
                            .directory(settings.logging.file.directory.clone())
                            .suppress_timestamp(),
                    )
                    .start()
                    .expect("failed to initialize logger!");
                if args.wait_for_debug {
                    use bugsalot::debugger;
                    debugger::wait_until_attached(None).expect("state() not implemented on this platform");
                }
            }
        }

        // Get client address
        let enable_ipc = (settings.enable_ipc && args.address.is_none()) || args.ipc_path.is_some();
        let mut enable_network =
            (settings.enable_network && args.ipc_path.is_none()) || args.address.is_some();

        // Determine IPC path to try
        let mut client_api_ipc_path = None;
        if enable_ipc {
            cfg_if::cfg_if! {
                if #[cfg(windows)] {
                    if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
                        if is_ipc_socket_path(&ipc_path) {
                            // try direct path
                            enable_network = false;
                            client_api_ipc_path = Some(ipc_path);
                        } else {
                            // try subnode index inside path
                            let ipc_path = ipc_path.join(args.subnode_index.to_string());
                            if is_ipc_socket_path(&ipc_path) {
                                // subnode indexed path exists
                                enable_network = false;
                                client_api_ipc_path = Some(ipc_path);
                            }
                        }
                    }
                } else {
                    if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
                        if is_ipc_socket_path(&ipc_path) {
                            // try direct path
                            enable_network = false;
                            client_api_ipc_path = Some(ipc_path);
                        } else if ipc_path.exists() && ipc_path.is_dir() {
                            // try subnode index inside path
                            let ipc_path = ipc_path.join(args.subnode_index.to_string());
                            if is_ipc_socket_path(&ipc_path) {
                                // subnode indexed path exists
                                enable_network = false;
                                client_api_ipc_path = Some(ipc_path);
                            }
                        }
                    }
                }
            }
        }
        let mut client_api_network_addresses = None;
        if enable_network {
            let args_address = if let Some(args_address) = args.address {
                match NamedSocketAddrs::try_from(args_address) {
                    Ok(v) => Some(v),
                    Err(e) => {
                        return Err(format!("Invalid server address: {}", e));
                    }
                }
        // Attempt to load configuration
        let settings_path = args.config_file.unwrap_or(default_config_path);
        let settings_path = if settings_path.exists() {
            Some(settings_path.into_os_string())
        } else {
            None
        };
            if let Some(address_arg) = args_address.or(settings.address.clone()) {
                client_api_network_addresses = Some(address_arg.addrs);
            } else if let Some(address) = settings.address.clone() {
                client_api_network_addresses = Some(address.addrs.clone());

        let mut settings = settings::Settings::new(settings_path.as_deref())
            .map_err(|e| format!("configuration is invalid: {}", e))?;

        // Set config from command line
        if let Some(LogLevel::Debug) = args.log_level {
            settings.logging.level = settings::LogLevel::Debug;
            settings.logging.terminal.enabled = true;
        }
        if let Some(LogLevel::Trace) = args.log_level {
            settings.logging.level = settings::LogLevel::Trace;
            settings.logging.terminal.enabled = true;
        }
        }

        // Create command processor
        debug!("Creating Command Processor ");
        let comproc = command_processor::CommandProcessor::new(uisender, &settings);
        sivui.set_command_processor(comproc.clone());
        // If we are running in interactive mode disable some things
        let mut enable_cursive = true;
        if args.interactive || args.log || args.command_file.is_some() || args.evaluate.is_some() {
            settings.logging.terminal.enabled = false;
            enable_cursive = false;
        }

        // Create client api client side
        info!("Starting API connection");
        let capi = client_api_connection::ClientApiConnection::new(comproc.clone());
        // Create UI object
        let (mut ui, uisender) = if enable_cursive {
            let (ui, uisender) = cursive_ui::CursiveUI::new(&settings);
            (
                Box::new(ui) as Box<dyn UI>,
                Box::new(uisender) as Box<dyn UISender>,
            )
        } else if args.interactive {
            let (ui, uisender) = interactive_ui::InteractiveUI::new(&settings);
            (
                Box::new(ui) as Box<dyn UI>,
                Box::new(uisender) as Box<dyn UISender>,
            )
        } else if let Some(command_file) = args.command_file {
            cfg_if! {
                if #[cfg(feature="rt-async-std")] {
                    let (in_obj, out_obj) =
                        if command_file.to_string_lossy() == "-" {
                            (Box::pin(async_std::io::stdin()) as Pin<Box<dyn futures::AsyncRead + Send>>, async_std::io::stdout())
                        } else {
                            let f = match async_std::fs::File::open(command_file).await {
                                Ok(v) => v,
                                Err(e) => {
                                    return Err(e.to_string());
                                }
                            };
                            (Box::pin(f) as Pin<Box<dyn futures::AsyncRead + Send>>, async_std::io::stdout())
                        };
                } else if #[cfg(feature="rt-tokio")] {
                    use tokio_util::compat::{TokioAsyncWriteCompatExt, TokioAsyncReadCompatExt};
                    let (in_obj, out_obj) =
                        if command_file.to_string_lossy() == "-" {
                            (Box::pin(tokio::io::stdin().compat()) as Pin<Box<dyn futures::AsyncRead + Send>>, tokio::io::stdout().compat_write())
                        } else {
                            let f = match tokio::fs::File::open(command_file).await {
                                Ok(v) => v,
                                Err(e) => {
                                    return Err(e.to_string());
                                }
                            };
                            (Box::pin(f.compat()) as Pin<Box<dyn futures::AsyncRead + Send>>, tokio::io::stdout().compat_write())
                        };
                } else {
                    compile_error!("needs executor implementation")
                }
            }

            // Save client api in command processor
            comproc.set_client_api_connection(capi.clone());
            let (ui, uisender) = io_read_write_ui::IOReadWriteUI::new(&settings, in_obj, out_obj);
            (
                Box::new(ui) as Box<dyn UI>,
                Box::new(uisender) as Box<dyn UISender>,
            )
        } else if let Some(evaluate) = args.evaluate {
            cfg_if! {
                if #[cfg(feature="rt-async-std")] {
                    let in_str = format!("{}\n", evaluate);
                    let (in_obj, out_obj) = (futures::io::Cursor::new(in_str), async_std::io::stdout());
                } else if #[cfg(feature="rt-tokio")] {
                    use tokio_util::compat::{TokioAsyncWriteCompatExt};
                    let in_str = format!("{}\n", evaluate);
                    let (in_obj, out_obj) = (futures::io::Cursor::new(in_str), tokio::io::stdout().compat_write());
                } else {
                    compile_error!("needs executor implementation")
                }
            }

            // Keep a connection to the server
            if let Some(client_api_ipc_path) = client_api_ipc_path {
                comproc.set_ipc_path(Some(client_api_ipc_path));
            } else if let Some(client_api_network_address) = client_api_network_addresses {
                let network_addr = client_api_network_address.first().cloned();
                comproc.set_network_address(network_addr);
            } else {
                return Err("veilid-server could not be reached".to_owned());
            }
            let (ui, uisender) = io_read_write_ui::IOReadWriteUI::new(&settings, in_obj, out_obj);
            (
                Box::new(ui) as Box<dyn UI>,
                Box::new(uisender) as Box<dyn UISender>,
            )
        } else if args.log {
            let (ui, uisender) = log_viewer_ui::LogViewerUI::new(&settings);
            (
                Box::new(ui) as Box<dyn UI>,
                Box::new(uisender) as Box<dyn UISender>,
            )
        } else {
            panic!("unknown ui mode");
        };
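Both the --command-file and --evaluate branches above adapt Tokio's AsyncRead/AsyncWrite into the futures-io traits that IOReadWriteUI expects via tokio_util::compat, keeping the UI executor-agnostic. A minimal stand-alone sketch of that adapter under the rt-tokio configuration (crate setup assumed; not part of this commit):

use futures::{io::BufReader, AsyncBufReadExt, AsyncWriteExt};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // .compat()/.compat_write() wrap Tokio handles so they implement
    // futures::AsyncRead / futures::AsyncWrite.
    let stdin = tokio::io::stdin().compat();
    let mut stdout = tokio::io::stdout().compat_write();

    let mut line = String::new();
    BufReader::new(stdin).read_line(&mut line).await?;
    stdout.write_all(format!("echo: {}", line).as_bytes()).await?;
    stdout.flush().await?;
    Ok(())
}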

        let comproc2 = comproc.clone();
        let connection_future = comproc.connection_manager();
        // Set up loggers
        {
            let mut specbuilder = LogSpecBuilder::new();
            specbuilder.default(settings::convert_loglevel(settings.logging.level));
            specbuilder.module("cursive", LevelFilter::Off);
            specbuilder.module("cursive_core", LevelFilter::Off);
            specbuilder.module("cursive_buffered_backend", LevelFilter::Off);
            specbuilder.module("tokio_util", LevelFilter::Off);
            specbuilder.module("mio", LevelFilter::Off);
            specbuilder.module("async_std", LevelFilter::Off);
            specbuilder.module("async_io", LevelFilter::Off);
            specbuilder.module("polling", LevelFilter::Off);

            let logger = Logger::with(specbuilder.build());

            if settings.logging.terminal.enabled {
                if settings.logging.file.enabled {
                    std::fs::create_dir_all(settings.logging.file.directory.clone())
                        .map_err(map_to_string)?;
                    logger
                        .log_to_file_and_writer(
                            FileSpec::default()
                                .directory(settings.logging.file.directory.clone())
                                .suppress_timestamp(),
                            uisender.as_logwriter().unwrap(),
                        )
                        .start()
                        .expect("failed to initialize logger!");
                } else {
                    logger
                        .log_to_writer(uisender.as_logwriter().unwrap())
                        .start()
                        .expect("failed to initialize logger!");
                }
            } else if settings.logging.file.enabled {
                std::fs::create_dir_all(settings.logging.file.directory.clone())
                    .map_err(map_to_string)?;
                logger
                    .log_to_file(
                        FileSpec::default()
                            .directory(settings.logging.file.directory.clone())
                            .suppress_timestamp(),
                    )
                    .start()
                    .expect("failed to initialize logger!");
            }
        }

        // Get client address
        let enable_ipc = (settings.enable_ipc && args.address.is_none()) || args.ipc_path.is_some();
        let mut enable_network =
            (settings.enable_network && args.ipc_path.is_none()) || args.address.is_some();

        // Determine IPC path to try
        let mut client_api_ipc_path = None;
        if enable_ipc {
            cfg_if::cfg_if! {
                if #[cfg(windows)] {
                    if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
                        if is_ipc_socket_path(&ipc_path) {
                            // try direct path
                            enable_network = false;
                            client_api_ipc_path = Some(ipc_path);
                        } else {
                            // try subnode index inside path
                            let ipc_path = ipc_path.join(args.subnode_index.to_string());
                            if is_ipc_socket_path(&ipc_path) {
                                // subnode indexed path exists
                                enable_network = false;
                                client_api_ipc_path = Some(ipc_path);
                            }
                        }
                    }
                } else {
                    if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
                        if is_ipc_socket_path(&ipc_path) {
                            // try direct path
                            enable_network = false;
                            client_api_ipc_path = Some(ipc_path);
                        } else if ipc_path.exists() && ipc_path.is_dir() {
                            // try subnode index inside path
                            let ipc_path = ipc_path.join(args.subnode_index.to_string());
                            if is_ipc_socket_path(&ipc_path) {
                                // subnode indexed path exists
                                enable_network = false;
                                client_api_ipc_path = Some(ipc_path);
                            }
                        }
                    }
                }
            }
        }
        let mut client_api_network_addresses = None;
        if enable_network {
            let args_address = if let Some(args_address) = args.address {
                match NamedSocketAddrs::try_from(args_address) {
                    Ok(v) => Some(v),
                    Err(e) => {
                        return Err(format!("Invalid server address: {}", e));
                    }
                }
            } else {
                None
            };
            if let Some(address_arg) = args_address.or(settings.address.clone()) {
                client_api_network_addresses = Some(address_arg.addrs);
            } else if let Some(address) = settings.address.clone() {
                client_api_network_addresses = Some(address.addrs.clone());
            }
        }

        // Create command processor
|
||||
debug!("Creating Command Processor ");
|
||||
let comproc = command_processor::CommandProcessor::new(uisender, &settings);
|
||||
|
||||
ui.set_command_processor(comproc.clone());
|
||||
|
||||
// Create client api client side
|
||||
info!("Starting API connection");
|
||||
let capi = client_api_connection::ClientApiConnection::new(comproc.clone());
|
||||
|
||||
// Save client api in command processor
|
||||
comproc.set_client_api_connection(capi.clone());
|
||||
|
||||
// Keep a connection to the server
|
||||
if let Some(client_api_ipc_path) = client_api_ipc_path {
|
||||
comproc.set_ipc_path(Some(client_api_ipc_path));
|
||||
} else if let Some(client_api_network_address) = client_api_network_addresses {
|
||||
let network_addr = client_api_network_address.first().cloned();
|
||||
comproc.set_network_address(network_addr);
|
||||
} else {
|
||||
return Err("veilid-server could not be reached".to_owned());
|
||||
}
|
||||
|
||||
let comproc2 = comproc.clone();
|
||||
let connection_future = comproc.connection_manager();
|
||||
|
||||
// Start async
|
||||
block_on(async move {
|
||||
// Start UI
|
||||
let ui_future = async move {
|
||||
sivui.run_async().await;
|
||||
ui.run_async().await;
|
||||
|
||||
// When UI quits, close connection and command processor cleanly
|
||||
comproc2.quit();
|
||||
@ -240,7 +348,6 @@ fn main() -> Result<(), String> {
|
||||
compile_error!("needs executor implementation")
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
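The stdin-vs-file selection above boils down to erasing both sources into one Pin<Box<dyn AsyncRead + Send>> so the downstream UI takes a single reader type. A self-contained sketch of that pattern (tokio flavor; the futures and tokio-util crates are assumed dependencies, and open_input is a hypothetical helper, not veilid-cli API):

    use std::pin::Pin;
    use tokio_util::compat::TokioAsyncReadCompatExt;

    // Erase "stdin or file" into one boxed reader; "-" means stdin, as above.
    async fn open_input(path: &str) -> Result<Pin<Box<dyn futures::AsyncRead + Send>>, String> {
        if path == "-" {
            Ok(Box::pin(tokio::io::stdin().compat()) as Pin<Box<dyn futures::AsyncRead + Send>>)
        } else {
            let f = tokio::fs::File::open(path).await.map_err(|e| e.to_string())?;
            Ok(Box::pin(f.compat()))
        }
    }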
veilid-cli/src/ui.rs
File diff suppressed because it is too large
@ -47,12 +47,10 @@ enable-crypto-none = []
# Debugging and testing features
verbose-tracing = []
tracking = []
debug-dht = []
crypto-test = ["enable-crypto-vld0", "enable-crypto-none"]
crypto-test-none = ["enable-crypto-none"]
veilid_core_android_tests = ["dep:paranoid-android"]
veilid_core_ios_tests = ["dep:tracing-oslog"]
network-result-extra = ["veilid-tools/network-result-extra"]

### DEPENDENCIES

@ -122,7 +120,7 @@ chacha20 = "0.9.1"
argon2 = "0.5.2"

# Network
async-std-resolver = { version = "0.23.2", optional = true }
async-std-resolver = { version = "0.24.0", optional = true }
hickory-resolver = { version = "0.24.0", optional = true }

# Serialization
@ -144,13 +142,14 @@ lz4_flex = { version = "0.11.1", default-features = false, features = [

# Tools
config = { version = "0.13.4", features = ["yaml"] }
bugsalot = { package = "veilid-bugsalot", version = "0.1.0" }
bugsalot = { package = "veilid-bugsalot", version = "0.2.0" }
chrono = "0.4.31"
libc = "0.2.151"
nix = "0.27.1"

# System
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
sysinfo = { version = "^0.30.6" }
tokio = { version = "1.35.0", features = ["full"], optional = true }
tokio-util = { version = "0.7.10", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.14", features = ["net"], optional = true }
@ -81,11 +81,7 @@ fn append_hash<P: AsRef<Path>, Q: AsRef<Path>>(input_path: P, output_path: Q) {
    let output_path = output_path.as_ref();
    let lines = std::io::BufReader::new(std::fs::File::open(input_path).unwrap()).lines();
    let h = calculate_hash(lines);
    let mut out_file = OpenOptions::new()
        .write(true)
        .append(true)
        .open(output_path)
        .unwrap();
    let mut out_file = OpenOptions::new().append(true).open(output_path).unwrap();
    writeln!(out_file, "\n//BUILDHASH:{}", hex::encode(h)).unwrap();
}
@ -346,30 +346,46 @@ struct OperationSetValueQ @0xbac06191ff8bdbc5 {
}

struct OperationSetValueA @0x9378d0732dc95be2 {
    set @0 :Bool; # true if the set was close enough to be set
    set @0 :Bool; # true if the set was accepted
    value @1 :SignedValueData; # optional: the current value at the key if the set seq number was lower or equal to what was there before
    peers @2 :List(PeerInfo); # returned 'closer peer' information on either success or failure
}

struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
    key @0 :TypedKey; # key for value to watch
    subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges), if empty, watch everything
    subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges), if empty this implies 0..=UINT32_MAX
    expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (can be return less, 0 for max)
    count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)
    watcher @4 :PublicKey; # optional: the watcher performing the watch, can be the owner or a schema member
    signature @5 :Signature; # optional: signature of the watcher, must be one of the schema members or the key owner. signature covers: key, subkeys, expiration, count
    watchId @4 :UInt64; # if 0, request a new watch. if >0, existing watch id
    watcher @5 :PublicKey; # the watcher performing the watch, can be the owner or a schema member, or a generated anonymous watch keypair
    signature @6 :Signature; # signature of the watcher, signature covers: key, subkeys, expiration, count, watchId
}

struct OperationWatchValueA @0xa726cab7064ba893 {
    expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch was rejected). if watch is being cancelled (with count = 0), this will be the non-zero former expiration time.
    peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
    accepted @0 :Bool; # true if the watch was close enough to be accepted
    expiration @1 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch was cancelled/dropped)
    peers @2 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
    watchId @3 :UInt64; # (0 = id not allocated if rejecting new watch) random id for watch instance on this node
}

struct OperationInspectValueQ @0xdef712d2fd16f55a {
    key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
    subkeys @1 :List(SubkeyRange); # subkey range to inspect (up to 512 total subkeys), if empty this implies 0..=511
    wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}

struct OperationInspectValueA @0xb8b57faf960ee102 {
    seqs @0 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey in the requested range. if a subkey has not been written to, it is given a value of UINT32_MAX. these are not signed, and may be immediately out of date, and must be verified by a GetValueQ request.
    peers @1 :List(PeerInfo); # returned 'closer peer' information on either success or failure
    descriptor @2 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
}

struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
    key @0 :TypedKey; # key for value that changed
    subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time)
    subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time, if empty this is a watch expiration notice)
    count @2 :UInt32; # remaining changes left (0 means watch has expired)
    value @3 :SignedValueData; # first value that changed (the rest can be gotten with getvalue)
    watchId @3 :UInt64; # watch id this value change came from
    value @4 :SignedValueData; # first value that changed (the rest can be gotten with getvalue)
}

struct OperationSupplyBlockQ @0xadbf4c542d749971 {
@ -483,15 +499,17 @@ struct Question @0xd8510bc33492ef70 {
    getValueQ @5 :OperationGetValueQ;
    setValueQ @6 :OperationSetValueQ;
    watchValueQ @7 :OperationWatchValueQ;
    inspectValueQ @8 :OperationInspectValueQ;

    # #[cfg(feature="unstable-blockstore")]
    # supplyBlockQ @8 :OperationSupplyBlockQ;
    # findBlockQ @9 :OperationFindBlockQ;
    # supplyBlockQ @9 :OperationSupplyBlockQ;
    # findBlockQ @10 :OperationFindBlockQ;

    # Tunnel operations
    # #[cfg(feature="unstable-tunnels")]
    # startTunnelQ @10 :OperationStartTunnelQ;
    # completeTunnelQ @11 :OperationCompleteTunnelQ;
    # cancelTunnelQ @12 :OperationCancelTunnelQ;
    # startTunnelQ @11 :OperationStartTunnelQ;
    # completeTunnelQ @12 :OperationCompleteTunnelQ;
    # cancelTunnelQ @13 :OperationCancelTunnelQ;
}
}

@ -522,16 +540,17 @@ struct Answer @0xacacb8b6988c1058 {
    getValueA @3 :OperationGetValueA;
    setValueA @4 :OperationSetValueA;
    watchValueA @5 :OperationWatchValueA;
    inspectValueA @6 :OperationInspectValueA;

    # #[cfg(feature="unstable-blockstore")]
    #supplyBlockA @6 :OperationSupplyBlockA;
    #findBlockA @7 :OperationFindBlockA;
    #supplyBlockA @7 :OperationSupplyBlockA;
    #findBlockA @8 :OperationFindBlockA;

    # Tunnel operations
    # #[cfg(feature="unstable-tunnels")]
    # startTunnelA @8 :OperationStartTunnelA;
    # completeTunnelA @9 :OperationCompleteTunnelA;
    # cancelTunnelA @10 :OperationCancelTunnelA;
    # startTunnelA @9 :OperationStartTunnelA;
    # completeTunnelA @10 :OperationCompleteTunnelA;
    # cancelTunnelA @11 :OperationCancelTunnelA;
}
}
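For orientation, a minimal sketch of the watch lifecycle the new (watchId, count) fields encode, per the schema comments above: watchId == 0 asks the node to allocate a new watch, a nonzero watchId renews it, and count == 0 cancels. This uses plain stand-in Rust types, not veilid-core's real ones:

    // Toy model of OperationWatchValueQ's (watchId, count) semantics.
    struct WatchValueQ {
        watch_id: u64,
        count: u32,
    }

    fn classify(q: &WatchValueQ) -> &'static str {
        match (q.watch_id, q.count) {
            (0, 0) => "nothing to do: no watch to cancel",
            (0, _) => "request a new watch; the answer carries the allocated watchId",
            (_, 0) => "cancel the existing watch; the answer reports expiration 0",
            (_, _) => "renew the existing watch under its watchId",
        }
    }

    fn main() {
        assert_eq!(
            classify(&WatchValueQ { watch_id: 0, count: u32::MAX }),
            "request a new watch; the answer carries the allocated watchId"
        );
        assert_eq!(
            classify(&WatchValueQ { watch_id: 42, count: 0 }),
            "cancel the existing watch; the answer reports expiration 0"
        );
    }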
File diff suppressed because it is too large
@ -179,7 +179,14 @@ impl AttachmentManager {
    fn update_attaching_detaching_state(&self, state: AttachmentState) {
        let update_callback = {
            let mut inner = self.inner.lock();

            // Clear routing table health so when we start measuring it we start from scratch
            inner.last_routing_table_health = None;

            // Set attachment state directly
            inner.last_attachment_state = state;

            // Set timestamps
            if state == AttachmentState::Attaching {
                inner.attach_ts = Some(get_aligned_timestamp());
            } else if state == AttachmentState::Detached {
@ -189,9 +196,12 @@ impl AttachmentManager {
            } else {
                unreachable!("don't use this for attached states, use update_attachment()");
            }

            // Get callback
            inner.update_callback.clone()
        };

        // Send update
        if let Some(update_callback) = update_callback {
            update_callback(VeilidUpdate::Attachment(Box::new(VeilidStateAttachment {
                state,
@ -203,7 +213,7 @@ impl AttachmentManager {

    #[instrument(level = "debug", skip(self))]
    async fn attachment_maintainer(self) {
        debug!("attachment starting");
        log_net!(debug "attachment starting");
        self.update_attaching_detaching_state(AttachmentState::Attaching);

        let netman = self.network_manager();
@ -217,7 +227,7 @@ impl AttachmentManager {
                break;
            }

            debug!("started maintaining peers");
            log_net!(debug "started maintaining peers");
            while self.inner.lock().maintain_peers {
                // tick network manager
                if let Err(err) = netman.tick().await {
@ -241,32 +251,31 @@ impl AttachmentManager {
                // sleep should be at the end in case maintain_peers changes state
                sleep(1000).await;
            }
            debug!("stopped maintaining peers");
            log_net!(debug "stopped maintaining peers");

            if !restart {
                self.update_attaching_detaching_state(AttachmentState::Detaching);
                debug!("attachment stopping");
                log_net!(debug "attachment stopping");
            }

            debug!("stopping network");
            log_net!(debug "stopping network");
            netman.shutdown().await;

            if !restart {
                break;
            }

            debug!("completely restarting attachment");
            log_net!(debug "completely restarting attachment");
            // chill out for a second first, give network stack time to settle out
            sleep(1000).await;
        }

        self.update_attaching_detaching_state(AttachmentState::Detached);
        debug!("attachment stopped");
        log_net!(debug "attachment stopped");
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
        trace!("init");
        {
            let mut inner = self.inner.lock();
            inner.update_callback = Some(update_callback.clone());
@ -1,6 +1,6 @@
use crate::api_tracing_layer::*;
use crate::attachment_manager::*;
use crate::crypto::Crypto;
use crate::logging::*;
use crate::storage_manager::*;
use crate::veilid_api::*;
use crate::veilid_config::*;
@ -70,7 +70,6 @@ impl ServicesContext {
        ApiTracingLayer::init(self.update_callback.clone()).await;

        // Set up protected store
        trace!("init protected store");
        let protected_store = ProtectedStore::new(self.config.clone());
        if let Err(e) = protected_store.init().await {
            error!("failed to init protected store: {}", e);
@ -80,7 +79,6 @@ impl ServicesContext {
        self.protected_store = Some(protected_store.clone());

        // Set up tablestore and crypto system
        trace!("create table store and crypto system");
        let table_store = TableStore::new(self.config.clone(), protected_store.clone());
        let crypto = Crypto::new(self.config.clone(), table_store.clone());
        table_store.set_crypto(crypto.clone());
@ -88,7 +86,6 @@ impl ServicesContext {
        // Initialize table store first, so crypto code can load caches
        // Tablestore can use crypto during init, just not any cached operations or things
        // that require flushing back to the tablestore
        trace!("init table store");
        if let Err(e) = table_store.init().await {
            error!("failed to init table store: {}", e);
            self.shutdown().await;
@ -97,7 +94,6 @@ impl ServicesContext {
        self.table_store = Some(table_store.clone());

        // Set up crypto
        trace!("init crypto");
        if let Err(e) = crypto.init().await {
            error!("failed to init crypto: {}", e);
            self.shutdown().await;
@ -108,7 +104,6 @@ impl ServicesContext {
        // Set up block store
        #[cfg(feature = "unstable-blockstore")]
        {
            trace!("init block store");
            let block_store = BlockStore::new(self.config.clone());
            if let Err(e) = block_store.init().await {
                error!("failed to init block store: {}", e);
@ -119,7 +114,6 @@ impl ServicesContext {
        }

        // Set up storage manager
        trace!("init storage manager");
        let update_callback = self.update_callback.clone();

        let storage_manager = StorageManager::new(
@ -137,7 +131,6 @@ impl ServicesContext {
        self.storage_manager = Some(storage_manager.clone());

        // Set up attachment manager
        trace!("init attachment manager");
        let update_callback = self.update_callback.clone();
        let attachment_manager = AttachmentManager::new(
            self.config.clone(),
@ -163,28 +156,22 @@ impl ServicesContext {
        info!("Veilid API shutting down");

        if let Some(attachment_manager) = &mut self.attachment_manager {
            trace!("terminate attachment manager");
            attachment_manager.terminate().await;
        }
        if let Some(storage_manager) = &mut self.storage_manager {
            trace!("terminate storage manager");
            storage_manager.terminate().await;
        }
        #[cfg(feature = "unstable-blockstore")]
        if let Some(block_store) = &mut self.block_store {
            trace!("terminate block store");
            block_store.terminate().await;
        }
        if let Some(crypto) = &mut self.crypto {
            trace!("terminate crypto");
            crypto.terminate().await;
        }
        if let Some(table_store) = &mut self.table_store {
            trace!("terminate table store");
            table_store.terminate().await;
        }
        if let Some(protected_store) = &mut self.protected_store {
            trace!("terminate protected store");
            protected_store.terminate().await;
        }

@ -220,7 +207,6 @@ impl VeilidCoreContext {
        config_callback: ConfigCallback,
    ) -> VeilidAPIResult<VeilidCoreContext> {
        // Set up config from callback
        trace!("setup config with callback");
        let mut config = VeilidConfig::new();
        config.setup(config_callback, update_callback.clone())?;

@ -233,20 +219,17 @@ impl VeilidCoreContext {
        config_json: String,
    ) -> VeilidAPIResult<VeilidCoreContext> {
        // Set up config from json
        trace!("setup config with json");
        let mut config = VeilidConfig::new();
        config.setup_from_json(config_json, update_callback.clone())?;
        Self::new_common(update_callback, config).await
    }

    #[instrument(err, skip_all)]
    async fn new_with_config(
        update_callback: UpdateCallback,
        config_inner: VeilidConfigInner,
    ) -> VeilidAPIResult<VeilidCoreContext> {
        // Set up config from json
        trace!("setup config with json");
        let mut config = VeilidConfig::new();
        config.setup_from_config(config_inner, update_callback.clone())?;
        Self::new_common(update_callback, config).await
@ -1,5 +1,7 @@
use super::*;

const VEILID_DOMAIN_API: &[u8] = b"VEILID_API";

pub trait CryptoSystem {
    // Accessors
    fn kind(&self) -> CryptoKind;
@ -17,6 +19,15 @@ pub trait CryptoSystem {
    fn random_nonce(&self) -> Nonce;
    fn random_shared_secret(&self) -> SharedSecret;
    fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret>;
    fn generate_shared_secret(
        &self,
        key: &PublicKey,
        secret: &SecretKey,
        domain: &[u8],
    ) -> VeilidAPIResult<SharedSecret> {
        let dh = self.compute_dh(key, secret)?;
        Ok(self.generate_hash(&[&dh.bytes, domain, VEILID_DOMAIN_API].concat()))
    }
    fn generate_keypair(&self) -> KeyPair;
    fn generate_hash(&self, data: &[u8]) -> HashDigest;
    fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<HashDigest>;
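The new generate_shared_secret default method domain-separates DH outputs by hashing them together with a caller-supplied domain and the VEILID_DOMAIN_API constant. A self-contained sketch of the idea, with std's non-cryptographic hasher standing in for the crypto system's generate_hash (illustration only, not veilid-core code):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Stand-in hash; veilid uses the crypto system's generate_hash() instead.
    fn generate_hash(data: &[u8]) -> u64 {
        let mut h = DefaultHasher::new();
        data.hash(&mut h);
        h.finish()
    }

    // Mirrors the default method: hash(dh_bytes || domain || b"VEILID_API").
    fn generate_shared_secret(dh_bytes: &[u8], domain: &[u8]) -> u64 {
        generate_hash(&[dh_bytes, domain, b"VEILID_API".as_slice()].concat())
    }

    fn main() {
        let dh = b"example dh output";
        // The same DH result yields unrelated secrets under different domains.
        assert_ne!(generate_shared_secret(dh, b"chat"), generate_shared_secret(dh, b"files"));
    }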
@ -128,8 +128,8 @@ impl Crypto {
        self.unlocked_inner.config.clone()
    }

    #[instrument(skip_all, err)]
    pub async fn init(&self) -> EyreResult<()> {
        trace!("Crypto::init");
        let table_store = self.unlocked_inner.table_store.clone();
        // Init node id from config
        if let Err(e) = self
@ -190,7 +190,6 @@ impl Crypto {
    }

    pub async fn flush(&self) -> EyreResult<()> {
        //trace!("Crypto::flush");
        let cache_bytes = {
            let inner = self.inner.lock();
            cache_to_bytes(&inner.dh_cache)
@ -206,15 +205,14 @@ impl Crypto {
    }

    pub async fn terminate(&self) {
        trace!("Crypto::terminate");
        let flush_future = self.inner.lock().flush_future.take();
        if let Some(f) = flush_future {
            f.await;
        }
        trace!("starting termination flush");
        log_crypto!("starting termination flush");
        match self.flush().await {
            Ok(_) => {
                trace!("finished termination flush");
                log_crypto!("finished termination flush");
            }
            Err(e) => {
                error!("failed termination flush: {}", e);
@ -33,7 +33,7 @@ impl ProtectedStore {
                if let Err(e) = self.remove_user_secret(kpsk).await {
                    error!("failed to delete '{}': {}", kpsk, e);
                } else {
                    debug!("deleted table '{}'", kpsk);
                    log_pstore!(debug "deleted table '{}'", kpsk);
                }
            }
            Ok(())

@ -19,7 +19,7 @@ impl ProtectedStore {
                if let Err(e) = self.remove_user_secret(kpsk).await {
                    error!("failed to delete '{}': {}", kpsk, e);
                } else {
                    debug!("deleted table '{}'", kpsk);
                    log_pstore!(debug "deleted table '{}'", kpsk);
                }
            }
            Ok(())
@ -44,11 +44,11 @@ cfg_if::cfg_if! {
#[macro_use]
extern crate alloc;

mod api_tracing_layer;
mod attachment_manager;
mod core_context;
mod crypto;
mod intf;
mod logging;
mod network_manager;
mod routing_table;
mod rpc_processor;
@ -56,14 +56,12 @@ mod storage_manager;
mod table_store;
mod veilid_api;
mod veilid_config;
mod veilid_layer_filter;
mod wasm_helpers;

pub use self::api_tracing_layer::ApiTracingLayer;
pub use self::core_context::{api_startup, api_startup_json, api_startup_config, UpdateCallback};
pub use self::core_context::{api_startup, api_startup_config, api_startup_json, UpdateCallback};
pub use self::logging::{ApiTracingLayer, VeilidLayerFilter};
pub use self::veilid_api::*;
pub use self::veilid_config::*;
pub use self::veilid_layer_filter::*;
pub use veilid_tools as tools;

/// The on-the-wire serialization format for Veilid RPC
@ -88,10 +86,15 @@ pub fn veilid_version() -> (u32, u32, u32) {
    )
}

/// Return the default veilid config as a json object
pub fn default_veilid_config() -> String {
    serialize_json(VeilidConfigInner::default())
}

#[cfg(target_os = "android")]
pub use intf::android::veilid_core_setup_android;

pub static DEFAULT_LOG_IGNORE_LIST: [&str; 23] = [
pub static DEFAULT_LOG_IGNORE_LIST: [&str; 26] = [
    "mio",
    "h2",
    "hyper",
@ -110,11 +113,14 @@ pub static DEFAULT_LOG_IGNORE_LIST: [&str; 23] = [
    "tungstenite",
    "netlink_proto",
    "netlink_sys",
    "trust_dns_resolver",
    "trust_dns_proto",
    "hickory_resolver",
    "hickory_proto",
    "attohttpc",
    "ws_stream_wasm",
    "keyvaluedb_web",
    "veilid_api",
    "network_result",
    "dht",
];

use cfg_if::*;
@ -47,6 +47,19 @@ impl ApiTracingLayer {
    }
}

fn simplify_file(file: &str) -> String {
    let path = std::path::Path::new(file);
    let path_component_count = path.iter().count();
    if path.ends_with("mod.rs") && path_component_count >= 2 {
        let outpath: std::path::PathBuf = path.iter().skip(path_component_count - 2).collect();
        outpath.to_string_lossy().to_string()
    } else if let Some(filename) = path.file_name() {
        filename.to_string_lossy().to_string()
    } else {
        file.to_string()
    }
}

impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLayer {
    fn on_new_span(
        &self,
@ -86,15 +99,39 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
        let mut recorder = StringRecorder::new();
        event.record(&mut recorder);
        let meta = event.metadata();
        let level = meta.level();
        let log_level = VeilidLogLevel::from_tracing_level(*level);
        let level = *meta.level();
        let target = meta.target();
        let log_level = VeilidLogLevel::from_tracing_level(level);

        let origin = meta
            .file()
            .and_then(|file| meta.line().map(|ln| format!("{}:{}", file, ln)))
            .unwrap_or_default();
        let origin = match level {
            Level::ERROR | Level::WARN => meta
                .file()
                .and_then(|file| {
                    meta.line()
                        .map(|ln| format!("{}:{}", simplify_file(file), ln))
                })
                .unwrap_or_default(),
            Level::INFO => "".to_owned(),
            Level::DEBUG | Level::TRACE => meta
                .file()
                .and_then(|file| {
                    meta.line().map(|ln| {
                        format!(
                            "{}{}:{}",
                            if target.is_empty() {
                                "".to_owned()
                            } else {
                                format!("[{}]", target)
                            },
                            simplify_file(file),
                            ln
                        )
                    })
                })
                .unwrap_or_default(),
        };

        let message = format!("{} {}", origin, recorder);
        let message = format!("{}{}", origin, recorder).trim().to_owned();

        let backtrace = if log_level <= VeilidLogLevel::Error {
            let bt = backtrace::Backtrace::new();
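For intuition, here is what the new simplify_file helper yields, derived directly from the definition above (this snippet assumes Unix path separators and pairs with that function):

    fn main() {
        // mod.rs files keep their parent directory for context...
        assert_eq!(simplify_file("veilid-core/src/rpc_processor/mod.rs"), "rpc_processor/mod.rs");
        // ...everything else is reduced to the bare file name.
        assert_eq!(simplify_file("veilid-core/src/storage_manager/watch_value.rs"), "watch_value.rs");
    }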
@ -1,12 +1,10 @@
// LogThru
// Pass errors through and log them simultaneously via map_err()
// Also contains common log facilities (net, rpc, rtab, stor, pstore, crypto, etc )
mod api_tracing_layer;
mod veilid_layer_filter;

use super::*;

pub fn map_to_string<X: ToString>(arg: X) -> String {
    arg.to_string()
}
pub use api_tracing_layer::*;
pub use veilid_layer_filter::*;

#[macro_export]
macro_rules! fn_string {
@ -51,6 +49,78 @@ macro_rules! log_net {
    }
}

#[macro_export]
macro_rules! log_client_api {
    (error $text:expr) => {error!(
        target: "client_api",
        "{}",
        $text,
    )};
    (error $fmt:literal, $($arg:expr),+) => {
        error!(target:"client_api", $fmt, $($arg),+);
    };
    (warn $text:expr) => {warn!(
        target: "client_api",
        "{}",
        $text,
    )};
    (warn $fmt:literal, $($arg:expr),+) => {
        warn!(target:"client_api", $fmt, $($arg),+);
    };
    (debug $text:expr) => {debug!(
        target: "client_api",
        "{}",
        $text,
    )};
    (debug $fmt:literal, $($arg:expr),+) => {
        debug!(target:"client_api", $fmt, $($arg),+);
    };
    ($text:expr) => {trace!(
        target: "client_api",
        "{}",
        $text,
    )};
    ($fmt:literal, $($arg:expr),+) => {
        trace!(target:"client_api", $fmt, $($arg),+);
    }
}

#[macro_export]
macro_rules! log_network_result {
    (error $text:expr) => {error!(
        target: "network_result",
        "{}",
        $text,
    )};
    (error $fmt:literal, $($arg:expr),+) => {
        error!(target: "network_result", $fmt, $($arg),+);
    };
    (warn $text:expr) => {warn!(
        target: "network_result",
        "{}",
        $text,
    )};
    (warn $fmt:literal, $($arg:expr),+) => {
        warn!(target:"network_result", $fmt, $($arg),+);
    };
    (debug $text:expr) => {debug!(
        target: "network_result",
        "{}",
        $text,
    )};
    (debug $fmt:literal, $($arg:expr),+) => {
        debug!(target:"network_result", $fmt, $($arg),+);
    };
    ($text:expr) => {trace!(
        target: "network_result",
        "{}",
        $text,
    )};
    ($fmt:literal, $($arg:expr),+) => {
        trace!(target:"network_result", $fmt, $($arg),+);
    }
}

#[macro_export]
macro_rules! log_rpc {
    (error $text:expr) => { error!(
@ -69,7 +139,7 @@ macro_rules! log_rpc {
    (warn $fmt:literal, $($arg:expr),+) => {
        warn!(target:"rpc", $fmt, $($arg),+);
    };
    (debug $text:expr) => { error!(
    (debug $text:expr) => { debug!(
        target: "rpc",
        "{}",
        $text,
@ -87,6 +157,42 @@
    }
}

#[macro_export]
macro_rules! log_dht {
    (error $text:expr) => { error!(
        target: "dht",
        "{}",
        $text,
    )};
    (error $fmt:literal, $($arg:expr),+) => {
        error!(target:"dht", $fmt, $($arg),+);
    };
    (warn $text:expr) => { warn!(
        target: "dht",
        "{}",
        $text,
    )};
    (warn $fmt:literal, $($arg:expr),+) => {
        warn!(target:"dht", $fmt, $($arg),+);
    };
    (debug $text:expr) => { debug!(
        target: "dht",
        "{}",
        $text,
    )};
    (debug $fmt:literal, $($arg:expr),+) => {
        debug!(target:"dht", $fmt, $($arg),+);
    };
    ($text:expr) => {trace!(
        target: "dht",
        "{}",
        $text,
    )};
    ($fmt:literal, $($arg:expr),+) => {
        trace!(target:"dht", $fmt, $($arg),+);
    }
}

#[macro_export]
macro_rules! log_rtab {
    (error $text:expr) => { error!(
@ -195,6 +301,42 @@ macro_rules! log_pstore {
    }
}

#[macro_export]
macro_rules! log_tstore {
    (error $text:expr) => { error!(
        target: "tstore",
        "{}",
        $text,
    )};
    (error $fmt:literal, $($arg:expr),+) => {
        error!(target:"tstore", $fmt, $($arg),+);
    };
    (warn $text:expr) => { warn!(
        target: "tstore",
        "{}",
        $text,
    )};
    (warn $fmt:literal, $($arg:expr),+) => {
        warn!(target:"tstore", $fmt, $($arg),+);
    };
    (debug $text:expr) => { debug!(
        target: "tstore",
        "{}",
        $text,
    )};
    (debug $fmt:literal, $($arg:expr),+) => {
        debug!(target:"tstore", $fmt, $($arg),+);
    };
    ($text:expr) => {trace!(
        target: "tstore",
        "{}",
        $text,
    )};
    ($fmt:literal, $($arg:expr),+) => {
        trace!(target:"tstore", $fmt, $($arg),+);
    }
}

#[macro_export]
macro_rules! log_crypto {
    (error $text:expr) => { error!(
@ -222,188 +364,3 @@ macro_rules! log_crypto {
        trace!(target:"crypto", $fmt, $($arg),+);
    }
}

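All of these target-scoped macros share one shape: each level arm forwards to the matching tracing macro with a fixed target string, so log output can be filtered per subsystem. A self-contained miniature of the pattern, with println! standing in for tracing's level macros (illustration only):

    macro_rules! log_dht_demo {
        (debug $fmt:literal, $($arg:expr),+) => {
            println!(concat!("[dht debug] ", $fmt), $($arg),+)
        };
        ($fmt:literal, $($arg:expr),+) => {
            println!(concat!("[dht trace] ", $fmt), $($arg),+)
        };
    }

    fn main() {
        // In veilid-core this would be: log_dht!(debug "watch {} expired", watch_id);
        log_dht_demo!(debug "watch {} expired", 42u64);
        log_dht_demo!("sending inspect to {} peers", 3);
    }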
#[macro_export]
macro_rules! logthru_net {
    ($($level:ident)?) => {
        logthru!($($level)? "net")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "net", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "net", $fmt, $($arg),+)
    }
}
#[macro_export]
macro_rules! logthru_rpc {
    ($($level:ident)?) => {
        logthru!($($level)? "rpc")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "rpc", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "rpc", $fmt, $($arg),+)
    }
}
#[macro_export]
macro_rules! logthru_rtab {
    ($($level:ident)?) => {
        logthru!($($level)? "rtab")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "rtab", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "rtab", $fmt, $($arg),+)
    }
}
#[macro_export]
macro_rules! logthru_stor {
    ($($level:ident)?) => {
        logthru!($($level)? "stor")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "stor", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "stor", $fmt, $($arg),+)
    }
}
#[macro_export]
macro_rules! logthru_pstore {
    ($($level:ident)?) => {
        logthru!($($level)? "pstore")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "pstore", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "pstore", $fmt, $($arg),+)
    }
}
#[macro_export]
macro_rules! logthru_crypto {
    ($($level:ident)?) => {
        logthru!($($level)? "crypto")
    };
    ($($level:ident)? $text:literal) => {
        logthru!($($level)? "crypto", $text)
    };
    ($($level:ident)? $fmt:literal, $($arg:expr),+) => {
        logthru!($($level)? "crypto", $fmt, $($arg),+)
    }
}

#[macro_export]
macro_rules! logthru {
    // error
    (error $target:literal) => (|e__| {
        error!(
            target: $target,
            "[{:?}]",
            e__,
        );
        e__
    });
    (error $target:literal, $text:literal) => (|e__| {
        error!(
            target: $target,
            "[{:?}] {}",
            e__,
            $text
        );
        e__
    });
    (error $target:literal, $fmt:literal, $($arg:expr),+) => (|e__| {
        error!(
            target: $target,
            concat!("[{:?}] ", $fmt),
            e__,
            $($arg),+
        );
        e__
    });
    // warn
    (warn $target:literal) => (|e__| {
        warn!(
            target: $target,
            "[{:?}]",
            e__,
        );
        e__
    });
    (warn $target:literal, $text:literal) => (|e__| {
        warn!(
            target: $target,
            "[{:?}] {}",
            e__,
            $text
        );
        e__
    });
    (warn $target:literal, $fmt:literal, $($arg:expr),+) => (|e__| {
        warn!(
            target: $target,
            concat!("[{:?}] ", $fmt),
            e__,
            $($arg),+
        );
        e__
    });
    // debug
    (debug $target:literal) => (|e__| {
        debug!(
            target: $target,
            "[{:?}]",
            e__,
        );
        e__
    });
    (debug $target:literal, $text:literal) => (|e__| {
        debug!(
            target: $target,
            "[{:?}] {}",
            e__,
            $text
        );
        e__
    });
    (debug $target:literal, $fmt:literal, $($arg:expr),+) => (|e__| {
        debug!(
            target: $target,
            concat!("[{:?}] ", $fmt),
            e__,
            $($arg),+
        );
        e__
    });
    // trace
    ($target:literal) => (|e__| {
        trace!(
            target: $target,
            "[{:?}]",
            e__,
        );
        e__
    });
    ($target:literal, $text:literal) => (|e__| {
        trace!(
            target: $target,
            "[{:?}] {}",
            e__,
            $text
        );
        e__
    });
    ($target:literal, $fmt:literal, $($arg:expr),+) => (|e__| {
        trace!(
            target: $target,
            concat!("[{:?}] ", $fmt),
            e__,
            $($arg),+
        );
        e__
    })
}
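The logthru! family evaluates to a closure of the form |e| { log; e }, meant for Result::map_err so an error is logged and then passed through unchanged; the pre-refactor discovery-context call site later in this diff uses it exactly that way. A self-contained sketch of that shape:

    // Log an error under a target tag, then return it untouched.
    fn logthru_demo<E: std::fmt::Debug>(target: &'static str) -> impl Fn(E) -> E {
        move |e| {
            eprintln!("target={} [{:?}]", target, e);
            e
        }
    }

    fn main() {
        let r: Result<(), &str> = Err("connect timeout").map_err(logthru_demo("net"));
        assert!(r.is_err()); // the error was logged, then propagated unchanged
    }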
@ -16,13 +16,25 @@ pub struct VeilidLayerFilter {
impl VeilidLayerFilter {
    pub fn new(
        max_level: VeilidConfigLogLevel,
        ignore_list: Option<Vec<String>>,
        ignore_log_targets: &[String],
    ) -> VeilidLayerFilter {
        let mut ignore_list = DEFAULT_LOG_IGNORE_LIST.map(|x| x.to_owned()).to_vec();
        for igedit in ignore_log_targets {
            if let Some(rest) = igedit.strip_prefix('-') {
                for i in 0..ignore_list.len() {
                    if ignore_list[i] == rest {
                        ignore_list.remove(i);
                        break;
                    }
                }
            } else {
                ignore_list.push(igedit.clone());
            }
        }
        Self {
            inner: Arc::new(RwLock::new(VeilidLayerFilterInner {
                max_level: max_level.to_tracing_level_filter(),
                ignore_list: ignore_list
                    .unwrap_or_else(|| DEFAULT_LOG_IGNORE_LIST.map(|x| x.to_owned()).to_vec()),
                ignore_list,
            })),
        }
    }
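VeilidLayerFilter::new's new editing rule, extracted into a self-contained sketch: a bare target is appended to the default ignore list, while a leading '-' removes one, so passing "-dht" re-enables the new dht log target (apply_ignore_edits is a hypothetical name for illustration):

    fn apply_ignore_edits(mut ignore_list: Vec<String>, edits: &[&str]) -> Vec<String> {
        for igedit in edits {
            if let Some(rest) = igedit.strip_prefix('-') {
                // '-target' removes a default entry, re-enabling that target.
                if let Some(i) = ignore_list.iter().position(|x| x.as_str() == rest) {
                    ignore_list.remove(i);
                }
            } else {
                // bare 'target' is appended to the ignore list.
                ignore_list.push(igedit.to_string());
            }
        }
        ignore_list
    }

    fn main() {
        let defaults = vec!["mio".to_string(), "dht".to_string()];
        let edited = apply_ignore_edits(defaults, &["-dht", "my_noisy_crate"]);
        assert_eq!(edited, vec!["mio", "my_noisy_crate"]);
    }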
@ -115,7 +115,7 @@ impl ConnectionManager {
    }

    pub async fn startup(&self) {
        trace!("startup connection manager");
        log_net!(debug "startup connection manager");
        let mut inner = self.arc.inner.lock();
        if inner.is_some() {
            panic!("shouldn't start connection manager twice without shutting it down first");
@ -135,7 +135,7 @@ impl ConnectionManager {
    }

    pub async fn shutdown(&self) {
        debug!("starting connection manager shutdown");
        log_net!(debug "starting connection manager shutdown");
        // Remove the inner from the lock
        let mut inner = {
            let mut inner_lock = self.arc.inner.lock();
@ -148,16 +148,16 @@ impl ConnectionManager {
        };

        // Stop all the connections and the async processor
        debug!("stopping async processor task");
        log_net!(debug "stopping async processor task");
        drop(inner.stop_source.take());
        let async_processor_jh = inner.async_processor_jh.take().unwrap();
        // wait for the async processor to stop
        debug!("waiting for async processor to stop");
        log_net!(debug "waiting for async processor to stop");
        async_processor_jh.await;
        // Wait for the connections to complete
        debug!("waiting for connection handlers to complete");
        log_net!(debug "waiting for connection handlers to complete");
        self.arc.connection_table.join().await;
        debug!("finished connection manager shutdown");
        log_net!(debug "finished connection manager shutdown");
    }

    // Internal routine to see if we should keep this connection
@ -99,7 +99,7 @@ impl ConnectionTable {
        let unord = FuturesUnordered::new();
        for table in &mut inner.conn_by_id {
            for (_, mut v) in table.drain() {
                trace!("connection table join: {:?}", v);
                log_net!("connection table join: {:?}", v);
                v.close();
                unord.push(v);
            }
@ -227,7 +227,7 @@ impl NetworkManager {
            Some(
                bcs.derive_shared_secret(
                    network_key_password.as_bytes(),
                    network_key_password.as_bytes(),
                    &bcs.generate_hash(network_key_password.as_bytes()).bytes,
                )
                .expect("failed to derive network key"),
            )
@ -363,9 +363,8 @@ impl NetworkManager {

    #[instrument(level = "debug", skip_all, err)]
    pub async fn internal_startup(&self) -> EyreResult<()> {
        trace!("NetworkManager::internal_startup begin");
        if self.unlocked_inner.components.read().is_some() {
            debug!("NetworkManager::internal_startup already started");
            log_net!(debug "NetworkManager::internal_startup already started");
            return Ok(());
        }

@ -402,7 +401,7 @@ impl NetworkManager {
        rpc_processor.startup().await?;
        receipt_manager.startup().await?;

        trace!("NetworkManager::internal_startup end");
        log_net!("NetworkManager::internal_startup end");

        Ok(())
    }
@ -422,13 +421,13 @@ impl NetworkManager {

    #[instrument(level = "debug", skip_all)]
    pub async fn shutdown(&self) {
        debug!("starting network manager shutdown");
        log_net!(debug "starting network manager shutdown");

        // Cancel all tasks
        self.cancel_tasks().await;

        // Shutdown network components if they started up
        debug!("shutting down network components");
        log_net!(debug "shutting down network components");

        let components = self.unlocked_inner.components.read().clone();
        if let Some(components) = components {
@ -441,16 +440,16 @@ impl NetworkManager {
        }

        // reset the state
        debug!("resetting network manager state");
        log_net!(debug "resetting network manager state");
        {
            *self.inner.lock() = NetworkManager::new_inner();
        }

        // send update
        debug!("sending network state update to api clients");
        log_net!(debug "sending network state update to api clients");
        self.send_network_update();

        debug!("finished network manager shutdown");
        log_net!(debug "finished network manager shutdown");
    }

    pub fn update_client_allowlist(&self, client: TypedKey) {
@ -493,7 +492,7 @@ impl NetworkManager {
            .unwrap_or_default()
        {
            let (k, v) = inner.client_allowlist.remove_lru().unwrap();
            trace!(key=?k, value=?v, "purge_client_allowlist: remove_lru")
            trace!(target: "net", key=?k, value=?v, "purge_client_allowlist: remove_lru")
        }
    }

@ -310,14 +310,16 @@ impl DiscoveryContext {

        // ask the node to send us a dial info validation receipt

        rpc_processor
        match rpc_processor
            .rpc_call_validate_dial_info(node_ref.clone(), dial_info, redirect)
            .await
            .map_err(logthru_net!(
                "failed to send validate_dial_info to {:?}",
                node_ref
            ))
            .unwrap_or(false)
        {
            Err(e) => {
                log_net!("failed to send validate_dial_info to {:?}: {}", node_ref, e);
                false
            }
            Ok(v) => v,
        }
    }

    #[instrument(level = "trace", skip(self), ret)]
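The refactor above trades .map_err(logthru_net!(...)).unwrap_or(false) for an explicit match; behavior is unchanged (log the failure, then treat it as a failed validation). A self-contained sketch of the equivalence:

    fn check(r: Result<bool, String>) -> bool {
        match r {
            Err(e) => {
                // log, then map the error case to "validation failed"
                eprintln!("failed to send validate_dial_info: {}", e);
                false
            }
            Ok(v) => v,
        }
    }

    fn main() {
        assert!(!check(Err("timeout".to_string())));
        assert!(check(Ok(true)));
    }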
@ -279,22 +279,22 @@ impl Network {
    fn load_server_config(&self) -> io::Result<ServerConfig> {
        let c = self.config.get();
        //
        trace!(
        log_net!(
            "loading certificate from {}",
            c.network.tls.certificate_path
        );
        let certs = Self::load_certs(&PathBuf::from(&c.network.tls.certificate_path))?;
        trace!("loaded {} certificates", certs.len());
        log_net!("loaded {} certificates", certs.len());
        if certs.is_empty() {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Certificates at {} could not be loaded.\nEnsure it is in PEM format, beginning with '-----BEGIN CERTIFICATE-----'",c.network.tls.certificate_path)));
        }
        //
        trace!(
        log_net!(
            "loading private key from {}",
            c.network.tls.private_key_path
        );
        let mut keys = Self::load_keys(&PathBuf::from(&c.network.tls.private_key_path))?;
        trace!("loaded {} keys", keys.len());
        log_net!("loaded {} keys", keys.len());
        if keys.is_empty() {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Private key at {} could not be loaded.\nEnsure it is unencrypted and in RSA or PKCS8 format, beginning with '-----BEGIN RSA PRIVATE KEY-----' or '-----BEGIN PRIVATE KEY-----'",c.network.tls.private_key_path)));
        }
@ -710,7 +710,7 @@ impl Network {
        self.unlocked_inner
            .interfaces
            .with_interfaces(|interfaces| {
                debug!("interfaces: {:#?}", interfaces);
                log_net!(debug "interfaces: {:#?}", interfaces);

                for intf in interfaces.values() {
                    // Skip networks that we should never encounter
@ -915,12 +915,12 @@ impl Network {

    #[instrument(level = "debug", skip_all)]
    pub async fn shutdown(&self) {
        debug!("starting low level network shutdown");
        log_net!(debug "starting low level network shutdown");

        let routing_table = self.routing_table();

        // Stop all tasks
        debug!("stopping update network class task");
        log_net!(debug "stopping update network class task");
        if let Err(e) = self.unlocked_inner.update_network_class_task.stop().await {
            error!("update_network_class_task not cancelled: {}", e);
        }
@ -930,17 +930,17 @@ impl Network {
            let mut inner = self.inner.lock();
            // take the join handles out
            for h in inner.join_handles.drain(..) {
                trace!("joining: {:?}", h);
                log_net!("joining: {:?}", h);
                unord.push(h);
            }
            // Drop the stop
            drop(inner.stop_source.take());
        }
        debug!("stopping {} low level network tasks", unord.len());
        log_net!(debug "stopping {} low level network tasks", unord.len());
        // Wait for everything to stop
        while unord.next().await.is_some() {}

        debug!("clearing dial info");
        log_net!(debug "clearing dial info");

        routing_table
            .edit_routing_domain(RoutingDomain::PublicInternet)
@ -961,7 +961,7 @@ impl Network {
        // Reset state including network class
        *self.inner.lock() = Self::new_inner();

        debug!("finished low level network shutdown");
        log_net!(debug "finished low level network shutdown");
    }

    //////////////////////////////////////////

@ -268,7 +268,7 @@ impl Network {
            }
        }

        debug!("spawn_socket_listener: binding successful to {}", addr);
        log_net!(debug "spawn_socket_listener: binding successful to {}", addr);

        // Create protocol handler records
        let listener_state = Arc::new(RwLock::new(ListenerState::new()));

@ -15,16 +15,16 @@ impl Network {
                task_count = 1;
            }
        }
        trace!("task_count: {}", task_count);
        log_net!("task_count: {}", task_count);
        for _ in 0..task_count {
            trace!("Spawning UDP listener task");
            log_net!("Spawning UDP listener task");

            ////////////////////////////////////////////////////////////
            // Run thread task to process stream of messages
            let this = self.clone();

            let jh = spawn(async move {
                trace!("UDP listener task spawned");
                log_net!("UDP listener task spawned");

                // Collect all our protocol handlers into a vector
                let mut protocol_handlers: Vec<RawUdpProtocolHandler> = this
@ -103,7 +103,7 @@ impl Network {
                    }
                }

                trace!("UDP listener task stopped");
                log_net!("UDP listener task stopped");
            });
            ////////////////////////////////////////////////////////////

@ -1,233 +1,246 @@
use crate::*;
|
||||
use async_io::Async;
|
||||
use std::io;
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(feature="rt-async-std")] {
|
||||
pub use async_std::net::{TcpStream, TcpListener, Shutdown, UdpSocket};
|
||||
} else if #[cfg(feature="rt-tokio")] {
|
||||
pub use tokio::net::{TcpStream, TcpListener, UdpSocket};
|
||||
pub use tokio_util::compat::*;
|
||||
} else {
|
||||
compile_error!("needs executor implementation")
|
||||
}
|
||||
}
|
||||
|
||||
use socket2::{Domain, Protocol, SockAddr, Socket, Type};
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(windows)] {
|
||||
use winapi::shared::ws2def::{ SOL_SOCKET, SO_EXCLUSIVEADDRUSE};
|
||||
use winapi::um::winsock2::{SOCKET_ERROR, setsockopt};
|
||||
use winapi::ctypes::c_int;
|
||||
use std::os::windows::io::AsRawSocket;
|
||||
|
||||
fn set_exclusiveaddruse(socket: &Socket) -> io::Result<()> {
|
||||
unsafe {
|
||||
let optval:c_int = 1;
|
||||
if setsockopt(socket.as_raw_socket().try_into().unwrap(), SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (&optval as *const c_int).cast(),
|
||||
std::mem::size_of::<c_int>() as c_int) == SOCKET_ERROR {
|
||||
return Err(io::Error::last_os_error());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
|
||||
if domain == Domain::IPV6 {
|
||||
socket.set_only_v6(true)?;
|
||||
}
|
||||
socket.set_reuse_address(true)?;
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(unix)] {
|
||||
socket.set_reuse_port(true)?;
|
||||
}
|
||||
}
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
let socket = new_unbound_shared_udp_socket(domain)?;
|
||||
let socket2_addr = SockAddr::from(local_address);
|
||||
socket.bind(&socket2_addr)?;
|
||||
|
||||
log_net!("created bound shared udp socket on {:?}", &local_address);
|
||||
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
|
||||
if domain == Domain::IPV6 {
|
||||
socket.set_only_v6(true)?;
|
||||
}
|
||||
// Bind the socket -first- before turning on 'reuse address' this way it will
|
||||
// fail if the port is already taken
|
||||
let socket2_addr = SockAddr::from(local_address);
|
||||
|
||||
// On windows, do SO_EXCLUSIVEADDRUSE before the bind to ensure the port is fully available
|
||||
cfg_if! {
|
||||
if #[cfg(windows)] {
|
||||
set_exclusiveaddruse(&socket)?;
|
||||
}
|
||||
}
|
||||
|
||||
socket.bind(&socket2_addr)?;
|
||||
|
||||
// Set 'reuse address' so future binds to this port will succeed
|
||||
// This does not work on Windows, where reuse options can not be set after the bind
|
||||
cfg_if! {
|
||||
if #[cfg(unix)] {
|
||||
socket
|
||||
.set_reuse_address(true)?;
|
||||
socket.set_reuse_port(true)?;
|
||||
}
|
||||
}
|
||||
log_net!("created bound first udp socket on {:?}", &local_address);
|
||||
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub fn new_unbound_tcp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
|
||||
if let Err(e) = socket.set_nodelay(true) {
|
||||
log_net!(error "Couldn't set TCP nodelay: {}", e);
|
||||
}
|
||||
if domain == Domain::IPV6 {
|
||||
socket.set_only_v6(true)?;
|
||||
}
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
|
||||
// if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
|
||||
// log_net!(error "Couldn't set TCP linger: {}", e);
|
||||
// }
|
||||
if let Err(e) = socket.set_nodelay(true) {
|
||||
log_net!(error "Couldn't set TCP nodelay: {}", e);
|
||||
}
|
||||
if domain == Domain::IPV6 {
|
||||
socket.set_only_v6(true)?;
|
||||
}
|
||||
socket.set_reuse_address(true)?;
|
use crate::*;
use async_io::Async;
use std::io;

cfg_if! {
    if #[cfg(feature="rt-async-std")] {
        pub use async_std::net::{TcpStream, TcpListener, UdpSocket};
    } else if #[cfg(feature="rt-tokio")] {
        pub use tokio::net::{TcpStream, TcpListener, UdpSocket};
        pub use tokio_util::compat::*;
    } else {
        compile_error!("needs executor implementation")
    }
}

use socket2::{Domain, Protocol, SockAddr, Socket, Type};

cfg_if! {
    if #[cfg(windows)] {
        use winapi::shared::ws2def::{SOL_SOCKET, SO_EXCLUSIVEADDRUSE};
        use winapi::um::winsock2::{setsockopt, SOCKET_ERROR};
        use winapi::ctypes::c_int;
        use std::os::windows::io::AsRawSocket;

        fn set_exclusiveaddruse(socket: &Socket) -> io::Result<()> {
            unsafe {
                let optval: c_int = 1;
                if setsockopt(
                    socket.as_raw_socket().try_into().unwrap(),
                    SOL_SOCKET,
                    SO_EXCLUSIVEADDRUSE,
                    (&optval as *const c_int).cast(),
                    std::mem::size_of::<c_int>() as c_int,
                ) == SOCKET_ERROR
                {
                    return Err(io::Error::last_os_error());
                }
                Ok(())
            }
        }
    }
}

#[instrument(level = "trace", ret)]
pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
    let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
    if domain == Domain::IPV6 {
        socket.set_only_v6(true)?;
    }
    socket.set_reuse_address(true)?;

    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_port(true)?;
        }
    }
    Ok(socket)
}

#[instrument(level = "trace", ret)]
pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
    let domain = Domain::for_address(local_address);
    let socket = new_unbound_shared_udp_socket(domain)?;
    let socket2_addr = SockAddr::from(local_address);
    socket.bind(&socket2_addr)?;

    log_net!("created bound shared udp socket on {:?}", &local_address);

    Ok(socket)
}

#[instrument(level = "trace", ret)]
pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
    let domain = Domain::for_address(local_address);
    let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
    if domain == Domain::IPV6 {
        socket.set_only_v6(true)?;
    }
    let socket2_addr = SockAddr::from(local_address);

    // On windows, do SO_EXCLUSIVEADDRUSE before the bind to ensure the port is fully available
    cfg_if! {
        if #[cfg(windows)] {
            set_exclusiveaddruse(&socket)?;
        }
    }

    // Bind the socket -first- without turning on SO_REUSEPORT this way it will
    // fail if the port is already taken
    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_address(true)?;
        }
    }

    socket.bind(&socket2_addr)?;

    // Set 'reuse port' so future binds to this port will succeed
    // This does not work on Windows, where reuse options can not be set after the bind
    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_port(true)?;
        }
    }
    log_net!("created bound first udp socket on {:?}", &local_address);

    Ok(socket)
}

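// An illustrative, self-contained sketch of why the "bound first" functions
// above bind before enabling SO_REUSEPORT: without reuse options, a second
// bind of an owned port fails with AddrInUse. Assumes only the socket2 crate;
// the function name and port number are placeholders, not part of this file.
fn bind_first_fails_when_port_taken() -> std::io::Result<()> {
    use socket2::{Domain, Protocol, Socket, Type};
    use std::net::SocketAddr;

    let addr: SocketAddr = "127.0.0.1:34567".parse().unwrap();

    // First socket takes the port without any reuse options
    let first = Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?;
    first.bind(&addr.into())?;

    // A second plain bind of the same port must fail
    let second = Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?;
    assert_eq!(
        second.bind(&addr.into()).unwrap_err().kind(),
        std::io::ErrorKind::AddrInUse
    );
    Ok(())
}
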
#[instrument(level = "trace", ret)]
pub fn new_unbound_tcp_socket(domain: Domain) -> io::Result<Socket> {
    let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
    if let Err(e) = socket.set_nodelay(true) {
        log_net!(error "Couldn't set TCP nodelay: {}", e);
    }
    if domain == Domain::IPV6 {
        socket.set_only_v6(true)?;
    }
    Ok(socket)
}

#[instrument(level = "trace", ret)]
pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
    let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
    // if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
    //     log_net!(error "Couldn't set TCP linger: {}", e);
    // }
    if let Err(e) = socket.set_nodelay(true) {
        log_net!(error "Couldn't set TCP nodelay: {}", e);
    }
    if domain == Domain::IPV6 {
        socket.set_only_v6(true)?;
    }
    socket.set_reuse_address(true)?;
    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_port(true)?;
        }
    }

    Ok(socket)
}

#[instrument(level = "trace", ret)]
pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
    let domain = Domain::for_address(local_address);
    let socket = new_unbound_shared_tcp_socket(domain)?;
    let socket2_addr = SockAddr::from(local_address);
    socket.bind(&socket2_addr)?;

    log_net!("created bound shared tcp socket on {:?}", &local_address);

    Ok(socket)
}

#[instrument(level = "trace", ret)]
pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
    let domain = Domain::for_address(local_address);

    let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
    // if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
    //     log_net!(error "Couldn't set TCP linger: {}", e);
    // }
    if let Err(e) = socket.set_nodelay(true) {
        log_net!(error "Couldn't set TCP nodelay: {}", e);
    }
    if domain == Domain::IPV6 {
        socket.set_only_v6(true)?;
    }

    // On windows, do SO_EXCLUSIVEADDRUSE before the bind to ensure the port is fully available
    cfg_if! {
        if #[cfg(windows)] {
            set_exclusiveaddruse(&socket)?;
        }
    }

    // Bind the socket -first- without turning on SO_REUSEPORT this way it will
    // fail if the port is already taken
    let socket2_addr = SockAddr::from(local_address);

    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_address(true)?;
        }
    }

    socket.bind(&socket2_addr)?;

    // Set 'reuse port' so future binds to this port will succeed
    // This does not work on Windows, where reuse options can not be set after the bind
    cfg_if! {
        if #[cfg(unix)] {
            socket.set_reuse_port(true)?;
        }
    }
    log_net!("created bound first tcp socket on {:?}", &local_address);

    Ok(socket)
}

// Non-blocking connect is tricky when you want to start with a prepared socket
// Errors should not be logged as they are valid conditions for this function
#[instrument(level = "trace", ret)]
pub async fn nonblocking_connect(
    socket: Socket,
    addr: SocketAddr,
    timeout_ms: u32,
) -> io::Result<TimeoutOr<TcpStream>> {
    // Set for non-blocking connect
    socket.set_nonblocking(true)?;

    // Make socket2 SockAddr
    let socket2_addr = socket2::SockAddr::from(addr);

    // Connect to the remote address
    match socket.connect(&socket2_addr) {
        Ok(()) => Ok(()),
        #[cfg(unix)]
        Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => Ok(()),
        Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => Ok(()),
        Err(e) => Err(e),
    }?;
    let async_stream = Async::new(std::net::TcpStream::from(socket))?;

    // The stream becomes writable when connected
    timeout_or_try!(timeout(timeout_ms, async_stream.writable())
        .await
        .into_timeout_or()
        .into_result()?);

    // Check for a low-level socket error
    let async_stream = match async_stream.get_ref().take_error()? {
        None => Ok(async_stream),
        Some(err) => Err(err),
    }?;

    // Convert back to inner and then return the async version
    cfg_if! {
        if #[cfg(feature="rt-async-std")] {
            Ok(TimeoutOr::value(TcpStream::from(async_stream.into_inner()?)))
        } else if #[cfg(feature="rt-tokio")] {
            Ok(TimeoutOr::value(TcpStream::from_std(async_stream.into_inner()?)?))
        } else {
            compile_error!("needs executor implementation")
        }
    }
}

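// An illustrative, self-contained sketch of the non-blocking connect dance
// performed by nonblocking_connect() above, using std, socket2, async-io and
// futures-lite directly. On a non-blocking socket, connect() reporting
// EINPROGRESS/WouldBlock is the expected "in progress" condition, writability
// then signals completion, and SO_ERROR is checked afterwards. The function
// name and target address are placeholders, not part of this file.
fn nonblocking_connect_sketch() -> std::io::Result<()> {
    use async_io::Async;
    use socket2::{Domain, Protocol, Socket, Type};
    use std::net::{SocketAddr, TcpStream};

    futures_lite::future::block_on(async {
        let addr: SocketAddr = "192.0.2.1:80".parse().unwrap();
        let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP))?;
        socket.set_nonblocking(true)?;

        // An in-progress connect is a success condition here, not an error
        match socket.connect(&addr.into()) {
            Ok(()) => {}
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {}
            #[cfg(unix)]
            Err(e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
            Err(e) => return Err(e),
        }

        // The stream becomes writable once the connect resolves
        let stream = Async::new(TcpStream::from(socket))?;
        stream.writable().await?;

        // A low-level error (e.g. connection refused) surfaces via SO_ERROR
        if let Some(e) = stream.get_ref().take_error()? {
            return Err(e);
        }
        Ok(())
    })
}
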
@@ -39,9 +39,8 @@ impl RawUdpProtocolHandler {
                 NetworkResult::Value(None) => {
                     continue;
                 }
-                #[cfg(feature = "network-result-extra")]
                 nres => {
-                    log_network_result!(
+                    log_network_result!(debug
                         "UDP::recv_message insert_frame failed: {:?} <= size={} remote_addr={}",
                         nres,
                         size,
@@ -49,10 +48,6 @@ impl RawUdpProtocolHandler {
                     );
                     continue;
                 }
-                #[cfg(not(feature = "network-result-extra"))]
-                _ => {
-                    continue;
-                }
             };

             // Check length of reassembled message (same for all protocols)

@@ -280,7 +280,7 @@ impl Network {
         editor_public_internet: &mut RoutingDomainEditor,
         editor_local_network: &mut RoutingDomainEditor,
     ) -> EyreResult<()> {
-        trace!("starting udp listeners");
+        log_net!("starting udp listeners");
         let routing_table = self.routing_table();
         let (listen_address, public_address, detect_address_changes) = {
             let c = self.config.get();
@@ -312,7 +312,7 @@ impl Network {
         let local_dial_info_list = self.create_udp_inbound_sockets(ip_addrs, udp_port).await?;
         let mut static_public = false;

-        trace!("UDP: listener started on {:#?}", local_dial_info_list);
+        log_net!("UDP: listener started on {:#?}", local_dial_info_list);

         // Register local dial info
         for di in &local_dial_info_list {
@@ -383,7 +383,7 @@ impl Network {
         editor_public_internet: &mut RoutingDomainEditor,
         editor_local_network: &mut RoutingDomainEditor,
     ) -> EyreResult<()> {
-        trace!("starting ws listeners");
+        log_net!("starting ws listeners");
         let routing_table = self.routing_table();
         let (listen_address, url, path, detect_address_changes) = {
             let c = self.config.get();
@@ -415,7 +415,7 @@ impl Network {
             Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
         )
         .await?;
-        trace!("WS: listener started on {:#?}", socket_addresses);
+        log_net!("WS: listener started on {:#?}", socket_addresses);

         let mut static_public = false;
         let mut registered_addresses: HashSet<IpAddr> = HashSet::new();
@@ -493,7 +493,7 @@ impl Network {
         editor_public_internet: &mut RoutingDomainEditor,
         editor_local_network: &mut RoutingDomainEditor,
     ) -> EyreResult<()> {
-        trace!("starting wss listeners");
+        log_net!("starting wss listeners");

         let (listen_address, url, detect_address_changes) = {
             let c = self.config.get();
@@ -524,7 +524,7 @@ impl Network {
             Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
         )
         .await?;
-        trace!("WSS: listener started on {:#?}", socket_addresses);
+        log_net!("WSS: listener started on {:#?}", socket_addresses);

         // NOTE: No interface dial info for WSS, as there is no way to connect to a local dialinfo via TLS
         // If the hostname is specified, it is the public dialinfo via the URL. If no hostname
@@ -586,7 +586,7 @@ impl Network {
         editor_public_internet: &mut RoutingDomainEditor,
         editor_local_network: &mut RoutingDomainEditor,
     ) -> EyreResult<()> {
-        trace!("starting tcp listeners");
+        log_net!("starting tcp listeners");

         let routing_table = self.routing_table();
         let (listen_address, public_address, detect_address_changes) = {
@@ -618,7 +618,7 @@ impl Network {
             Box::new(|c, _| Box::new(RawTcpProtocolHandler::new(c))),
         )
         .await?;
-        trace!("TCP: listener started on {:#?}", socket_addresses);
+        log_net!("TCP: listener started on {:#?}", socket_addresses);

         let mut static_public = false;
         let mut registered_addresses: HashSet<IpAddr> = HashSet::new();

@@ -185,9 +185,9 @@ impl ReceiptManager {
     }

     pub async fn startup(&self) -> EyreResult<()> {
-        trace!("startup receipt manager");
-        // Retrieve config
+        log_net!(debug "startup receipt manager");
+
+        // Retrieve config
         {
             // let config = self.core().config();
             // let c = config.get();
@@ -296,7 +296,7 @@ impl ReceiptManager {
     }

     pub async fn shutdown(&self) {
-        debug!("starting receipt manager shutdown");
+        log_net!(debug "starting receipt manager shutdown");
         let network_manager = self.network_manager();

         // Stop all tasks
@@ -308,13 +308,13 @@ impl ReceiptManager {
         };

         // Wait for everything to stop
-        debug!("waiting for timeout task to stop");
+        log_net!(debug "waiting for timeout task to stop");
         if timeout_task.join().await.is_err() {
             panic!("joining timeout task failed");
         }

         *self.inner.lock() = Self::new_inner(network_manager);
-        debug!("finished receipt manager shutdown");
+        log_net!(debug "finished receipt manager shutdown");
     }

     #[allow(dead_code)]

@@ -11,47 +11,56 @@ impl NetworkManager {
     ///
     /// Sending to a node requires determining a NetworkClass compatible contact method
     /// between the source and destination node
-    pub(crate) fn send_data(
+    pub(crate) async fn send_data(
         &self,
         destination_node_ref: NodeRef,
         data: Vec<u8>,
+    ) -> EyreResult<NetworkResult<SendDataMethod>> {
+        // First try to send data to the last flow we've seen this peer on
+        let data = if let Some(flow) = destination_node_ref.last_flow() {
+            match self
+                .net()
+                .send_data_to_existing_flow(flow, data)
+                .await?
+            {
+                SendDataToExistingFlowResult::Sent(unique_flow) => {
+                    // Update timestamp for this last flow since we just sent to it
+                    destination_node_ref
+                        .set_last_flow(unique_flow.flow, get_aligned_timestamp());
+
+                    return Ok(NetworkResult::value(SendDataMethod {
+                        opt_relayed_contact_method: None,
+                        contact_method: NodeContactMethod::Existing,
+                        unique_flow,
+                    }));
+                }
+                SendDataToExistingFlowResult::NotSent(data) => {
+                    // Couldn't send data to existing flow
+                    // so pass the data back out
+                    data
+                }
+            }
+        } else {
+            // No last connection
+            data
+        };
+
+        // No existing connection was found or usable, so we proceed to see how to make a new one
+
+        // Get the best way to contact this node
+        let possibly_relayed_contact_method = self.get_node_contact_method(destination_node_ref.clone())?;
+
+        self.try_possibly_relayed_contact_method(possibly_relayed_contact_method, destination_node_ref, data).await
+    }
+
+    pub(crate) fn try_possibly_relayed_contact_method(&self,
+        possibly_relayed_contact_method: NodeContactMethod,
+        destination_node_ref: NodeRef,
+        data: Vec<u8>,
     ) -> SendPinBoxFuture<EyreResult<NetworkResult<SendDataMethod>>> {
         let this = self.clone();
         Box::pin(
             async move {
                 // First try to send data to the last flow we've seen this peer on
                 let data = if let Some(flow) = destination_node_ref.last_flow() {
                     match this
                         .net()
                         .send_data_to_existing_flow(flow, data)
                         .await?
                     {
                         SendDataToExistingFlowResult::Sent(unique_flow) => {
                             // Update timestamp for this last flow since we just sent to it
                             destination_node_ref
                                 .set_last_flow(unique_flow.flow, get_aligned_timestamp());

                             return Ok(NetworkResult::value(SendDataMethod {
                                 opt_relayed_contact_method: None,
                                 contact_method: NodeContactMethod::Existing,
                                 unique_flow,
                             }));
                         }
                         SendDataToExistingFlowResult::NotSent(data) => {
                             // Couldn't send data to existing flow
                             // so pass the data back out
                             data
                         }
                     }
                 } else {
                     // No last connection
                     data
                 };

                 // No existing connection was found or usable, so we proceed to see how to make a new one

                 // Get the best way to contact this node
                 let possibly_relayed_contact_method = this.get_node_contact_method(destination_node_ref.clone())?;

                 // If we need to relay, do it
                 let (contact_method, target_node_ref, opt_relayed_contact_method) = match possibly_relayed_contact_method.clone() {
@@ -64,7 +73,7 @@ impl NetworkManager {
                 };

                 #[cfg(feature = "verbose-tracing")]
-                debug!(
+                log_net!(debug
                     "ContactMethod: {:?} for {:?}",
                     contact_method, destination_node_ref
                 );
@@ -96,16 +105,28 @@ impl NetworkManager {
                         )
                     }
                     NodeContactMethod::SignalReverse(relay_nr, target_node_ref) => {
-                        network_result_try!(
-                            this.send_data_ncm_signal_reverse(relay_nr, target_node_ref, data)
-                                .await?
-                        )
+                        let nres =
+                            this.send_data_ncm_signal_reverse(relay_nr.clone(), target_node_ref.clone(), data.clone())
+                                .await?;
+                        if matches!(nres, NetworkResult::Timeout) {
+                            // Failed to holepunch, fallback to inbound relay
+                            log_network_result!(debug "Reverse connection failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr);
+                            network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
+                        } else {
+                            network_result_try!(nres)
+                        }
                     }
                     NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref) => {
-                        network_result_try!(
-                            this.send_data_ncm_signal_hole_punch(relay_nr, target_node_ref, data)
-                                .await?
-                        )
+                        let nres =
+                            this.send_data_ncm_signal_hole_punch(relay_nr.clone(), target_node_ref.clone(), data.clone())
+                                .await?;
+                        if matches!(nres, NetworkResult::Timeout) {
+                            // Failed to holepunch, fallback to inbound relay
+                            log_network_result!(debug "Hole punch failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr);
+                            network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
+                        } else {
+                            network_result_try!(nres)
+                        }
                     }
                     NodeContactMethod::Existing => {
                         network_result_try!(
@@ -304,7 +325,7 @@ impl NetworkManager {
         // First try to send data to the last socket we've seen this peer on
         let data = if let Some(flow) = node_ref.last_flow() {
             #[cfg(feature = "verbose-tracing")]
-            debug!(
+            log_net!(debug
                 "ExistingConnection: {:?} for {:?}",
                 flow, node_ref
             );

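// The inbound-relay fallback above re-enters try_possibly_relayed_contact_method
// from within its own future. A plain `async fn` cannot recurse (its future type
// would be infinitely sized), which is why the function returns a boxed future
// (SendPinBoxFuture) built with Box::pin. An illustrative, standalone sketch of
// that recursion pattern; all names here are placeholders:
use std::future::Future;
use std::pin::Pin;

type BoxedSendFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;

fn try_with_fallback(methods_left: u32) -> BoxedSendFuture<Result<(), String>> {
    Box::pin(async move {
        if methods_left == 0 {
            return Err("no contact method worked".to_string());
        }
        let timed_out = methods_left > 1; // stand-in for a real send attempt
        if timed_out {
            // Fall back to the next contact method by recursing
            try_with_fallback(methods_left - 1).await
        } else {
            Ok(())
        }
    })
}
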
@@ -85,12 +85,12 @@ impl NetworkManager {
     }

     pub(crate) async fn cancel_tasks(&self) {
-        debug!("stopping rolling transfers task");
+        log_net!(debug "stopping rolling transfers task");
         if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await {
             warn!("rolling_transfers_task not stopped: {}", e);
         }

-        debug!("stopping routing table tasks");
+        log_net!(debug "stopping routing table tasks");
         let routing_table = self.routing_table();
         routing_table.cancel_tasks().await;

@@ -46,8 +46,7 @@ impl NetworkManager {
         flow: Flow,              // the flow used
         reporting_peer: NodeRef, // the peer's noderef reporting the socket address
     ) {
-        #[cfg(feature = "network-result-extra")]
-        debug!("report_global_socket_address\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);
+        log_network_result!("report_global_socket_address\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);

         // Ignore these reports if we are currently detecting public dial info
         let net = self.net();
@@ -172,8 +171,7 @@ impl NetworkManager {
             .unwrap_or(false)
         {
             // Record the origin of the inconsistency
-            #[cfg(feature = "network-result-extra")]
-            debug!("inconsistency added from {:?}: reported {:?} with current_addresses = {:?}", reporting_ip_block, a, current_addresses);
+            log_network_result!(debug "inconsistency added from {:?}: reported {:?} with current_addresses = {:?}", reporting_ip_block, a, current_addresses);

             inconsistencies.push(*reporting_ip_block);
         }
@@ -214,7 +212,7 @@ impl NetworkManager {

         // // debug code
         // if inconsistent {
-        //     trace!("public_address_check_cache: {:#?}\ncurrent_addresses: {:#?}\ninconsistencies: {}", inner
+        //     log_net!("public_address_check_cache: {:#?}\ncurrent_addresses: {:#?}\ninconsistencies: {}", inner
         //         .public_address_check_cache, current_addresses, inconsistencies);
         // }

@@ -50,6 +50,7 @@ impl Address {
             Address::IPV4(v4) => {
                 ipv4addr_is_private(v4)
                     || ipv4addr_is_link_local(v4)
+                    || ipv4addr_is_shared(v4)
                     || ipv4addr_is_ietf_protocol_assignment(v4)
             }
             Address::IPV6(v6) => {

@@ -334,6 +334,7 @@ impl Network {
     /////////////////////////////////////////////////////////////////

     pub async fn startup(&self) -> EyreResult<()> {
+        log_net!(debug "starting network");
         // get protocol config
         let protocol_config = {
             let c = self.config.get();
@@ -396,6 +397,7 @@ impl Network {
         editor_public_internet.commit(true).await;

         self.inner.lock().network_started = true;
+        log_net!(debug "network started");
         Ok(())
     }

@@ -412,7 +414,7 @@ impl Network {
     }

     pub async fn shutdown(&self) {
-        trace!("stopping network");
+        log_net!(debug "stopping network");

         // Reset state
         let routing_table = self.routing_table();
@@ -429,7 +431,7 @@ impl Network {
         // Cancels all async background tasks by dropping join handles
         *self.inner.lock() = Self::new_inner();

-        trace!("network stopped");
+        log_net!(debug "network stopped");
     }

     pub fn get_preferred_local_address(&self, _dial_info: &DialInfo) -> Option<SocketAddr> {

@@ -932,10 +932,10 @@ impl Drop for BucketEntry {
         if self.ref_count.load(Ordering::Acquire) != 0 {
             #[cfg(feature = "tracking")]
             {
-                println!("NodeRef Tracking");
+                info!("NodeRef Tracking");
                 for (id, bt) in &mut self.node_ref_tracks {
                     bt.resolve();
-                    println!("Id: {}\n----------------\n{:#?}", id, bt);
+                    info!("Id: {}\n----------------\n{:#?}", id, bt);
                 }
             }

@@ -247,7 +247,7 @@ impl RoutingTable {

     /// Called to initialize the routing table after it is created
     pub async fn init(&self) -> EyreResult<()> {
-        debug!("starting routing table init");
+        log_rtab!(debug "starting routing table init");

         // Set up routing buckets
         {
@@ -256,7 +256,7 @@ impl RoutingTable {
         }

         // Load bucket entries from table db if possible
-        debug!("loading routing table entries");
+        log_rtab!(debug "loading routing table entries");
         if let Err(e) = self.load_buckets().await {
             log_rtab!(debug "Error loading buckets from storage: {:#?}. Resetting.", e);
             let mut inner = self.inner.write();
@@ -264,7 +264,7 @@ impl RoutingTable {
         }

         // Set up routespecstore
-        debug!("starting route spec store init");
+        log_rtab!(debug "starting route spec store init");
         let route_spec_store = match RouteSpecStore::load(self.clone()).await {
             Ok(v) => v,
             Err(e) => {
@@ -272,7 +272,7 @@ impl RoutingTable {
                 RouteSpecStore::new(self.clone())
             }
         };
-        debug!("finished route spec store init");
+        log_rtab!(debug "finished route spec store init");

         {
             let mut inner = self.inner.write();
@@ -285,13 +285,13 @@ impl RoutingTable {
             .set_routing_table(Some(self.clone()))
             .await;

-        debug!("finished routing table init");
+        log_rtab!(debug "finished routing table init");
         Ok(())
     }

     /// Called to shut down the routing table
     pub async fn terminate(&self) {
-        debug!("starting routing table terminate");
+        log_rtab!(debug "starting routing table terminate");

         // Stop storage manager from using us
         self.network_manager
@@ -303,12 +303,12 @@ impl RoutingTable {
         self.cancel_tasks().await;

         // Load bucket entries from table db if possible
-        debug!("saving routing table entries");
+        log_rtab!(debug "saving routing table entries");
         if let Err(e) = self.save_buckets().await {
             error!("failed to save routing table entries: {}", e);
         }

-        debug!("saving route spec store");
+        log_rtab!(debug "saving route spec store");
         let rss = {
             let mut inner = self.inner.write();
             inner.route_spec_store.take()
@@ -318,12 +318,12 @@ impl RoutingTable {
                 error!("couldn't save route spec store: {}", e);
             }
         }
-        debug!("shutting down routing table");
+        log_rtab!(debug "shutting down routing table");

         let mut inner = self.inner.write();
         *inner = RoutingTableInner::new(self.unlocked_inner.clone());

-        debug!("finished routing table terminate");
+        log_rtab!(debug "finished routing table terminate");
     }

     /// Serialize the routing table.
@@ -390,6 +390,15 @@ impl RoutingTable {
             for b in &c.network.routing_table.bootstrap {
                 cache_validity_key.append(&mut b.as_bytes().to_vec());
             }
+            cache_validity_key.append(
+                &mut c
+                    .network
+                    .network_key_password
+                    .clone()
+                    .unwrap_or_default()
+                    .as_bytes()
+                    .to_vec(),
+            );
         };

         // Deserialize bucket map and all entries from the table store
@@ -870,7 +879,15 @@ impl RoutingTable {
         // does it have some dial info we need?
         let filter = |n: &NodeInfo| {
             let mut keep = false;
+            // Bootstraps must have -only- inbound capable network class
+            if !matches!(n.network_class(), NetworkClass::InboundCapable) {
+                return false;
+            }
             for did in n.dial_info_detail_list() {
+                // Bootstraps must have -only- direct dial info
+                if !matches!(did.class, DialInfoClass::Direct) {
+                    return false;
+                }
                 if matches!(did.dial_info.address_type(), AddressType::IPV4) {
                     for (n, protocol_type) in protocol_types.iter().enumerate() {
                         if nodes_proto_v4[n] < max_per_type

@@ -849,8 +849,7 @@ impl RouteSpecStore {
         // Get all valid routes, allow routes that need testing
         // but definitely prefer routes that have been recently tested
         for (id, rssd) in inner.content.iter_details() {
-            if rssd.get_stability() >= stability
-                && rssd.is_sequencing_match(sequencing)
+            if rssd.is_sequencing_match(sequencing)
                 && rssd.hop_count() >= min_hop_count
                 && rssd.hop_count() <= max_hop_count
                 && rssd.get_directions().is_superset(directions)
@@ -864,6 +863,7 @@ impl RouteSpecStore {

         // Sort the routes by preference
         routes.sort_by(|a, b| {
+            // Prefer routes that don't need testing
             let a_needs_testing = a.1.get_stats().needs_testing(cur_ts);
             let b_needs_testing = b.1.get_stats().needs_testing(cur_ts);
             if !a_needs_testing && b_needs_testing {
@@ -872,6 +872,18 @@ impl RouteSpecStore {
             if !b_needs_testing && a_needs_testing {
                 return cmp::Ordering::Greater;
             }
+
+            // Prefer routes that meet the stability selection
+            let a_meets_stability = a.1.get_stability() >= stability;
+            let b_meets_stability = b.1.get_stability() >= stability;
+            if a_meets_stability && !b_meets_stability {
+                return cmp::Ordering::Less;
+            }
+            if b_meets_stability && !a_meets_stability {
+                return cmp::Ordering::Greater;
+            }

             // Prefer faster routes
             let a_latency = a.1.get_stats().latency_stats().average;
             let b_latency = b.1.get_stats().latency_stats().average;

@@ -903,10 +915,14 @@ impl RouteSpecStore {
         F: FnMut(&RouteId, &RemotePrivateRouteInfo) -> Option<R>,
     {
         let inner = self.inner.lock();
-        let mut out = Vec::with_capacity(inner.cache.get_remote_private_route_count());
-        for info in inner.cache.iter_remote_private_routes() {
-            if let Some(x) = filter(info.0, info.1) {
-                out.push(x);
+        let cur_ts = get_aligned_timestamp();
+        let remote_route_ids = inner.cache.get_remote_private_route_ids(cur_ts);
+        let mut out = Vec::with_capacity(remote_route_ids.len());
+        for id in remote_route_ids {
+            if let Some(rpri) = inner.cache.peek_remote_private_route(cur_ts, &id) {
+                if let Some(x) = filter(&id, rpri) {
+                    out.push(x);
+                }
             }
         }
         out
@@ -916,7 +932,7 @@ impl RouteSpecStore {
     pub fn debug_route(&self, id: &RouteId) -> Option<String> {
         let inner = &mut *self.inner.lock();
         let cur_ts = get_aligned_timestamp();
-        if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, id) {
+        if let Some(rpri) = inner.cache.peek_remote_private_route(cur_ts, id) {
             return Some(format!("{:#?}", rpri));
         }
         if let Some(rssd) = inner.content.get_detail(id) {
@@ -1001,7 +1017,7 @@ impl RouteSpecStore {
         first_hop.set_sequencing(sequencing);

         // Return the compiled safety route
-        //println!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts));
+        //info!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts));
         return Ok(CompiledRoute {
             safety_route: SafetyRoute::new_stub(
                 routing_table.node_id(crypto_kind),
@@ -1073,7 +1089,7 @@ impl RouteSpecStore {
                 first_hop,
             };
             // Return compiled route
-            //println!("compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts));
+            //info!("compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts));
             return Ok(compiled_route);
         }
     }
@@ -1192,7 +1208,7 @@ impl RouteSpecStore {
         };

         // Return compiled route
-        //println!("compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts));
+        //info!("compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts));
         Ok(compiled_route)
     }

@@ -1575,7 +1591,7 @@ impl RouteSpecStore {
     /// Check to see if this remote (not ours) private route has seen our current node info yet
     /// This happens when you communicate with a private route without a safety route
     pub fn has_remote_private_route_seen_our_node_info(&self, key: &PublicKey) -> bool {
-        let inner = &mut *self.inner.lock();
+        let inner = &*self.inner.lock();

         // Check for local route. If this is not a remote private route,
         // we may be running a test and using our own local route as the destination private route.
@@ -1586,7 +1602,7 @@ impl RouteSpecStore {

         if let Some(rrid) = inner.cache.get_remote_private_route_id_by_key(key) {
             let cur_ts = get_aligned_timestamp();
-            if let Some(rpri) = inner.cache.peek_remote_private_route_mut(cur_ts, &rrid) {
+            if let Some(rpri) = inner.cache.peek_remote_private_route(cur_ts, &rrid) {
                 let our_node_info_ts = self
                     .unlocked_inner
                     .routing_table
@@ -1634,7 +1650,7 @@ impl RouteSpecStore {
     }

     /// Get the route statistics for any route we know about, local or remote
-    pub fn with_route_stats<F, R>(&self, cur_ts: Timestamp, key: &PublicKey, f: F) -> Option<R>
+    pub fn with_route_stats_mut<F, R>(&self, cur_ts: Timestamp, key: &PublicKey, f: F) -> Option<R>
     where
         F: FnOnce(&mut RouteStats) -> R,
     {

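// The route selection changes earlier in this file demote stability from a
// hard filter to a sort preference: routes below the requested stability are
// kept but ranked later. An illustrative, standalone sketch of that tiered
// comparator; the types and names are placeholders:
#[derive(Debug)]
struct RouteChoice {
    needs_testing: bool,
    meets_stability: bool,
    latency_us: u64,
}

fn rank_routes(routes: &mut [RouteChoice]) {
    routes.sort_by(|a, b| {
        // Tier 1: prefer routes that don't need testing (false sorts first)
        (a.needs_testing.cmp(&b.needs_testing))
            // Tier 2: prefer routes that meet the stability selection
            .then_with(|| b.meets_stability.cmp(&a.meets_stability))
            // Tier 3: prefer faster routes
            .then_with(|| a.latency_us.cmp(&b.latency_us))
    });
}
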
@@ -45,7 +45,7 @@ impl RemotePrivateRouteInfo {
         &mut self.stats
     }

-    pub fn has_seen_our_node_info_ts(&mut self, our_node_info_ts: Timestamp) -> bool {
+    pub fn has_seen_our_node_info_ts(&self, our_node_info_ts: Timestamp) -> bool {
         self.last_seen_our_node_info_ts == our_node_info_ts
     }
     pub fn set_last_seen_our_node_info_ts(&mut self, last_seen_our_node_info_ts: Timestamp) {

@@ -173,16 +173,18 @@ impl RouteSpecStoreCache {
         id
     }

-    /// get count of remote private routes in cache
-    pub fn get_remote_private_route_count(&self) -> usize {
-        self.remote_private_route_set_cache.len()
-    }
-
-    /// iterate all of the remote private routes we have in the cache
-    pub fn iter_remote_private_routes(
-        &self,
-    ) -> hashlink::linked_hash_map::Iter<RouteId, RemotePrivateRouteInfo> {
-        self.remote_private_route_set_cache.iter()
+    pub fn get_remote_private_route_ids(&self, cur_ts: Timestamp) -> Vec<RouteId> {
+        self.remote_private_route_set_cache
+            .iter()
+            .filter_map(|(id, rpri)| {
+                if !rpri.did_expire(cur_ts) {
+                    Some(*id)
+                } else {
+                    None
+                }
+            })
+            .collect()
     }

     /// remote private route cache accessor

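// get_remote_private_route_ids() above snapshots ids instead of handing out
// iterator references, and filters expired entries on read. An illustrative,
// standalone sketch of the same expire-on-read id collection with std types;
// the names are placeholders:
fn live_route_ids(
    cache: &std::collections::HashMap<u64, std::time::Instant>, // id -> expiry
    now: std::time::Instant,
) -> Vec<u64> {
    cache
        .iter()
        .filter_map(|(id, expires_at)| (*expires_at > now).then_some(*id))
        .collect()
}
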
@@ -217,6 +219,21 @@ impl RouteSpecStoreCache {
         None
     }

+    /// remote private route cache accessor without lru action
+    /// will not LRU entries but may expire entries and not return them if they are stale
+    pub fn peek_remote_private_route(
+        &self,
+        cur_ts: Timestamp,
+        id: &RouteId,
+    ) -> Option<&RemotePrivateRouteInfo> {
+        if let Some(rpri) = self.remote_private_route_set_cache.peek(id) {
+            if !rpri.did_expire(cur_ts) {
+                return Some(rpri);
+            }
+        }
+        None
+    }
+
     /// mutable remote private route cache accessor without lru action
     /// will not LRU entries but may expire entries and not return them if they are stale
     pub fn peek_remote_private_route_mut(

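// peek_remote_private_route() deliberately avoids promoting the entry, so a
// read-only inspection does not disturb LRU eviction order. An illustrative
// sketch of peek-vs-get semantics using the lru crate (an assumption for the
// sketch only; the cache above is veilid's own set cache):
fn peek_vs_get_sketch() {
    use lru::LruCache;
    use std::num::NonZeroUsize;

    let mut cache = LruCache::new(NonZeroUsize::new(2).unwrap());
    cache.put("a", 1);
    cache.put("b", 2);

    // peek() leaves "a" least-recently used...
    assert_eq!(cache.peek(&"a"), Some(&1));
    cache.put("c", 3); // ...so inserting "c" evicts "a"
    assert!(cache.peek(&"a").is_none());

    // get() would have promoted "a" and changed which entry gets evicted
}
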
@@ -888,11 +888,16 @@ impl RoutingTableInner {
             }
         }

+        // Public internet routing domain is ready for app use,
+        // when we have proper dialinfo/networkclass
         let public_internet_ready = !matches!(
             self.get_network_class(RoutingDomain::PublicInternet)
                 .unwrap_or_default(),
             NetworkClass::Invalid
         );

+        // Local network routing domain is ready for app use
+        // when we have proper dialinfo/networkclass
        let local_network_ready = !matches!(
            self.get_network_class(RoutingDomain::LocalNetwork)
                .unwrap_or_default(),

@@ -4,6 +4,7 @@ use futures_util::stream::{FuturesUnordered, StreamExt};
 use stop_token::future::FutureExt as StopFutureExt;

 pub const BOOTSTRAP_TXT_VERSION_0: u8 = 0;
+pub const MIN_BOOTSTRAP_PEERS: usize = 4;

 #[derive(Clone, Debug)]
 pub struct BootstrapRecord {
@@ -285,8 +286,7 @@ impl RoutingTable {
         {
             Ok(NodeContactMethod::Direct(v)) => v,
             Ok(v) => {
-                log_rtab!(warn "invalid contact method for bootstrap, restarting network: {:?}", v);
-                routing_table.network_manager().restart_network();
+                log_rtab!(warn "invalid contact method for bootstrap, ignoring peer: {:?}", v);
                 return;
             }
             Err(e) => {
@@ -345,7 +345,7 @@ impl RoutingTable {
             // Do we need to bootstrap this crypto kind?
             let eckey = (RoutingDomain::PublicInternet, crypto_kind);
             let cnt = entry_count.get(&eckey).copied().unwrap_or_default();
-            if cnt == 0 {
+            if cnt < MIN_BOOTSTRAP_PEERS {
                 crypto_kinds.push(crypto_kind);
             }
         }

@@ -194,31 +194,31 @@ impl RoutingTable {

     pub(crate) async fn cancel_tasks(&self) {
         // Cancel all tasks being ticked
-        debug!("stopping rolling transfers task");
+        log_rtab!(debug "stopping rolling transfers task");
         if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await {
             error!("rolling_transfers_task not stopped: {}", e);
         }
-        debug!("stopping kick buckets task");
+        log_rtab!(debug "stopping kick buckets task");
         if let Err(e) = self.unlocked_inner.kick_buckets_task.stop().await {
             error!("kick_buckets_task not stopped: {}", e);
         }
-        debug!("stopping bootstrap task");
+        log_rtab!(debug "stopping bootstrap task");
         if let Err(e) = self.unlocked_inner.bootstrap_task.stop().await {
             error!("bootstrap_task not stopped: {}", e);
         }
-        debug!("stopping peer minimum refresh task");
+        log_rtab!(debug "stopping peer minimum refresh task");
         if let Err(e) = self.unlocked_inner.peer_minimum_refresh_task.stop().await {
             error!("peer_minimum_refresh_task not stopped: {}", e);
         }
-        debug!("stopping ping_validator task");
+        log_rtab!(debug "stopping ping_validator task");
         if let Err(e) = self.unlocked_inner.ping_validator_task.stop().await {
             error!("ping_validator_task not stopped: {}", e);
         }
-        debug!("stopping relay management task");
+        log_rtab!(debug "stopping relay management task");
         if let Err(e) = self.unlocked_inner.relay_management_task.stop().await {
             warn!("relay_management_task not stopped: {}", e);
         }
-        debug!("stopping private route management task");
+        log_rtab!(debug "stopping private route management task");
         if let Err(e) = self
             .unlocked_inner
             .private_route_management_task

@@ -105,9 +105,6 @@ impl RoutingTable {
         for relay_nr_filtered in relay_noderefs {
             let rpc = rpc.clone();

-            #[cfg(feature = "network-result-extra")]
-            log_rtab!(debug "--> Keepalive ping to {:?}", relay_nr_filtered);
-            #[cfg(not(feature = "network-result-extra"))]
             log_rtab!("--> Keepalive ping to {:?}", relay_nr_filtered);

             futurequeue.push_back(

@@ -23,13 +23,13 @@ impl RoutingTable {
         let state = relay_node.state(cur_ts);
         // Relay node is dead or no longer needed
         if matches!(state, BucketEntryState::Dead) {
-            debug!("Relay node died, dropping relay {}", relay_node);
+            log_rtab!(debug "Relay node died, dropping relay {}", relay_node);
             editor.clear_relay_node();
             false
         }
         // Relay node no longer can relay
         else if relay_node.operate(|_rti, e| !relay_node_filter(e)) {
-            debug!(
+            log_rtab!(debug
                 "Relay node can no longer relay, dropping relay {}",
                 relay_node
             );
@@ -38,7 +38,7 @@ impl RoutingTable {
         }
         // Relay node is no longer required
         else if !own_node_info.requires_relay() {
-            debug!(
+            log_rtab!(debug
                 "Relay node no longer required, dropping relay {}",
                 relay_node
             );
@@ -47,7 +47,7 @@ impl RoutingTable {
         }
         // Should not have relay for invalid network class
         else if !self.has_valid_network_class(RoutingDomain::PublicInternet) {
-            debug!(
+            log_rtab!(debug
                 "Invalid network class does not get a relay, dropping relay {}",
                 relay_node
             );
@@ -75,7 +75,7 @@ impl RoutingTable {
                 false,
             ) {
                 Ok(nr) => {
-                    debug!("Outbound relay node selected: {}", nr);
+                    log_rtab!(debug "Outbound relay node selected: {}", nr);
                     editor.set_relay_node(nr);
                     got_outbound_relay = true;
                 }
@@ -84,13 +84,13 @@ impl RoutingTable {
                 }
             }
         } else {
-            debug!("Outbound relay desired but not available");
+            log_rtab!(debug "Outbound relay desired but not available");
         }
     }
     if !got_outbound_relay {
         // Find a node in our routing table that is an acceptable inbound relay
         if let Some(nr) = self.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts) {
-            debug!("Inbound relay node selected: {}", nr);
+            log_rtab!(debug "Inbound relay node selected: {}", nr);
             editor.set_relay_node(nr);
         }
     }

@@ -27,6 +27,7 @@ mod tunnel;
 mod typed_key;
 mod typed_signature;

+pub(crate) use operations::MAX_INSPECT_VALUE_A_SEQS_LEN;
 pub(in crate::rpc_processor) use operations::*;

 pub(crate) use address::*;
@@ -60,9 +61,11 @@ pub use typed_signature::*;
 use super::*;

 #[derive(Debug, Clone)]
+#[allow(clippy::enum_variant_names)]
 pub(in crate::rpc_processor) enum QuestionContext {
     GetValue(ValidateGetValueContext),
     SetValue(ValidateSetValueContext),
+    InspectValue(ValidateInspectValueContext),
 }

 #[derive(Clone)]

@@ -12,6 +12,7 @@ impl RPCAnswer {
     pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
         self.detail.validate(validate_context)
     }
+    #[cfg(feature = "verbose-tracing")]
     pub fn desc(&self) -> &'static str {
         self.detail.desc()
     }
@@ -37,6 +38,7 @@ pub(in crate::rpc_processor) enum RPCAnswerDetail {
     GetValueA(Box<RPCOperationGetValueA>),
     SetValueA(Box<RPCOperationSetValueA>),
     WatchValueA(Box<RPCOperationWatchValueA>),
+    InspectValueA(Box<RPCOperationInspectValueA>),
     #[cfg(feature = "unstable-blockstore")]
     SupplyBlockA(Box<RPCOperationSupplyBlockA>),
     #[cfg(feature = "unstable-blockstore")]
@@ -50,6 +52,7 @@ pub(in crate::rpc_processor) enum RPCAnswerDetail {
 }

 impl RPCAnswerDetail {
+    #[cfg(feature = "verbose-tracing")]
     pub fn desc(&self) -> &'static str {
         match self {
             RPCAnswerDetail::StatusA(_) => "StatusA",
@@ -58,6 +61,7 @@ impl RPCAnswerDetail {
             RPCAnswerDetail::GetValueA(_) => "GetValueA",
             RPCAnswerDetail::SetValueA(_) => "SetValueA",
             RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
+            RPCAnswerDetail::InspectValueA(_) => "InspectValueA",
             #[cfg(feature = "unstable-blockstore")]
             RPCAnswerDetail::SupplyBlockA(_) => "SupplyBlockA",
             #[cfg(feature = "unstable-blockstore")]
@@ -78,6 +82,7 @@ impl RPCAnswerDetail {
             RPCAnswerDetail::GetValueA(r) => r.validate(validate_context),
             RPCAnswerDetail::SetValueA(r) => r.validate(validate_context),
             RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context),
+            RPCAnswerDetail::InspectValueA(r) => r.validate(validate_context),
             #[cfg(feature = "unstable-blockstore")]
             RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context),
             #[cfg(feature = "unstable-blockstore")]
@@ -125,6 +130,11 @@ impl RPCAnswerDetail {
                 let out = RPCOperationWatchValueA::decode(&op_reader)?;
                 RPCAnswerDetail::WatchValueA(Box::new(out))
             }
+            veilid_capnp::answer::detail::InspectValueA(r) => {
+                let op_reader = r.map_err(RPCError::protocol)?;
+                let out = RPCOperationInspectValueA::decode(&op_reader)?;
+                RPCAnswerDetail::InspectValueA(Box::new(out))
+            }
             #[cfg(feature = "unstable-blockstore")]
             veilid_capnp::answer::detail::SupplyBlockA(r) => {
                 let op_reader = r.map_err(RPCError::protocol)?;
@@ -171,6 +181,9 @@ impl RPCAnswerDetail {
             RPCAnswerDetail::WatchValueA(d) => {
                 d.encode(&mut builder.reborrow().init_watch_value_a())
             }
+            RPCAnswerDetail::InspectValueA(d) => {
+                d.encode(&mut builder.reborrow().init_inspect_value_a())
+            }
             #[cfg(feature = "unstable-blockstore")]
             RPCAnswerDetail::SupplyBlockA(d) => {
                 d.encode(&mut builder.reborrow().init_supply_block_a())

@@ -4,6 +4,7 @@ mod operation_app_call;
 mod operation_app_message;
 mod operation_find_node;
 mod operation_get_value;
+mod operation_inspect_value;
 mod operation_return_receipt;
 mod operation_route;
 mod operation_set_value;
@@ -35,6 +36,7 @@ pub(in crate::rpc_processor) use operation_app_call::*;
 pub(in crate::rpc_processor) use operation_app_message::*;
 pub(in crate::rpc_processor) use operation_find_node::*;
 pub(in crate::rpc_processor) use operation_get_value::*;
+pub(in crate::rpc_processor) use operation_inspect_value::*;
 pub(in crate::rpc_processor) use operation_return_receipt::*;
 pub(in crate::rpc_processor) use operation_route::*;
 pub(in crate::rpc_processor) use operation_set_value::*;
@@ -60,3 +62,5 @@ pub(in crate::rpc_processor) use operation_complete_tunnel::*;
 pub(in crate::rpc_processor) use operation_start_tunnel::*;

 use super::*;
+
+pub(crate) use operation_inspect_value::MAX_INSPECT_VALUE_A_SEQS_LEN;

@@ -8,6 +8,7 @@ pub(in crate::rpc_processor) enum RPCOperationKind {
 }

 impl RPCOperationKind {
+    #[cfg(feature = "verbose-tracing")]
     pub fn desc(&self) -> &'static str {
         match self {
             RPCOperationKind::Question(q) => q.desc(),
@@ -105,6 +106,7 @@ impl RPCOperation {
                 .validate(validate_context.crypto.clone())
                 .map_err(RPCError::protocol)?;
         }
+
         // Validate operation kind
         self.kind.validate(validate_context)
     }

@@ -45,6 +45,8 @@ impl RPCOperationAppCallQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) struct RPCOperationAppCallA {
     message: Vec<u8>,
@@ -38,6 +38,8 @@ impl RPCOperationCancelTunnelQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[cfg(feature = "unstable-tunnels")]
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) enum RPCOperationCancelTunnelA {
@@ -75,6 +75,8 @@ impl RPCOperationCompleteTunnelQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[cfg(feature = "unstable-tunnels")]
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) enum RPCOperationCompleteTunnelA {
@@ -44,6 +44,8 @@ impl RPCOperationFindBlockQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) struct RPCOperationFindBlockA {
     data: Vec<u8>,
@@ -73,6 +73,8 @@ impl RPCOperationFindNodeQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) struct RPCOperationFindNodeA {
     peers: Vec<PeerInfo>,
@@ -75,6 +75,8 @@ impl RPCOperationGetValueQ {
     }
 }

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #[derive(Debug, Clone)]
 pub(in crate::rpc_processor) struct RPCOperationGetValueA {
     value: Option<SignedValueData>,

@@ -109,30 +111,36 @@ impl RPCOperationGetValueA {
             panic!("Wrong context type for GetValueA");
         };

-        if let Some(value) = &self.value {
-            // Get descriptor to validate with
-            let descriptor = if let Some(descriptor) = &self.descriptor {
-                if let Some(last_descriptor) = &get_value_context.last_descriptor {
-                    if descriptor.cmp_no_sig(last_descriptor) != cmp::Ordering::Equal {
-                        return Err(RPCError::protocol(
-                            "getvalue descriptor does not match last descriptor",
-                        ));
-                    }
-                }
-                descriptor
-            } else {
-                let Some(descriptor) = &get_value_context.last_descriptor else {
-                    return Err(RPCError::protocol(
-                        "no last descriptor, requires a descriptor",
-                    ));
-                };
-                descriptor
-            };
+        // Validate descriptor
+        if let Some(descriptor) = &self.descriptor {
+            // Ensure the descriptor itself validates
+            descriptor
+                .validate(get_value_context.vcrypto.clone())
+                .map_err(RPCError::protocol)?;
+
+            // Ensure descriptor matches last one
+            if let Some(last_descriptor) = &get_value_context.last_descriptor {
+                if descriptor.cmp_no_sig(last_descriptor) != cmp::Ordering::Equal {
+                    return Err(RPCError::protocol(
+                        "GetValue descriptor does not match last descriptor",
+                    ));
+                }
+            }
+        }
+
+        // Ensure the value validates
+        if let Some(value) = &self.value {
+            // Get descriptor to validate with
+            let Some(descriptor) = self
+                .descriptor
+                .as_ref()
+                .or(get_value_context.last_descriptor.as_ref())
+            else {
+                return Err(RPCError::protocol(
+                    "no last descriptor, requires a descriptor",
+                ));
+            };

             // And the signed value data
             value
                 .validate(

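// The GetValueA refactor above separates "does the descriptor validate" from
// "which descriptor do we validate the value against", with the latter falling
// back from the answer's descriptor to the one already held. An illustrative,
// standalone sketch of that Option fallback chain; the names are placeholders:
fn pick_descriptor<'a>(
    from_answer: Option<&'a [u8]>,
    last_known: Option<&'a [u8]>,
) -> Result<&'a [u8], &'static str> {
    from_answer
        .or(last_known)
        .ok_or("no last descriptor, requires a descriptor")
}
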
@@ -0,0 +1,288 @@
use super::*;
use crate::storage_manager::SignedValueDescriptor;

const MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
pub(crate) const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = 512;
const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;

#[derive(Clone)]
pub(in crate::rpc_processor) struct ValidateInspectValueContext {
    pub last_descriptor: Option<SignedValueDescriptor>,
    pub subkeys: ValueSubkeyRangeSet,
    pub vcrypto: CryptoSystemVersion,
}

impl fmt::Debug for ValidateInspectValueContext {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ValidateInspectValueContext")
            .field("last_descriptor", &self.last_descriptor)
            .field("vcrypto", &self.vcrypto.kind().to_string())
            .finish()
    }
}

#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
    key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    want_descriptor: bool,
}

impl RPCOperationInspectValueQ {
    pub fn new(
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        want_descriptor: bool,
    ) -> Result<Self, RPCError> {
        Ok(Self {
            key,
            subkeys,
            want_descriptor,
        })
    }
    pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
        Ok(())
    }

    // pub fn key(&self) -> &TypedKey {
    //     &self.key
    // }
    // pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
    //     &self.subkeys
    // }
    // pub fn want_descriptor(&self) -> bool {
    //     self.want_descriptor
    // }
    pub fn destructure(self) -> (TypedKey, ValueSubkeyRangeSet, bool) {
        (self.key, self.subkeys, self.want_descriptor)
    }

    pub fn decode(
        reader: &veilid_capnp::operation_inspect_value_q::Reader,
    ) -> Result<Self, RPCError> {
        let k_reader = reader.reborrow().get_key().map_err(RPCError::protocol)?;
        let key = decode_typed_key(&k_reader)?;
        let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?;
        // Maximum number of ranges that can hold the maximum number of subkeys is one subkey per range
        if sk_reader.len() as usize > MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN {
            return Err(RPCError::protocol("InspectValueQ too many subkey ranges"));
        }
        let mut subkeys = ValueSubkeyRangeSet::new();
        for skr in sk_reader.iter() {
            let vskr = (skr.get_start(), skr.get_end());
            if vskr.0 > vskr.1 {
                return Err(RPCError::protocol("invalid subkey range"));
            }
            if let Some(lvskr) = subkeys.last() {
                if lvskr >= vskr.0 {
                    return Err(RPCError::protocol(
                        "subkey range out of order or not merged",
                    ));
                }
            }
            subkeys.ranges_insert(vskr.0..=vskr.1);
        }

        let want_descriptor = reader.reborrow().get_want_descriptor();
        Ok(Self {
            key,
            subkeys,
            want_descriptor,
        })
    }
    pub fn encode(
        &self,
        builder: &mut veilid_capnp::operation_inspect_value_q::Builder,
    ) -> Result<(), RPCError> {
        let mut k_builder = builder.reborrow().init_key();
        encode_typed_key(&self.key, &mut k_builder);

        let mut sk_builder = builder.reborrow().init_subkeys(
            self.subkeys
                .ranges_len()
                .try_into()
                .map_err(RPCError::map_internal("invalid subkey range list length"))?,
        );
        for (i, skr) in self.subkeys.ranges().enumerate() {
            let mut skr_builder = sk_builder.reborrow().get(i as u32);
            skr_builder.set_start(*skr.start());
            skr_builder.set_end(*skr.end());
        }
        builder.set_want_descriptor(self.want_descriptor);
        Ok(())
    }
}

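// The decode above rejects subkey range lists that are unordered, overlapping
// or unmerged, bounding the work a hostile peer can request. An illustrative,
// standalone sketch of that validation over plain (start, end) pairs; the
// function name is a placeholder:
fn validate_subkey_ranges(ranges: &[(u32, u32)]) -> Result<(), &'static str> {
    let mut last: Option<u32> = None;
    for &(start, end) in ranges {
        if start > end {
            return Err("invalid subkey range");
        }
        // Each range must begin strictly after the previous one ended
        if let Some(prev_end) = last {
            if start <= prev_end {
                return Err("subkey range out of order or not merged");
            }
        }
        last = Some(end);
    }
    Ok(())
}
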
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationInspectValueA {
|
||||
seqs: Vec<ValueSeqNum>,
|
||||
peers: Vec<PeerInfo>,
|
||||
descriptor: Option<SignedValueDescriptor>,
|
||||
}
|
||||
|
||||
impl RPCOperationInspectValueA {
|
||||
pub fn new(
|
||||
seqs: Vec<ValueSeqNum>,
|
||||
peers: Vec<PeerInfo>,
|
||||
descriptor: Option<SignedValueDescriptor>,
|
||||
) -> Result<Self, RPCError> {
|
||||
if seqs.len() > MAX_INSPECT_VALUE_A_SEQS_LEN {
|
||||
return Err(RPCError::protocol(
|
||||
"encoded InspectValueA seqs length too long",
|
||||
));
|
||||
}
|
||||
if peers.len() > MAX_INSPECT_VALUE_A_PEERS_LEN {
|
||||
return Err(RPCError::protocol(
|
||||
"encoded InspectValueA peers length too long",
|
||||
));
|
||||
}
|
||||
Ok(Self {
|
||||
seqs,
|
||||
peers,
|
||||
descriptor,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
|
||||
let question_context = validate_context
|
||||
.question_context
|
||||
.as_ref()
|
||||
.expect("InspectValueA requires question context");
|
||||
let QuestionContext::InspectValue(inspect_value_context) = question_context else {
|
||||
panic!("Wrong context type for InspectValueA");
|
||||
};
|
||||
|
||||
// Ensure seqs returned does not exceeed subkeys requested
|
||||
#[allow(clippy::unnecessary_cast)]
|
||||
if self.seqs.len() as u64 > inspect_value_context.subkeys.len() as u64 {
|
||||
return Err(RPCError::protocol(format!(
|
||||
"InspectValue seqs length is greater than subkeys requested: {} > {}",
|
||||
self.seqs.len(),
|
||||
inspect_value_context.subkeys.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Validate descriptor
|
||||
if let Some(descriptor) = &self.descriptor {
|
||||
// Ensure the descriptor itself validates
|
||||
descriptor
|
||||
.validate(inspect_value_context.vcrypto.clone())
|
||||
.map_err(RPCError::protocol)?;
|
||||
|
||||
// Ensure descriptor matches last one
|
||||
if let Some(last_descriptor) = &inspect_value_context.last_descriptor {
|
||||
if descriptor.cmp_no_sig(last_descriptor) != cmp::Ordering::Equal {
|
||||
return Err(RPCError::protocol(
|
||||
"InspectValue descriptor does not match last descriptor",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// pub fn seqs(&self) -> &[ValueSeqNum] {
|
||||
// &self.seqs
|
||||
// }
|
||||
// pub fn peers(&self) -> &[PeerInfo] {
|
||||
// &self.peers
|
||||
// }
|
||||
// pub fn descriptor(&self) -> Option<&SignedValueDescriptor> {
|
||||
// self.descriptor.as_ref()
|
||||
// }
|
||||
pub fn destructure(
|
||||
self,
|
||||
) -> (
|
||||
Vec<ValueSeqNum>,
|
||||
Vec<PeerInfo>,
|
||||
Option<SignedValueDescriptor>,
|
||||
) {
|
||||
(self.seqs, self.peers, self.descriptor)
|
||||
}
|
||||
|
||||
pub fn decode(
|
||||
reader: &veilid_capnp::operation_inspect_value_a::Reader,
|
||||
) -> Result<Self, RPCError> {
|
||||
let seqs = if reader.has_seqs() {
|
||||
let seqs_reader = reader.get_seqs().map_err(RPCError::protocol)?;
|
||||
if seqs_reader.len() as usize > MAX_INSPECT_VALUE_A_SEQS_LEN {
|
||||
return Err(RPCError::protocol(
|
||||
"decoded InspectValueA seqs length too long",
|
||||
));
|
||||
}
|
||||
let Some(seqs) = seqs_reader.as_slice().map(|s| s.to_vec()) else {
|
||||
return Err(RPCError::protocol("invalid decoded InspectValueA seqs"));
|
||||
};
|
||||
seqs
|
||||
} else {
|
||||
return Err(RPCError::protocol("missing decoded InspectValueA seqs"));
|
||||
};
|
||||
|
||||
let peers_reader = reader.get_peers().map_err(RPCError::protocol)?;
|
||||
if peers_reader.len() as usize > MAX_INSPECT_VALUE_A_PEERS_LEN {
|
||||
return Err(RPCError::protocol(
|
||||
"decoded InspectValueA peers length too long",
|
||||
));
|
||||
}
|
||||
let mut peers = Vec::<PeerInfo>::with_capacity(
|
||||
peers_reader
|
||||
.len()
|
||||
.try_into()
|
||||
.map_err(RPCError::map_internal("too many peers"))?,
|
||||
);
|
||||
for p in peers_reader.iter() {
|
||||
let peer_info = decode_peer_info(&p)?;
|
||||
peers.push(peer_info);
|
||||
}
|
||||
|
||||
let descriptor = if reader.has_descriptor() {
|
||||
let d_reader = reader.get_descriptor().map_err(RPCError::protocol)?;
|
||||
let descriptor = decode_signed_value_descriptor(&d_reader)?;
|
||||
Some(descriptor)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
seqs,
|
||||
peers,
|
||||
descriptor,
|
||||
})
|
||||
}
|
||||
pub fn encode(
|
||||
&self,
|
||||
builder: &mut veilid_capnp::operation_inspect_value_a::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut seqs_builder = builder.reborrow().init_seqs(
|
||||
self.seqs
|
||||
.len()
|
||||
.try_into()
|
||||
.map_err(RPCError::map_internal("invalid seqs list length"))?,
|
||||
);
|
||||
for (i, seq) in self.seqs.iter().enumerate() {
|
||||
seqs_builder.set(i as u32, *seq);
|
||||
}
|
||||
|
||||
let mut peers_builder = builder.reborrow().init_peers(
|
||||
self.peers
|
||||
.len()
|
||||
.try_into()
|
||||
.map_err(RPCError::map_internal("invalid peers list length"))?,
|
||||
);
|
||||
for (i, peer) in self.peers.iter().enumerate() {
|
||||
let mut pi_builder = peers_builder.reborrow().get(i as u32);
|
||||
encode_peer_info(peer, &mut pi_builder)?;
|
||||
}
|
||||
|
||||
if let Some(descriptor) = &self.descriptor {
|
||||
let mut d_builder = builder.reborrow().init_descriptor();
|
||||
encode_signed_value_descriptor(descriptor, &mut d_builder)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
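The constructors above share one guard pattern: every list is length-checked against its MAX_* constant before the operation is built, so encode() can later size capnp lists without overflow. A minimal standalone sketch of that pattern, with illustrative names that are not part of the Veilid API:

const MAX_ITEMS: usize = 20;

struct BoundedAnswer {
    items: Vec<u32>,
}

impl BoundedAnswer {
    // Same shape as RPCOperation*::new(): validate length up front so the
    // encode path can assume the list always fits on the wire.
    fn new(items: Vec<u32>) -> Result<Self, String> {
        if items.len() > MAX_ITEMS {
            return Err("encoded answer items length too long".to_string());
        }
        Ok(Self { items })
    }
}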
@@ -109,6 +109,8 @@ impl RPCOperationSetValueQ {
    }
}

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSetValueA {
    set: bool,
@@ -139,13 +141,13 @@ impl RPCOperationSetValueA {
            panic!("Wrong context type for SetValueA");
        };

-        if let Some(value) = &self.value {
-            // Ensure the descriptor itself validates
-            set_value_context
-                .descriptor
-                .validate(set_value_context.vcrypto.clone())
-                .map_err(RPCError::protocol)?;
+        // Ensure the descriptor itself validates
+        set_value_context
+            .descriptor
+            .validate(set_value_context.vcrypto.clone())
+            .map_err(RPCError::protocol)?;

+        if let Some(value) = &self.value {
            // And the signed value data
            value
                .validate(
@@ -65,6 +65,8 @@ impl RPCOperationStartTunnelQ {
    }
}

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) enum RPCOperationStartTunnelA {
@@ -42,6 +42,8 @@ impl RPCOperationStatusQ {
    }
}

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationStatusA {
    node_status: Option<NodeStatus>,
@@ -42,6 +42,8 @@ impl RPCOperationSupplyBlockQ {
    }
}

+////////////////////////////////////////////////////////////////////////////////////////////////////
+
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSupplyBlockA {
    expiration: u64,
@@ -1,13 +1,14 @@
use super::*;
use crate::storage_manager::SignedValueData;

-const MAX_VALUE_CHANGED_SUBKEYS_LEN: usize = 512;
+const MAX_VALUE_CHANGED_SUBKEY_RANGES_LEN: usize = 512;

#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationValueChanged {
    key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    count: u32,
+    watch_id: u64,
    value: SignedValueData,
}

@@ -17,26 +18,33 @@ impl RPCOperationValueChanged {
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        count: u32,
+        watch_id: u64,
        value: SignedValueData,
    ) -> Result<Self, RPCError> {
-        // Needed because RangeSetBlaze uses different types here all the time
-        #[allow(clippy::unnecessary_cast)]
-        let subkeys_len = subkeys.ranges_len() as usize;
+        if subkeys.ranges_len() > MAX_VALUE_CHANGED_SUBKEY_RANGES_LEN {
+            return Err(RPCError::protocol(
+                "ValueChanged subkey ranges length too long",
+            ));
        }

-        if subkeys_len > MAX_VALUE_CHANGED_SUBKEYS_LEN {
-            return Err(RPCError::protocol("ValueChanged subkeys length too long"));
+        if watch_id == 0 {
+            return Err(RPCError::protocol("ValueChanged needs a nonzero watch id"));
        }

        Ok(Self {
            key,
            subkeys,
            count,
+            watch_id,
            value,
        })
    }

    pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
-        // validation must be done by storage manager as this is more complicated
+        if self.watch_id == 0 {
+            return Err(RPCError::protocol("ValueChanged does not have a valid id"));
+        }
+        // further validation must be done by storage manager as this is more complicated
        Ok(())
    }

@@ -55,14 +63,25 @@ impl RPCOperationValueChanged {
        self.count
    }

+    #[allow(dead_code)]
+    pub fn watch_id(&self) -> u64 {
+        self.watch_id
+    }
+
    #[allow(dead_code)]
    pub fn value(&self) -> &SignedValueData {
        &self.value
    }

    #[allow(dead_code)]
-    pub fn destructure(self) -> (TypedKey, ValueSubkeyRangeSet, u32, SignedValueData) {
-        (self.key, self.subkeys, self.count, self.value)
+    pub fn destructure(self) -> (TypedKey, ValueSubkeyRangeSet, u32, u64, SignedValueData) {
+        (
+            self.key,
+            self.subkeys,
+            self.count,
+            self.watch_id,
+            self.value,
+        )
    }

    pub fn decode(
@@ -72,8 +91,10 @@ impl RPCOperationValueChanged {
        let key = decode_typed_key(&k_reader)?;

        let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?;
-        if sk_reader.len() as usize > MAX_VALUE_CHANGED_SUBKEYS_LEN {
-            return Err(RPCError::protocol("ValueChanged subkeys length too long"));
+        if sk_reader.len() as usize > MAX_VALUE_CHANGED_SUBKEY_RANGES_LEN {
+            return Err(RPCError::protocol(
+                "ValueChanged subkey ranges length too long",
+            ));
        }

        let mut subkeys = ValueSubkeyRangeSet::new();
@@ -93,11 +114,14 @@ impl RPCOperationValueChanged {
        }
        let count = reader.get_count();
        let v_reader = reader.get_value().map_err(RPCError::protocol)?;
+        let watch_id = reader.get_watch_id();
        let value = decode_signed_value_data(&v_reader)?;

        Ok(Self {
            key,
            subkeys,
            count,
+            watch_id,
            value,
        })
    }
@@ -121,6 +145,7 @@ impl RPCOperationValueChanged {
        }

        builder.set_count(self.count);
+        builder.set_watch_id(self.watch_id);

        let mut v_builder = builder.reborrow().init_value();
        encode_signed_value_data(&self.value, &mut v_builder)?;
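Since capnp integers have no optional form, a watch id of zero is the wire sentinel for "absent": ValueChanged::new() rejects zero outright, while WatchValueQ below maps zero back to None when decoding. The convention in isolation, as a small sketch:

// Wire convention used by these operations: 0 means "no watch id".
fn encode_watch_id(watch_id: Option<u64>) -> u64 {
    watch_id.unwrap_or(0)
}

fn decode_watch_id(raw: u64) -> Option<u64> {
    if raw != 0 {
        Some(raw)
    } else {
        None
    }
}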
@@ -1,6 +1,6 @@
use super::*;

-const MAX_WATCH_VALUE_Q_SUBKEYS_LEN: usize = 512;
+const MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
const MAX_WATCH_VALUE_A_PEERS_LEN: usize = 20;

#[derive(Debug, Clone)]
@@ -9,6 +9,7 @@ pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
    subkeys: ValueSubkeyRangeSet,
    expiration: u64,
    count: u32,
+    watch_id: Option<u64>,
    watcher: PublicKey,
    signature: Signature,
}
@@ -20,18 +21,20 @@ impl RPCOperationWatchValueQ {
        subkeys: ValueSubkeyRangeSet,
        expiration: u64,
        count: u32,
+        watch_id: Option<u64>,
        watcher: KeyPair,
        vcrypto: CryptoSystemVersion,
    ) -> Result<Self, RPCError> {
-        // Needed because RangeSetBlaze uses different types here all the time
-        #[allow(clippy::unnecessary_cast)]
-        let subkeys_len = subkeys.ranges_len() as usize;
-
-        if subkeys_len > MAX_WATCH_VALUE_Q_SUBKEYS_LEN {
+        if subkeys.ranges_len() > MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN {
            return Err(RPCError::protocol("WatchValueQ subkeys length too long"));
        }

-        let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count);
+        // A count of zero means cancelling, so there should always be a watch id in this case
+        if count == 0 && watch_id.is_none() {
+            return Err(RPCError::protocol("can't cancel zero watch id"));
+        }
+
+        let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count, watch_id);
        let signature = vcrypto
            .sign(&watcher.key, &watcher.secret, &signature_data)
            .map_err(RPCError::protocol)?;
@@ -41,6 +44,7 @@ impl RPCOperationWatchValueQ {
            subkeys,
            expiration,
            count,
+            watch_id,
            watcher: watcher.key,
            signature,
        })
@@ -52,12 +56,12 @@ impl RPCOperationWatchValueQ {
        subkeys: &ValueSubkeyRangeSet,
        expiration: u64,
        count: u32,
+        watch_id: Option<u64>,
    ) -> Vec<u8> {
        // Needed because RangeSetBlaze uses different types here all the time
        #[allow(clippy::unnecessary_cast)]
-        let subkeys_len = subkeys.ranges_len() as usize;
+        let subkeys_ranges_len = subkeys.ranges_len();

-        let mut sig_data = Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (subkeys_len * 8) + 8 + 4);
+        let mut sig_data =
+            Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (subkeys_ranges_len * 8) + 8 + 8);
        sig_data.extend_from_slice(&key.kind.0);
        sig_data.extend_from_slice(&key.value.bytes);
        for sk in subkeys.ranges() {
@@ -66,6 +70,9 @@ impl RPCOperationWatchValueQ {
        }
        sig_data.extend_from_slice(&expiration.to_le_bytes());
        sig_data.extend_from_slice(&count.to_le_bytes());
+        if let Some(watch_id) = watch_id {
+            sig_data.extend_from_slice(&watch_id.to_le_bytes());
+        }
        sig_data
    }

@@ -74,11 +81,22 @@ impl RPCOperationWatchValueQ {
            return Err(RPCError::protocol("unsupported cryptosystem"));
        };

-        let sig_data =
-            Self::make_signature_data(&self.key, &self.subkeys, self.expiration, self.count);
+        let sig_data = Self::make_signature_data(
+            &self.key,
+            &self.subkeys,
+            self.expiration,
+            self.count,
+            self.watch_id,
+        );
        vcrypto
            .verify(&self.watcher, &sig_data, &self.signature)
            .map_err(RPCError::protocol)?;

+        // A count of zero means cancelling, so there should always be a watch id in this case
+        if self.count == 0 && self.watch_id.is_none() {
+            return Err(RPCError::protocol("can't cancel zero watch id"));
+        }
+
        Ok(())
    }

@@ -102,6 +120,11 @@ impl RPCOperationWatchValueQ {
        self.count
    }

+    #[allow(dead_code)]
+    pub fn watch_id(&self) -> Option<u64> {
+        self.watch_id
+    }
+
    #[allow(dead_code)]
    pub fn watcher(&self) -> &PublicKey {
        &self.watcher
@@ -118,6 +141,7 @@ impl RPCOperationWatchValueQ {
        ValueSubkeyRangeSet,
        u64,
        u32,
+        Option<u64>,
        PublicKey,
        Signature,
    ) {
@@ -126,6 +150,7 @@ impl RPCOperationWatchValueQ {
        self.subkeys,
        self.expiration,
        self.count,
+        self.watch_id,
        self.watcher,
        self.signature,
    )
@@ -138,8 +163,8 @@ impl RPCOperationWatchValueQ {
        let key = decode_typed_key(&k_reader)?;

        let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?;
-        if sk_reader.len() as usize > MAX_WATCH_VALUE_Q_SUBKEYS_LEN {
-            return Err(RPCError::protocol("WatchValueQ subkeys length too long"));
+        if sk_reader.len() as usize > MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN {
+            return Err(RPCError::protocol("WatchValueQ too many subkey ranges"));
        }
        let mut subkeys = ValueSubkeyRangeSet::new();
        for skr in sk_reader.iter() {
@@ -159,6 +184,11 @@ impl RPCOperationWatchValueQ {

        let expiration = reader.get_expiration();
        let count = reader.get_count();
+        let watch_id = if reader.get_watch_id() != 0 {
+            Some(reader.get_watch_id())
+        } else {
+            None
+        };

        let w_reader = reader.get_watcher().map_err(RPCError::protocol)?;
        let watcher = decode_key256(&w_reader);
@@ -171,6 +201,7 @@ impl RPCOperationWatchValueQ {
            subkeys,
            expiration,
            count,
+            watch_id,
            watcher,
            signature,
        })
@@ -196,6 +227,7 @@ impl RPCOperationWatchValueQ {
        }
        builder.set_expiration(self.expiration);
        builder.set_count(self.count);
+        builder.set_watch_id(self.watch_id.unwrap_or(0u64));

        let mut w_builder = builder.reborrow().init_watcher();
        encode_key256(&self.watcher, &mut w_builder);
@@ -207,19 +239,33 @@ impl RPCOperationWatchValueQ {
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationWatchValueA {
+    accepted: bool,
    expiration: u64,
    peers: Vec<PeerInfo>,
+    watch_id: u64,
}

impl RPCOperationWatchValueA {
    #[allow(dead_code)]
-    pub fn new(expiration: u64, peers: Vec<PeerInfo>) -> Result<Self, RPCError> {
+    pub fn new(
+        accepted: bool,
+        expiration: u64,
+        peers: Vec<PeerInfo>,
+        watch_id: u64,
+    ) -> Result<Self, RPCError> {
        if peers.len() > MAX_WATCH_VALUE_A_PEERS_LEN {
            return Err(RPCError::protocol("WatchValueA peers length too long"));
        }
-        Ok(Self { expiration, peers })
+        Ok(Self {
+            accepted,
+            expiration,
+            peers,
+            watch_id,
+        })
    }

    pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
@@ -227,6 +273,10 @@ impl RPCOperationWatchValueA {
        Ok(())
    }

+    #[allow(dead_code)]
+    pub fn accepted(&self) -> bool {
+        self.accepted
+    }
    #[allow(dead_code)]
    pub fn expiration(&self) -> u64 {
        self.expiration
@@ -236,13 +286,18 @@ impl RPCOperationWatchValueA {
        &self.peers
    }
    #[allow(dead_code)]
-    pub fn destructure(self) -> (u64, Vec<PeerInfo>) {
-        (self.expiration, self.peers)
+    pub fn watch_id(&self) -> u64 {
+        self.watch_id
    }
+    #[allow(dead_code)]
+    pub fn destructure(self) -> (bool, u64, Vec<PeerInfo>, u64) {
+        (self.accepted, self.expiration, self.peers, self.watch_id)
+    }

    pub fn decode(
        reader: &veilid_capnp::operation_watch_value_a::Reader,
    ) -> Result<Self, RPCError> {
+        let accepted = reader.get_accepted();
        let expiration = reader.get_expiration();
        let peers_reader = reader.get_peers().map_err(RPCError::protocol)?;
        if peers_reader.len() as usize > MAX_WATCH_VALUE_A_PEERS_LEN {
@@ -258,13 +313,20 @@ impl RPCOperationWatchValueA {
            let peer_info = decode_peer_info(&p)?;
            peers.push(peer_info);
        }
+        let watch_id = reader.get_watch_id();

-        Ok(Self { expiration, peers })
+        Ok(Self {
+            accepted,
+            expiration,
+            peers,
+            watch_id,
+        })
    }
    pub fn encode(
        &self,
        builder: &mut veilid_capnp::operation_watch_value_a::Builder,
    ) -> Result<(), RPCError> {
+        builder.set_accepted(self.accepted);
        builder.set_expiration(self.expiration);

        let mut peers_builder = builder.reborrow().init_peers(
@@ -277,6 +339,7 @@ impl RPCOperationWatchValueA {
            let mut pi_builder = peers_builder.reborrow().get(i as u32);
            encode_peer_info(peer, &mut pi_builder)?;
        }
+        builder.set_watch_id(self.watch_id);

        Ok(())
    }
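The watcher signature in WatchValueQ covers the key kind, the key bytes, each subkey range as two little-endian u32s, the expiration (u64), the count (u32), and the watch id (u64) when one is present. A standalone sketch of that byte layout, assuming 4-byte kinds and 32-byte keys as in the code above:

// Illustrative mirror of make_signature_data() using plain array types.
fn watch_signature_data(
    kind: [u8; 4],
    key: [u8; 32],
    subkey_ranges: &[(u32, u32)],
    expiration: u64,
    count: u32,
    watch_id: Option<u64>,
) -> Vec<u8> {
    let mut sig_data = Vec::with_capacity(4 + 32 + subkey_ranges.len() * 8 + 8 + 4 + 8);
    sig_data.extend_from_slice(&kind);
    sig_data.extend_from_slice(&key);
    for (start, end) in subkey_ranges {
        sig_data.extend_from_slice(&start.to_le_bytes());
        sig_data.extend_from_slice(&end.to_le_bytes());
    }
    sig_data.extend_from_slice(&expiration.to_le_bytes());
    sig_data.extend_from_slice(&count.to_le_bytes());
    if let Some(watch_id) = watch_id {
        sig_data.extend_from_slice(&watch_id.to_le_bytes());
    }
    sig_data
}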
@@ -20,6 +20,7 @@ impl RPCQuestion {
    pub fn detail(&self) -> &RPCQuestionDetail {
        &self.detail
    }
+    #[cfg(feature = "verbose-tracing")]
    pub fn desc(&self) -> &'static str {
        self.detail.desc()
    }
@@ -49,6 +50,7 @@ pub(in crate::rpc_processor) enum RPCQuestionDetail {
    GetValueQ(Box<RPCOperationGetValueQ>),
    SetValueQ(Box<RPCOperationSetValueQ>),
    WatchValueQ(Box<RPCOperationWatchValueQ>),
+    InspectValueQ(Box<RPCOperationInspectValueQ>),
    #[cfg(feature = "unstable-blockstore")]
    SupplyBlockQ(Box<RPCOperationSupplyBlockQ>),
    #[cfg(feature = "unstable-blockstore")]
@@ -62,6 +64,7 @@ pub(in crate::rpc_processor) enum RPCQuestionDetail {
}

impl RPCQuestionDetail {
+    #[cfg(feature = "verbose-tracing")]
    pub fn desc(&self) -> &'static str {
        match self {
            RPCQuestionDetail::StatusQ(_) => "StatusQ",
@@ -70,6 +73,7 @@ impl RPCQuestionDetail {
            RPCQuestionDetail::GetValueQ(_) => "GetValueQ",
            RPCQuestionDetail::SetValueQ(_) => "SetValueQ",
            RPCQuestionDetail::WatchValueQ(_) => "WatchValueQ",
+            RPCQuestionDetail::InspectValueQ(_) => "InspectValueQ",
            #[cfg(feature = "unstable-blockstore")]
            RPCQuestionDetail::SupplyBlockQ(_) => "SupplyBlockQ",
            #[cfg(feature = "unstable-blockstore")]
@@ -90,6 +94,7 @@ impl RPCQuestionDetail {
            RPCQuestionDetail::GetValueQ(r) => r.validate(validate_context),
            RPCQuestionDetail::SetValueQ(r) => r.validate(validate_context),
            RPCQuestionDetail::WatchValueQ(r) => r.validate(validate_context),
+            RPCQuestionDetail::InspectValueQ(r) => r.validate(validate_context),
            #[cfg(feature = "unstable-blockstore")]
            RPCQuestionDetail::SupplyBlockQ(r) => r.validate(validate_context),
            #[cfg(feature = "unstable-blockstore")]
@@ -138,6 +143,11 @@ impl RPCQuestionDetail {
                let out = RPCOperationWatchValueQ::decode(&op_reader)?;
                RPCQuestionDetail::WatchValueQ(Box::new(out))
            }
+            veilid_capnp::question::detail::InspectValueQ(r) => {
+                let op_reader = r.map_err(RPCError::protocol)?;
+                let out = RPCOperationInspectValueQ::decode(&op_reader)?;
+                RPCQuestionDetail::InspectValueQ(Box::new(out))
+            }
            #[cfg(feature = "unstable-blockstore")]
            veilid_capnp::question::detail::SupplyBlockQ(r) => {
                let op_reader = r.map_err(RPCError::protocol)?;
@@ -184,6 +194,9 @@ impl RPCQuestionDetail {
            RPCQuestionDetail::WatchValueQ(d) => {
                d.encode(&mut builder.reborrow().init_watch_value_q())
            }
+            RPCQuestionDetail::InspectValueQ(d) => {
+                d.encode(&mut builder.reborrow().init_inspect_value_q())
+            }
            #[cfg(feature = "unstable-blockstore")]
            RPCQuestionDetail::SupplyBlockQ(d) => {
                d.encode(&mut builder.reborrow().init_supply_block_q())
@@ -15,6 +15,7 @@ impl RPCStatement {
    pub fn detail(&self) -> &RPCStatementDetail {
        &self.detail
    }
+    #[cfg(feature = "verbose-tracing")]
    pub fn desc(&self) -> &'static str {
        self.detail.desc()
    }
@@ -43,6 +44,7 @@ pub(in crate::rpc_processor) enum RPCStatementDetail {
}

impl RPCStatementDetail {
+    #[cfg(feature = "verbose-tracing")]
    pub fn desc(&self) -> &'static str {
        match self {
            RPCStatementDetail::ValidateDialInfo(_) => "ValidateDialInfo",
@@ -8,6 +8,49 @@ where
    result: Option<Result<R, RPCError>>,
}

+#[derive(Debug, Copy, Clone)]
+pub(crate) enum FanoutResultKind {
+    Timeout,
+    Finished,
+    Exhausted,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct FanoutResult {
+    pub kind: FanoutResultKind,
+    pub value_nodes: Vec<NodeRef>,
+}
+
+pub(crate) fn debug_fanout_result(result: &FanoutResult) -> String {
+    let kc = match result.kind {
+        FanoutResultKind::Timeout => "T",
+        FanoutResultKind::Finished => "F",
+        FanoutResultKind::Exhausted => "E",
+    };
+    format!("{}:{}", kc, result.value_nodes.len())
+}
+
+pub(crate) fn debug_fanout_results(results: &[FanoutResult]) -> String {
+    let mut col = 0;
+    let mut out = String::new();
+    let mut left = results.len();
+    for r in results {
+        if col == 0 {
+            out += " ";
+        }
+        let sr = debug_fanout_result(r);
+        out += &sr;
+        out += ",";
+        col += 1;
+        left -= 1;
+        if col == 32 && left != 0 {
+            col = 0;
+            out += "\n"
+        }
+    }
+    out
+}
+
pub(crate) type FanoutCallReturnType = RPCNetworkResult<Vec<PeerInfo>>;
pub(crate) type FanoutNodeInfoFilter = Arc<dyn Fn(&[TypedKey], &NodeInfo) -> bool + Send + Sync>;

@@ -158,8 +201,7 @@ where
            #[allow(unused_variables)]
            Ok(x) => {
                // Call failed, node will not be considered again
-                #[cfg(feature = "network-result-extra")]
-                log_rpc!(debug "Fanout result {}: {:?}", &next_node, x);
+                log_network_result!(debug "Fanout result {}: {:?}", &next_node, x);
            }
            Err(e) => {
                // Error happened, abort everything and return the error
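debug_fanout_results() exists purely for log readability: each fanout outcome compresses to a kind letter plus the number of value-holding nodes, 32 entries per row. A quick illustration of what it produces (NodeRef construction is elided here, so value_nodes stays empty):

let results = vec![
    FanoutResult { kind: FanoutResultKind::Finished, value_nodes: vec![] },
    FanoutResult { kind: FanoutResultKind::Timeout, value_nodes: vec![] },
];
// Yields something like " F:0,T:0," on a single row.
let summary = debug_fanout_results(&results);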
@@ -8,6 +8,7 @@ mod rpc_app_message;
mod rpc_error;
mod rpc_find_node;
mod rpc_get_value;
+mod rpc_inspect_value;
mod rpc_return_receipt;
mod rpc_route;
mod rpc_set_value;
@@ -168,14 +169,14 @@ pub(crate) struct RPCMessage {
    opt_sender_nr: Option<NodeRef>,
}

+#[instrument(skip_all, err)]
pub fn builder_to_vec<'a, T>(builder: capnp::message::Builder<T>) -> Result<Vec<u8>, RPCError>
where
    T: capnp::message::Allocator + 'a,
{
    let mut buffer = vec![];
    capnp::serialize_packed::write_message(&mut buffer, &builder)
-        .map_err(RPCError::protocol)
-        .map_err(logthru_rpc!())?;
+        .map_err(RPCError::protocol)?;
    Ok(buffer)
}

@@ -373,7 +374,7 @@ impl RPCProcessor {

    #[instrument(level = "debug", skip_all, err)]
    pub async fn startup(&self) -> EyreResult<()> {
-        debug!("startup rpc processor");
+        log_rpc!(debug "startup rpc processor");
        {
            let mut inner = self.inner.lock();

@@ -382,7 +383,7 @@ impl RPCProcessor {
            inner.stop_source = Some(StopSource::new());

            // spin up N workers
-            trace!(
+            log_rpc!(
                "Spinning up {} RPC workers",
                self.unlocked_inner.concurrency
            );
@@ -408,7 +409,7 @@ impl RPCProcessor {

    #[instrument(level = "debug", skip_all)]
    pub async fn shutdown(&self) {
-        debug!("starting rpc processor shutdown");
+        log_rpc!(debug "starting rpc processor shutdown");

        // Stop storage manager from using us
        self.storage_manager.set_rpc_processor(None).await;
@@ -424,17 +425,17 @@ impl RPCProcessor {
            // drop the stop
            drop(inner.stop_source.take());
        }
-        debug!("stopping {} rpc worker tasks", unord.len());
+        log_rpc!(debug "stopping {} rpc worker tasks", unord.len());

        // Wait for them to complete
        while unord.next().await.is_some() {}

-        debug!("resetting rpc processor state");
+        log_rpc!(debug "resetting rpc processor state");

        // Release the rpc processor
        *self.inner.lock() = Self::new_inner();

-        debug!("finished rpc processor shutdown");
+        log_rpc!(debug "finished rpc processor shutdown");
    }

    //////////////////////////////////////////////////////////////////////
@@ -470,6 +471,11 @@ impl RPCProcessor {
    ) -> TimeoutOr<Result<Option<NodeRef>, RPCError>> {
        let routing_table = self.routing_table();

+        // Ignore own node
+        if routing_table.matches_own_node_id(&[node_id]) {
+            return TimeoutOr::Value(Err(RPCError::network("can't search for own node id")));
+        }
+
        // Routine to call to generate fanout
        let call_routine = |next_node: NodeRef| {
            let this = self.clone();
@@ -610,6 +616,20 @@ impl RPCProcessor {
        // Reply received
        let recv_ts = get_aligned_timestamp();

+        // Ensure the reply comes over the private route that was requested
+        if let Some(reply_private_route) = waitable_reply.reply_private_route {
+            match &rpcreader.header.detail {
+                RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {
+                    return Err(RPCError::protocol("should have received reply over private route"));
+                }
+                RPCMessageHeaderDetail::PrivateRouted(pr) => {
+                    if pr.private_route != reply_private_route {
+                        return Err(RPCError::protocol("received reply over the wrong private route"));
+                    }
+                }
+            };
+        }
+
        // Record answer received
        self.record_answer_received(
            waitable_reply.send_ts,
@@ -918,12 +938,12 @@ impl RPCProcessor {
        // If safety route was in use, record failure to send there
        if let Some(sr_pubkey) = &safety_route {
            let rss = self.routing_table.route_spec_store();
-            rss.with_route_stats(send_ts, sr_pubkey, |s| s.record_send_failed());
+            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| s.record_send_failed());
        } else {
            // If no safety route was in use, then it's the private route's fault if we have one
            if let Some(pr_pubkey) = &remote_private_route {
                let rss = self.routing_table.route_spec_store();
-                rss.with_route_stats(send_ts, pr_pubkey, |s| s.record_send_failed());
+                rss.with_route_stats_mut(send_ts, pr_pubkey, |s| s.record_send_failed());
            }
        }
    }
@@ -952,19 +972,19 @@ impl RPCProcessor {
        // If safety route was used, record question lost there
        if let Some(sr_pubkey) = &safety_route {
            let rss = self.routing_table.route_spec_store();
-            rss.with_route_stats(send_ts, sr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                s.record_question_lost();
            });
        }
        // If remote private route was used, record question lost there
        if let Some(rpr_pubkey) = &remote_private_route {
-            rss.with_route_stats(send_ts, rpr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| {
                s.record_question_lost();
            });
        }
        // If private route was used, record question lost there
        if let Some(pr_pubkey) = &private_route {
-            rss.with_route_stats(send_ts, pr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                s.record_question_lost();
            });
        }
@@ -998,7 +1018,7 @@ impl RPCProcessor {

        // If safety route was used, record send there
        if let Some(sr_pubkey) = &safety_route {
-            rss.with_route_stats(send_ts, sr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                s.record_sent(send_ts, bytes);
            });
        }
@@ -1006,7 +1026,7 @@ impl RPCProcessor {
        // If remote private route was used, record send there
        if let Some(pr_pubkey) = &remote_private_route {
            let rss = self.routing_table.route_spec_store();
-            rss.with_route_stats(send_ts, pr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                s.record_sent(send_ts, bytes);
            });
        }
@@ -1039,7 +1059,7 @@ impl RPCProcessor {

        // If safety route was used, record route there
        if let Some(sr_pubkey) = &safety_route {
-            rss.with_route_stats(send_ts, sr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                // If we received an answer, the safety route we sent over can be considered tested
                s.record_tested(recv_ts);

@@ -1050,7 +1070,7 @@ impl RPCProcessor {

        // If local private route was used, record route there
        if let Some(pr_pubkey) = &reply_private_route {
-            rss.with_route_stats(send_ts, pr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                // Record received bytes
                s.record_received(recv_ts, bytes);

@@ -1061,7 +1081,7 @@ impl RPCProcessor {

        // If remote private route was used, record there
        if let Some(rpr_pubkey) = &remote_private_route {
-            rss.with_route_stats(send_ts, rpr_pubkey, |s| {
+            rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| {
                // Record received bytes
                s.record_received(recv_ts, bytes);

@@ -1086,12 +1106,12 @@ impl RPCProcessor {
            // then we must have received with a local private route too, per the design rules
            if let Some(sr_pubkey) = &safety_route {
                let rss = self.routing_table.route_spec_store();
-                rss.with_route_stats(send_ts, sr_pubkey, |s| {
+                rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                    s.record_latency(total_latency / 2u64);
                });
            }
            if let Some(pr_pubkey) = &reply_private_route {
-                rss.with_route_stats(send_ts, pr_pubkey, |s| {
+                rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                    s.record_latency(total_latency / 2u64);
                });
            }
@@ -1117,7 +1137,7 @@ impl RPCProcessor {

            // This may record nothing if the remote safety route is not also
            // a remote private route that has been imported, but that's okay
-            rss.with_route_stats(recv_ts, &d.remote_safety_route, |s| {
+            rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| {
                s.record_received(recv_ts, bytes);
            });
        }
@@ -1129,12 +1149,12 @@ impl RPCProcessor {
            // a remote private route that has been imported, but that's okay
            // it could also be a node id if no remote safety route was used
            // in which case this also will do nothing
-            rss.with_route_stats(recv_ts, &d.remote_safety_route, |s| {
+            rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| {
                s.record_received(recv_ts, bytes);
            });

            // Record for our local private route we received over
-            rss.with_route_stats(recv_ts, &d.private_route, |s| {
+            rss.with_route_stats_mut(recv_ts, &d.private_route, |s| {
                s.record_received(recv_ts, bytes);
            });
        }
@@ -1403,6 +1423,7 @@ impl RPCProcessor {
    /// Decoding RPC from the wire
    /// This performs a capnp decode on the data, and if it passes the capnp schema
    /// it performs the cryptographic validation required to pass the operation up for processing
+    #[instrument(skip_all)]
    fn decode_rpc_operation(
        &self,
        encoded_msg: &RPCMessageEncoded,
@@ -1410,8 +1431,7 @@ impl RPCProcessor {
        let reader = encoded_msg.data.get_reader()?;
        let op_reader = reader
            .get_root::<veilid_capnp::operation::Reader>()
-            .map_err(RPCError::protocol)
-            .map_err(logthru_rpc!())?;
+            .map_err(RPCError::protocol)?;
        let mut operation = RPCOperation::decode(&op_reader)?;

        // Validate the RPC message
@@ -1420,7 +1440,12 @@ impl RPCProcessor {
        Ok(operation)
    }

-    /// Cryptographic RPC validation
+    /// Cryptographic RPC validation and sanitization
+    ///
+    /// This code may modify the RPC operation to remove elements that are inappropriate for this node
+    /// or reject the RPC operation entirely. For example, PeerInfo in fanout peer lists may be
+    /// removed if they are deemed inappropriate for this node, without rejecting the entire operation.
+    ///
    /// We do this as part of the RPC network layer to ensure that any RPC operations that are
    /// processed have already been validated cryptographically and it is not the job of the
    /// caller or receiver. This does not mean the operation is 'semantically correct'. For
@@ -1467,15 +1492,29 @@ impl RPCProcessor {
        let sender_node_id = detail.envelope.get_sender_typed_id();

        // Decode and validate the RPC operation
-        let operation = match self.decode_rpc_operation(&encoded_msg) {
+        let decode_res = self.decode_rpc_operation(&encoded_msg);
+        let operation = match decode_res {
            Ok(v) => v,
            Err(e) => {
-                // Punish nodes that send direct undecodable crap
-                if matches!(e, RPCError::Protocol(_) | RPCError::InvalidFormat(_)) {
-                    address_filter.punish_node_id(sender_node_id);
-                }
+                match e {
+                    // Invalid messages that should be punished
+                    RPCError::Protocol(_) | RPCError::InvalidFormat(_) => {
+                        log_rpc!(debug "Invalid RPC Operation: {}", e);
+
+                        // Punish nodes that send direct undecodable crap
+                        address_filter.punish_node_id(sender_node_id);
+                    },
+                    // Ignored messages that should be dropped
+                    RPCError::Ignore(_) | RPCError::Network(_) | RPCError::TryAgain(_) => {
+                        log_rpc!(debug "Dropping RPC Operation: {}", e);
+                    },
+                    // Internal errors that deserve louder logging
+                    RPCError::Unimplemented(_) | RPCError::Internal(_) => {
+                        log_rpc!(error "Error decoding RPC operation: {}", e);
+                    }
+                };
                return Ok(NetworkResult::invalid_message(e));
            }
        };

        // Get the routing domain this message came over
@@ -1547,7 +1586,10 @@ impl RPCProcessor {
        let operation = match self.decode_rpc_operation(&encoded_msg) {
            Ok(v) => v,
            Err(e) => {
-                // Punish routes that send routed undecodable crap
+                // Debug on error
+                log_rpc!(debug "Dropping RPC operation: {}", e);
+
+                // XXX: Punish routes that send routed undecodable crap
                // address_filter.punish_route_id(xxx);
                return Ok(NetworkResult::invalid_message(e));
            }
@@ -1563,30 +1605,36 @@ impl RPCProcessor {
        };

        // Process stats for questions/statements received
-        let kind = match msg.operation.kind() {
+        match msg.operation.kind() {
            RPCOperationKind::Question(_) => {
                self.record_question_received(&msg);

                if let Some(sender_nr) = msg.opt_sender_nr.clone() {
                    sender_nr.stats_question_rcvd(msg.header.timestamp, msg.header.body_len);
                }
-                "question"
+
+                // Log rpc receive
+                #[cfg(feature = "verbose-tracing")]
+                debug!(target: "rpc_message", dir = "recv", kind = "question", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header);
            }
            RPCOperationKind::Statement(_) => {
                if let Some(sender_nr) = msg.opt_sender_nr.clone() {
                    sender_nr.stats_question_rcvd(msg.header.timestamp, msg.header.body_len);
                }
-                "statement"
+
+                // Log rpc receive
+                #[cfg(feature = "verbose-tracing")]
+                debug!(target: "rpc_message", dir = "recv", kind = "statement", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header);
            }
            RPCOperationKind::Answer(_) => {
                // Answer stats are processed in wait_for_reply
-                "answer"
+
+                // Log rpc receive
+                #[cfg(feature = "verbose-tracing")]
+                debug!(target: "rpc_message", dir = "recv", kind = "answer", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header);
            }
        };

-        // Log rpc receive
-        trace!(target: "rpc_message", dir = "recv", kind, op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header);
-
        // Process specific message kind
        match msg.operation.kind() {
            RPCOperationKind::Question(q) => match q.detail() {
@@ -1596,6 +1644,7 @@ impl RPCProcessor {
                RPCQuestionDetail::GetValueQ(_) => self.process_get_value_q(msg).await,
                RPCQuestionDetail::SetValueQ(_) => self.process_set_value_q(msg).await,
                RPCQuestionDetail::WatchValueQ(_) => self.process_watch_value_q(msg).await,
+                RPCQuestionDetail::InspectValueQ(_) => self.process_inspect_value_q(msg).await,
                #[cfg(feature = "unstable-blockstore")]
                RPCQuestionDetail::SupplyBlockQ(_) => self.process_supply_block_q(msg).await,
                #[cfg(feature = "unstable-blockstore")]
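The decode failure path above now triages by error variant: protocol-level garbage punishes the sender, ignorable or transient errors are silently dropped, and internal errors get loud logging. The same three-way policy in isolation, sketched with a stand-in error type rather than the real RPCError:

enum DecodeError {
    Protocol(String),
    Ignore(String),
    Internal(String),
}

enum Action {
    PunishSender,
    Drop,
    LogInternalError,
}

// Stand-in for the match in the message processor: classify, then act.
fn triage(e: &DecodeError) -> Action {
    match e {
        DecodeError::Protocol(_) => Action::PunishSender,
        DecodeError::Ignore(_) => Action::Drop,
        DecodeError::Internal(_) => Action::LogInternalError,
    }
}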
@@ -116,7 +116,7 @@ where
    pub fn get_op_context(&self, op_id: OperationId) -> Result<C, RPCError> {
        let inner = self.inner.lock();
        let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else {
-            return Err(RPCError::internal("Missing operation id getting op context"));
+            return Err(RPCError::ignore("Missing operation id getting op context"));
        };
        Ok(waiting_op.context.clone())
    }
@@ -55,6 +55,8 @@ impl RPCProcessor {
        )))
    }

+    ////////////////////////////////////////////////////////////////////////////////////////////////
+
    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
    pub(crate) async fn process_app_call_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
        // Ignore if disabled
@@ -71,6 +73,24 @@ impl RPCProcessor {
            ));
        }

+        // Get the private route this came over
+        let opt_pr_pubkey = match &msg.header.detail {
+            RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => None,
+            RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route),
+        };
+        let route_id = if let Some(pr_pubkey) = opt_pr_pubkey {
+            let rss = routing_table.route_spec_store();
+            let Some(route_id) = rss.get_route_id_for_key(&pr_pubkey) else {
+                return Ok(NetworkResult::invalid_message(format!(
+                    "private route does not exist for key: {}",
+                    pr_pubkey
+                )));
+            };
+            Some(route_id)
+        } else {
+            None
+        };
+
        // Get the question
        let (op_id, _, _, kind) = msg.operation.clone().destructure();
        let app_call_q = match kind {
@@ -99,7 +119,7 @@ impl RPCProcessor {
        // Pass the call up through the update callback
        let message_q = app_call_q.destructure();
        (self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(Box::new(VeilidAppCall::new(
-            sender, message_q, op_id,
+            sender, route_id, message_q, op_id,
        ))));

        // Wait for an app call answer to come back from the app
@@ -19,6 +19,8 @@ impl RPCProcessor {
        self.statement(dest, statement).await
    }

+    ////////////////////////////////////////////////////////////////////////////////////////////////
+
    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
    pub(crate) async fn process_app_message(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
        // Ignore if disabled
@@ -34,6 +36,24 @@ impl RPCProcessor {
            ));
        }

+        // Get the private route this came over
+        let opt_pr_pubkey = match &msg.header.detail {
+            RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => None,
+            RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route),
+        };
+        let route_id = if let Some(pr_pubkey) = opt_pr_pubkey {
+            let rss = routing_table.route_spec_store();
+            let Some(route_id) = rss.get_route_id_for_key(&pr_pubkey) else {
+                return Ok(NetworkResult::invalid_message(format!(
+                    "private route does not exist for key: {}",
+                    pr_pubkey
+                )));
+            };
+            Some(route_id)
+        } else {
+            None
+        };
+
        // Get the statement
        let (_, _, _, kind) = msg.operation.destructure();
        let app_message = match kind {
@@ -56,7 +76,7 @@ impl RPCProcessor {
        // Pass the message up through the update callback
        let message = app_message.destructure();
        (self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(Box::new(
-            VeilidAppMessage::new(sender, message),
+            VeilidAppMessage::new(sender, route_id, message),
        )));

        Ok(NetworkResult::value(()))
@@ -15,6 +15,8 @@ pub enum RPCError {
    Network(String),
    #[error("[RPCError: TryAgain({0})]")]
    TryAgain(String),
+    #[error("[RPCError: Ignore({0})]")]
+    Ignore(String),
}

impl RPCError {
@@ -48,6 +50,18 @@ impl RPCError {
    pub fn map_network<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self {
        move |x| Self::Network(format!("{}: {}", message.to_string(), x.to_string()))
    }
+    pub fn try_again<X: ToString>(x: X) -> Self {
+        Self::TryAgain(x.to_string())
+    }
+    pub fn map_try_again<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self {
+        move |x| Self::TryAgain(format!("{}: {}", message.to_string(), x.to_string()))
+    }
+    pub fn ignore<X: ToString>(x: X) -> Self {
+        Self::Ignore(x.to_string())
+    }
+    pub fn map_ignore<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self {
+        move |x| Self::Ignore(format!("{}: {}", message.to_string(), x.to_string()))
+    }
}

impl From<RPCError> for VeilidAPIError {
@@ -59,6 +73,7 @@ impl From<RPCError> for VeilidAPIError {
            RPCError::Internal(message) => VeilidAPIError::Internal { message },
            RPCError::Network(message) => VeilidAPIError::Generic { message },
            RPCError::TryAgain(message) => VeilidAPIError::TryAgain { message },
+            RPCError::Ignore(message) => VeilidAPIError::Generic { message },
        }
    }
}
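The map_* helpers are combinators for Result::map_err: each prepends a context message and picks the error variant in one step, which is how call sites like `.map_err(RPCError::map_internal("invalid list length"))` elsewhere in this change stay one-liners. A small usage sketch (checked_len is a hypothetical helper, not part of this change):

use std::convert::TryInto;

// Classify a lower-level failure as an internal RPC error with context attached.
fn checked_len(len: usize) -> Result<u32, RPCError> {
    len.try_into()
        .map_err(RPCError::map_internal("invalid list length"))
}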
@@ -84,6 +84,8 @@ impl RPCProcessor {
        )))
    }

+    ////////////////////////////////////////////////////////////////////////////////////////////////
+
    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
    pub(crate) async fn process_find_node_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
        // Ensure this never came over a private route, safety route is okay though
@@ -74,8 +74,7 @@ impl RPCProcessor {
            vcrypto: vcrypto.clone(),
        });

-        #[cfg(feature="debug-dht")]
-        log_rpc!(debug "{}", debug_string);
+        log_dht!(debug "{}", debug_string);

        let waitable_reply = network_result_try!(
            self.question(dest.clone(), question, Some(question_context))
@@ -102,8 +101,7 @@ impl RPCProcessor {
        };

        let (value, peers, descriptor) = get_value_a.destructure();
-        #[cfg(feature="debug-dht")]
-        {
+        if debug_target_enabled!("dht") {
            let debug_string_value = value.as_ref().map(|v| {
                format!(" len={} seq={} writer={}",
                    v.value_data().data().len(),
@@ -126,10 +124,10 @@ impl RPCProcessor {
                dest
            );

-            log_rpc!(debug "{}", debug_string_answer);
+            log_dht!(debug "{}", debug_string_answer);

            let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
-            log_rpc!(debug "Peers: {:#?}", peer_ids);
+            log_dht!(debug "Peers: {:#?}", peer_ids);
        }

        // Validate peers returned are, in fact, closer to the key than the node we sent this to
@@ -168,6 +166,8 @@ impl RPCProcessor {
        )))
    }

+    ////////////////////////////////////////////////////////////////////////////////////////////////
+
    #[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
    pub(crate) async fn process_get_value_q(
        &self,
@@ -213,8 +213,7 @@ impl RPCProcessor {
        let routing_table = self.routing_table();
        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH]));

-        #[cfg(feature="debug-dht")]
-        {
+        if debug_target_enabled!("dht") {
            let debug_string = format!(
                "IN <=== GetValueQ({} #{}{}) <== {}",
                key,
@@ -227,7 +226,7 @@ impl RPCProcessor {
                msg.header.direct_sender_node_id()
            );

-            log_rpc!(debug "{}", debug_string);
+            log_dht!(debug "{}", debug_string);
        }

        // See if we would have accepted this as a set
@@ -235,7 +234,7 @@ impl RPCProcessor {
            let c = self.config.get();
            c.network.dht.set_value_count as usize
        };
-        let (subkey_result_value, subkey_result_descriptor) = if closer_to_key_peers.len() >= set_value_count {
+        let (get_result_value, get_result_descriptor) = if closer_to_key_peers.len() >= set_value_count {
            // Not close enough
            (None, None)
        } else {
@@ -243,16 +242,15 @@ impl RPCProcessor {

            // See if we have this record ourselves
            let storage_manager = self.storage_manager();
-            let subkey_result = network_result_try!(storage_manager
+            let get_result = network_result_try!(storage_manager
                .inbound_get_value(key, subkey, want_descriptor)
                .await
                .map_err(RPCError::internal)?);
-            (subkey_result.value, subkey_result.descriptor)
+            (get_result.opt_value, get_result.opt_descriptor)
        };

-        #[cfg(feature="debug-dht")]
-        {
-            let debug_string_value = subkey_result_value.as_ref().map(|v| {
+        if debug_target_enabled!("dht") {
+            let debug_string_value = get_result_value.as_ref().map(|v| {
                format!(" len={} seq={} writer={}",
                    v.value_data().data().len(),
                    v.value_data().seq(),
@@ -265,7 +263,7 @@ impl RPCProcessor {
                key,
                subkey,
                debug_string_value,
-                if subkey_result_descriptor.is_some() {
+                if get_result_descriptor.is_some() {
                    " +desc"
                } else {
                    ""
@@ -274,14 +272,14 @@ impl RPCProcessor {
                msg.header.direct_sender_node_id()
            );

-            log_rpc!(debug "{}", debug_string_answer);
+            log_dht!(debug "{}", debug_string_answer);
        }

        // Make GetValue answer
        let get_value_a = RPCOperationGetValueA::new(
-            subkey_result_value.map(|x| (*x).clone()),
+            get_result_value.map(|x| (*x).clone()),
            closer_to_key_peers,
-            subkey_result_descriptor.map(|x| (*x).clone()),
+            get_result_descriptor.map(|x| (*x).clone()),
        )?;

        // Send GetValue answer
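Both GetValueQ and InspectValueQ handlers answer with an empty result when set_value_count or more known peers are closer to the key: a node that far out would never have accepted the original set, so it only refers the caller onward. The rule reduces to a one-line predicate, sketched standalone:

// A node serves a record only while it believes it is among the closest
// `set_value_count` nodes to the key that it knows about.
fn should_serve_record(closer_peers_known: usize, set_value_count: usize) -> bool {
    closer_peers_known < set_value_count
}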
269
veilid-core/src/rpc_processor/rpc_inspect_value.rs
Normal file
269
veilid-core/src/rpc_processor/rpc_inspect_value.rs
Normal file
@ -0,0 +1,269 @@
|
||||
use super::*;
|
||||
use crate::storage_manager::SignedValueDescriptor;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct InspectValueAnswer {
|
||||
pub seqs: Vec<ValueSeqNum>,
|
||||
pub peers: Vec<PeerInfo>,
|
||||
pub descriptor: Option<SignedValueDescriptor>,
|
||||
}
|
||||
|
||||
impl RPCProcessor {
|
||||
/// Sends an inspect value request and wait for response
|
||||
/// Can be sent via all methods including relays
|
||||
/// Safety routes may be used, but never private routes.
|
||||
/// Because this leaks information about the identity of the node itself,
|
||||
/// replying to this request received over a private route will leak
|
||||
/// the identity of the node and defeat the private route.
|
||||
/// The number of subkey sequence numbers returned may either be:
|
||||
/// * the amount requested
|
||||
/// * an amount truncated to MAX_INSPECT_VALUE_A_SEQS_LEN subkeys
|
||||
/// * zero if nothing was found
|
||||
#[cfg_attr(
|
||||
feature = "verbose-tracing",
|
||||
instrument(level = "trace", skip(self, last_descriptor),
|
||||
fields(ret.value.data.len,
|
||||
ret.seqs,
|
||||
ret.peers.len,
|
||||
ret.latency
|
||||
),err)
|
||||
)]
|
||||
pub async fn rpc_call_inspect_value(
|
||||
self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
last_descriptor: Option<SignedValueDescriptor>,
|
||||
) -> RPCNetworkResult<Answer<InspectValueAnswer>> {
|
||||
// Ensure destination never has a private route
|
||||
// and get the target noderef so we can validate the response
|
||||
let Some(target) = dest.node() else {
|
||||
return Err(RPCError::internal(
|
||||
"Never send get value requests over private routes",
|
||||
));
|
||||
};
|
||||
|
||||
// Get the target node id
|
||||
let Some(vcrypto) = self.crypto.get(key.kind) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target.node_ids().get(key.kind) else {
|
||||
return Err(RPCError::internal("No node id for crypto kind"));
|
||||
};
|
||||
|
||||
let debug_string = format!(
|
||||
"OUT ==> InspectValueQ({} #{}{}) => {}",
|
||||
key,
|
||||
&subkeys,
|
||||
if last_descriptor.is_some() {
|
||||
" +lastdesc"
|
||||
} else {
|
||||
""
|
||||
},
|
||||
dest
|
||||
);
|
||||
|
||||
// Send the inspectvalue question
|
||||
let inspect_value_q = RPCOperationInspectValueQ::new(key, subkeys.clone(), last_descriptor.is_none())?;
|
||||
let question = RPCQuestion::new(
|
||||
network_result_try!(self.get_destination_respond_to(&dest)?),
|
||||
RPCQuestionDetail::InspectValueQ(Box::new(inspect_value_q)),
|
||||
);
|
||||
|
||||
let question_context = QuestionContext::InspectValue(ValidateInspectValueContext {
|
||||
last_descriptor,
|
||||
subkeys,
|
||||
vcrypto: vcrypto.clone(),
|
||||
});
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
|
||||
let waitable_reply = network_result_try!(
|
||||
self.question(dest.clone(), question, Some(question_context))
|
||||
.await?
|
||||
);
|
||||
|
||||
// Keep the reply private route that was used to return with the answer
|
||||
let reply_private_route = waitable_reply.reply_private_route;
|
||||
|
||||
// Wait for reply
|
||||
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
|
||||
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
|
||||
TimeoutOr::Value(v) => v,
|
||||
};
|
||||
|
||||
// Get the right answer type
|
||||
let (_, _, _, kind) = msg.operation.destructure();
|
||||
let inspect_value_a = match kind {
|
||||
RPCOperationKind::Answer(a) => match a.destructure() {
|
||||
RPCAnswerDetail::InspectValueA(a) => a,
|
||||
_ => return Ok(NetworkResult::invalid_message("not an inspectvalue answer")),
|
||||
},
|
||||
_ => return Ok(NetworkResult::invalid_message("not an answer")),
|
||||
};
|
||||
|
||||
let (seqs, peers, descriptor) = inspect_value_a.destructure();
|
||||
if debug_target_enabled!("dht") {
|
||||
let debug_string_answer = format!(
|
||||
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
|
||||
key,
|
||||
if descriptor.is_some() {
|
||||
" +desc"
|
||||
} else {
|
||||
""
|
||||
},
|
||||
peers.len(),
|
||||
dest,
|
||||
debug_seqs(&seqs)
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
|
||||
let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
|
||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
"missing cryptosystem in peers node ids: {}",
|
e
)));
}
};
if !valid {
    return Ok(NetworkResult::invalid_message("non-closer peers returned"));
}

#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.latency", latency.as_u64());
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.seqs", seqs);
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.peers.len", peers.len());

Ok(NetworkResult::value(Answer::new(
    latency,
    reply_private_route,
    InspectValueAnswer {
        seqs,
        peers,
        descriptor,
    },
)))
}

////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_inspect_value_q(
    &self,
    msg: RPCMessage,
) -> RPCNetworkResult<()> {
    // Ensure this never came over a private route; a safety route is okay though
    match &msg.header.detail {
        RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {}
        RPCMessageHeaderDetail::PrivateRouted(_) => {
            return Ok(NetworkResult::invalid_message(
                "not processing inspect value request over private route",
            ))
        }
    }
    // Ignore if disabled
    let routing_table = self.routing_table();
    let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
    if !opi
        .signed_node_info()
        .node_info()
        .has_capability(CAP_DHT)
    {
        return Ok(NetworkResult::service_unavailable(
            "dht is not available",
        ));
    }

    // Get the question
    let kind = msg.operation.kind().clone();
    let inspect_value_q = match kind {
        RPCOperationKind::Question(q) => match q.destructure() {
            (_, RPCQuestionDetail::InspectValueQ(q)) => q,
            _ => panic!("not an inspectvalue question"),
        },
        _ => panic!("not a question"),
    };

    // Destructure
    let (key, subkeys, want_descriptor) = inspect_value_q.destructure();

    // Get the nodes that we know about that are closer to the key than our own node
    let routing_table = self.routing_table();
    let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH]));

    if debug_target_enabled!("dht") {
        let debug_string = format!(
            "IN <=== InspectValueQ({} {}{}) <== {}",
            key,
            subkeys,
            if want_descriptor {
                " +wantdesc"
            } else {
                ""
            },
            msg.header.direct_sender_node_id()
        );

        log_dht!(debug "{}", debug_string);
    }

    // See if we would have accepted this as a set
    let set_value_count = {
        let c = self.config.get();
        c.network.dht.set_value_count as usize
    };
    let (inspect_result_seqs, inspect_result_descriptor) = if closer_to_key_peers.len() >= set_value_count {
        // Not close enough
        (Vec::new(), None)
    } else {
        // Close enough, let's get it

        // See if we have this record ourselves
        let storage_manager = self.storage_manager();
        let inspect_result = network_result_try!(storage_manager
            .inbound_inspect_value(key, subkeys, want_descriptor)
            .await
            .map_err(RPCError::internal)?);
        (inspect_result.seqs, inspect_result.opt_descriptor)
    };

    if debug_target_enabled!("dht") {
        let debug_string_answer = format!(
            "IN ===> InspectValueA({} {:?}{} peers={}) ==> {}",
            key,
            inspect_result_seqs,
            if inspect_result_descriptor.is_some() {
                " +desc"
            } else {
                ""
            },
            closer_to_key_peers.len(),
            msg.header.direct_sender_node_id()
        );

        log_dht!(debug "{}", debug_string_answer);
    }

    // Make InspectValue answer
    let inspect_value_a = RPCOperationInspectValueA::new(
        inspect_result_seqs,
        closer_to_key_peers,
        inspect_result_descriptor.map(|x| (*x).clone()),
    )?;

    // Send InspectValue answer
    self.answer(msg, RPCAnswer::new(RPCAnswerDetail::InspectValueA(Box::new(inspect_value_a))))
        .await
}
}
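The acceptance rule in process_inspect_value_q above is the same closeness test the DHT uses for sets: a node serves inspection data only when fewer than set_value_count known peers are closer to the key; otherwise it answers with referrals alone. A minimal sketch of just that decision, with the surrounding peer types simplified to plain counts (everything here is illustrative, not the codebase's API):

// Decide whether this node should answer an InspectValue/SetValue/WatchValue
// request with data, based on how many known peers are closer to the key.
fn should_accept(closer_to_key_peers_len: usize, set_value_count: usize) -> bool {
    // If at least set_value_count peers are closer, we are outside the
    // key's neighborhood; answer with the closer peers only.
    closer_to_key_peers_len < set_value_count
}

// e.g. with set_value_count = 5, a node that knows 3 closer peers serves the
// data, while a node that knows 7 closer peers returns only those peers.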
@ -88,8 +88,9 @@ impl RPCProcessor {
        vcrypto: vcrypto.clone(),
    });

    #[cfg(feature="debug-dht")]
    log_rpc!(debug "{}", debug_string);
    if debug_target_enabled!("dht") {
        log_dht!(debug "{}", debug_string);
    }

    let waitable_reply = network_result_try!(
        self.question(dest.clone(), question, Some(question_context))
@ -117,8 +118,7 @@ impl RPCProcessor {

    let (set, value, peers) = set_value_a.destructure();

    #[cfg(feature="debug-dht")]
    {
    if debug_target_enabled!("dht") {
        let debug_string_value = value.as_ref().map(|v| {
            format!(" len={} writer={}",
                v.value_data().data().len(),
@ -141,10 +141,10 @@ impl RPCProcessor {
            dest,
        );

        log_rpc!(debug "{}", debug_string_answer);
        log_dht!(debug "{}", debug_string_answer);

        let peer_ids: Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
        log_rpc!(debug "Peers: {:#?}", peer_ids);
        log_dht!(debug "Peers: {:#?}", peer_ids);
    }

    // Validate peers returned are, in fact, closer to the key than the node we sent this to
@ -181,6 +181,8 @@ impl RPCProcessor {
    )))
}

////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_set_value_q(
    &self,
@ -270,8 +272,7 @@ impl RPCProcessor {
        (true, new_value)
    };

    #[cfg(feature="debug-dht")]
    {
    if debug_target_enabled!("dht") {
        let debug_string_value = new_value.as_ref().map(|v| {
            format!(" len={} seq={} writer={}",
                v.value_data().data().len(),
@ -294,7 +295,7 @@ impl RPCProcessor {
            msg.header.direct_sender_node_id()
        );

        log_rpc!(debug "{}", debug_string_answer);
        log_dht!(debug "{}", debug_string_answer);
    }

    // Make SetValue answer

@ -32,6 +32,8 @@ impl RPCProcessor {
    self.statement(dest, statement).await
}

////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_signal(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
    // Ignore if disabled

@ -200,6 +200,8 @@ impl RPCProcessor {
    )))
}

////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_status_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
    // Get the question

@ -56,6 +56,8 @@ impl RPCProcessor {
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_validate_dial_info(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
    let routing_table = self.routing_table();
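A recurring change across the hunks above is replacing compile-time #[cfg(feature = "debug-dht")] blocks with a runtime debug_target_enabled!("dht") check, so DHT debug output can be toggled without rebuilding while still skipping the cost of building debug strings when disabled. A hedged sketch of the pattern; the real debug_target_enabled!/log_dht! macros belong to this codebase, and the function and environment variable below are stand-ins invented for illustration:

// Stand-in for the codebase's debug_target_enabled! macro; imagine this
// consults the active log filter configuration at runtime.
fn debug_target_enabled(target: &str) -> bool {
    std::env::var("VEILID_DEBUG_TARGETS").map_or(false, |t| t.contains(target))
}

fn log_answer(key: &str, peer_count: usize) {
    // Build the (potentially expensive) debug string only when enabled,
    // instead of compiling the whole block in or out.
    if debug_target_enabled("dht") {
        let debug_string = format!("IN ===> Answer({} peers={})", key, peer_count);
        println!("{}", debug_string); // stand-in for log_dht!(debug ...)
    }
}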
@ -14,9 +14,16 @@ impl RPCProcessor {
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        count: u32,
        watch_id: u64,
        value: SignedValueData,
    ) -> RPCNetworkResult<()> {
        let value_changed = RPCOperationValueChanged::new(key, subkeys, count, value)?;
        // Ensure destination is never using a safety route
        if matches!(dest.get_safety_selection(), SafetySelection::Safe(_)) {
            return Err(RPCError::internal(
                "Never send value changes over safety routes",
            ));
        }
        let value_changed = RPCOperationValueChanged::new(key, subkeys, count, watch_id, value)?;
        let statement =
            RPCStatement::new(RPCStatementDetail::ValueChanged(Box::new(value_changed)));

@ -24,10 +31,12 @@ impl RPCProcessor {
        self.statement(dest, statement).await
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////

    pub(crate) async fn process_value_changed(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
        // Get the statement
        let (_, _, _, kind) = msg.operation.destructure();
        let (key, subkeys, count, value) = match kind {
        let (key, subkeys, count, watch_id, value) = match kind {
            RPCOperationKind::Statement(s) => match s.destructure() {
                RPCStatementDetail::ValueChanged(s) => s.destructure(),
                _ => panic!("not a value changed statement"),
@ -35,8 +44,25 @@ impl RPCProcessor {
            _ => panic!("not a statement"),
        };

        #[cfg(feature = "debug-dht")]
        {
        // Get the inbound node id if this came in directly
        // If this was received over just a safety route, ignore it
        // If this was received over a private route, the inbound node id could be either the actual
        // node id, or a safety route (can't tell if a stub was used).
        // Try it as the node id, and the storage manager will reject the
        // value change if it doesn't match the active watch's node id
        let inbound_node_id = match &msg.header.detail {
            RPCMessageHeaderDetail::Direct(d) => d.envelope.get_sender_typed_id(),
            RPCMessageHeaderDetail::SafetyRouted(_) => {
                return Ok(NetworkResult::invalid_message(
                    "not processing value change over safety route",
                ));
            }
            RPCMessageHeaderDetail::PrivateRouted(p) => {
                TypedKey::new(p.direct.envelope.get_crypto_kind(), p.remote_safety_route)
            }
        };

        if debug_target_enabled!("dht") {
            let debug_string_value = format!(
                " len={} seq={} writer={}",
                value.value_data().data().len(),
@ -45,21 +71,30 @@ impl RPCProcessor {
            );

            let debug_string_stmt = format!(
                "IN <== ValueChanged({} #{:?}+{}{}) <= {}",
                "IN <== ValueChanged(id={} {} #{:?}+{}{}) from {} <= {}",
                watch_id,
                key,
                subkeys,
                count,
                debug_string_value,
                msg.header.direct_sender_node_id()
                inbound_node_id,
                msg.header.direct_sender_node_id(),
            );

            log_rpc!(debug "{}", debug_string_stmt);
            log_dht!(debug "{}", debug_string_stmt);
        }

        // Save the subkey, creating a new record if necessary
        let storage_manager = self.storage_manager();
        storage_manager
            .inbound_value_changed(key, subkeys, count, Arc::new(value))
            .inbound_value_changed(
                key,
                subkeys,
                count,
                Arc::new(value),
                inbound_node_id,
                watch_id,
            )
            .await
            .map_err(RPCError::internal)?;
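The new inbound_node_id logic above decides which identity a ValueChanged notification is attributed to: over a private route the sender can only be identified by its remote safety route key, and the storage manager later rejects the change if that identity does not match the active watch. A simplified sketch of the three receive paths, with the header and key types reduced to plain integers for illustration:

// Simplified model of the three receive paths for a ValueChanged statement.
enum HeaderDetail {
    Direct { sender_id: u64 },
    SafetyRouted,
    PrivateRouted { remote_safety_route: u64 },
}

fn inbound_node_id(detail: &HeaderDetail) -> Result<u64, &'static str> {
    match detail {
        // Direct: the envelope sender is the watch holder's node id.
        HeaderDetail::Direct { sender_id } => Ok(*sender_id),
        // Safety-routed only: no way to attribute the sender, drop the message.
        HeaderDetail::SafetyRouted => Err("not processing value change over safety route"),
        // Private-routed: the best available identity is the remote safety
        // route key; the storage manager rejects it if it doesn't match.
        HeaderDetail::PrivateRouted { remote_safety_route } => Ok(*remote_safety_route),
    }
}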
@ -2,8 +2,10 @@ use super::*;

#[derive(Clone, Debug)]
pub struct WatchValueAnswer {
    pub accepted: bool,
    pub expiration_ts: Timestamp,
    pub peers: Vec<PeerInfo>,
    pub watch_id: u64,
}

impl RPCProcessor {
@ -22,6 +24,7 @@ impl RPCProcessor {
        ret.peers.len
    ),err)
)]
#[allow(clippy::too_many_arguments)]
pub async fn rpc_call_watch_value(
    self,
    dest: Destination,
@ -30,6 +33,7 @@ impl RPCProcessor {
    expiration: Timestamp,
    count: u32,
    watcher: KeyPair,
    watch_id: Option<u64>,
) -> RPCNetworkResult<Answer<WatchValueAnswer>> {
    // Ensure destination never has a private route
    // and get the target noderef so we can validate the response
@ -48,8 +52,18 @@ impl RPCProcessor {
    };

    let debug_string = format!(
        "OUT ==> WatchValueQ({} {}@{}+{}) => {} (watcher={})",
        key, subkeys, expiration, count, dest, watcher.key
        "OUT ==> WatchValueQ({} {} {}@{}+{}) => {} (watcher={}) ",
        if let Some(watch_id) = watch_id {
            format!("id={} ", watch_id)
        } else {
            "".to_owned()
        },
        key,
        subkeys,
        expiration,
        count,
        dest,
        watcher.key
    );

    // Send the watchvalue question
@ -58,6 +72,7 @@ impl RPCProcessor {
        subkeys.clone(),
        expiration.as_u64(),
        count,
        watch_id,
        watcher,
        vcrypto.clone(),
    )?;
@ -66,8 +81,7 @@ impl RPCProcessor {
        RPCQuestionDetail::WatchValueQ(Box::new(watch_value_q)),
    );

    #[cfg(feature = "debug-dht")]
    log_rpc!(debug "{}", debug_string);
    log_dht!(debug "{}", debug_string);

    let waitable_reply =
        network_result_try!(self.question(dest.clone(), question, None).await?);
@ -90,12 +104,13 @@ impl RPCProcessor {
        },
        _ => return Ok(NetworkResult::invalid_message("not an answer")),
    };

    let (expiration, peers) = watch_value_a.destructure();
    #[cfg(feature = "debug-dht")]
    {
    let question_watch_id = watch_id;
    let (accepted, expiration, peers, watch_id) = watch_value_a.destructure();
    if debug_target_enabled!("dht") {
        let debug_string_answer = format!(
            "OUT <== WatchValueA({} #{:?}@{} peers={}) <= {}",
            "OUT <== WatchValueA({}id={} {} #{:?}@{} peers={}) <= {}",
            if accepted { "+accept " } else { "" },
            watch_id,
            key,
            subkeys,
            expiration,
@ -103,13 +118,32 @@ impl RPCProcessor {
            dest
        );

        log_rpc!(debug "{}", debug_string_answer);
        log_dht!(debug "{}", debug_string_answer);

        let peer_ids: Vec<String> = peers
            .iter()
            .filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
            .collect();
        log_rpc!(debug "Peers: {:#?}", peer_ids);
        log_dht!(debug "Peers: {:#?}", peer_ids);
    }

    // Validate accepted requests
    if accepted {
        // Verify the returned answer watch id is the same as the question watch id, if one was sent
        if let Some(question_watch_id) = question_watch_id {
            if question_watch_id != watch_id {
                return Ok(NetworkResult::invalid_message(format!(
                    "answer watch id={} doesn't match question watch id={}",
                    watch_id, question_watch_id,
                )));
            }
        }
        // Validate that if a watch was created or updated, it has a nonzero id
        if expiration != 0 && watch_id == 0 {
            return Ok(NetworkResult::invalid_message(
                "zero watch id returned on accepted or cancelled watch",
            ));
        }
    }

    // Validate peers returned are, in fact, closer to the key than the node we sent this to
@ -137,12 +171,16 @@ impl RPCProcessor {
        latency,
        reply_private_route,
        WatchValueAnswer {
            accepted,
            expiration_ts: Timestamp::new(expiration),
            peers,
            watch_id,
        },
    )))
}

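The validation block above enforces two invariants on an accepted WatchValueA: an answer must echo the watch id the question asked about (when one was sent), and a live watch, signalled by a nonzero expiration, must carry a nonzero id so it can be renewed or cancelled later. A compact sketch of just those two checks, with the ids and expiration as plain u64 values:

// Validate a watch answer against the optional id we asked about.
fn validate_watch_answer(
    question_watch_id: Option<u64>,
    answer_watch_id: u64,
    expiration: u64,
) -> Result<(), String> {
    // An update/cancel must come back tagged with the id we asked about.
    if let Some(qid) = question_watch_id {
        if qid != answer_watch_id {
            return Err(format!(
                "answer watch id={} doesn't match question watch id={}",
                answer_watch_id, qid
            ));
        }
    }
    // A created or renewed watch must be identifiable for later cancellation.
    if expiration != 0 && answer_watch_id == 0 {
        return Err("zero watch id returned on accepted watch".to_owned());
    }
    Ok(())
}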
////////////////////////////////////////////////////////////////////////////////////////////////

#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_watch_value_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
    let routing_table = self.routing_table();
@ -185,16 +223,21 @@ impl RPCProcessor {
    };

    // Destructure
    let (key, subkeys, expiration, count, watcher, _signature) = watch_value_q.destructure();
    let (key, subkeys, expiration, count, watch_id, watcher, _signature) =
        watch_value_q.destructure();

    // Get target for ValueChanged notifications
    let dest = network_result_try!(self.get_respond_to_destination(&msg));
    let target = dest.get_target(rss)?;

    #[cfg(feature = "debug-dht")]
    {
    if debug_target_enabled!("dht") {
        let debug_string = format!(
            "IN <=== WatchValueQ({} {}@{}+{}) <== {} (watcher={})",
            "IN <=== WatchValueQ({}{} {}@{}+{}) <== {} (watcher={})",
            if let Some(watch_id) = watch_id {
                format!("id={} ", watch_id)
            } else {
                "".to_owned()
            },
            key,
            subkeys,
            expiration,
@ -203,7 +246,7 @@ impl RPCProcessor {
            watcher
        );

        log_rpc!(debug "{}", debug_string);
        log_dht!(debug "{}", debug_string);
    }

    // Get the nodes that we know about that are closer to the key than our own node
@ -211,40 +254,53 @@ impl RPCProcessor {
        routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH])
    );

    // See if we would have accepted this as a set
    // See if we would have accepted this as a set, same set_value_count for watches
    let set_value_count = {
        let c = self.config.get();
        c.network.dht.set_value_count as usize
    };
    let ret_expiration = if closer_to_key_peers.len() >= set_value_count {
        // Not close enough
    let (ret_accepted, ret_expiration, ret_watch_id) =
        if closer_to_key_peers.len() >= set_value_count {
            // Not close enough, not accepted
            log_dht!(debug "Not close enough for watch value");

    #[cfg(feature = "debug-dht")]
    log_rpc!(debug "Not close enough for watch value");
            (false, 0, watch_id.unwrap_or_default())
        } else {
            // Accepted, let's try to watch or cancel it

        Timestamp::default()
    } else {
        // Close enough, let's watch it

        // See if we have this record ourselves, if so, accept the watch
        let storage_manager = self.storage_manager();
        network_result_try!(storage_manager
            .inbound_watch_value(
                key,
                subkeys.clone(),
                Timestamp::new(expiration),
            let params = WatchParameters {
                subkeys: subkeys.clone(),
                expiration: Timestamp::new(expiration),
                count,
                watcher,
                target,
                watcher
            )
            .await
            .map_err(RPCError::internal)?)
    };
            };

    #[cfg(feature = "debug-dht")]
    {
            // See if we have this record ourselves, if so, accept the watch
            let storage_manager = self.storage_manager();
            let watch_result = network_result_try!(storage_manager
                .inbound_watch_value(key, params, watch_id,)
                .await
                .map_err(RPCError::internal)?);

            // Encode the watch result
            // Rejections and cancellations are treated the same way by clients
            let (ret_expiration, ret_watch_id) = match watch_result {
                WatchResult::Created { id, expiration } => (expiration.as_u64(), id),
                WatchResult::Changed { expiration } => {
                    (expiration.as_u64(), watch_id.unwrap_or_default())
                }
                WatchResult::Cancelled => (0, watch_id.unwrap_or_default()),
                WatchResult::Rejected => (0, watch_id.unwrap_or_default()),
            };
            (true, ret_expiration, ret_watch_id)
        };

    if debug_target_enabled!("dht") {
        let debug_string_answer = format!(
            "IN ===> WatchValueA({} #{} expiration={} peers={}) ==> {}",
            "IN ===> WatchValueA({}id={} {} #{} expiration={} peers={}) ==> {}",
            if ret_accepted { "+accept " } else { "" },
            ret_watch_id,
            key,
            subkeys,
            ret_expiration,
@ -252,17 +308,15 @@ impl RPCProcessor {
            msg.header.direct_sender_node_id()
        );

        log_rpc!(debug "{}", debug_string_answer);
        log_dht!(debug "{}", debug_string_answer);
    }

    // Make WatchValue answer
    let watch_value_a = RPCOperationWatchValueA::new(
        ret_expiration.as_u64(),
        if ret_expiration.as_u64() == 0 {
            closer_to_key_peers
        } else {
            vec![]
        },
        ret_accepted,
        ret_expiration,
        closer_to_key_peers,
        ret_watch_id,
    )?;

    // Send WatchValue answer
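The server side above folds the four WatchResult cases into the (accepted, expiration, id) triple carried by WatchValueA; note that rejection and cancellation are deliberately indistinguishable to the client, both arriving as expiration 0. A sketch of that encoding — WatchResult mirrors the hunk, while the wire triple and the fallback-id handling are simplified for illustration:

enum WatchResult {
    Created { id: u64, expiration: u64 },
    Changed { expiration: u64 },
    Cancelled,
    Rejected,
}

// Encode a WatchResult as the wire triple; `question_id` is the id the
// client sent, if any. This only covers the "close enough" path, where
// accepted is always true; a node too far from the key answers
// (false, 0, question_id) without consulting its record store at all.
fn encode_watch_result(result: WatchResult, question_id: Option<u64>) -> (bool, u64, u64) {
    let fallback_id = question_id.unwrap_or_default();
    match result {
        WatchResult::Created { id, expiration } => (true, expiration, id),
        WatchResult::Changed { expiration } => (true, expiration, fallback_id),
        // Cancelled and Rejected look identical on the wire.
        WatchResult::Cancelled | WatchResult::Rejected => (true, 0, fallback_id),
    }
}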
@ -15,6 +15,20 @@ impl StorageManager {
        };
        remote_record_store.debug_records()
    }

    pub(crate) async fn debug_opened_records(&self) -> String {
        let inner = self.inner.lock().await;
        let mut out = "[\n".to_owned();
        for (k, v) in &inner.opened_records {
            let writer = if let Some(w) = v.writer() {
                w.to_string()
            } else {
                "".to_owned()
            };
            out += &format!(" {} {},\n", k, writer);
        }
        format!("{}]\n", out)
    }

    pub(crate) async fn purge_local_records(&self, reclaim: Option<usize>) -> String {
        let mut inner = self.inner.lock().await;
        let Some(local_record_store) = &mut inner.local_record_store else {
@ -66,8 +80,17 @@ impl StorageManager {
        let Some(local_record_store) = &inner.local_record_store else {
            return "not initialized".to_owned();
        };
        local_record_store.debug_record_info(key)
        let local_debug = local_record_store.debug_record_info(key);

        let opened_debug = if let Some(o) = inner.opened_records.get(&key) {
            format!("Opened Record: {:#?}\n", o)
        } else {
            "".to_owned()
        };

        format!("{}\n{}", local_debug, opened_debug)
    }

    pub(crate) async fn debug_remote_record_info(&self, key: TypedKey) -> String {
        let inner = self.inner.lock().await;
        let Some(remote_record_store) = &inner.remote_record_store else {

@ -14,10 +14,10 @@ struct OutboundGetValueContext {

/// The result of the outbound_get_value operation
pub(super) struct OutboundGetValueResult {
    /// Fanout result
    pub fanout_result: FanoutResult,
    /// The subkey that was retrieved
    pub subkey_result: SubkeyResult,
    /// And where it was retrieved from
    pub value_nodes: Vec<NodeRef>,
    pub get_result: GetResult,
}

impl StorageManager {
@ -28,7 +28,7 @@ impl StorageManager {
    key: TypedKey,
    subkey: ValueSubkey,
    safety_selection: SafetySelection,
    last_subkey_result: SubkeyResult,
    last_get_result: GetResult,
) -> VeilidAPIResult<OutboundGetValueResult> {
    let routing_table = rpc_processor.routing_table();

@ -44,15 +44,15 @@ impl StorageManager {
    };

    // Make do-get-value answer context
    let schema = if let Some(d) = &last_subkey_result.descriptor {
    let schema = if let Some(d) = &last_get_result.opt_descriptor {
        Some(d.schema()?)
    } else {
        None
    };
    let context = Arc::new(Mutex::new(OutboundGetValueContext {
        value: last_subkey_result.value,
        value: last_get_result.opt_value,
        value_nodes: vec![],
        descriptor: last_subkey_result.descriptor.clone(),
        descriptor: last_get_result.opt_descriptor.clone(),
        schema,
    }));

@ -60,7 +60,7 @@ impl StorageManager {
    let call_routine = |next_node: NodeRef| {
        let rpc_processor = rpc_processor.clone();
        let context = context.clone();
        let last_descriptor = last_subkey_result.descriptor.clone();
        let last_descriptor = last_get_result.opt_descriptor.clone();
        async move {
            let gva = network_result_try!(
                rpc_processor
@ -79,14 +79,20 @@ impl StorageManager {
            if let Some(descriptor) = gva.answer.descriptor {
                let mut ctx = context.lock();
                if ctx.descriptor.is_none() && ctx.schema.is_none() {
                    ctx.schema = Some(descriptor.schema().map_err(RPCError::invalid_format)?);
                    let schema = match descriptor.schema() {
                        Ok(v) => v,
                        Err(e) => {
                            return Ok(NetworkResult::invalid_message(e));
                        }
                    };
                    ctx.schema = Some(schema);
                    ctx.descriptor = Some(Arc::new(descriptor));
                }
            }

            // Keep the value if we got one and it is newer and it passes schema validation
            if let Some(value) = gva.answer.value {
                log_stor!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
                log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
                let mut ctx = context.lock();

                // Ensure we have a schema and descriptor
@ -142,8 +148,7 @@ impl StorageManager {
            }

            // Return peers if we have some
            #[cfg(feature = "network-result-extra")]
            log_stor!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len());
            log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len());

            Ok(NetworkResult::value(gva.answer.peers))
        }
@ -174,65 +179,35 @@ impl StorageManager {
        check_done,
    );

    match fanout_call.run(vec![]).await {
    let kind = match fanout_call.run(vec![]).await {
        // If we don't finish in the timeout (too much time passed checking for consensus)
        TimeoutOr::Timeout => {
            // Return the best answer we've got
            let ctx = context.lock();
            if ctx.value_nodes.len() >= consensus_count {
                log_stor!(debug "GetValue Fanout Timeout Consensus");
            } else {
                log_stor!(debug "GetValue Fanout Timeout Non-Consensus: {}", ctx.value_nodes.len());
            }
            Ok(OutboundGetValueResult {
                subkey_result: SubkeyResult {
                    value: ctx.value.clone(),
                    descriptor: ctx.descriptor.clone(),
                },
                value_nodes: ctx.value_nodes.clone(),
            })
        }
        // If we finished with consensus (enough nodes returning the same value)
        TimeoutOr::Value(Ok(Some(()))) => {
            // Return the best answer we've got
            let ctx = context.lock();
            if ctx.value_nodes.len() >= consensus_count {
                log_stor!(debug "GetValue Fanout Consensus");
            } else {
                log_stor!(debug "GetValue Fanout Non-Consensus: {}", ctx.value_nodes.len());
            }
            Ok(OutboundGetValueResult {
                subkey_result: SubkeyResult {
                    value: ctx.value.clone(),
                    descriptor: ctx.descriptor.clone(),
                },
                value_nodes: ctx.value_nodes.clone(),
            })
        }
        // If we finished without consensus (ran out of nodes before getting consensus)
        TimeoutOr::Value(Ok(None)) => {
            // Return the best answer we've got
            let ctx = context.lock();
            if ctx.value_nodes.len() >= consensus_count {
                log_stor!(debug "GetValue Fanout Exhausted Consensus");
            } else {
                log_stor!(debug "GetValue Fanout Exhausted Non-Consensus: {}", ctx.value_nodes.len());
            }
            Ok(OutboundGetValueResult {
                subkey_result: SubkeyResult {
                    value: ctx.value.clone(),
                    descriptor: ctx.descriptor.clone(),
                },
                value_nodes: ctx.value_nodes.clone(),
            })
        }
        TimeoutOr::Timeout => FanoutResultKind::Timeout,
        // If we finished with or without consensus (enough nodes returning the same value)
        TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished,
        // If we ran out of nodes before getting consensus
        TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted,
        // Failed
        TimeoutOr::Value(Err(e)) => {
            // If we finished with an error, return that
            log_stor!(debug "GetValue Fanout Error: {}", e);
            Err(e.into())
            log_dht!(debug "GetValue Fanout Error: {}", e);
            return Err(e.into());
        }
    }
    };

    let ctx = context.lock();
    let fanout_result = FanoutResult {
        kind,
        value_nodes: ctx.value_nodes.clone(),
    };
    log_network_result!(debug "GetValue Fanout: {:?}", fanout_result);

    Ok(OutboundGetValueResult {
        fanout_result,
        get_result: GetResult {
            opt_value: ctx.value.clone(),
            opt_descriptor: ctx.descriptor.clone(),
        },
    })
}

/// Handle a received 'Get Value' query
@ -241,28 +216,28 @@ impl StorageManager {
    key: TypedKey,
    subkey: ValueSubkey,
    want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<SubkeyResult>> {
) -> VeilidAPIResult<NetworkResult<GetResult>> {
    let mut inner = self.lock().await?;

    // See if this is a remote or local value
    let (_is_local, last_subkey_result) = {
    let (_is_local, last_get_result) = {
        // See if the subkey we are getting has a last known local value
        let mut last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?;
        let mut last_get_result = inner.handle_get_local_value(key, subkey, true).await?;
        // If this is local, it must have a descriptor already
        if last_subkey_result.descriptor.is_some() {
        if last_get_result.opt_descriptor.is_some() {
            if !want_descriptor {
                last_subkey_result.descriptor = None;
                last_get_result.opt_descriptor = None;
            }
            (true, last_subkey_result)
            (true, last_get_result)
        } else {
            // See if the subkey we are getting has a last known remote value
            let last_subkey_result = inner
            let last_get_result = inner
                .handle_get_remote_value(key, subkey, want_descriptor)
                .await?;
            (false, last_subkey_result)
            (false, last_get_result)
        }
    };

    Ok(NetworkResult::value(last_subkey_result))
    Ok(NetworkResult::value(last_get_result))
}
}

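The refactor above replaces three nearly identical match arms with a single FanoutResultKind, classifying the terminal state first and building the result once afterward. A minimal standalone sketch of that pattern; FanoutResultKind mirrors the hunk, while the run outcome, error type, and node list are simplified stand-ins:

#[derive(Clone, Copy, Debug)]
enum FanoutResultKind { Timeout, Finished, Exhausted }

struct FanoutResult { kind: FanoutResultKind, value_nodes: Vec<u64> }

// Classify the fanout's terminal state once, then build the single result,
// instead of duplicating the result construction in every match arm.
fn finish_fanout(
    run: Result<Option<()>, String>,
    timed_out: bool,
    value_nodes: Vec<u64>,
) -> Result<FanoutResult, String> {
    let kind = if timed_out {
        FanoutResultKind::Timeout
    } else {
        match run {
            Ok(Some(())) => FanoutResultKind::Finished, // check_done reported consensus
            Ok(None) => FanoutResultKind::Exhausted,    // ran out of candidate nodes
            Err(e) => return Err(e),                    // propagate fanout errors
        }
    };
    Ok(FanoutResult { kind, value_nodes })
}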
337
veilid-core/src/storage_manager/inspect_value.rs
Normal file
@ -0,0 +1,337 @@
use super::*;

/// The fully parsed descriptor
struct DescriptorInfo {
    /// The descriptor itself
    descriptor: Arc<SignedValueDescriptor>,

    /// The in-schema subkeys that overlap the inspected range
    subkeys: ValueSubkeyRangeSet,
}

impl DescriptorInfo {
    pub fn new(
        descriptor: Arc<SignedValueDescriptor>,
        subkeys: &ValueSubkeyRangeSet,
    ) -> VeilidAPIResult<Self> {
        let schema = descriptor.schema().map_err(RPCError::invalid_format)?;
        let subkeys = schema.truncate_subkeys(subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
        Ok(Self {
            descriptor,
            subkeys,
        })
    }
}

/// Info tracked per subkey
struct SubkeySeqCount {
    /// The newest sequence number found for a subkey
    pub seq: ValueSeqNum,
    /// The nodes that have returned the value so far (up to the consensus count)
    pub value_nodes: Vec<NodeRef>,
}

/// The context of the outbound_inspect_value operation
struct OutboundInspectValueContext {
    /// The combined sequence numbers and result counts so far
    pub seqcounts: Vec<SubkeySeqCount>,
    /// The descriptor if we got a fresh one or empty if no descriptor was needed
    pub opt_descriptor_info: Option<DescriptorInfo>,
}

/// The result of the outbound_inspect_value operation
pub(super) struct OutboundInspectValueResult {
    /// Fanout results for each subkey
    pub fanout_results: Vec<FanoutResult>,
    /// The inspection that was retrieved
    pub inspect_result: InspectResult,
}

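DescriptorInfo::new clips the requested subkey range to what the schema actually defines and to the maximum number of sequence numbers one answer may carry. A minimal sketch of that clipping rule using a plain inclusive integer range; truncate_subkeys, MAX_INSPECT_VALUE_A_SEQS_LEN and the real range-set type belong to the codebase, everything else here is simplified for illustration:

// Clip a requested inclusive subkey range to the schema's subkey count and a
// maximum number of answer entries, mirroring what truncate_subkeys does.
fn clip_subkeys(
    requested: (u32, u32),
    schema_subkey_count: u32,
    max_len: u32,
) -> Option<(u32, u32)> {
    let (start, end) = requested;
    if schema_subkey_count == 0 || max_len == 0 || start >= schema_subkey_count {
        return None; // nothing in-schema overlaps the request
    }
    // Stay inside the schema...
    let end = end.min(schema_subkey_count - 1);
    // ...and inside the answer-size cap.
    let end = end.min(start + max_len - 1);
    (start <= end).then_some((start, end))
}

// e.g. clip_subkeys((0, 1000), 4, 512) == Some((0, 3))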
impl StorageManager {
    /// Perform an 'inspect value' query on the network
    pub(super) async fn outbound_inspect_value(
        &self,
        rpc_processor: RPCProcessor,
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        safety_selection: SafetySelection,
        local_inspect_result: InspectResult,
        use_set_scope: bool,
    ) -> VeilidAPIResult<OutboundInspectValueResult> {
        let routing_table = rpc_processor.routing_table();

        // Get the DHT parameters for 'InspectValue'
        // Can use either 'get scope' or 'set scope' depending on the purpose of the inspection
        let (key_count, consensus_count, fanout, timeout_us) = {
            let c = self.unlocked_inner.config.get();

            if use_set_scope {
                (
                    c.network.dht.max_find_node_count as usize,
                    c.network.dht.set_value_count as usize,
                    c.network.dht.set_value_fanout as usize,
                    TimestampDuration::from(ms_to_us(c.network.dht.set_value_timeout_ms)),
                )
            } else {
                (
                    c.network.dht.max_find_node_count as usize,
                    c.network.dht.get_value_count as usize,
                    c.network.dht.get_value_fanout as usize,
                    TimestampDuration::from(ms_to_us(c.network.dht.get_value_timeout_ms)),
                )
            }
        };

        // Make do-inspect-value answer context
        let opt_descriptor_info = if let Some(descriptor) = &local_inspect_result.opt_descriptor {
            // Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
            Some(DescriptorInfo::new(descriptor.clone(), &subkeys)?)
        } else {
            None
        };

        let context = Arc::new(Mutex::new(OutboundInspectValueContext {
            seqcounts: local_inspect_result
                .seqs
                .iter()
                .map(|s| SubkeySeqCount {
                    seq: *s,
                    value_nodes: vec![],
                })
                .collect(),
            opt_descriptor_info,
        }));

        // Routine to call to generate fanout
        let call_routine = |next_node: NodeRef| {
            let rpc_processor = rpc_processor.clone();
            let context = context.clone();
            let opt_descriptor = local_inspect_result.opt_descriptor.clone();
            let subkeys = subkeys.clone();
            async move {
                let iva = network_result_try!(
                    rpc_processor
                        .clone()
                        .rpc_call_inspect_value(
                            Destination::direct(next_node.clone()).with_safety(safety_selection),
                            key,
                            subkeys.clone(),
                            opt_descriptor.map(|x| (*x).clone()),
                        )
                        .await?
                );
                let answer = iva.answer;

                // Keep the descriptor if we got one. If we had a last_descriptor it will
                // already be validated by rpc_call_inspect_value
                if let Some(descriptor) = answer.descriptor {
                    let mut ctx = context.lock();
                    if ctx.opt_descriptor_info.is_none() {
                        // Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
                        let descriptor_info =
                            match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) {
                                Ok(v) => v,
                                Err(e) => {
                                    return Ok(NetworkResult::invalid_message(e));
                                }
                            };
                        ctx.opt_descriptor_info = Some(descriptor_info);
                    }
                }

                // Keep the sequence numbers if we got some back and they pass validation
                if !answer.seqs.is_empty() {
                    log_dht!(debug "Got seqs back: len={}", answer.seqs.len());
                    let mut ctx = context.lock();

                    // Ensure we have a schema and descriptor etc
                    let Some(descriptor_info) = &ctx.opt_descriptor_info else {
                        // Got a value but no descriptor for it
                        // Move to the next node
                        return Ok(NetworkResult::invalid_message(
                            "Got inspection with no descriptor",
                        ));
                    };

                    // Get number of subkeys from schema and ensure we are getting the
                    // right number of sequence numbers between that and what we asked for
                    #[allow(clippy::unnecessary_cast)]
                    if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 {
                        // Not the right number of sequence numbers
                        // Move to the next node
                        return Ok(NetworkResult::invalid_message(format!(
                            "wrong number of seqs returned {} (wanted {})",
                            answer.seqs.len(),
                            descriptor_info.subkeys.len()
                        )));
                    }

                    // If we have a prior seqs list, merge in the new seqs
                    if ctx.seqcounts.is_empty() {
                        ctx.seqcounts = answer
                            .seqs
                            .iter()
                            .map(|s| SubkeySeqCount {
                                seq: *s,
                                // One node has shown us the newest sequence numbers so far
                                value_nodes: if *s == ValueSeqNum::MAX {
                                    vec![]
                                } else {
                                    vec![next_node.clone()]
                                },
                            })
                            .collect();
                    } else {
                        if ctx.seqcounts.len() != answer.seqs.len() {
                            return Err(RPCError::internal(
                                "seqs list length should always be equal by now",
                            ));
                        }
                        for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) {
                            let ctx_seqcnt = pair.0;
                            let answer_seq = *pair.1;

                            // If we already have consensus for this subkey, don't bother updating it any more
                            // While we may find a better sequence number if we keep looking, this does not mimic the behavior
                            // of get and set unless we stop here
                            if ctx_seqcnt.value_nodes.len() >= consensus_count {
                                continue;
                            }

                            // If the new seq isn't undefined and is better than the old seq (either greater or old is undefined),
                            // then take that sequence number and note that we have gotten newer sequence numbers so we keep
                            // looking for consensus
                            // If the sequence number matches the old sequence number, then we keep the value node for reference later
                            if answer_seq != ValueSeqNum::MAX {
                                if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq
                                {
                                    // One node has shown us the latest sequence numbers so far
                                    ctx_seqcnt.seq = answer_seq;
                                    ctx_seqcnt.value_nodes = vec![next_node.clone()];
                                } else if answer_seq == ctx_seqcnt.seq {
                                    // Keep the nodes that showed us the latest values
                                    ctx_seqcnt.value_nodes.push(next_node.clone());
                                }
                            }
                        }
                    }
                }

                // Return peers if we have some
                log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len());

                Ok(NetworkResult::value(answer.peers))
            }
        };

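The merge loop above is the heart of outbound_inspect_value: each answering node either restarts or advances the consensus tally for every subkey. A distilled version of the per-subkey rule, using u32::MAX as the "undefined" marker the way the code uses ValueSeqNum::MAX, and a plain node count in place of the NodeRef list:

struct SeqCount {
    seq: u32,
    value_node_count: usize,
}

fn merge_answer_seq(cur: &mut SeqCount, answer_seq: u32, consensus_count: usize) {
    // Once a subkey has consensus, stop updating it so inspect mimics get/set.
    if cur.value_node_count >= consensus_count {
        return;
    }
    if answer_seq == u32::MAX {
        return; // the node had no value for this subkey
    }
    if cur.seq == u32::MAX || answer_seq > cur.seq {
        // Strictly newer: restart the consensus tally at this node.
        cur.seq = answer_seq;
        cur.value_node_count = 1;
    } else if answer_seq == cur.seq {
        // Same newest value: count this node toward consensus.
        cur.value_node_count += 1;
    }
}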
        // Routine to call to check if we're done at each step
        let check_done = |_closest_nodes: &[NodeRef]| {
            // If we have reached sufficient consensus on all subkeys, return done
            let ctx = context.lock();
            let mut has_consensus = true;
            for cs in ctx.seqcounts.iter() {
                if cs.value_nodes.len() < consensus_count {
                    has_consensus = false;
                    break;
                }
            }
            if !ctx.seqcounts.is_empty() && ctx.opt_descriptor_info.is_some() && has_consensus {
                return Some(());
            }
            None
        };

        // Call the fanout
        let fanout_call = FanoutCall::new(
            routing_table.clone(),
            key,
            key_count,
            fanout,
            timeout_us,
            capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
            call_routine,
            check_done,
        );

        let kind = match fanout_call.run(vec![]).await {
            // If we don't finish in the timeout (too much time passed checking for consensus)
            TimeoutOr::Timeout => FanoutResultKind::Timeout,
            // If we finished with or without consensus (enough nodes returning the same value)
            TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished,
            // If we ran out of nodes before getting consensus
            TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted,
            // Failed
            TimeoutOr::Value(Err(e)) => {
                // If we finished with an error, return that
                log_dht!(debug "InspectValue Fanout Error: {}", e);
                return Err(e.into());
            }
        };

        let ctx = context.lock();
        let mut fanout_results = vec![];
        for cs in &ctx.seqcounts {
            let has_consensus = cs.value_nodes.len() >= consensus_count;
            let fanout_result = FanoutResult {
                kind: if has_consensus {
                    FanoutResultKind::Finished
                } else {
                    kind
                },
                value_nodes: cs.value_nodes.clone(),
            };
            fanout_results.push(fanout_result);
        }

        log_network_result!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results));

        Ok(OutboundInspectValueResult {
            fanout_results,
            inspect_result: InspectResult {
                subkeys: ctx
                    .opt_descriptor_info
                    .as_ref()
                    .map(|d| d.subkeys.clone())
                    .unwrap_or_default(),
                seqs: ctx.seqcounts.iter().map(|cs| cs.seq).collect(),
                opt_descriptor: ctx
                    .opt_descriptor_info
                    .as_ref()
                    .map(|d| d.descriptor.clone()),
            },
        })
    }

    /// Handle a received 'Inspect Value' query
    pub async fn inbound_inspect_value(
        &self,
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        want_descriptor: bool,
    ) -> VeilidAPIResult<NetworkResult<InspectResult>> {
        let mut inner = self.lock().await?;

        // See if this is a remote or local value
        let (_is_local, inspect_result) = {
            // See if the subkey we are getting has a last known local value
            let mut local_inspect_result = inner
                .handle_inspect_local_value(key, subkeys.clone(), true)
                .await?;
            // If this is local, it must have a descriptor already
            if local_inspect_result.opt_descriptor.is_some() {
                if !want_descriptor {
                    local_inspect_result.opt_descriptor = None;
                }
                (true, local_inspect_result)
            } else {
                // See if the subkey we are getting has a last known remote value
                let remote_inspect_result = inner
                    .handle_inspect_remote_value(key, subkeys, want_descriptor)
                    .await?;
                (false, remote_inspect_result)
            }
        };

        Ok(NetworkResult::value(inspect_result))
    }
}
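One detail worth calling out from the result loop above: a subkey that reached consensus is reported as Finished even when the overall fanout timed out or was exhausted, so callers can trust per-subkey results independently of the run's terminal state. A compact sketch of that override rule, with FanoutResultKind mirroring the hunk and the node tally reduced to a count:

#[derive(Clone, Copy, Debug, PartialEq)]
enum FanoutResultKind { Timeout, Finished, Exhausted }

// Per-subkey: consensus overrides the overall fanout outcome.
fn subkey_kind(
    overall: FanoutResultKind,
    value_node_count: usize,
    consensus_count: usize,
) -> FanoutResultKind {
    if value_node_count >= consensus_count {
        FanoutResultKind::Finished
    } else {
        overall
    }
}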
@ -1,27 +1,22 @@
mod debug;
mod get_value;
mod keys;
mod limited_size;
mod inspect_value;
mod record_store;
mod record_store_limits;
mod set_value;
mod storage_manager_inner;
mod tasks;
mod types;
mod watch_value;

use keys::*;
use limited_size::*;
use record_store::*;
use record_store_limits::*;
use storage_manager_inner::*;

pub use types::*;

use super::*;
use network_manager::*;
use record_store::*;
use routing_table::*;
use rpc_processor::*;
use storage_manager_inner::*;

pub use record_store::{WatchParameters, WatchResult};
pub use types::*;

/// The maximum size of a single subkey
const MAX_SUBKEY_SIZE: usize = ValueData::MAX_LEN;
@ -33,8 +28,10 @@ const FLUSH_RECORD_STORES_INTERVAL_SECS: u32 = 1;
const OFFLINE_SUBKEY_WRITES_INTERVAL_SECS: u32 = 1;
/// Frequency to send ValueChanged notifications to the network
const SEND_VALUE_CHANGES_INTERVAL_SECS: u32 = 1;
/// Frequence to check for dead nodes and routes for active watches
/// Frequency to check for dead nodes and routes for client-side active watches
const CHECK_ACTIVE_WATCHES_INTERVAL_SECS: u32 = 1;
/// Frequency to check for expired server-side watched records
const CHECK_WATCHED_RECORDS_INTERVAL_SECS: u32 = 1;

#[derive(Debug, Clone)]
/// A single 'value changed' message to send
@ -43,6 +40,7 @@ struct ValueChangedInfo {
    key: TypedKey,
    subkeys: ValueSubkeyRangeSet,
    count: u32,
    watch_id: u64,
    value: Arc<SignedValueData>,
}

@ -58,6 +56,7 @@ struct StorageManagerUnlockedInner {
    offline_subkey_writes_task: TickTask<EyreReport>,
    send_value_changes_task: TickTask<EyreReport>,
    check_active_watches_task: TickTask<EyreReport>,
    check_watched_records_task: TickTask<EyreReport>,

    // Anonymous watch keys
    anonymous_watch_keys: TypedKeyPairGroup,
@ -94,6 +93,7 @@ impl StorageManager {
    offline_subkey_writes_task: TickTask::new(OFFLINE_SUBKEY_WRITES_INTERVAL_SECS),
    send_value_changes_task: TickTask::new(SEND_VALUE_CHANGES_INTERVAL_SECS),
    check_active_watches_task: TickTask::new(CHECK_ACTIVE_WATCHES_INTERVAL_SECS),
    check_watched_records_task: TickTask::new(CHECK_WATCHED_RECORDS_INTERVAL_SECS),

    anonymous_watch_keys,
}
@ -127,7 +127,7 @@ impl StorageManager {

#[instrument(level = "debug", skip_all, err)]
pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
    debug!("startup storage manager");
    log_stor!(debug "startup storage manager");

    let mut inner = self.inner.lock().await;
    inner.init(self.clone(), update_callback).await?;
@ -137,7 +137,7 @@ impl StorageManager {

#[instrument(level = "debug", skip_all)]
pub async fn terminate(&self) {
    debug!("starting storage manager shutdown");
    log_stor!(debug "starting storage manager shutdown");

    let mut inner = self.inner.lock().await;
    inner.terminate().await;
@ -148,7 +148,7 @@ impl StorageManager {
    // Release the storage manager
    *inner = Self::new_inner(self.unlocked_inner.clone());

    debug!("finished storage manager shutdown");
    log_stor!(debug "finished storage manager shutdown");
}

pub async fn set_rpc_processor(&self, opt_rpc_processor: Option<RPCProcessor>) {
@ -169,7 +169,7 @@ impl StorageManager {
    Ok(inner)
}

fn online_writes_ready_inner(inner: &StorageManagerInner) -> Option<RPCProcessor> {
fn online_ready_inner(inner: &StorageManagerInner) -> Option<RPCProcessor> {
    if let Some(rpc_processor) = { inner.opt_rpc_processor.clone() } {
        if let Some(network_class) = rpc_processor
            .routing_table()
@ -193,7 +193,7 @@ impl StorageManager {

async fn online_writes_ready(&self) -> EyreResult<Option<RPCProcessor>> {
    let inner = self.lock().await?;
    Ok(Self::online_writes_ready_inner(&inner))
    Ok(Self::online_ready_inner(&inner))
}

async fn has_offline_subkey_writes(&self) -> EyreResult<bool> {
@ -209,6 +209,7 @@ impl StorageManager {
    safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
    let mut inner = self.lock().await?;
    schema.validate()?;

    // Create a new owned local record from scratch
    let (key, owner) = inner
@ -243,7 +244,7 @@ impl StorageManager {
    // No record yet, try to get it from the network

    // Get rpc processor and drop mutex so we don't block while getting the value from the network
    let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
    let Some(rpc_processor) = Self::online_ready_inner(&inner) else {
        apibail_try_again!("offline, try again later");
    };

@ -259,12 +260,12 @@ impl StorageManager {
    key,
    subkey,
    safety_selection,
    SubkeyResult::default(),
    GetResult::default(),
)
.await?;

// If we got nothing back, the key wasn't found
if result.subkey_result.value.is_none() && result.subkey_result.descriptor.is_none() {
if result.get_result.opt_value.is_none() && result.get_result.opt_descriptor.is_none() {
    // No result
    apibail_key_not_found!(key);
};
@ -285,7 +286,7 @@ impl StorageManager {

// Open the new record
inner
    .open_new_record(key, writer, subkey, result.subkey_result, safety_selection)
    .open_new_record(key, writer, subkey, result.get_result, safety_selection)
    .await
}

@ -293,7 +294,7 @@ impl StorageManager {
pub async fn close_record(&self, key: TypedKey) -> VeilidAPIResult<()> {
    let (opt_opened_record, opt_rpc_processor) = {
        let mut inner = self.lock().await?;
        (inner.close_record(key)?, inner.opt_rpc_processor.clone())
        (inner.close_record(key)?, Self::online_ready_inner(&inner))
    };

    // Send a one-time cancel request for the watch if we have one and we're online
@ -311,14 +312,14 @@ impl StorageManager {
    0,
    opened_record.safety_selection(),
    opened_record.writer().cloned(),
    Some(active_watch.id),
    Some(active_watch.watch_node),
)
.await?;
if let Some(owvresult) = opt_owvresult {
    if owvresult.expiration_ts.as_u64() != 0 {
        log_stor!(debug
            "close record watch cancel got unexpected expiration: {}",
            owvresult.expiration_ts
            "close record watch cancel should have zero expiration"
        );
    }
} else {
@ -364,22 +365,22 @@ impl StorageManager {
    };

    // See if the requested subkey is in our local record store
    let last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?;
    let last_get_result = inner.handle_get_local_value(key, subkey, true).await?;

    // Return the existing value if we have one unless we are forcing a refresh
    if !force_refresh {
        if let Some(last_subkey_result_value) = last_subkey_result.value {
            return Ok(Some(last_subkey_result_value.value_data().clone()));
        if let Some(last_get_result_value) = last_get_result.opt_value {
            return Ok(Some(last_get_result_value.value_data().clone()));
        }
    }

    // Refresh if we can

    // Get rpc processor and drop mutex so we don't block while getting the value from the network
    let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
    let Some(rpc_processor) = Self::online_ready_inner(&inner) else {
        // Return the existing value if we have one and we aren't online
        if let Some(last_subkey_result_value) = last_subkey_result.value {
            return Ok(Some(last_subkey_result_value.value_data().clone()));
        if let Some(last_get_result_value) = last_get_result.opt_value {
            return Ok(Some(last_get_result_value.value_data().clone()));
        }
        apibail_try_again!("offline, try again later");
    };
@ -389,8 +390,8 @@ impl StorageManager {

    // May have last descriptor / value
    // Use the safety selection we opened the record with
    let opt_last_seq = last_subkey_result
        .value
    let opt_last_seq = last_get_result
        .opt_value
        .as_ref()
        .map(|v| v.value_data().seq());
    let result = self
@ -399,32 +400,36 @@ impl StorageManager {
    key,
    subkey,
    safety_selection,
    last_subkey_result,
    last_get_result,
)
.await?;

// See if we got a value back
let Some(subkey_result_value) = result.subkey_result.value else {
let Some(get_result_value) = result.get_result.opt_value else {
    // If we got nothing back then we also had nothing beforehand, return nothing
    return Ok(None);
};

// Keep the list of nodes that returned a value for later reference
let mut inner = self.lock().await?;
inner.set_value_nodes(key, result.value_nodes)?;
inner.process_fanout_results(
    key,
    core::iter::once((subkey, &result.fanout_result)),
    false,
)?;

// If we got a new value back then write it to the opened record
if Some(subkey_result_value.value_data().seq()) != opt_last_seq {
if Some(get_result_value.value_data().seq()) != opt_last_seq {
    inner
        .handle_set_local_value(
            key,
            subkey,
            subkey_result_value.clone(),
            get_result_value.clone(),
            WatchUpdateMode::UpdateAll,
        )
        .await?;
}
Ok(Some(subkey_result_value.value_data().clone()))
Ok(Some(get_result_value.value_data().clone()))
}

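The tail of get_value above combines three decisions: serve the cached value when no refresh is forced, fall back to the cache when offline, and write a network-returned value back locally only when its sequence number is new. A condensed decision table for the first two, with the record state reduced to an optional sequence number and a connectivity flag (the surrounding storage calls are the codebase's own and are elided here):

enum GetPlan { ReturnLocal, TryAgainLater, FetchFromNetwork }

// force_refresh: caller wants the network consulted even if a value is cached
// local: the last locally known value's seq, if any
// online: whether an RPC processor is currently available
fn plan_get(force_refresh: bool, local: Option<u32>, online: bool) -> GetPlan {
    match (force_refresh, local, online) {
        (false, Some(_), _) => GetPlan::ReturnLocal,  // cached and fresh enough
        (_, Some(_), false) => GetPlan::ReturnLocal,  // offline: best effort
        (_, None, false) => GetPlan::TryAgainLater,   // offline, nothing cached
        (_, _, true) => GetPlan::FetchFromNetwork,    // consult the network
    }
}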
/// Set the value of a subkey on an opened local record
@ -433,6 +438,7 @@ impl StorageManager {
    key: TypedKey,
    subkey: ValueSubkey,
    data: Vec<u8>,
    writer: Option<KeyPair>,
) -> VeilidAPIResult<Option<ValueData>> {
    let mut inner = self.lock().await?;

@ -451,22 +457,25 @@ impl StorageManager {
    )
};

// Use the specified writer, or if not specified, the default writer when the record was opened
let opt_writer = writer.or(opt_writer);

// If we don't have a writer then we can't write
let Some(writer) = opt_writer else {
    apibail_generic!("value is not writable");
};

// See if the subkey we are modifying has a last known local value
let last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?;
let last_get_result = inner.handle_get_local_value(key, subkey, true).await?;

// Get the descriptor and schema for the key
let Some(descriptor) = last_subkey_result.descriptor else {
let Some(descriptor) = last_get_result.opt_descriptor else {
    apibail_generic!("must have a descriptor");
};
let schema = descriptor.schema()?;

// Make new subkey data
let value_data = if let Some(last_signed_value_data) = last_subkey_result.value {
let value_data = if let Some(last_signed_value_data) = last_get_result.opt_value {
    if last_signed_value_data.value_data().data() == data
        && last_signed_value_data.value_data().writer() == &writer.key
    {
@ -495,20 +504,19 @@ impl StorageManager {
    writer.secret,
)?);

// Write the value locally first
log_stor!(debug "Writing subkey locally: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
inner
    .handle_set_local_value(
        key,
        subkey,
        signed_value_data.clone(),
        WatchUpdateMode::NoUpdate,
    )
    .await?;

// Get rpc processor and drop mutex so we don't block while getting the value from the network
let Some(rpc_processor) = Self::online_writes_ready_inner(&inner) else {
log_stor!(debug "Writing subkey locally: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );

// Offline, just write it locally and return immediately
inner
    .handle_set_local_value(
        key,
        subkey,
        signed_value_data.clone(),
        WatchUpdateMode::UpdateAll,
    )
    .await?;

let Some(rpc_processor) = Self::online_ready_inner(&inner) else {
    log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
    // Add to offline writes to flush
    inner
@ -527,6 +535,8 @@ impl StorageManager {
    // Drop the lock for network access
    drop(inner);

    log_stor!(debug "Writing subkey to the network: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );

    // Use the safety selection we opened the record with
    let result = self
        .outbound_set_value(
@ -541,20 +551,24 @@ impl StorageManager {

    // Keep the list of nodes that returned a value for later reference
    let mut inner = self.lock().await?;
    inner.set_value_nodes(key, result.value_nodes)?;

    // Whatever record we got back, store it locally, might be newer than the one we asked to save
    inner
        .handle_set_local_value(
            key,
            subkey,
            result.signed_value_data.clone(),
            WatchUpdateMode::UpdateAll,
        )
        .await?;
    inner.process_fanout_results(
        key,
        core::iter::once((subkey, &result.fanout_result)),
        true,
    )?;

    // Return the new value if it differs from what was asked to set
    if result.signed_value_data.value_data() != signed_value_data.value_data() {
        // Record the newer value and send an update since it is different than what we just set
        inner
            .handle_set_local_value(
                key,
                subkey,
                result.signed_value_data.clone(),
                WatchUpdateMode::UpdateAll,
            )
            .await?;

        return Ok(Some(result.signed_value_data.value_data().clone()));
    }

@ -562,7 +576,7 @@ impl StorageManager {
    Ok(None)
}

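After this change, set_value always writes locally first and only then decides between queueing an offline flush and pushing to the network, with any newer value the network returns stored back over the local copy. A minimal sketch of that ordering; the queue and the value representation below are stand-ins invented for illustration, not the codebase's API:

enum SetOutcome { QueuedOffline, SentToNetwork }

// Write-local-first: the local store is the source of truth, the network is
// best-effort and can be retried later from the offline write queue.
fn set_subkey(
    local: &mut Vec<u8>,
    offline_queue: &mut Vec<Vec<u8>>,
    online: bool,
    data: Vec<u8>,
) -> SetOutcome {
    *local = data.clone();        // 1. persist locally, unconditionally
    if !online {
        offline_queue.push(data); // 2a. offline: remember to flush later
        SetOutcome::QueuedOffline
    } else {
        // 2b. online: send to the network (elided); a newer value returned
        //     by the network would then overwrite what we just wrote locally
        SetOutcome::SentToNetwork
    }
}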
||||
/// Add a watch to a DHT value
|
||||
/// Create,update or cancel an outbound watch to a DHT value
|
||||
pub async fn watch_values(
|
||||
&self,
|
||||
key: TypedKey,
|
||||
@ -572,6 +586,20 @@ impl StorageManager {
|
||||
) -> VeilidAPIResult<Timestamp> {
|
||||
let inner = self.lock().await?;
|
||||
|
||||
// Get the safety selection and the writer we opened this record
|
||||
// and whatever active watch id and watch node we may have in case this is a watch update
|
||||
let (safety_selection, opt_writer, opt_watch_id, opt_watch_node) = {
|
||||
let Some(opened_record) = inner.opened_records.get(&key) else {
|
||||
apibail_generic!("record not open");
|
||||
};
|
||||
(
|
||||
opened_record.safety_selection(),
|
||||
opened_record.writer().cloned(),
|
||||
opened_record.active_watch().map(|aw| aw.id),
|
||||
opened_record.active_watch().map(|aw| aw.watch_node.clone()),
|
||||
)
|
||||
};
|
||||
|
||||
// Rewrite subkey range if empty to full
|
||||
let subkeys = if subkeys.is_empty() {
|
||||
ValueSubkeyRangeSet::full()
|
||||
@ -579,20 +607,19 @@ impl StorageManager {
|
||||
subkeys
|
||||
};
|
||||
|
||||
// Get the safety selection and the writer we opened this record with
|
||||
let (safety_selection, opt_writer, opt_watch_node) = {
|
||||
let Some(opened_record) = inner.opened_records.get(&key) else {
|
||||
apibail_generic!("record not open");
|
||||
// Get the schema so we can truncate the watch to the number of subkeys
|
||||
let schema = if let Some(lrs) = inner.local_record_store.as_ref() {
|
||||
let Some(schema) = lrs.peek_record(key, |r| r.schema()) else {
|
||||
apibail_generic!("no local record found");
|
||||
};
|
||||
(
|
||||
opened_record.safety_selection(),
|
||||
opened_record.writer().cloned(),
|
||||
opened_record.active_watch().map(|aw| aw.watch_node.clone()),
|
||||
)
|
||||
schema
|
||||
} else {
|
||||
apibail_not_initialized!();
|
||||
};
|
||||
let subkeys = schema.truncate_subkeys(&subkeys, None);
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while requesting the watch from the network
|
||||
let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
|
||||
let Some(rpc_processor) = Self::online_ready_inner(&inner) else {
|
||||
apibail_try_again!("offline, try again later");
|
||||
};
|
||||
|
||||
@ -610,13 +637,14 @@ impl StorageManager {
|
||||
count,
|
||||
safety_selection,
|
||||
opt_writer,
|
||||
opt_watch_id,
|
||||
opt_watch_node,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// If we did not get a valid response return a zero timestamp
|
||||
// If we did not get a valid response assume nothing changed
|
||||
let Some(owvresult) = opt_owvresult else {
|
||||
return Ok(Timestamp::new(0));
|
||||
apibail_try_again!("did not get a valid response");
|
||||
};
|
||||
|
||||
// Clear any existing watch if the watch succeeded or got cancelled
|
||||
@ -642,24 +670,29 @@ impl StorageManager {
|
||||
expiration.as_u64()
|
||||
};
|
||||
|
||||
// If the expiration time is less than our minimum expiration time consider this watch cancelled
|
||||
// If the expiration time is less than our minimum expiration time (or zero) consider this watch inactive
|
||||
let mut expiration_ts = owvresult.expiration_ts;
|
||||
if expiration_ts.as_u64() < min_expiration_ts {
|
||||
return Ok(Timestamp::new(0));
|
||||
}
|
||||
|
||||
// If the expiration time is greated than our maximum expiration time, clamp our local watch so we ignore extra valuechanged messages
|
||||
// If the expiration time is greater than our maximum expiration time, clamp our local watch so we ignore extra valuechanged messages
|
||||
if expiration_ts.as_u64() > max_expiration_ts {
|
||||
expiration_ts = Timestamp::new(max_expiration_ts);
|
||||
}
|
||||
|
||||
// If we requested a cancellation, then consider this watch cancelled
|
||||
if count == 0 {
|
||||
// Expiration returned should be zero if we requested a cancellation
|
||||
if expiration_ts.as_u64() != 0 {
|
||||
log_stor!(debug "got active watch despite asking for a cancellation");
|
||||
}
|
||||
return Ok(Timestamp::new(0));
|
||||
}
|
||||
|
||||
// Keep a record of the watch
|
||||
opened_record.set_active_watch(ActiveWatch {
|
||||
id: owvresult.watch_id,
|
||||
expiration_ts,
|
||||
watch_node: owvresult.watch_node,
|
||||
opt_value_changed_route: owvresult.opt_value_changed_route,
|
||||
@ -707,25 +740,141 @@ impl StorageManager {
|
||||
active_watch.count
|
||||
};
|
||||
|
||||
// Update the watch
|
||||
// Update the watch. This just calls through to the above watch_values() function
|
||||
// This will update the active_watch so we don't need to do that in this routine.
|
||||
let expiration_ts = self
|
||||
.watch_values(key, subkeys, active_watch.expiration_ts, count)
|
||||
.await?;
|
||||
|
||||
// A zero expiration time means the watch is done or nothing is left, and the watch is no longer active
|
||||
// A zero expiration time returned from watch_value() means the watch is done
|
||||
// or no subkeys are left, and the watch is no longer active
|
||||
if expiration_ts.as_u64() == 0 {
|
||||
// Return false indicating the watch is completely gone
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Return true because the the watch was changed
|
||||
Ok(true)
|
||||
}
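
// The accept/clamp/cancel decision above condenses to a small pure function.
// A minimal, self-contained sketch over plain u64 timestamps (microseconds);
// the function name and shape are illustrative only, not part of this changeset.

/// Returns the expiration to record locally; 0 means "no active watch"
/// (below the minimum lifetime, or a requested cancellation).
fn effective_expiration(returned: u64, min_ts: u64, max_ts: u64, count: u32) -> u64 {
    if returned < min_ts {
        return 0; // too short-lived (or zero): treat as inactive
    }
    let clamped = returned.min(max_ts); // ignore extra value-changed messages past our max
    if count == 0 {
        return 0; // we asked for a cancellation
    }
    clamped
}

fn main() {
    assert_eq!(effective_expiration(50, 100, 1000, 5), 0); // below minimum: inactive
    assert_eq!(effective_expiration(5000, 100, 1000, 5), 1000); // clamped to max
    assert_eq!(effective_expiration(500, 100, 1000, 0), 0); // cancellation requested
}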

    /// Inspect an opened DHT record for its subkey sequence numbers
    pub async fn inspect_record(
        &self,
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        scope: DHTReportScope,
    ) -> VeilidAPIResult<DHTRecordReport> {
        let subkeys = if subkeys.is_empty() {
            ValueSubkeyRangeSet::full()
        } else {
            subkeys
        };

        let mut inner = self.lock().await?;
        let safety_selection = {
            let Some(opened_record) = inner.opened_records.get(&key) else {
                apibail_generic!("record not open");
            };
            opened_record.safety_selection()
        };

        // See if the requested record is in our local record store
        let mut local_inspect_result = inner
            .handle_inspect_local_value(key, subkeys.clone(), true)
            .await?;

        #[allow(clippy::unnecessary_cast)]
        {
            assert!(
                local_inspect_result.subkeys.len() as u64 == local_inspect_result.seqs.len() as u64,
                "mismatch between local subkeys returned and sequence number list returned"
            );
        }
        assert!(
            local_inspect_result.subkeys.is_subset(&subkeys),
            "more subkeys returned locally than requested"
        );

        // If this is the maximum scope we're interested in, return the report
        if matches!(scope, DHTReportScope::Local) {
            return Ok(DHTRecordReport::new(
                local_inspect_result.subkeys,
                local_inspect_result.seqs,
                vec![],
            ));
        }

        // Get rpc processor and drop mutex so we don't block while getting the value from the network
        let Some(rpc_processor) = Self::online_ready_inner(&inner) else {
            apibail_try_again!("offline, try again later");
        };

        // Drop the lock for network access
        drop(inner);

        // If we're simulating a set, increase the previous sequence number we have by 1
        if matches!(scope, DHTReportScope::UpdateSet) {
            for seq in &mut local_inspect_result.seqs {
                *seq = seq.overflowing_add(1).0;
            }
        }

        // Get the inspect record report from the network
        let result = self
            .outbound_inspect_value(
                rpc_processor,
                key,
                subkeys,
                safety_selection,
                if matches!(scope, DHTReportScope::SyncGet | DHTReportScope::SyncSet) {
                    InspectResult::default()
                } else {
                    local_inspect_result.clone()
                },
                matches!(scope, DHTReportScope::UpdateSet | DHTReportScope::SyncSet),
            )
            .await?;

        // Sanity check before zip
        #[allow(clippy::unnecessary_cast)]
        {
            assert_eq!(
                result.inspect_result.subkeys.len() as u64,
                result.fanout_results.len() as u64,
                "mismatch between subkeys returned and fanout results returned"
            );
        }
        if !local_inspect_result.subkeys.is_empty() && !result.inspect_result.subkeys.is_empty() {
            assert_eq!(
                result.inspect_result.subkeys.len(),
                local_inspect_result.subkeys.len(),
                "mismatch between local subkeys returned and network results returned"
            );
        }

        // Keep the list of nodes that returned a value for later reference
        let mut inner = self.lock().await?;
        let results_iter = result
            .inspect_result
            .subkeys
            .iter()
            .zip(result.fanout_results.iter());

        inner.process_fanout_results(key, results_iter, false)?;

        Ok(DHTRecordReport::new(
            result.inspect_result.subkeys,
            local_inspect_result.seqs,
            result.inspect_result.seqs,
        ))
    }
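
// One natural use of the report built above is to diff the local sequence
// numbers against the network's to find subkeys needing a refresh. A
// self-contained sketch over plain slices; the real DHTRecordReport type has
// its own accessors, so treat these names as illustrative stand-ins.
// u32::MAX plays the "never written" sentinel, matching the record store.

fn stale_subkeys(subkeys: &[u32], local: &[u32], network: &[u32]) -> Vec<u32> {
    let mut out = Vec::new();
    for i in 0..subkeys.len() {
        let (l, n) = (local[i], network[i]);
        // Stale if the network has a value and it is newer than (or unknown to) us
        if n != u32::MAX && (l == u32::MAX || n > l) {
            out.push(subkeys[i]);
        }
    }
    out
}

fn main() {
    let subkeys = [0u32, 1, 2, 3];
    let local = [3, u32::MAX, 7, 2];
    let network = [4, 1, 7, u32::MAX];
    // Subkey 0 is behind the network; subkey 1 was never written locally.
    assert_eq!(stale_subkeys(&subkeys, &local, &network), vec![0, 1]);
}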

    // Send single value change out to the network
    #[instrument(level = "trace", skip(self), err)]
    async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> {
        let rpc_processor = {
            let inner = self.inner.lock().await;
            if let Some(rpc_processor) = &inner.opt_rpc_processor {
            if let Some(rpc_processor) = Self::online_ready_inner(&inner) {
                rpc_processor.clone()
            } else {
                apibail_try_again!("network is not available");
@ -741,10 +890,9 @@ impl StorageManager {
            .map_err(VeilidAPIError::from)?;

        network_result_value_or_log!(rpc_processor
            .rpc_call_value_changed(dest, vc.key, vc.subkeys.clone(), vc.count, (*vc.value).clone())
            .await
            .map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {
            });
            .rpc_call_value_changed(dest, vc.key, vc.subkeys.clone(), vc.count, vc.watch_id, (*vc.value).clone() )
            .await
            .map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {});

        Ok(())
    }

@ -0,0 +1,79 @@
use super::*;

const L2_CACHE_DEPTH: usize = 4; // XXX: I just picked this; we could probably do better than this someday

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InspectCacheL2Value {
    pub seqs: Vec<ValueSeqNum>,
}

#[derive(Debug, Clone, Eq, PartialEq)]
struct InspectCacheL2 {
    pub cache: LruCache<ValueSubkeyRangeSet, InspectCacheL2Value>,
}

impl InspectCacheL2 {
    pub fn new(l2_cache_limit: usize) -> Self {
        Self {
            cache: LruCache::new(l2_cache_limit),
        }
    }
}

pub struct InspectCache {
    cache: LruCache<TypedKey, InspectCacheL2>,
}

impl InspectCache {
    pub fn new(l1_cache_limit: usize) -> Self {
        Self {
            cache: LruCache::new(l1_cache_limit),
        }
    }

    pub fn get(
        &mut self,
        key: &TypedKey,
        subkeys: &ValueSubkeyRangeSet,
    ) -> Option<InspectCacheL2Value> {
        if let Some(l2c) = self.cache.get_mut(key) {
            if let Some(l2v) = l2c.cache.get(subkeys) {
                return Some(l2v.clone());
            }
        }
        None
    }

    pub fn put(&mut self, key: TypedKey, subkeys: ValueSubkeyRangeSet, value: InspectCacheL2Value) {
        self.cache
            .entry(key)
            .or_insert_with(|| InspectCacheL2::new(L2_CACHE_DEPTH))
            .cache
            .insert(subkeys, value);
    }

    pub fn invalidate(&mut self, key: &TypedKey) {
        self.cache.remove(key);
    }

    pub fn replace_subkey_seq(&mut self, key: &TypedKey, subkey: ValueSubkey, seq: ValueSeqNum) {
        let Some(l2) = self.cache.get_mut(key) else {
            return;
        };

        for entry in &mut l2.cache {
            let Some(idx) = entry.0.idx_of_subkey(subkey) else {
                continue;
            };
            if idx < entry.1.seqs.len() {
                entry.1.seqs[idx] = seq;
            } else if idx > entry.1.seqs.len() {
                panic!(
                    "representational error in l2 inspect cache: {} > {}",
                    idx,
                    entry.1.seqs.len()
                )
            }
        }
    }
}
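
// A minimal, std-only sketch of the two-level cache pattern above: an outer
// map per record key, an inner bounded map per subkey set. Here u64 stands in
// for TypedKey, Vec<u32> for ValueSubkeyRangeSet and the seq list, and
// eviction is simplified to FIFO rather than hashlink's true LRU.

use std::collections::{HashMap, VecDeque};

struct L2 {
    order: VecDeque<Vec<u32>>,            // insertion order, for FIFO eviction
    entries: HashMap<Vec<u32>, Vec<u32>>, // subkey set -> sequence numbers
}

struct TwoLevelCache {
    depth: usize,         // counterpart of L2_CACHE_DEPTH
    l1: HashMap<u64, L2>, // record key -> per-subkey-set cache
}

impl TwoLevelCache {
    fn new(depth: usize) -> Self {
        Self { depth, l1: HashMap::new() }
    }

    fn put(&mut self, key: u64, subkeys: Vec<u32>, seqs: Vec<u32>) {
        let l2 = self.l1.entry(key).or_insert_with(|| L2 {
            order: VecDeque::new(),
            entries: HashMap::new(),
        });
        if !l2.entries.contains_key(&subkeys) {
            if l2.order.len() == self.depth {
                // Evict the oldest subkey-set entry for this record
                if let Some(old) = l2.order.pop_front() {
                    l2.entries.remove(&old);
                }
            }
            l2.order.push_back(subkeys.clone());
        }
        l2.entries.insert(subkeys, seqs);
    }

    fn get(&self, key: u64, subkeys: &[u32]) -> Option<&Vec<u32>> {
        self.l1.get(&key)?.entries.get(subkeys)
    }

    fn invalidate(&mut self, key: u64) {
        self.l1.remove(&key);
    }
}

fn main() {
    let mut cache = TwoLevelCache::new(4);
    cache.put(1, vec![0, 1, 2], vec![5, 5, 7]);
    assert_eq!(cache.get(1, &[0, 1, 2]), Some(&vec![5, 5, 7]));
    cache.invalidate(1);
    assert!(cache.get(1, &[0, 1, 2]).is_none());
}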

@ -0,0 +1,29 @@
use super::*;

/// Information about nodes that cache a local record remotely
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub(in crate::storage_manager) struct PerNodeRecordDetail {
    pub last_set: Timestamp,
    pub last_seen: Timestamp,
    pub subkeys: ValueSubkeyRangeSet,
}

/// Information required to handle locally opened records
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub(in crate::storage_manager) struct LocalRecordDetail {
    /// The last 'safety selection' used when creating/opening this record.
    /// Even when closed, this safety selection applies to re-publication attempts by the system.
    pub safety_selection: SafetySelection,
    /// The nodes that we have seen this record cached on recently
    #[serde(default)]
    pub nodes: HashMap<PublicKey, PerNodeRecordDetail>,
}

impl LocalRecordDetail {
    pub fn new(safety_selection: SafetySelection) -> Self {
        Self {
            safety_selection,
            nodes: Default::default(),
        }
    }
}
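
// A sketch of how per-node bookkeeping like PerNodeRecordDetail might get
// updated when a node accepts a subkey write. Stand-ins only: u64 node ids
// and timestamps, a sorted Vec<u32> for the subkey set; mark_set() is a
// hypothetical helper, not an API from this changeset.

use std::collections::HashMap;

#[derive(Default)]
struct PerNode {
    last_set: u64,     // last time we set a value on this node
    last_seen: u64,    // last time this node answered us
    subkeys: Vec<u32>, // subkeys we believe it holds
}

fn mark_set(nodes: &mut HashMap<u64, PerNode>, node: u64, subkey: u32, now: u64) {
    let e = nodes.entry(node).or_default();
    e.last_set = now;
    e.last_seen = now;
    if !e.subkeys.contains(&subkey) {
        e.subkeys.push(subkey);
        e.subkeys.sort_unstable();
    }
}

fn main() {
    let mut nodes = HashMap::new();
    mark_set(&mut nodes, 42, 0, 1_000_000);
    mark_set(&mut nodes, 42, 3, 2_000_000);
    assert_eq!(nodes[&42].subkeys, vec![0, 3]);
    assert_eq!(nodes[&42].last_set, 2_000_000);
}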

@ -4,6 +4,29 @@
/// This store does not perform any validation on the schema, and all ValueRecordData passed in must have been previously validated.
/// Uses an in-memory store for the records, backed by the TableStore. Subkey data is LRU cached and rotated out by a limits policy,
/// and backed to the TableStore for persistence.
mod inspect_cache;
mod keys;
mod limited_size;
mod local_record_detail;
mod opened_record;
mod record;
mod record_data;
mod record_store_limits;
mod remote_record_detail;
mod watch;

pub(super) use inspect_cache::*;
pub(super) use keys::*;
pub(super) use limited_size::*;
pub(super) use local_record_detail::*;
pub(super) use opened_record::*;
pub(super) use record::*;
pub(super) use record_data::*;
pub(super) use record_store_limits::*;
pub(super) use remote_record_detail::*;
pub(super) use watch::*;
pub use watch::{WatchParameters, WatchResult};

use super::*;
use hashlink::LruCache;

@ -22,30 +45,6 @@ where
    in_total_storage: bool,
}

/// An individual watch
#[derive(Debug, Clone)]
struct WatchedRecordWatch {
    subkeys: ValueSubkeyRangeSet,
    expiration: Timestamp,
    count: u32,
    target: Target,
    watcher: CryptoKey,
    changed: ValueSubkeyRangeSet,
}

#[derive(Debug, Default, Clone)]
/// A record being watched for changes
struct WatchedRecord {
    /// The list of active watchers
    watchers: Vec<WatchedRecordWatch>,
}

pub(super) enum WatchUpdateMode {
    NoUpdate,
    UpdateAll,
    ExcludeTarget(Target),
}
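
// The enum above is decoded where it is consumed (see update_watched_value()
// further down) into "should we notify at all" plus "which target to skip".
// A standalone rendering of that decode, with a local stand-in for Target:

#[derive(Debug, Clone, PartialEq)]
enum Target {
    NodeId(u64),
}

enum WatchUpdateMode {
    NoUpdate,
    UpdateAll,
    ExcludeTarget(Target),
}

// Same match that update_watched_value() performs: the excluded target is the
// originator of the set, which already knows about the change.
fn decode(mode: WatchUpdateMode) -> (bool, Option<Target>) {
    match mode {
        WatchUpdateMode::NoUpdate => (false, None),
        WatchUpdateMode::UpdateAll => (true, None),
        WatchUpdateMode::ExcludeTarget(target) => (true, Some(target)),
    }
}

fn main() {
    assert_eq!(decode(WatchUpdateMode::UpdateAll), (true, None));
    assert_eq!(
        decode(WatchUpdateMode::ExcludeTarget(Target::NodeId(7))),
        (true, Some(Target::NodeId(7)))
    );
}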

pub(super) struct RecordStore<D>
where
    D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
@ -62,6 +61,8 @@ where
    record_index: LruCache<RecordTableKey, Record<D>>,
    /// The in-memory cache of commonly accessed subkey data so we don't have to keep hitting the db
    subkey_cache: LruCache<SubkeyTableKey, RecordData>,
    /// The in-memory cache of commonly accessed sequence number data so we don't have to keep hitting the db
    inspect_cache: InspectCache,
    /// Total storage space of subkey data inclusive of structures in memory
    subkey_cache_total_size: LimitedSize<usize>,
    /// Total storage space of records in the tabledb inclusive of subkey data and structures
@ -71,21 +72,31 @@ where
    /// The list of records that have changed since last flush to disk (optimization for batched writes)
    changed_records: HashSet<RecordTableKey>,
    /// The list of records being watched for changes
    watched_records: HashMap<RecordTableKey, WatchedRecord>,
    watched_records: HashMap<RecordTableKey, WatchList>,
    /// The list of watched records that have changed values since last notification
    changed_watched_values: HashSet<RecordTableKey>,

    /// A mutex to ensure dead-record purges are not run concurrently
    purge_dead_records_mutex: Arc<AsyncMutex<()>>,
}

/// The result of the do_get_value_operation
#[derive(Default, Debug)]
pub struct SubkeyResult {
#[derive(Default, Clone, Debug)]
pub struct GetResult {
    /// The subkey value if we got one
    pub value: Option<Arc<SignedValueData>>,
    pub opt_value: Option<Arc<SignedValueData>>,
    /// The descriptor if we got a fresh one or empty if no descriptor was needed
    pub descriptor: Option<Arc<SignedValueDescriptor>>,
    pub opt_descriptor: Option<Arc<SignedValueDescriptor>>,
}

/// The result of the do_inspect_value_operation
#[derive(Default, Clone, Debug)]
pub struct InspectResult {
    /// The actual in-schema subkey range being reported on
    pub subkeys: ValueSubkeyRangeSet,
    /// The sequence map
    pub seqs: Vec<ValueSeqNum>,
    /// The descriptor if we got a fresh one or empty if no descriptor was needed
    pub opt_descriptor: Option<Arc<SignedValueDescriptor>>,
}
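
// How a caller might interpret the opt_value/opt_descriptor combinations of
// GetResult. A sketch with String stand-ins for the Arc<SignedValueData> and
// Arc<SignedValueDescriptor> payloads; not the actual veilid-core API surface.

struct GetResultSketch {
    opt_value: Option<String>,
    opt_descriptor: Option<String>,
}

fn describe(r: &GetResultSketch) -> &'static str {
    match (&r.opt_value, &r.opt_descriptor) {
        (Some(_), Some(_)) => "subkey value plus a fresh descriptor",
        (Some(_), None) => "subkey value; no descriptor was wanted",
        (None, Some(_)) => "subkey unwritten, but here is the descriptor",
        (None, None) => "subkey unwritten and no descriptor was wanted",
    }
}

fn main() {
    let r = GetResultSketch {
        opt_value: None,
        opt_descriptor: Some("schema descriptor".into()),
    };
    println!("{}", describe(&r));
}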

impl<D> RecordStore<D>
@ -109,6 +120,7 @@ where
            subkey_table: None,
            record_index: LruCache::new(limits.max_records.unwrap_or(usize::MAX)),
            subkey_cache: LruCache::new(subkey_cache_size),
            inspect_cache: InspectCache::new(subkey_cache_size),
            subkey_cache_total_size: LimitedSize::new(
                "subkey_cache_total_size",
                0,
@ -279,6 +291,16 @@ where
            log_stor!(error "dead record found in index: {:?}", dr.key);
        }

        // Record should have no watches now
        if self.watched_records.contains_key(&dr.key) {
            log_stor!(error "dead record found in watches: {:?}", dr.key);
        }

        // Record should have no watch changes now
        if self.changed_watched_values.contains(&dr.key) {
            log_stor!(error "dead record found in watch changes: {:?}", dr.key);
        }

        // Delete record
        if let Err(e) = rt_xact.delete(0, &dr.key.bytes()) {
            log_stor!(error "record could not be deleted: {}", e);
@ -399,13 +421,27 @@ where
            apibail_key_not_found!(key);
        };

        self.add_dead_record(rtk, record);
        // Remove watches
        self.watched_records.remove(&rtk);

        // Remove watch changes
        self.changed_watched_values.remove(&rtk);

        // Invalidate inspect cache for this key
        self.inspect_cache.invalidate(&rtk.key);

        // Remove from table store immediately
        self.add_dead_record(rtk, record);
        self.purge_dead_records(false).await;

        Ok(())
    }

    pub(super) fn contains_record(&mut self, key: TypedKey) -> bool {
        let rtk = RecordTableKey { key };
        self.record_index.contains_key(&rtk)
    }

    pub(super) fn with_record<R, F>(&mut self, key: TypedKey, f: F) -> Option<R>
    where
        F: FnOnce(&Record<D>) -> R,
@ -471,7 +507,7 @@ where
        key: TypedKey,
        subkey: ValueSubkey,
        want_descriptor: bool,
    ) -> VeilidAPIResult<Option<SubkeyResult>> {
    ) -> VeilidAPIResult<Option<GetResult>> {
        // Get record from index
        let Some((subkey_count, has_subkey, opt_descriptor)) = self.with_record(key, |record| {
            (
@ -496,9 +532,9 @@ where
        // See if we have this subkey stored
        if !has_subkey {
            // If not, return no value but maybe with descriptor
            return Ok(Some(SubkeyResult {
                value: None,
                descriptor: opt_descriptor,
            return Ok(Some(GetResult {
                opt_value: None,
                opt_descriptor,
            }));
        }

@ -509,12 +545,12 @@ where

        // If subkey exists in subkey cache, use that
        let stk = SubkeyTableKey { key, subkey };
        if let Some(record_data) = self.subkey_cache.get_mut(&stk) {
        if let Some(record_data) = self.subkey_cache.get(&stk) {
            let out = record_data.signed_value_data().clone();

            return Ok(Some(SubkeyResult {
                value: Some(out),
                descriptor: opt_descriptor,
            return Ok(Some(GetResult {
                opt_value: Some(out),
                opt_descriptor,
            }));
        }
        // If not in cache, try to pull from table store if it is in our stored subkey set
@ -531,9 +567,9 @@ where
        // Add to cache, do nothing with lru out
        self.add_to_subkey_cache(stk, record_data);

        Ok(Some(SubkeyResult {
            value: Some(out),
            descriptor: opt_descriptor,
        Ok(Some(GetResult {
            opt_value: Some(out),
            opt_descriptor,
        }))
    }
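
// get_subkey() above is a classic read-through lookup: consult the in-memory
// cache, fall back to the backing table, and populate the cache on a miss.
// A synchronous, std-only sketch of that flow, with HashMaps standing in for
// both the LRU cache and the table store:

use std::collections::HashMap;

struct StoreSketch {
    cache: HashMap<(u64, u32), Vec<u8>>, // (record key, subkey) -> data
    table: HashMap<(u64, u32), Vec<u8>>, // persistent tier stand-in
}

impl StoreSketch {
    fn get_subkey(&mut self, key: u64, subkey: u32) -> Option<Vec<u8>> {
        // Fast path: already cached
        if let Some(data) = self.cache.get(&(key, subkey)) {
            return Some(data.clone());
        }
        // Slow path: pull from the table and cache it for next time
        let data = self.table.get(&(key, subkey))?.clone();
        self.cache.insert((key, subkey), data.clone());
        Some(data)
    }
}

fn main() {
    let mut s = StoreSketch { cache: HashMap::new(), table: HashMap::new() };
    s.table.insert((1, 0), b"hello".to_vec());
    assert_eq!(s.get_subkey(1, 0).as_deref(), Some(b"hello".as_ref()));
    assert!(s.cache.contains_key(&(1, 0))); // populated by the miss path
}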

@ -542,7 +578,7 @@ where
        key: TypedKey,
        subkey: ValueSubkey,
        want_descriptor: bool,
    ) -> VeilidAPIResult<Option<SubkeyResult>> {
    ) -> VeilidAPIResult<Option<GetResult>> {
        // Peek record from index
        let Some((subkey_count, has_subkey, opt_descriptor)) = self.peek_record(key, |record| {
            (
@ -567,9 +603,9 @@ where
        // See if we have this subkey stored
        if !has_subkey {
            // If not, return no value but maybe with descriptor
            return Ok(Some(SubkeyResult {
                value: None,
                descriptor: opt_descriptor,
            return Ok(Some(GetResult {
                opt_value: None,
                opt_descriptor,
            }));
        }

@ -583,9 +619,9 @@ where
        if let Some(record_data) = self.subkey_cache.peek(&stk) {
            let out = record_data.signed_value_data().clone();

            return Ok(Some(SubkeyResult {
                value: Some(out),
                descriptor: opt_descriptor,
            return Ok(Some(GetResult {
                opt_value: Some(out),
                opt_descriptor,
            }));
        }
        // If not in cache, try to pull from table store if it is in our stored subkey set
@ -599,9 +635,9 @@ where

        let out = record_data.signed_value_data().clone();

        Ok(Some(SubkeyResult {
            value: Some(out),
            descriptor: opt_descriptor,
        Ok(Some(GetResult {
            opt_value: Some(out),
            opt_descriptor,
        }))
    }

@ -609,20 +645,30 @@ where
        &mut self,
        key: TypedKey,
        subkey: ValueSubkey,
        opt_ignore_target: Option<Target>,
        watch_update_mode: WatchUpdateMode,
    ) {
        let (do_update, opt_ignore_target) = match watch_update_mode {
            WatchUpdateMode::NoUpdate => (false, None),
            WatchUpdateMode::UpdateAll => (true, None),
            WatchUpdateMode::ExcludeTarget(target) => (true, Some(target)),
        };
        if !do_update {
            return;
        }

        let rtk = RecordTableKey { key };
        let Some(wr) = self.watched_records.get_mut(&rtk) else {
            return;
        };

        // Update all watchers
        let mut changed = false;
        for w in &mut wr.watchers {
        for w in &mut wr.watches {
            // If this watcher is watching the changed subkey then add to the watcher's changed list
            // Don't bother marking changes for value sets coming from the same watching node/target because they
            // are already going to be aware of the changes in that case
            if Some(&w.target) != opt_ignore_target.as_ref()
                && w.subkeys.contains(subkey)
            if Some(&w.params.target) != opt_ignore_target.as_ref()
                && w.params.subkeys.contains(subkey)
                && w.changed.insert(subkey)
            {
                changed = true;
@ -713,6 +759,13 @@ where
            .await
            .map_err(VeilidAPIError::internal)?;

        // Write to inspect cache
        self.inspect_cache.replace_subkey_seq(
            &stk.key,
            subkey,
            subkey_record_data.signed_value_data().value_data().seq(),
        );

        // Write to subkey cache
        self.add_to_subkey_cache(stk, subkey_record_data);

@ -726,34 +779,239 @@ where
        // Update storage space
        self.total_storage_space.commit().unwrap();

        // Update watched value

        let (do_update, opt_ignore_target) = match watch_update_mode {
            WatchUpdateMode::NoUpdate => (false, None),
            WatchUpdateMode::UpdateAll => (true, None),
            WatchUpdateMode::ExcludeTarget(target) => (true, Some(target)),
        };
        if do_update {
            self.update_watched_value(key, subkey, opt_ignore_target)
                .await;
        }
        // Send updates to watchers
        self.update_watched_value(key, subkey, watch_update_mode)
            .await;

        Ok(())
    }

    /// Add a record watch for changes
    pub async fn watch_record(
    pub async fn inspect_record(
        &mut self,
        key: TypedKey,
        subkeys: ValueSubkeyRangeSet,
        mut expiration: Timestamp,
        count: u32,
        target: Target,
        watcher: CryptoKey,
    ) -> VeilidAPIResult<Option<Timestamp>> {
        // If subkeys is empty or count is zero then we're cancelling a watch completely
        if subkeys.is_empty() || count == 0 {
            return self.cancel_watch(key, target, watcher).await;
        want_descriptor: bool,
    ) -> VeilidAPIResult<Option<InspectResult>> {
        // Get subkey table
        let Some(subkey_table) = self.subkey_table.clone() else {
            apibail_internal!("record store not initialized");
        };

        // Get record from index
        let Some((subkeys, opt_descriptor)) = self.with_record(key, |record| {
            // Get number of subkeys from schema and ensure we are getting the
            // right number of sequence numbers between that and what we asked for
            let truncated_subkeys = record
                .schema()
                .truncate_subkeys(&subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
            (
                truncated_subkeys,
                if want_descriptor {
                    Some(record.descriptor().clone())
                } else {
                    None
                },
            )
        }) else {
            // Record not available
            return Ok(None);
        };

        // Check if we can return some subkeys
        if subkeys.is_empty() {
            apibail_invalid_argument!("subkeys set does not overlap schema", "subkeys", subkeys);
        }

        // See if we have this inspection cached
        if let Some(icv) = self.inspect_cache.get(&key, &subkeys) {
            return Ok(Some(InspectResult {
                subkeys,
                seqs: icv.seqs,
                opt_descriptor,
            }));
        }

        // Build sequence number list to return
        #[allow(clippy::unnecessary_cast)]
        let mut seqs = Vec::with_capacity(subkeys.len() as usize);
        for subkey in subkeys.iter() {
            let stk = SubkeyTableKey { key, subkey };
            let seq = if let Some(record_data) = self.subkey_cache.peek(&stk) {
                record_data.signed_value_data().value_data().seq()
            } else {
                // If not in cache, try to pull from table store if it is in our stored subkey set
                // XXX: This would be better if it didn't have to pull the whole record data to get the seq.
                if let Some(record_data) = subkey_table
                    .load_json::<RecordData>(0, &stk.bytes())
                    .await
                    .map_err(VeilidAPIError::internal)?
                {
                    record_data.signed_value_data().value_data().seq()
                } else {
                    // Subkey not written to
                    ValueSubkey::MAX
                }
            };
            seqs.push(seq)
        }

        // Save seqs cache
        self.inspect_cache.put(
            key,
            subkeys.clone(),
            InspectCacheL2Value { seqs: seqs.clone() },
        );

        Ok(Some(InspectResult {
            subkeys,
            seqs,
            opt_descriptor,
        }))
    }
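
// The seq-list construction above reports ValueSubkey::MAX as a "never
// written" sentinel. A compact, self-contained sketch of that shape, using
// u32 for both subkeys and sequence numbers; names are illustrative only.

fn build_seqs(requested: &[u32], written: &[(u32, u32)]) -> Vec<u32> {
    requested
        .iter()
        .map(|sk| {
            written
                .iter()
                .find(|(s, _)| s == sk)
                .map(|(_, seq)| *seq)
                .unwrap_or(u32::MAX) // sentinel: subkey not written to
        })
        .collect()
}

fn main() {
    // Subkeys 0 and 2 written with seqs 4 and 9; subkey 1 untouched.
    assert_eq!(
        build_seqs(&[0, 1, 2], &[(0, 4), (2, 9)]),
        vec![4, u32::MAX, 9]
    );
}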

    pub async fn _change_existing_watch(
        &mut self,
        key: TypedKey,
        params: WatchParameters,
        watch_id: u64,
    ) -> VeilidAPIResult<WatchResult> {
        if params.count == 0 {
            apibail_internal!("cancel watch should not have gotten here");
        }
        if params.expiration.as_u64() == 0 {
            apibail_internal!("zero expiration should have been resolved to max by now");
        }
        // Get the watch list for this record
        let rtk = RecordTableKey { key };
        let Some(watch_list) = self.watched_records.get_mut(&rtk) else {
            // No watches, nothing to change
            return Ok(WatchResult::Rejected);
        };

        // Check each watch to see if we have an exact match for the id to change
        for w in &mut watch_list.watches {
            // If the watch id doesn't match, then we're not updating
            // Also do not allow the watcher key to change
            if w.id == watch_id && w.params.watcher == params.watcher {
                // Updating an existing watch
                w.params = params;
                return Ok(WatchResult::Changed {
                    expiration: w.params.expiration,
                });
            }
        }

        // No existing watch found
        Ok(WatchResult::Rejected)
    }

    pub async fn _create_new_watch(
        &mut self,
        key: TypedKey,
        params: WatchParameters,
        member_check: Box<dyn Fn(PublicKey) -> bool + Send>,
    ) -> VeilidAPIResult<WatchResult> {
        // Generate a record-unique watch id > 0
        let rtk = RecordTableKey { key };
        let mut id = 0;
        while id == 0 {
            id = get_random_u64();
        }
        if let Some(watched_record) = self.watched_records.get_mut(&rtk) {
            // Make sure it doesn't match any other id (unlikely, but let's be certain)
            'x: loop {
                for w in &mut watched_record.watches {
                    if w.id == id {
                        loop {
                            id = id.overflowing_add(1).0;
                            if id != 0 {
                                break;
                            }
                        }
                        continue 'x;
                    }
                }
                break;
            }
        }

        // Calculate watch limits
        let mut watch_count = 0;
        let mut target_watch_count = 0;

        let is_member = member_check(params.watcher);

        let rtk = RecordTableKey { key };
        if let Some(watched_record) = self.watched_records.get_mut(&rtk) {
            // Total up the number of watches for this key
            for w in &mut watched_record.watches {
                // See if this watch should be counted toward any limits
                let count_watch = if is_member {
                    // If the watcher is a member of the schema, then consider the total per-watcher key
                    w.params.watcher == params.watcher
                } else {
                    // If the watcher is not a member of the schema, then check if this watch is an anonymous watch that contributes to the per-record key total
                    !member_check(w.params.watcher)
                };

                // For any watch, if the target matches ours, also tally that separately
                // If the watcher is a member of the schema, then consider the total per-target-per-watcher key
                // If the watcher is not a member of the schema, then it is an anonymous watch and the total is per-target-per-record key
                if count_watch {
                    watch_count += 1;
                    if w.params.target == params.target {
                        target_watch_count += 1;
                    }
                }
            }
        }

        // For members, no more than one watch per target per watcher per record
        // For anonymous, no more than one watch per target per record
        if target_watch_count > 0 {
            // Too many watches
            return Ok(WatchResult::Rejected);
        }

        // Check watch table for limits
        let watch_limit = if is_member {
            self.limits.member_watch_limit
        } else {
            self.limits.public_watch_limit
        };
        if watch_count >= watch_limit {
            return Ok(WatchResult::Rejected);
        }

        // Ok this is an acceptable new watch, add it
        let watch_list = self.watched_records.entry(rtk).or_default();
        let expiration = params.expiration;
        watch_list.watches.push(Watch {
            params,
            id,
            changed: ValueSubkeyRangeSet::new(),
        });
        Ok(WatchResult::Created { id, expiration })
    }
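
// A standalone rendering of the id-generation loop in _create_new_watch():
// draw a nonzero u64, then bump past any collisions, wrapping around but
// never landing on zero. The xorshift generator below is a deterministic
// stand-in for get_random_u64(), and the collision loop here is condensed;
// the code above rescans the whole watch list after each bump.

fn xorshift64(state: &mut u64) -> u64 {
    *state ^= *state << 13;
    *state ^= *state >> 7;
    *state ^= *state << 17;
    *state
}

fn unique_nonzero_id(existing: &[u64], state: &mut u64) -> u64 {
    let mut id = 0u64;
    while id == 0 {
        id = xorshift64(state);
    }
    while existing.contains(&id) {
        id = id.wrapping_add(1);
        if id == 0 {
            id = 1; // watch ids must stay nonzero
        }
    }
    id
}

fn main() {
    let mut state = 0x2545_F491_4F6C_DD1D_u64;
    let existing = vec![3, 4, 5];
    let id = unique_nonzero_id(&existing, &mut state);
    assert!(id != 0 && !existing.contains(&id));
}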

    /// Add or update an inbound record watch for changes
    #[allow(clippy::too_many_arguments)]
    pub async fn watch_record(
        &mut self,
        key: TypedKey,
        mut params: WatchParameters,
        opt_watch_id: Option<u64>,
    ) -> VeilidAPIResult<WatchResult> {
        // If count is zero then we're cancelling a watch completely
        if params.count == 0 {
            if let Some(watch_id) = opt_watch_id {
                let cancelled = self.cancel_watch(key, watch_id, params.watcher).await?;
                if cancelled {
                    return Ok(WatchResult::Cancelled);
                }
                return Ok(WatchResult::Rejected);
            }
            apibail_internal!("shouldn't have let a None watch id get here");
        }

        // See if expiration timestamp is too far in the future or not far enough in the future
@ -761,114 +1019,67 @@ where
        let max_ts = cur_ts + self.limits.max_watch_expiration.as_u64();
        let min_ts = cur_ts + self.limits.min_watch_expiration.as_u64();

        if expiration.as_u64() == 0 || expiration.as_u64() > max_ts {
        if params.expiration.as_u64() == 0 || params.expiration.as_u64() > max_ts {
            // Clamp expiration max time (or set zero expiration to max)
            expiration = Timestamp::new(max_ts);
            params.expiration = Timestamp::new(max_ts);
        } else if expiration.as_u64() < min_ts {
        } else if params.expiration.as_u64() < min_ts {
            // Don't add watches with too low of an expiration time
            return Ok(None);
        }

        // Get the record being watched
        let Some(is_member) = self.with_record(key, |record| {
            // Check if the watcher specified is a schema member
            let schema = record.schema();
            (*record.owner()) == watcher || schema.is_member(&watcher)
        }) else {
            // Record not found
            return Ok(None);
        };

        // See if we are updating an existing watch
        // with the watcher matched on target
        let mut watch_count = 0;
        let rtk = RecordTableKey { key };
        if let Some(watch) = self.watched_records.get_mut(&rtk) {
            for w in &mut watch.watchers {
                if w.watcher == watcher {
                    watch_count += 1;

                    // Only one watch for an anonymous watcher
                    // Allow members to have one watch per target
                    if !is_member || w.target == target {
                        // Updating an existing watch
                        w.subkeys = subkeys;
                        w.expiration = expiration;
                        w.count = count;
                        return Ok(Some(expiration));
                    }
            if let Some(watch_id) = opt_watch_id {
                let cancelled = self.cancel_watch(key, watch_id, params.watcher).await?;
                if cancelled {
                    return Ok(WatchResult::Cancelled);
                }
            }
            return Ok(WatchResult::Rejected);
        }

        // Adding a new watcher to a watch
        // Check watch table for limits
        if is_member {
            // Member watch
            if watch_count >= self.limits.member_watch_limit {
                // Too many watches
                return Ok(None);
            }
        // Make a closure to check for member vs anonymous
        let Some(member_check) = self.with_record(key, |record| {
            let schema = record.schema();
            let owner = *record.owner();
            Box::new(move |watcher| owner == params.watcher || schema.is_member(&watcher))
        }) else {
            // Record not found
            return Ok(WatchResult::Rejected);
        };

        // Create or update depending on if a watch id is specified or not
        if let Some(watch_id) = opt_watch_id {
            self._change_existing_watch(key, params, watch_id).await
        } else {
            // Public watch
            if watch_count >= self.limits.public_watch_limit {
                // Too many watches
                return Ok(None);
            }
            self._create_new_watch(key, params, member_check).await
        }

        // Ok this is an acceptable new watch, add it
        let watch = self.watched_records.entry(rtk).or_default();
        watch.watchers.push(WatchedRecordWatch {
            subkeys,
            expiration,
            count,
            target,
            watcher,
            changed: ValueSubkeyRangeSet::new(),
        });
        Ok(Some(expiration))
    }

    /// Add a record watch for changes
    /// Clear a specific watch for a record
    /// returns true if the watch was found and cancelled
    async fn cancel_watch(
        &mut self,
        key: TypedKey,
        target: Target,
        watcher: CryptoKey,
    ) -> VeilidAPIResult<Option<Timestamp>> {
        // Get the record being watched
        let Some(is_member) = self.with_record(key, |record| {
            // Check if the watcher specified is a schema member
            let schema = record.schema();
            (*record.owner()) == watcher || schema.is_member(&watcher)
        }) else {
            // Record not found
            return Ok(None);
        };

        watch_id: u64,
        watcher: PublicKey,
    ) -> VeilidAPIResult<bool> {
        if watch_id == 0 {
            apibail_internal!("should not have let a zero watch id get here");
        }
        // See if we are cancelling an existing watch
        // with the watcher matched on target
        let rtk = RecordTableKey { key };
        let mut is_empty = false;
        let mut ret_timestamp = None;
        if let Some(watch) = self.watched_records.get_mut(&rtk) {
        let mut ret = false;
        if let Some(watch_list) = self.watched_records.get_mut(&rtk) {
            let mut dead_watcher = None;
            for (wn, w) in watch.watchers.iter_mut().enumerate() {
                if w.watcher == watcher {
                    // Only one watch for an anonymous watcher
                    // Allow members to have one watch per target
                    if !is_member || w.target == target {
                        // Canceling an existing watch
                        dead_watcher = Some(wn);
                        ret_timestamp = Some(w.expiration);
                        break;
                    }
            for (wn, w) in watch_list.watches.iter_mut().enumerate() {
                // Must match the watch id and the watcher key to cancel
                if w.id == watch_id && w.params.watcher == watcher {
                    // Canceling an existing watch
                    dead_watcher = Some(wn);
                    ret = true;
                    break;
                }
            }
            if let Some(dw) = dead_watcher {
                watch.watchers.remove(dw);
                if watch.watchers.is_empty() {
                watch_list.watches.remove(dw);
                if watch_list.watches.is_empty() {
                    is_empty = true;
                }
            }
@ -877,7 +1088,42 @@ where
            self.watched_records.remove(&rtk);
        }

        Ok(ret_timestamp)
        Ok(ret)
    }

    /// Move watches from one store to another
    pub fn move_watches(
        &mut self,
        key: TypedKey,
        in_watch: Option<(WatchList, bool)>,
    ) -> Option<(WatchList, bool)> {
        let rtk = RecordTableKey { key };
        let out = self.watched_records.remove(&rtk);
        if let Some(in_watch) = in_watch {
            self.watched_records.insert(rtk, in_watch.0);
            if in_watch.1 {
                self.changed_watched_values.insert(rtk);
            }
        }
        let is_watched = self.changed_watched_values.remove(&rtk);
        out.map(|r| (r, is_watched))
    }

    /// See if any watched records have expired and clear them out
    pub fn check_watched_records(&mut self) {
        let now = get_aligned_timestamp();
        self.watched_records.retain(|key, watch_list| {
            watch_list.watches.retain(|w| {
                w.params.count != 0 && w.params.expiration > now && !w.params.subkeys.is_empty()
            });
            if watch_list.watches.is_empty() {
                // If we're removing the watched record, drop any changed watch values too
                self.changed_watched_values.remove(key);
                false
            } else {
                true
            }
        });
    }
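
// check_watched_records() above is a two-level retain: drop watches that are
// spent, expired, or empty, then drop records whose watch list emptied. The
// same sweep over simplified types (u64 record keys and timestamps):

use std::collections::HashMap;

struct WatchSketch {
    count: u32,        // remaining change notifications
    expiration: u64,   // absolute expiry timestamp
    subkeys: Vec<u32>, // watched subkeys
}

fn sweep(watched: &mut HashMap<u64, Vec<WatchSketch>>, now: u64) {
    watched.retain(|_key, watches| {
        watches.retain(|w| w.count != 0 && w.expiration > now && !w.subkeys.is_empty());
        !watches.is_empty()
    });
}

fn main() {
    let mut watched = HashMap::new();
    watched.insert(
        1u64,
        vec![
            WatchSketch { count: 1, expiration: 100, subkeys: vec![0] },
            WatchSketch { count: 0, expiration: 100, subkeys: vec![0] }, // spent
        ],
    );
    sweep(&mut watched, 50);
    assert_eq!(watched[&1].len(), 1);
    sweep(&mut watched, 150); // now everything has expired
    assert!(watched.is_empty());
}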

    pub async fn take_value_changes(&mut self, changes: &mut Vec<ValueChangedInfo>) {
@ -887,6 +1133,7 @@ where
            key: TypedKey,
            subkeys: ValueSubkeyRangeSet,
            count: u32,
            watch_id: u64,
        }

        let mut evcis = vec![];
@ -895,31 +1142,38 @@ where
            if let Some(watch) = self.watched_records.get_mut(&rtk) {
                // Process watch notifications
                let mut dead_watchers = vec![];
                for (wn, w) in watch.watchers.iter_mut().enumerate() {
                for (wn, w) in watch.watches.iter_mut().enumerate() {
                    // Get the subkeys that have changed
                    let subkeys = w.changed.clone();

                    // If no subkeys on this watcher have changed then skip it
                    if subkeys.is_empty() {
                        continue;
                    }

                    w.changed.clear();

                    // Reduce the count of changes sent
                    // if count goes to zero mark this watcher dead
                    w.count -= 1;
                    let count = w.count;
                    w.params.count -= 1;
                    let count = w.params.count;
                    if count == 0 {
                        dead_watchers.push(wn);
                    }

                    evcis.push(EarlyValueChangedInfo {
                        target: w.target,
                        target: w.params.target,
                        key: rtk.key,
                        subkeys,
                        count,
                        watch_id: w.id,
                    });
                }

                // Remove in reverse so we don't have to offset the index to remove the right key
                for dw in dead_watchers.iter().rev().copied() {
                    watch.watchers.remove(dw);
                    if watch.watchers.is_empty() {
                    watch.watches.remove(dw);
                    if watch.watches.is_empty() {
                        empty_watched_records.push(rtk);
                    }
                }
@ -935,7 +1189,7 @@ where
                log_stor!(error "first subkey should exist for value change notification");
                continue;
            };
            let subkey_result = match self.get_subkey(evci.key, first_subkey, false).await {
            let get_result = match self.get_subkey(evci.key, first_subkey, false).await {
                Ok(Some(skr)) => skr,
                Ok(None) => {
                    log_stor!(error "subkey should have data for value change notification");
@ -946,7 +1200,7 @@ where
                    continue;
                }
            };
            let Some(value) = subkey_result.value else {
            let Some(value) = get_result.opt_value else {
                log_stor!(error "first subkey should have had value for value change notification");
                continue;
            };
@ -956,6 +1210,7 @@ where
                key: evci.key,
                subkeys: evci.subkeys,
                count: evci.count,
                watch_id: evci.watch_id,
                value,
            });
        }
@ -1013,8 +1268,16 @@ where
    }

    pub fn debug_record_info(&self, key: TypedKey) -> String {
        self.peek_record(key, |r| format!("{:#?}", r))
            .unwrap_or("Not found".to_owned())
        let record_info = self
            .peek_record(key, |r| format!("{:#?}", r))
            .unwrap_or("Not found".to_owned());
        let watched_record = match self.watched_records.get(&RecordTableKey { key }) {
            Some(w) => {
                format!("Remote Watches: {:#?}", w)
            }
            None => "No remote watches".to_owned(),
        };
        format!("{}\n{}\n", record_info, watched_record)
    }

    pub async fn debug_record_subkey_info(&self, key: TypedKey, subkey: ValueSubkey) -> String {
@ -2,6 +2,8 @@ use super::*;

#[derive(Clone, Debug)]
pub(in crate::storage_manager) struct ActiveWatch {
    /// The watch id returned from the watch node
    pub id: u64,
    /// The expiration of a successful watch
    pub expiration_ts: Timestamp,
    /// Which node accepted the watch
@ -42,10 +44,16 @@ impl OpenedRecord {
    pub fn writer(&self) -> Option<&KeyPair> {
        self.writer.as_ref()
    }
    pub fn set_writer(&mut self, writer: Option<KeyPair>) {
        self.writer = writer;
    }

    pub fn safety_selection(&self) -> SafetySelection {
        self.safety_selection
    }
    pub fn set_safety_selection(&mut self, safety_selection: SafetySelection) {
        self.safety_selection = safety_selection;
    }

    pub fn set_active_watch(&mut self, active_watch: ActiveWatch) {
        self.active_watch = Some(active_watch);
@ -23,7 +23,7 @@ where
        detail: D,
    ) -> VeilidAPIResult<Self> {
        let schema = descriptor.schema()?;
        let subkey_count = schema.subkey_count();
        let subkey_count = schema.max_subkey() as usize + 1;
        Ok(Self {
            descriptor,
            subkey_count,
Some files were not shown because too many files have changed in this diff.