Merge branch 'dht-work' into 'main'

Initial DHT WatchValue support

Closes #347 and #349

See merge request veilid/veilid!250
Christien Rioux 2024-01-21 21:08:31 +00:00
commit 6f184f7326
119 changed files with 7307 additions and 2958 deletions

Cargo.lock (generated, 1044 changes): file diff suppressed because it is too large.


@@ -105,6 +105,8 @@ code-linux:
ARG BASE=local
IF [ "$BASE" = "local" ]
FROM +build-linux-cache
ELSE IF [ "$BASE" = "uncached" ]
FROM +deps-linux
ELSE
ARG CI_REGISTRY_IMAGE=registry.gitlab.com/veilid/veilid
FROM $CI_REGISTRY_IMAGE/build-cache:latest
@@ -128,12 +130,12 @@ clippy:
build-release:
FROM +code-linux
RUN cargo build --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/release AS LOCAL ./target/release
SAVE ARTIFACT ./target/release AS LOCAL ./target/earthly/release
build:
FROM +code-linux
RUN cargo build -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/debug AS LOCAL ./target/debug
SAVE ARTIFACT ./target/debug AS LOCAL ./target/earthly/debug
build-linux-amd64:
FROM +code-linux


@@ -88,6 +88,9 @@ core:
remote_max_records: 65536
remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%
remote_max_storage_space_mb: 0
public_watch_limit: 32
member_watch_limit: 8
max_watch_expiration_ms: 600000
upnp: true
detect_address_changes: true
restricted_nat_retries: 0


@@ -255,6 +255,9 @@ dht:
remote_max_records: 65536
remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%
remote_max_storage_space_mb: 0
public_watch_limit: 32
member_watch_limit: 8
max_watch_expiration_ms: 600000
```
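These three keys bound the server side of WatchValue. Assuming `public_watch_limit` caps active watches per record for anonymous watchers, `member_watch_limit` caps watches per schema member, and `max_watch_expiration_ms` bounds any watch's lifetime (600000 ms is 10 minutes), a watch request gets clamped roughly like this sketch (illustrative only, not veilid-core's implementation):

```rust
// Illustrative sketch, not veilid-core's code. Assumes public_watch_limit
// applies per record for anonymous watchers and member_watch_limit applies
// per schema member.
const PUBLIC_WATCH_LIMIT: usize = 32;
const MEMBER_WATCH_LIMIT: usize = 8;
const MAX_WATCH_EXPIRATION_MS: u64 = 600_000; // 10 minutes

/// Returns the granted expiration in ms, or None if the watch is refused.
fn accept_watch(active_watches: usize, is_member: bool, requested_ms: u64) -> Option<u64> {
    let limit = if is_member { MEMBER_WATCH_LIMIT } else { PUBLIC_WATCH_LIMIT };
    if active_watches >= limit {
        return None; // this record already has too many watches
    }
    // Never grant more than the configured maximum lifetime
    Some(requested_ms.min(MAX_WATCH_EXPIRATION_MS))
}
```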
#### core:network:tls


@@ -34,12 +34,12 @@ cursive = { git = "https://gitlab.com/veilid/cursive.git", default-features = fa
"toml",
"ansi",
] }
cursive-flexi-logger-view = { git = "https://gitlab.com/veilid/cursive-flexi-logger-view.git" }
cursive_buffered_backend = { git = "https://gitlab.com/veilid/cursive-buffered-backend.git" }
# cursive-multiplex = "0.6.0"
# cursive_tree_view = "0.6.0"
cursive_table_view = "0.14.0"
arboard = "3.2.1"
arboard = "3.3.0"
# cursive-tabs = "0.5.0"
clap = { version = "4", features = ["derive"] }
directories = "^5"
@@ -55,7 +55,7 @@ flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
thiserror = "^1"
crossbeam-channel = "^0"
hex = "^0"
veilid-tools = { version = "0.2.5", path = "../veilid-tools" }
veilid-tools = { version = "0.2.5", path = "../veilid-tools", default-features = false}
json = "^0"
stop-token = { version = "^0", default-features = false }
@@ -63,7 +63,10 @@ flume = { version = "^0", features = ["async"] }
data-encoding = { version = "^2" }
indent = { version = "0.1.1" }
chrono = "0.4.26"
chrono = "0.4.31"
owning_ref = "0.4.1"
unicode-width = "0.1.11"
lru = "0.10.1"
[dev-dependencies]
serial_test = "^2"


@@ -0,0 +1,633 @@
use std::collections::VecDeque;
use std::ops::Deref;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard};
use cursive::align::*;
use cursive::theme::StyleType;
use cursive::utils::lines::spans::{LinesIterator, Row};
use cursive::utils::markup::StyledString;
use cursive::view::{SizeCache, View};
use cursive::{Printer, Vec2, With, XY};
use owning_ref::{ArcRef, OwningHandle};
use unicode_width::UnicodeWidthStr;
// Content type used internally for caching and storage
type ContentType = VecDeque<StyledString>;
type InnerContentType = Arc<ContentType>;
type CacheType = StyledString;
type InnerCacheType = Arc<StyledString>;
/// A reference to the text content.
///
/// This can be deref'ed into a [`VecDeque`] of [`StyledString`] lines.
///
/// [`StyledString`]: ../utils/markup/type.StyledString.html
///
/// This keeps the content locked. Do not store this!
pub struct TextContentRef {
_handle: OwningHandle<ArcRef<Mutex<TextContentInner>>, MutexGuard<'static, TextContentInner>>,
// We also need to keep a copy of Arc so `deref` can return
// a reference to the `StyledString`
data: Arc<VecDeque<StyledString>>,
}
impl Deref for TextContentRef {
type Target = VecDeque<StyledString>;
fn deref(&self) -> &VecDeque<StyledString> {
self.data.as_ref()
}
}
/// Provides access to the content of a [`CachedTextView`].
///
/// [`CachedTextView`]: struct.CachedTextView.html
///
/// Cloning this object will still point to the same content.
///
/// # Examples
///
/// ```rust
/// # use std::collections::VecDeque;
/// # use cursive::utils::markup::StyledString;
/// # use cursive_cached_text_view::{CachedTextView, TextContent};
/// let content = TextContent::new(VecDeque::from([StyledString::from("content")]));
/// let view = CachedTextView::new_with_content(content.clone(), 5, None);
///
/// // Later, possibly in a different thread
/// content.set_content(VecDeque::from([StyledString::from("new content")]));
/// assert!(view.get_content()[0].source().contains("new"));
/// ```
#[derive(Clone)]
pub struct TextContent {
content: Arc<Mutex<TextContentInner>>,
}
#[allow(dead_code)]
impl TextContent {
/// Creates a new text content around the given value.
///
/// Parses the given value.
pub fn new<S>(content: S) -> Self
where
S: Into<ContentType>,
{
let content = Arc::new(content.into());
TextContent {
content: Arc::new(Mutex::new(TextContentInner {
content_value: content,
content_cache: Arc::new(CacheType::default()),
size_cache: None,
})),
}
}
/// Replaces the content with the given value.
pub fn set_content<S>(&self, content: S)
where
S: Into<ContentType>,
{
self.with_content(|c| {
*c = content.into();
});
}
/// Append `line` to the end of a `TextView`.
pub fn append_line<S>(&self, line: S)
where
S: Into<StyledString>,
{
self.with_content(|c| {
c.push_back(line.into());
})
}
/// Append `lines` to the end of a `TextView`.
pub fn append_lines<I, S>(&self, lines: S)
where
S: Iterator<Item = I>,
I: Into<StyledString>,
{
self.with_content(|c| {
for line in lines {
c.push_back(line.into());
}
})
}
/// Remove lines from the beginning until we have no more than 'count' from the end
pub fn resize_back(&self, count: usize) {
if self.get_content().len() <= count {
return;
}
self.with_content(|c| {
while c.len() > count {
c.remove(0);
}
})
}
/// Remove lines from the end until we have no more than 'count' from the beginning
pub fn resize_front(&self, count: usize) {
if self.get_content().len() <= count {
return;
}
self.with_content(|c| {
while c.len() > count {
c.remove(c.len() - 1);
}
})
}
/// Returns a reference to the content.
///
/// This locks the data while the returned value is alive,
/// so don't keep it too long.
pub fn get_content(&self) -> TextContentRef {
TextContentInner::get_content(&self.content)
}
/// Apply the given closure to the inner content, and bust the cache afterward.
pub fn with_content<F, O>(&self, f: F) -> O
where
F: FnOnce(&mut ContentType) -> O,
{
self.with_content_inner(|c| f(Arc::make_mut(&mut c.content_value)))
}
/// Apply the given closure to the inner content, and bust the cache afterward.
fn with_content_inner<F, O>(&self, f: F) -> O
where
F: FnOnce(&mut TextContentInner) -> O,
{
let mut content = self.content.lock().unwrap();
let out = f(&mut content);
content.size_cache = None;
out
}
}
/// Internal representation of the content for a `CachedTextView`.
///
/// This is a list of `StyledString` lines, plus a cached flattened copy.
///
/// Can be shared (through an `Arc<Mutex>`).
struct TextContentInner {
// content: String,
content_value: InnerContentType,
content_cache: InnerCacheType,
// We keep the cache here so it can be busted when we change the content.
size_cache: Option<XY<SizeCache>>,
}
impl TextContentInner {
/// From a shareable content (Arc + Mutex), return a `TextContentRef` that keeps the mutex locked while it is alive.
fn get_content(content: &Arc<Mutex<TextContentInner>>) -> TextContentRef {
let arc_ref: ArcRef<Mutex<TextContentInner>> = ArcRef::new(Arc::clone(content));
let _handle =
OwningHandle::new_with_fn(arc_ref, |mutex| unsafe { (*mutex).lock().unwrap() });
let data = Arc::clone(&_handle.content_value);
TextContentRef { _handle, data }
}
fn is_cache_valid(&self, size: Vec2) -> bool {
match self.size_cache {
None => false,
Some(ref last) => last.x.accept(size.x) && last.y.accept(size.y),
}
}
fn get_cache(&self) -> &InnerCacheType {
&self.content_cache
}
}
/// A simple view showing a fixed text.
///
/// # Examples
///
/// ```rust
/// use std::collections::VecDeque;
/// use cursive::utils::markup::StyledString;
/// use cursive::Cursive;
/// use cursive_cached_text_view::CachedTextView;
/// let mut siv = Cursive::new();
///
/// siv.add_layer(CachedTextView::new(
///     VecDeque::from([StyledString::from("Hello world!")]),
///     5,
///     None,
/// ));
/// ```
pub struct CachedTextView {
cache: TinyCache<usize, Vec<Row>>,
content: TextContent,
align: Align,
style: StyleType,
// True if we can wrap long lines.
wrap: bool,
// Maximum number of lines to keep while appending
max_lines: Option<usize>,
// Desired width, computed during the last layout pass
width: Option<usize>,
}
#[allow(dead_code)]
impl CachedTextView {
/// Creates a new TextView with the given content.
pub fn new<S>(content: S, cache_size: usize, max_lines: Option<usize>) -> Self
where
S: Into<ContentType>,
{
Self::new_with_content(TextContent::new(content), cache_size, max_lines)
}
/// Creates a new TextView using the given `TextContent`.
///
/// If you kept a clone of the given content, you'll be able to update it
/// remotely.
///
/// # Examples
///
/// ```rust
/// # use std::collections::VecDeque;
/// # use cursive::utils::markup::StyledString;
/// # use cursive_cached_text_view::{TextContent, CachedTextView};
/// let content = TextContent::new(VecDeque::from([StyledString::from("content")]));
/// let view = CachedTextView::new_with_content(content.clone(), 5, None);
///
/// // Later, possibly in a different thread
/// content.set_content(VecDeque::from([StyledString::from("new content")]));
/// assert!(view.get_content()[0].source().contains("new"));
/// ```
pub fn new_with_content(
content: TextContent,
cache_size: usize,
max_lines: Option<usize>,
) -> Self {
CachedTextView {
cache: TinyCache::new(cache_size),
content,
style: StyleType::default(),
wrap: true,
align: Align::top_left(),
width: None,
max_lines,
}
}
/// Creates a new empty `TextView`.
pub fn empty(cache_size: usize, max_lines: Option<usize>) -> Self {
CachedTextView::new(ContentType::default(), cache_size, max_lines)
}
/// Sets the style for the content.
pub fn set_style<S: Into<StyleType>>(&mut self, style: S) {
self.cache.clear();
self.style = style.into();
}
/// Sets the style for the entire content.
///
/// Chainable variant.
#[must_use]
pub fn style<S: Into<StyleType>>(self, style: S) -> Self {
self.with(|s| s.set_style(style))
}
/// Disables content wrap for this view.
///
/// This may be useful if you want horizontal scrolling.
#[must_use]
pub fn no_wrap(self) -> Self {
self.with(|s| s.set_content_wrap(false))
}
/// Controls content wrap for this view.
///
/// If `true` (the default), text will wrap long lines when needed.
pub fn set_content_wrap(&mut self, wrap: bool) {
self.cache.clear();
self.wrap = wrap;
}
/// Sets the horizontal alignment for this view.
#[must_use]
pub fn h_align(mut self, h: HAlign) -> Self {
self.align.h = h;
self
}
/// Sets the vertical alignment for this view.
#[must_use]
pub fn v_align(mut self, v: VAlign) -> Self {
self.align.v = v;
self
}
/// Sets the alignment for this view.
#[must_use]
pub fn align(mut self, a: Align) -> Self {
self.align = a;
self
}
/// Center the text horizontally and vertically inside the view.
#[must_use]
pub fn center(mut self) -> Self {
self.align = Align::center();
self
}
/// Replace the text in this view.
///
/// Chainable variant.
#[must_use]
pub fn content<S>(self, content: S) -> Self
where
S: Into<ContentType>,
{
self.with(|s| s.set_content(content))
}
/// Replace the text in this view.
pub fn set_content<S>(&mut self, content: S)
where
S: Into<ContentType>,
{
self.cache.clear();
self.content.set_content(content);
}
/// Append `content` to the end of a `TextView`.
pub fn append_line<S>(&mut self, content: S)
where
S: Into<StyledString>,
{
self.cache.clear();
self.content.append_line(content);
if let Some(max_lines) = self.max_lines {
self.content.resize_back(max_lines);
}
}
/// Append `content` lines to the end of a `TextView`.
pub fn append_lines<S, I>(&mut self, content: I)
where
I: Iterator<Item = S>,
S: Into<StyledString>,
{
self.cache.clear();
self.content.append_lines(content);
if let Some(max_lines) = self.max_lines {
self.content.resize_back(max_lines);
}
}
/// Returns the current text in this view.
pub fn get_content(&self) -> TextContentRef {
TextContentInner::get_content(&self.content.content)
}
/// Returns a shared reference to the content, allowing content mutation.
pub fn get_shared_content(&mut self) -> TextContent {
// We take &mut here without really needing it,
// because it sort of "makes sense".
TextContent {
content: Arc::clone(&self.content.content),
}
}
// This must be non-destructive, as it may be called
// multiple times during layout.
fn compute_rows(&mut self, size: Vec2) {
let size = if self.wrap { size } else { Vec2::max_value() };
let mut content = self.content.content.lock().unwrap();
if content.is_cache_valid(size) {
return;
}
// Completely bust the cache
// Just in case we fail, we don't want to leave a bad cache.
content.size_cache = None;
content.content_cache = Arc::new(StyledString::from_iter(
content.content_value.iter().map(|s| {
let mut s = s.clone();
s.append_plain("\n");
s
}),
));
let rows = self.cache.compute(size.x, || {
LinesIterator::new(content.get_cache().as_ref(), size.x).collect()
});
// Desired width
self.width = if rows.iter().any(|row| row.is_wrapped) {
// If any rows are wrapped, then require the full width.
Some(size.x)
} else {
rows.iter().map(|row| row.width).max()
}
}
}
impl View for CachedTextView {
fn draw(&self, printer: &Printer) {
let rows = if let Some(rows) = self.cache.last() {
rows
} else {
return;
};
let h = rows.len();
// If the content is smaller than the view, align it somewhere.
let offset = self.align.v.get_offset(h, printer.size.y);
let printer = &printer.offset((0, offset));
let content = self.content.content.lock().unwrap();
printer.with_style(self.style, |printer| {
for (y, row) in rows
.iter()
.enumerate()
.skip(printer.content_offset.y)
.take(printer.output_size.y)
{
let l = row.width;
let mut x = self.align.h.get_offset(l, printer.size.x);
for span in row.resolve_stream(content.get_cache().as_ref()) {
printer.with_style(*span.attr, |printer| {
printer.print((x, y), span.content);
x += span.content.width();
});
}
}
});
}
fn layout(&mut self, size: Vec2) {
// Compute the text rows.
self.compute_rows(size);
let num_rows = self.cache.last().map(|rows| rows.len()).unwrap_or(0);
// The entire "virtual" size (includes all rows)
let my_size = Vec2::new(self.width.unwrap_or(0), num_rows);
// Build a fresh cache.
let mut content = self.content.content.lock().unwrap();
content.size_cache = Some(SizeCache::build(my_size, size));
}
fn needs_relayout(&self) -> bool {
let content = self.content.content.lock().unwrap();
content.size_cache.is_none()
}
fn required_size(&mut self, size: Vec2) -> Vec2 {
self.compute_rows(size);
let num_rows = self.cache.last().map(|rows| rows.len()).unwrap_or(0);
Vec2::new(self.width.unwrap_or(0), num_rows)
}
}
struct TinyCache<K, V> {
size: usize,
data: Vec<(usize, K, V)>,
}
impl<K, V> TinyCache<K, V> {
fn new(size: usize) -> Self {
TinyCache {
size,
data: Vec::with_capacity(size),
}
}
fn get_key_index(&self, key: &K) -> Option<usize>
where
K: Eq,
{
self.data.iter().rposition(|(_, k, _)| k == key)
}
fn compute(&mut self, key: K, f: impl FnOnce() -> V) -> &V
where
K: Eq,
{
if let Some(index) = self.get_key_index(&key) {
self.data[index].0 += 1;
return &self.data[index].2;
}
let v = f();
self.clean();
self.data.push((0, key, v));
&self.data.last().as_ref().unwrap().2
}
fn clean(&mut self) {
if self.data.len() < self.size {
return;
}
let index = self
.data
.iter()
.enumerate()
.min_by_key(|(_, (count, _, _))| *count)
.map(|(i, _)| i);
if let Some(index) = index {
self.data.swap_remove(index);
}
}
fn clear(&mut self) {
self.data.clear();
}
fn last(&self) -> Option<&V> {
self.data.last().map(|(_, _, v)| v)
}
#[cfg(test)]
fn len(&self) -> usize {
self.data.len()
}
#[cfg(test)]
fn is_empty(&self) -> bool {
self.data.is_empty()
}
#[cfg(test)]
fn keys(&self) -> Vec<(&K, usize)> {
self.data
.iter()
.map(|(count, key, _)| (key, *count))
.collect()
}
}
#[cfg(test)]
mod tests {
use cursive::theme::Style;
use cursive::Vec2;
use super::*;
#[test]
fn sanity() {
let text_view = CachedTextView::new(ContentType::default(), 5, None);
assert_eq!(text_view.get_content().data.len(), 0);
}
#[test]
fn test_cache() {
let mut text_view =
CachedTextView::new(VecDeque::from([StyledString::from("sample")]), 3, None);
assert!(text_view.cache.is_empty());
text_view.compute_rows(Vec2::new(0, 0));
assert_eq!(text_view.cache.len(), 1);
text_view.compute_rows(Vec2::new(0, 0));
assert_eq!(text_view.cache.len(), 1);
text_view.compute_rows(Vec2::new(1, 0));
assert_eq!(text_view.cache.len(), 2);
text_view.compute_rows(Vec2::new(2, 0));
assert_eq!(text_view.cache.len(), 3);
text_view.compute_rows(Vec2::new(3, 0));
assert_eq!(text_view.cache.len(), 3);
assert_eq!(text_view.cache.keys(), [(&0, 1), (&2, 0), (&3, 0)]);
text_view.set_content(VecDeque::new());
assert_eq!(text_view.cache.len(), 0);
text_view.compute_rows(Vec2::new(0, 0));
text_view.append_line("sample");
assert_eq!(text_view.cache.len(), 0);
text_view.compute_rows(Vec2::new(0, 0));
text_view.set_content_wrap(false);
assert_eq!(text_view.cache.len(), 0);
text_view.compute_rows(Vec2::new(0, 0));
text_view.set_style(Style::view());
assert_eq!(text_view.cache.len(), 0);
}
}
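Taken together, `TextContent`, `CachedTextView`, and `TinyCache` give the node-events log bounded memory and cached wrap layouts. A minimal usage sketch (assuming this module is published as the `cursive_cached_text_view` crate, as the doc examples above assume):

```rust
use std::collections::VecDeque;

use cursive::utils::markup::StyledString;
use cursive_cached_text_view::{CachedTextView, TextContent};

fn main() {
    // Shared content: clones point at the same lines.
    let content = TextContent::new(VecDeque::from([StyledString::from("hello")]));

    // Cache up to 5 wrap layouts; keep at most 100 lines of history.
    let mut view = CachedTextView::new_with_content(content.clone(), 5, Some(100));

    // Appending through the view trims the front to `max_lines`.
    for i in 0..200 {
        view.append_line(format!("line {i}"));
    }
    assert_eq!(view.get_content().len(), 100);
}
```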


@@ -3,13 +3,13 @@ use crate::tools::*;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::SystemTime;
use stop_token::{future::FutureExt as _, StopSource};
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use async_std::io::prelude::BufReadExt;
use async_std::io::WriteExt;
use futures::{AsyncBufReadExt, AsyncWriteExt};
use async_std::io::BufReader;
} else if #[cfg(feature="rt-tokio")] {
use tokio::io::AsyncBufReadExt;
@@ -20,7 +20,6 @@ cfg_if! {
struct ClientApiConnectionInner {
comproc: CommandProcessor,
connect_addr: Option<SocketAddr>,
request_sender: Option<flume::Sender<String>>,
disconnector: Option<StopSource>,
disconnect_requested: bool,
@@ -38,7 +37,6 @@ impl ClientApiConnection {
Self {
inner: Arc::new(Mutex::new(ClientApiConnectionInner {
comproc,
connect_addr: None,
request_sender: None,
disconnector: None,
disconnect_requested: false,
@@ -117,33 +115,15 @@ impl ClientApiConnection {
}
}
async fn handle_connection(&self, connect_addr: SocketAddr) -> Result<(), String> {
trace!("ClientApiConnection::handle_connection");
// Connect the TCP socket
let stream = TcpStream::connect(connect_addr)
.await
.map_err(map_to_string)?;
// If it succeeds, disable the Nagle algorithm
stream.set_nodelay(true).map_err(map_to_string)?;
// Record that we are connected
let comproc = self.inner.lock().comproc.clone();
comproc.set_connection_state(ConnectionState::Connected(connect_addr, SystemTime::now()));
// Split the stream
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use futures::AsyncReadExt;
let (reader, mut writer) = stream.split();
let mut reader = BufReader::new(reader);
} else if #[cfg(feature="rt-tokio")] {
let (reader, mut writer) = stream.into_split();
let mut reader = BufReader::new(reader);
}
}
pub async fn run_json_api_processor<R, W>(
self,
mut reader: R,
mut writer: W,
) -> Result<(), String>
where
R: AsyncBufReadExt + Unpin + Send,
W: AsyncWriteExt + Unpin + Send,
{
// Requests to send
let (requests_tx, requests_rx) = flume::unbounded();
@@ -152,7 +132,6 @@ impl ClientApiConnection {
let stop_source = StopSource::new();
let token = stop_source.token();
let mut inner = self.inner.lock();
inner.connect_addr = Some(connect_addr);
inner.disconnector = Some(stop_source);
inner.request_sender = Some(requests_tx);
token
@@ -231,7 +210,6 @@ impl ClientApiConnection {
inner.request_sender = None;
inner.disconnector = None;
inner.disconnect_requested = false;
inner.connect_addr = None;
// Connection finished
if disconnect_requested {
@@ -241,6 +219,66 @@ impl ClientApiConnection {
}
}
async fn handle_tcp_connection(&self, connect_addr: SocketAddr) -> Result<(), String> {
trace!("ClientApiConnection::handle_tcp_connection");
// Connect the TCP socket
let stream = TcpStream::connect(connect_addr)
.await
.map_err(map_to_string)?;
// If it succeeds, disable the Nagle algorithm
stream.set_nodelay(true).map_err(map_to_string)?;
// Record that we are connected
let comproc = self.inner.lock().comproc.clone();
comproc.set_connection_state(ConnectionState::ConnectedTCP(
connect_addr,
SystemTime::now(),
));
// Split into reader and writer halves
// with line buffering on the reader
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use futures::AsyncReadExt;
let (reader, writer) = stream.split();
let reader = BufReader::new(reader);
} else {
let (reader, writer) = stream.into_split();
let reader = BufReader::new(reader);
}
}
self.clone().run_json_api_processor(reader, writer).await
}
async fn handle_ipc_connection(&self, ipc_path: PathBuf) -> Result<(), String> {
trace!("ClientApiConnection::handle_ipc_connection");
// Connect the IPC socket
let stream = IpcStream::connect(&ipc_path).await.map_err(map_to_string)?;
// Record that we are connected
let comproc = self.inner.lock().comproc.clone();
comproc.set_connection_state(ConnectionState::ConnectedIPC(ipc_path, SystemTime::now()));
// Split into reader and writer halves
// with line buffering on the reader
use futures::AsyncReadExt;
let (reader, writer) = stream.split();
cfg_if! {
if #[cfg(feature = "rt-tokio")] {
use tokio_util::compat::{FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
let reader = reader.compat();
let writer = writer.compat_write();
}
}
let reader = BufReader::new(reader);
self.clone().run_json_api_processor(reader, writer).await
}
async fn perform_request(&self, mut req: json::JsonValue) -> Option<json::JsonValue> {
let (sender, reply_rx) = {
let mut inner = self.inner.lock();
@@ -358,10 +396,15 @@ impl ClientApiConnection {
}
// Start Client API connection
pub async fn connect(&self, connect_addr: SocketAddr) -> Result<(), String> {
trace!("ClientApiConnection::connect");
pub async fn ipc_connect(&self, ipc_path: PathBuf) -> Result<(), String> {
trace!("ClientApiConnection::ipc_connect");
// Save the path to connect to
self.handle_ipc_connection(ipc_path).await
}
pub async fn tcp_connect(&self, connect_addr: SocketAddr) -> Result<(), String> {
trace!("ClientApiConnection::tcp_connect");
// Save the address to connect to
self.handle_connection(connect_addr).await
self.handle_tcp_connection(connect_addr).await
}
// End Client API connection
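This refactor factors the old inline TCP loop into `run_json_api_processor`, generic over any reader/writer pair, so one code path serves both TCP and IPC streams. A minimal sketch of the same pattern (hypothetical `echo_json_lines`, using only the `futures` traits; the real function routes requests instead of echoing):

```rust
use futures::io::{AsyncBufReadExt, AsyncWriteExt};

// Hypothetical line-oriented processor with the same shape as
// run_json_api_processor: read a line, handle it, write a reply.
async fn echo_json_lines<R, W>(mut reader: R, mut writer: W) -> Result<(), String>
where
    R: AsyncBufReadExt + Unpin + Send,
    W: AsyncWriteExt + Unpin + Send,
{
    let mut line = String::new();
    loop {
        line.clear();
        let n = reader.read_line(&mut line).await.map_err(|e| e.to_string())?;
        if n == 0 {
            break; // EOF: the peer closed the connection
        }
        writer.write_all(line.as_bytes()).await.map_err(|e| e.to_string())?;
    }
    Ok(())
}
```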


@@ -4,6 +4,7 @@ use crate::tools::*;
use crate::ui::*;
use indent::indent_all_by;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::SystemTime;
use veilid_tools::*;
@@ -22,18 +23,20 @@ pub fn convert_loglevel(s: &str) -> Result<String, String> {
#[derive(PartialEq, Clone)]
pub enum ConnectionState {
Disconnected,
Connected(SocketAddr, SystemTime),
Retrying(SocketAddr, SystemTime),
ConnectedTCP(SocketAddr, SystemTime),
RetryingTCP(SocketAddr, SystemTime),
ConnectedIPC(PathBuf, SystemTime),
RetryingIPC(PathBuf, SystemTime),
}
impl ConnectionState {
pub fn is_disconnected(&self) -> bool {
matches!(*self, Self::Disconnected)
}
pub fn is_connected(&self) -> bool {
matches!(*self, Self::Connected(_, _))
matches!(*self, Self::ConnectedTCP(_, _) | Self::ConnectedIPC(_, _))
}
pub fn is_retrying(&self) -> bool {
matches!(*self, Self::Retrying(_, _))
matches!(*self, Self::RetryingTCP(_, _) | Self::RetryingIPC(_, _))
}
}
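With the split into TCP and IPC variants, call sites now match on pairs of states. A small hypothetical helper shows the shape (not part of this commit):

```rust
// Hypothetical helper: render each of the five states as a status string.
fn describe(state: &ConnectionState) -> String {
    match state {
        ConnectionState::Disconnected => "disconnected".to_owned(),
        ConnectionState::ConnectedTCP(addr, _) => format!("connected tcp {}", addr),
        ConnectionState::RetryingTCP(addr, _) => format!("retrying tcp {}", addr),
        ConnectionState::ConnectedIPC(path, _) => format!("connected ipc {}", path.display()),
        ConnectionState::RetryingIPC(path, _) => format!("retrying ipc {}", path.display()),
    }
}
```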
@@ -44,7 +47,8 @@ struct CommandProcessorInner {
finished: bool,
autoconnect: bool,
autoreconnect: bool,
server_addr: Option<SocketAddr>,
ipc_path: Option<PathBuf>,
network_addr: Option<SocketAddr>,
connection_waker: Eventual,
last_call_id: Option<u64>,
enable_app_messages: bool,
@@ -65,7 +69,8 @@ impl CommandProcessor {
finished: false,
autoconnect: settings.autoconnect,
autoreconnect: settings.autoreconnect,
server_addr: None,
ipc_path: None,
network_addr: None,
connection_waker: Eventual::new(),
last_call_id: None,
enable_app_messages: false,
@@ -306,38 +311,75 @@ Server Debug Commands:
// Loop while we want to keep the connection
let mut first = true;
while self.inner().reconnect {
let server_addr_opt = self.inner_mut().server_addr;
let server_addr = match server_addr_opt {
None => break,
Some(addr) => addr,
};
if first {
info!("Connecting to server at {}", server_addr);
self.set_connection_state(ConnectionState::Retrying(
server_addr,
// IPC
let ipc_path_opt = self.inner_mut().ipc_path.clone();
if let Some(ipc_path) = ipc_path_opt {
if first {
info!(
"Connecting to server at {}",
ipc_path.to_string_lossy().to_string()
);
self.set_connection_state(ConnectionState::RetryingIPC(
ipc_path.clone(),
SystemTime::now(),
));
} else {
debug!(
"Retrying connection to {}",
ipc_path.to_string_lossy().to_string()
);
}
let capi = self.capi();
let res = capi.ipc_connect(ipc_path.clone()).await;
if res.is_ok() {
info!(
"Connection to server at {} terminated normally",
ipc_path.to_string_lossy().to_string()
);
break;
}
if !self.inner().autoreconnect {
info!("Connection to server lost.");
break;
}
self.set_connection_state(ConnectionState::RetryingIPC(
ipc_path,
SystemTime::now(),
));
} else {
debug!("Retrying connection to {}", server_addr);
}
let capi = self.capi();
let res = capi.connect(server_addr).await;
if res.is_ok() {
info!(
"Connection to server at {} terminated normally",
server_addr
);
break;
}
if !self.inner().autoreconnect {
info!("Connection to server lost.");
break;
}
self.set_connection_state(ConnectionState::Retrying(
server_addr,
SystemTime::now(),
));
// TCP
let network_addr_opt = self.inner_mut().network_addr;
if let Some(network_addr) = network_addr_opt {
if first {
info!("Connecting to server at {}", network_addr);
self.set_connection_state(ConnectionState::RetryingTCP(
network_addr,
SystemTime::now(),
));
} else {
debug!("Retrying connection to {}", network_addr);
}
let capi = self.capi();
let res = capi.tcp_connect(network_addr).await;
if res.is_ok() {
info!(
"Connection to server at {} terminated normally",
network_addr
);
break;
}
if !self.inner().autoreconnect {
info!("Connection to server lost.");
break;
}
self.set_connection_state(ConnectionState::RetryingTCP(
network_addr,
SystemTime::now(),
));
}
debug!("Connection lost, retrying in 2 seconds");
{
@@ -355,11 +397,17 @@ Server Debug Commands:
// called by ui
////////////////////////////////////////////
pub fn set_server_address(&self, server_addr: Option<SocketAddr>) {
self.inner_mut().server_addr = server_addr;
pub fn set_ipc_path(&self, ipc_path: Option<PathBuf>) {
self.inner_mut().ipc_path = ipc_path;
}
pub fn get_server_address(&self) -> Option<SocketAddr> {
self.inner().server_addr
pub fn get_ipc_path(&self) -> Option<PathBuf> {
self.inner().ipc_path.clone()
}
pub fn set_network_address(&self, network_addr: Option<SocketAddr>) {
self.inner_mut().network_addr = network_addr;
}
pub fn get_network_address(&self) -> Option<SocketAddr> {
self.inner().network_addr
}
// called by client_api_connection
// calls into ui
@@ -414,7 +462,19 @@ Server Debug Commands:
}
}
pub fn update_value_change(&self, value_change: &json::JsonValue) {
let out = format!("Value change: {:?}", value_change.as_str().unwrap_or("???"));
let data = json_str_vec_u8(&value_change["value"]["data"]);
let (datastr, truncated) = Self::print_json_str_vec_u8(&data);
let out = format!(
"Value change: key={} subkeys={} count={} value.seq={} value.writer={} value.data={}{}",
value_change["key"].dump(),
value_change["subkeys"].dump(),
value_change["count"].dump(),
value_change["value"]["seq"].dump(),
value_change["value"]["writer"].dump(),
datastr,
if truncated { "..." } else { "" }
);
self.inner().ui_sender.add_node_event(Level::Info, out);
}
@@ -436,16 +496,10 @@ Server Debug Commands:
);
}
pub fn update_app_message(&self, msg: &json::JsonValue) {
if !self.inner.lock().enable_app_messages {
return;
}
let message = json_str_vec_u8(&msg["message"]);
fn print_json_str_vec_u8(message: &[u8]) -> (String, bool) {
// check if message body is ascii printable
let mut printable = true;
for c in &message {
for c in message {
if *c < 32 || *c > 126 {
printable = false;
}
@@ -454,7 +508,7 @@ Server Debug Commands:
let (message, truncated) = if message.len() > 64 {
(&message[0..64], true)
} else {
(&message[..], false)
(message, false)
};
let strmsg = if printable {
@@ -463,6 +517,17 @@ Server Debug Commands:
hex::encode(message)
};
(strmsg, truncated)
}
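A few hypothetical checks of `print_json_str_vec_u8`'s behavior (assuming the printable branch, elided in this hunk, passes the bytes through as UTF-8):

```rust
#[test]
fn print_json_str_vec_u8_behavior() {
    // Printable input under 64 bytes passes through (assumed elided branch).
    assert_eq!(
        CommandProcessor::print_json_str_vec_u8(b"hello"),
        ("hello".to_string(), false)
    );
    // Non-printable bytes are hex-encoded.
    assert_eq!(
        CommandProcessor::print_json_str_vec_u8(&[0u8, 1, 2]),
        ("000102".to_string(), false)
    );
    // More than 64 bytes: truncated and flagged.
    let long = vec![b'a'; 100];
    let (s, truncated) = CommandProcessor::print_json_str_vec_u8(&long);
    assert_eq!(s.len(), 64);
    assert!(truncated);
}
```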
pub fn update_app_message(&self, msg: &json::JsonValue) {
if !self.inner.lock().enable_app_messages {
return;
}
let message = json_str_vec_u8(&msg["message"]);
let (strmsg, truncated) = Self::print_json_str_vec_u8(&message);
self.inner().ui_sender.add_node_event(
Level::Info,
format!(


@@ -3,12 +3,12 @@
#![deny(unused_must_use)]
#![recursion_limit = "256"]
use crate::tools::*;
use crate::{settings::NamedSocketAddrs, tools::*};
use clap::{Parser, ValueEnum};
use flexi_logger::*;
use std::{net::ToSocketAddrs, path::PathBuf};
use std::path::PathBuf;
mod cached_text_view;
mod client_api_connection;
mod command_processor;
mod peers_table_view;
@@ -27,14 +27,20 @@ enum LogLevel {
#[derive(Parser, Debug)]
#[command(author, version, about = "Veilid Console Client")]
struct CmdlineArgs {
/// IPC socket to connect to
#[arg(long, short = 'p')]
ipc_path: Option<PathBuf>,
/// Subnode index to use when connecting
#[arg(long, short = 'i', default_value = "0")]
subnode_index: usize,
/// Address to connect to
#[arg(long)]
#[arg(long, short = 'a')]
address: Option<String>,
/// Wait for debugger to attach
#[arg(long)]
wait_for_debug: bool,
/// Specify a configuration file to use
#[arg(short, long, value_name = "FILE")]
#[arg(short = 'c', long, value_name = "FILE")]
config_file: Option<PathBuf>,
/// log level
#[arg(value_enum)]
@@ -91,7 +97,6 @@ fn main() -> Result<(), String> {
let logger = Logger::with(specbuilder.build());
if settings.logging.terminal.enabled {
let flv = sivui.cursive_flexi_logger();
if settings.logging.file.enabled {
std::fs::create_dir_all(settings.logging.file.directory.clone())
.map_err(map_to_string)?;
@@ -100,13 +105,13 @@ fn main() -> Result<(), String> {
FileSpec::default()
.directory(settings.logging.file.directory.clone())
.suppress_timestamp(),
flv,
Box::new(uisender.clone()),
)
.start()
.expect("failed to initialize logger!");
} else {
logger
.log_to_writer(flv)
.log_to_writer(Box::new(uisender.clone()))
.start()
.expect("failed to initialize logger!");
}
@@ -123,16 +128,69 @@ fn main() -> Result<(), String> {
.expect("failed to initialize logger!");
}
}
// Get client address
let server_addrs = if let Some(address_arg) = args.address {
address_arg
.to_socket_addrs()
.map_err(|e| format!("Invalid server address '{}'", e))?
.collect()
} else {
settings.address.addrs.clone()
};
let server_addr = server_addrs.first().cloned();
let enable_ipc = (settings.enable_ipc && args.address.is_none()) || args.ipc_path.is_some();
let mut enable_network =
(settings.enable_network && args.ipc_path.is_none()) || args.address.is_some();
// Determine IPC path to try
let mut client_api_ipc_path = None;
if enable_ipc {
cfg_if::cfg_if! {
if #[cfg(windows)] {
if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
if is_ipc_socket_path(&ipc_path) {
// try direct path
enable_network = false;
client_api_ipc_path = Some(ipc_path);
} else {
// try subnode index inside path
let ipc_path = ipc_path.join(args.subnode_index.to_string());
if is_ipc_socket_path(&ipc_path) {
// subnode indexed path exists
enable_network = false;
client_api_ipc_path = Some(ipc_path);
}
}
}
} else {
if let Some(ipc_path) = args.ipc_path.or(settings.ipc_path.clone()) {
if is_ipc_socket_path(&ipc_path) {
// try direct path
enable_network = false;
client_api_ipc_path = Some(ipc_path);
} else if ipc_path.exists() && ipc_path.is_dir() {
// try subnode index inside path
let ipc_path = ipc_path.join(args.subnode_index.to_string());
if is_ipc_socket_path(&ipc_path) {
// subnode indexed path exists
enable_network = false;
client_api_ipc_path = Some(ipc_path);
}
}
}
}
}
}
let mut client_api_network_addresses = None;
if enable_network {
let args_address = if let Some(args_address) = args.address {
match NamedSocketAddrs::try_from(args_address) {
Ok(v) => Some(v),
Err(e) => {
return Err(format!("Invalid server address: {}", e));
}
}
} else {
None
};
if let Some(address_arg) = args_address.or(settings.address.clone()) {
client_api_network_addresses = Some(address_arg.addrs);
}
}
// Create command processor
debug!("Creating Command Processor ");
@@ -147,7 +205,15 @@ fn main() -> Result<(), String> {
comproc.set_client_api_connection(capi.clone());
// Keep a connection to the server
comproc.set_server_address(server_addr);
if let Some(client_api_ipc_path) = client_api_ipc_path {
comproc.set_ipc_path(Some(client_api_ipc_path));
} else if let Some(client_api_network_address) = client_api_network_addresses {
let network_addr = client_api_network_address.first().cloned();
comproc.set_network_address(network_addr);
} else {
return Err("veilid-server could not be reached".to_owned());
}
let comproc2 = comproc.clone();
let connection_future = comproc.connection_manager();
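The IPC discovery above tries the configured path directly, then falls back to a subnode-indexed socket inside it (on non-Windows, only when the path is an existing directory). The same logic as a standalone sketch (hypothetical `resolve_ipc_path`; `is_ipc_socket_path` stands in for the helper the code above uses):

```rust
use std::path::{Path, PathBuf};

// Hypothetical distillation of the IPC path resolution above.
fn resolve_ipc_path(
    base: PathBuf,
    subnode_index: usize,
    is_ipc_socket_path: impl Fn(&Path) -> bool,
) -> Option<PathBuf> {
    if is_ipc_socket_path(&base) {
        // The configured path is itself a socket
        return Some(base);
    }
    if base.is_dir() {
        // Otherwise look for a per-subnode socket inside the directory
        let indexed = base.join(subnode_index.to_string());
        if is_ipc_socket_path(&indexed) {
            return Some(indexed);
        }
    }
    None
}
```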


@@ -7,6 +7,9 @@ use std::path::{Path, PathBuf};
pub fn load_default_config() -> Result<config::Config, config::ConfigError> {
let default_config = r#"---
enable_ipc: true
ipc_path: '%IPC_DIRECTORY%'
enable_network: false
address: "localhost:5959"
autoconnect: true
autoreconnect: true
@@ -20,7 +23,7 @@ logging:
append: true
interface:
node_log:
scrollback: 2048
scrollback: 10000
command_line:
history_size: 2048
theme:
@@ -45,6 +48,10 @@ interface:
warn : "light yellow"
error : "light red"
"#
.replace(
"%IPC_DIRECTORY%",
&Settings::get_default_ipc_directory().to_string_lossy(),
)
.replace(
"%LOGGING_FILE_DIRECTORY%",
&Settings::get_default_log_directory().to_string_lossy(),
@@ -111,11 +118,22 @@ pub fn convert_loglevel(log_level: LogLevel) -> log::LevelFilter {
}
}
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct NamedSocketAddrs {
pub name: String,
pub addrs: Vec<SocketAddr>,
}
impl TryFrom<String> for NamedSocketAddrs {
type Error = std::io::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
let addrs = value.to_socket_addrs()?.collect();
let name = value;
Ok(NamedSocketAddrs { name, addrs })
}
}
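With the `TryFrom<String>` impl above, the `--address` CLI argument and the config file share one parse path, and resolution happens eagerly so a bad name fails at startup. A quick sketch (a literal IP:port avoids a DNS lookup):

```rust
fn main() -> std::io::Result<()> {
    let nsa = NamedSocketAddrs::try_from("127.0.0.1:5959".to_string())?;
    assert_eq!(nsa.name, "127.0.0.1:5959");
    assert_eq!(nsa.addrs.len(), 1);
    Ok(())
}
```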
impl<'de> serde::Deserialize<'de> for NamedSocketAddrs {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@@ -200,7 +218,10 @@ pub struct Interface {
#[derive(Debug, Deserialize)]
pub struct Settings {
pub address: NamedSocketAddrs,
pub enable_ipc: bool,
pub ipc_path: Option<PathBuf>,
pub enable_network: bool,
pub address: Option<NamedSocketAddrs>,
pub autoconnect: bool,
pub autoreconnect: bool,
pub logging: Logging,
@@ -208,6 +229,36 @@ pub struct Settings {
}
impl Settings {
#[allow(dead_code)]
fn get_server_default_directory(subpath: &str) -> PathBuf {
#[cfg(unix)]
{
let globalpath = PathBuf::from("/var/db/veilid-server").join(subpath);
if globalpath.is_dir() {
return globalpath;
}
}
let mut ts_path = if let Some(my_proj_dirs) = ProjectDirs::from("org", "Veilid", "Veilid") {
PathBuf::from(my_proj_dirs.data_local_dir())
} else {
PathBuf::from("./")
};
ts_path.push(subpath);
ts_path
}
pub fn get_default_ipc_directory() -> PathBuf {
cfg_if::cfg_if! {
if #[cfg(windows)] {
PathBuf::from(r"\\.\PIPE\veilid-server")
} else {
Self::get_server_default_directory("ipc")
}
}
}
pub fn get_default_config_path() -> PathBuf {
// Get default configuration file location
let mut default_config_path =


@@ -7,34 +7,44 @@ use cursive::align::*;
use cursive::event::*;
use cursive::theme::*;
use cursive::traits::*;
use cursive::utils::markup::ansi;
use cursive::utils::markup::StyledString;
use cursive::view::SizeConstraint;
use cursive::views::*;
use cursive::Cursive;
use cursive::CursiveRunnable;
use cursive_flexi_logger_view::{CursiveLogWriter, FlexiLoggerView};
use flexi_logger::writers::LogWriter;
use flexi_logger::DeferredNow;
// use cursive_multiplex::*;
use crate::cached_text_view::*;
use chrono::{Datelike, Timelike};
use std::collections::{HashMap, VecDeque};
use std::io::Write;
use std::path::PathBuf;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};
use thiserror::Error;
//////////////////////////////////////////////////////////////
///
struct Dirty<T> {
struct Dirty<T>
where
T: PartialEq,
{
value: T,
dirty: bool,
}
impl<T> Dirty<T> {
impl<T> Dirty<T>
where
T: PartialEq,
{
pub fn new(value: T) -> Self {
Self { value, dirty: true }
}
pub fn set(&mut self, value: T) {
self.dirty = self.value != value;
self.value = value;
self.dirty = true;
}
pub fn get(&self) -> &T {
&self.value
@@ -50,6 +60,7 @@ impl<T> Dirty<T> {
}
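Note the behavioral change to `Dirty::set` above: it now compares against the stored value, so setting an unchanged value no longer marks the state dirty (and thus avoids a redraw). A self-contained restatement of the new semantics:

```rust
// Standalone restatement of the new Dirty<T> semantics, for illustration.
struct Dirty<T: PartialEq> {
    value: T,
    dirty: bool,
}

impl<T: PartialEq> Dirty<T> {
    fn new(value: T) -> Self {
        Self { value, dirty: true }
    }
    fn set(&mut self, value: T) {
        // Dirty only if the value actually changed
        self.dirty = self.value != value;
        self.value = value;
    }
}

fn main() {
    let mut d = Dirty::new(1);
    d.set(1);
    assert!(!d.dirty); // same value: no redraw needed
    d.set(2);
    assert!(d.dirty); // changed value: redraw
}
```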
pub type UICallback = Box<dyn Fn(&mut Cursive) + Send>;
pub type NodeEventsPanel = Panel<NamedView<ScrollView<NamedView<CachedTextView>>>>;
static START_TIME: AtomicU64 = AtomicU64::new(0);
@@ -201,6 +212,11 @@ impl UI {
siv.set_global_callback(cursive::event::Event::Key(Key::Esc), UI::quit_handler);
}
fn setup_clear_handler(siv: &mut Cursive) {
siv.clear_global_callbacks(cursive::event::Event::CtrlChar('k'));
siv.set_on_pre_event(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
}
fn quit_handler(siv: &mut Cursive) {
siv.add_layer(
Dialog::text("Do you want to exit?")
@@ -219,12 +235,25 @@ impl UI {
});
}
fn clear_handler(siv: &mut Cursive) {
cursive_flexi_logger_view::clear_log();
Self::node_events_view(siv).set_content([]);
UI::update_cb(siv);
}
fn node_events_panel(s: &mut Cursive) -> ViewRef<Panel<ScrollView<FlexiLoggerView>>> {
////////////////////////////////////////////////////////////////////////////////////////////////
// Selectors
// fn main_layout(s: &mut Cursive) -> ViewRef<LinearLayout> {
// s.find_name("main-layout").unwrap()
// }
fn node_events_panel(s: &mut Cursive) -> ViewRef<NodeEventsPanel> {
s.find_name("node-events-panel").unwrap()
}
fn node_events_view(s: &mut Cursive) -> ViewRef<CachedTextView> {
s.find_name("node-events-view").unwrap()
}
fn node_events_scroll_view(s: &mut Cursive) -> ViewRef<ScrollView<NamedView<CachedTextView>>> {
s.find_name("node-events-scroll-view").unwrap()
}
fn command_line(s: &mut Cursive) -> ViewRef<EditView> {
s.find_name("command-line").unwrap()
}
@@ -237,6 +266,46 @@ impl UI {
fn peers(s: &mut Cursive) -> ViewRef<PeersTableView> {
s.find_name("peers").unwrap()
}
fn ipc_path(s: &mut Cursive) -> ViewRef<EditView> {
s.find_name("ipc-path").unwrap()
}
fn ipc_path_radio(s: &mut Cursive) -> ViewRef<RadioButton<u32>> {
s.find_name("ipc-path-radio").unwrap()
}
fn connecting_text(s: &mut Cursive) -> ViewRef<TextView> {
s.find_name("connecting-text").unwrap()
}
fn network_address(s: &mut Cursive) -> ViewRef<EditView> {
s.find_name("network-address").unwrap()
}
fn network_address_radio(s: &mut Cursive) -> ViewRef<RadioButton<u32>> {
s.find_name("network-address-radio").unwrap()
}
fn connection_dialog(s: &mut Cursive) -> ViewRef<Dialog> {
s.find_name("connection-dialog").unwrap()
}
////////////////////////////////////////////////////////////////////////////////////////////////
fn push_styled_line(s: &mut Cursive, styled_string: StyledString) {
let mut ctv = UI::node_events_view(s);
ctv.append_line(styled_string)
}
fn push_ansi_lines(s: &mut Cursive, mut starting_style: Style, lines: String) {
let mut ctv = UI::node_events_view(s);
let mut sslines: Vec<StyledString> = vec![];
for line in lines.lines() {
let (spanned_string, end_style) = ansi::parse_with_starting_style(starting_style, line);
sslines.push(spanned_string);
starting_style = end_style;
}
ctv.append_lines(sslines.into_iter());
}
////////////////////////////////////////////////////////////////////////////////////////////////
fn render_attachment_state(inner: &mut UIInner) -> String {
let att = match inner.ui_state.attachment_state.get().as_str() {
"Detached" => "[----]",
@ -271,7 +340,7 @@ impl UI {
}
}
fn render_button_attach<'a>(inner: &mut UIInner) -> (&'a str, bool) {
if let ConnectionState::Connected(_, _) = inner.ui_state.connection_state.get() {
if let ConnectionState::ConnectedTCP(_, _) = inner.ui_state.connection_state.get() {
match inner.ui_state.attachment_state.get().as_str() {
"Detached" => ("Attach", true),
"Attaching" => ("Detach", true),
@@ -351,16 +420,17 @@ impl UI {
return;
}
// run command
cursive_flexi_logger_view::parse_lines_to_log(
UI::push_ansi_lines(
s,
ColorStyle::primary().into(),
format!("> {} {}", UI::cli_ts(Self::get_start_time()), text),
format!("{}> {}\n", UI::cli_ts(Self::get_start_time()), text),
);
match Self::run_command(s, text) {
Ok(_) => {}
Err(e) => {
let color = *Self::inner_mut(s).log_colors.get(&Level::Error).unwrap();
cursive_flexi_logger_view::parse_lines_to_log(
UI::push_ansi_lines(
s,
color.into(),
format!(" {} Error: {}", UI::cli_ts(Self::get_start_time()), e),
);
@@ -445,19 +515,39 @@ impl UI {
button_attach.set_enabled(button_enable);
}
fn submit_connection_address(s: &mut Cursive) {
let edit = s.find_name::<EditView>("connection-address").unwrap();
fn submit_ipc_path(s: &mut Cursive) {
let edit = Self::ipc_path(s);
let addr = (*edit.get_content()).clone();
let sa = match addr.parse::<std::net::SocketAddr>() {
Ok(sa) => Some(sa),
let ipc_path = match addr.parse::<PathBuf>() {
Ok(sa) => sa,
Err(_) => {
s.add_layer(Dialog::text("Invalid address").button("Close", |s| {
s.add_layer(Dialog::text("Invalid IPC path").button("Close", |s| {
s.pop_layer();
}));
return;
}
};
Self::command_processor(s).set_server_address(sa);
Self::command_processor(s).set_ipc_path(Some(ipc_path));
Self::command_processor(s).set_network_address(None);
Self::command_processor(s).start_connection();
}
fn submit_network_address(s: &mut Cursive) {
let edit = Self::network_address(s);
let addr = (*edit.get_content()).clone();
let sa = match addr.parse::<std::net::SocketAddr>() {
Ok(sa) => sa,
Err(_) => {
s.add_layer(
Dialog::text("Invalid network address").button("Close", |s| {
s.pop_layer();
}),
);
return;
}
};
Self::command_processor(s).set_ipc_path(None);
Self::command_processor(s).set_network_address(Some(sa));
Self::command_processor(s).start_connection();
}
@@ -475,7 +565,8 @@ impl UI {
&& std::io::stdout().flush().is_ok()
{
let color = *Self::inner_mut(s).log_colors.get(&Level::Info).unwrap();
cursive_flexi_logger_view::parse_lines_to_log(
UI::push_ansi_lines(
s,
color.into(),
format!(
">> {} Copied: {}",
@@ -491,7 +582,8 @@ impl UI {
// X11/Wayland/other system copy
if clipboard.set_text(text.as_ref()).is_ok() {
let color = *Self::inner_mut(s).log_colors.get(&Level::Info).unwrap();
cursive_flexi_logger_view::parse_lines_to_log(
UI::push_ansi_lines(
s,
color.into(),
format!(
">> {} Copied: {}",
@@ -501,7 +593,8 @@ impl UI {
);
} else {
let color = *Self::inner_mut(s).log_colors.get(&Level::Warn).unwrap();
cursive_flexi_logger_view::parse_lines_to_log(
UI::push_ansi_lines(
s,
color.into(),
format!(
">> {} Could not copy to clipboard",
@@ -534,12 +627,23 @@ impl UI {
EventResult::Ignored
}
fn show_connection_dialog(s: &mut Cursive, state: ConnectionState) -> bool {
fn draw_connection_dialog(s: &mut Cursive, state: ConnectionState) -> bool {
let is_ipc = Self::command_processor(s).get_ipc_path().is_some();
let mut inner = Self::inner_mut(s);
let mut connection_type_group: RadioGroup<u32> = RadioGroup::new().on_change(|s, v| {
if *v == 0 {
Self::ipc_path(s).enable();
Self::network_address(s).disable();
} else if *v == 1 {
Self::ipc_path(s).disable();
Self::network_address(s).enable();
}
});
let mut show: bool = false;
let mut hide: bool = false;
let mut reset: bool = false;
let mut connecting: bool = false;
match state {
ConnectionState::Disconnected => {
if inner.connection_dialog_state.is_none()
@@ -556,10 +660,11 @@ impl UI {
.unwrap()
.is_retrying()
{
reset = true;
hide = true;
show = true
}
}
ConnectionState::Connected(_, _) => {
ConnectionState::ConnectedTCP(_, _) | ConnectionState::ConnectedIPC(_, _) => {
if inner.connection_dialog_state.is_some()
&& !inner
.connection_dialog_state
@ -570,7 +675,7 @@ impl UI {
hide = true;
}
}
ConnectionState::Retrying(_, _) => {
ConnectionState::RetryingTCP(_, _) | ConnectionState::RetryingIPC(_, _) => {
if inner.connection_dialog_state.is_none()
|| inner
.connection_dialog_state
@@ -585,8 +690,10 @@ impl UI {
.unwrap()
.is_disconnected()
{
reset = true;
hide = true;
show = true;
}
connecting = true;
}
}
inner.connection_dialog_state = Some(state);
@@ -594,38 +701,76 @@ impl UI {
if hide {
s.pop_layer();
s.pop_layer();
return true;
}
if show {
s.add_fullscreen_layer(Layer::with_color(
ResizedView::with_full_screen(DummyView {}),
ColorStyle::new(PaletteColor::Background, PaletteColor::Background),
));
s.add_layer(
Dialog::around(
LinearLayout::vertical().child(
LinearLayout::horizontal()
.child(TextView::new("Address:"))
.child(
EditView::new()
.on_submit(|s, _| Self::submit_connection_address(s))
.with_name("connection-address")
.fixed_height(1)
.min_width(40),
),
),
)
.title("Connect to server")
Dialog::around(if connecting {
LinearLayout::vertical()
.child(TextView::new(" "))
.child(
TextView::new(if is_ipc {
"Connecting to IPC:"
} else {
"Connecting to TCP:"
})
.min_width(40),
)
.child(TextView::new("").with_name("connecting-text"))
} else {
LinearLayout::vertical()
.child(TextView::new(" "))
.child(
if is_ipc {
connection_type_group.button(0, "IPC Path").selected()
} else {
connection_type_group.button(0, "IPC Path")
}
.with_name("ipc-path-radio"),
)
.child(
EditView::new()
.with_enabled(is_ipc)
.on_submit(|s, _| Self::submit_ipc_path(s))
.with_name("ipc-path")
.fixed_height(1)
.min_width(40),
)
.child(TextView::new(" "))
.child(
if is_ipc {
connection_type_group.button(1, "Network Address")
} else {
connection_type_group
.button(1, "Network Address")
.selected()
}
.with_name("network-address-radio"),
)
.child(
EditView::new()
.with_enabled(!is_ipc)
.on_submit(|s, _| Self::submit_network_address(s))
.with_name("network-address")
.fixed_height(1)
.min_width(40),
)
.child(TextView::new(" "))
})
.title(if connecting {
"Connecting to server..."
} else {
"Connect to server"
})
.with_name("connection-dialog"),
);
return true;
}
if reset {
let mut dlg = s.find_name::<Dialog>("connection-dialog").unwrap();
dlg.clear_buttons();
return true;
}
false
}
@@ -633,31 +778,56 @@ impl UI {
fn refresh_connection_dialog(s: &mut Cursive) {
let new_state = Self::inner(s).ui_state.connection_state.get().clone();
if !Self::show_connection_dialog(s, new_state.clone()) {
if !Self::draw_connection_dialog(s, new_state.clone()) {
return;
}
match new_state {
ConnectionState::Disconnected => {
let addr = match Self::command_processor(s).get_server_address() {
None => "".to_owned(),
Some(addr) => addr.to_string(),
Self::ipc_path_radio(s).set_enabled(true);
Self::network_address_radio(s).set_enabled(true);
let (network_address, network_address_enabled) =
match Self::command_processor(s).get_network_address() {
None => ("".to_owned(), false),
Some(addr) => (addr.to_string(), true),
};
let mut edit = Self::network_address(s);
edit.set_content(network_address);
edit.set_enabled(network_address_enabled);
let (ipc_path, ipc_path_enabled) = match Self::command_processor(s).get_ipc_path() {
None => ("".to_owned(), false),
Some(ipc_path) => (ipc_path.to_string_lossy().to_string(), true),
};
debug!("address is {}", addr);
let mut edit = s.find_name::<EditView>("connection-address").unwrap();
edit.set_content(addr);
edit.set_enabled(true);
let mut dlg = s.find_name::<Dialog>("connection-dialog").unwrap();
dlg.add_button("Connect", Self::submit_connection_address);
let mut edit = Self::ipc_path(s);
edit.set_content(ipc_path);
edit.set_enabled(ipc_path_enabled);
let mut dlg = Self::connection_dialog(s);
dlg.add_button("Connect", |s| {
if Self::ipc_path_radio(s).is_selected() {
Self::submit_ipc_path(s);
} else {
Self::submit_network_address(s);
}
});
}
ConnectionState::Connected(_, _) => {}
ConnectionState::Retrying(addr, _) => {
//
let mut edit = s.find_name::<EditView>("connection-address").unwrap();
debug!("address is {}", addr);
edit.set_content(addr.to_string());
edit.set_enabled(false);
let mut dlg = s.find_name::<Dialog>("connection-dialog").unwrap();
ConnectionState::ConnectedTCP(_, _) | ConnectionState::ConnectedIPC(_, _) => {}
ConnectionState::RetryingTCP(addr, _) => {
let mut text = Self::connecting_text(s);
text.set_content(addr.to_string());
let mut dlg = Self::connection_dialog(s);
dlg.add_button("Cancel", |s| {
Self::command_processor(s).cancel_reconnect();
});
}
ConnectionState::RetryingIPC(ipc_path, _) => {
let mut text = Self::connecting_text(s);
text.set_content(ipc_path.to_string_lossy().to_string());
let mut dlg = Self::connection_dialog(s);
dlg.add_button("Cancel", |s| {
Self::command_processor(s).cancel_reconnect();
});
@@ -678,6 +848,8 @@ impl UI {
let mut status = StyledString::new();
let mut enable_status_fields = false;
match inner.ui_state.connection_state.get() {
ConnectionState::Disconnected => {
status.append_styled(
@@ -686,35 +858,64 @@ impl UI {
);
status.append_styled("|", ColorStyle::highlight_inactive());
}
ConnectionState::Retrying(addr, _) => {
ConnectionState::RetryingTCP(addr, _) => {
status.append_styled(
format!("Reconnecting to {} ", addr),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
}
ConnectionState::Connected(addr, _) => {
ConnectionState::RetryingIPC(path, _) => {
status.append_styled(
format!(
"Reconnecting to IPC#{} ",
path.file_name()
.unwrap_or_default()
.to_string_lossy()
.into_owned()
),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
}
ConnectionState::ConnectedTCP(addr, _) => {
status.append_styled(
format!("Connected to {} ", addr),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
// Add attachment state
status.append_styled(
format!(" {} ", UI::render_attachment_state(&mut inner)),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
// Add bandwidth status
status.append_styled(
format!(" {} ", UI::render_network_status(&mut inner)),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
// Add tunnel status
status.append_styled(" No Tunnels ", ColorStyle::highlight_inactive());
status.append_styled("|", ColorStyle::highlight_inactive());
enable_status_fields = true;
}
ConnectionState::ConnectedIPC(path, _) => {
status.append_styled(
format!(
"Connected to IPC#{} ",
path.file_name()
.unwrap_or_default()
.to_string_lossy()
.into_owned()
),
ColorStyle::highlight_inactive(),
);
enable_status_fields = true;
}
}
if enable_status_fields {
status.append_styled("|", ColorStyle::highlight_inactive());
// Add attachment state
status.append_styled(
format!(" {} ", UI::render_attachment_state(&mut inner)),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
// Add bandwidth status
status.append_styled(
format!(" {} ", UI::render_network_status(&mut inner)),
ColorStyle::highlight_inactive(),
);
status.append_styled("|", ColorStyle::highlight_inactive());
// Add tunnel status
status.append_styled(" No Tunnels ", ColorStyle::highlight_inactive());
status.append_styled("|", ColorStyle::highlight_inactive());
};
statusbar.set_content(status);
@@ -837,9 +1038,29 @@ impl UI {
START_TIME.load(Ordering::Relaxed)
}
pub fn new(node_log_scrollback: usize, settings: &Settings) -> (Self, UISender) {
cursive_flexi_logger_view::resize(node_log_scrollback);
fn make_node_events_panel(
node_log_scrollback: usize,
) -> ResizedView<NamedView<NodeEventsPanel>> {
Panel::new(
CachedTextView::new([], node_log_scrollback, Some(node_log_scrollback))
.with_name("node-events-view")
.scrollable()
.scroll_strategy(cursive::view::ScrollStrategy::StickToBottom)
.on_scroll(|s, _r| {
let mut sv = UI::node_events_scroll_view(s);
if sv.is_at_bottom() {
sv.set_scroll_strategy(cursive::view::ScrollStrategy::StickToBottom);
}
})
.with_name("node-events-scroll-view"),
)
.title_position(HAlign::Left)
.title("Node Events")
.with_name("node-events-panel")
.full_screen()
}
pub fn new(node_log_scrollback: usize, settings: &Settings) -> (Self, UISender) {
UI::set_start_time();
// Instantiate the cursive runnable
let runnable = CursiveRunnable::new(
@@ -874,6 +1095,7 @@ impl UI {
let ui_sender = UISender {
inner: this.inner.clone(),
cb_sink,
colors: default_log_colors(),
};
let mut inner = this.inner.lock();
@@ -882,13 +1104,7 @@ impl UI {
this.siv.set_user_data(this.inner.clone());
// Create layouts
let node_events_view = Panel::new(FlexiLoggerView::new_scrollable())
.title_position(HAlign::Left)
.title("Node Events")
.with_name("node-events-panel")
.full_screen();
let node_events_view = Self::make_node_events_panel(node_log_scrollback);
let mut peers_table_view = PeersTableView::new()
.column(PeerTableColumn::NodeId, "Node Id", |c| c.width(48))
.column(PeerTableColumn::Address, "Address", |c| c)
@@ -967,22 +1183,18 @@ impl UI {
.child(TextView::new(version)),
);
this.siv.add_fullscreen_layer(mainlayout);
this.siv
.add_fullscreen_layer(mainlayout.with_name("main-layout"));
UI::setup_colors(&mut this.siv, &mut inner, settings);
UI::setup_quit_handler(&mut this.siv);
this.siv
.set_global_callback(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
UI::setup_clear_handler(&mut this.siv);
drop(inner);
(this, ui_sender)
}
pub fn cursive_flexi_logger(&self) -> Box<CursiveLogWriter> {
let mut flv = cursive_flexi_logger_view::cursive_flexi_logger(self.siv.cb_sink().clone());
flv.set_colors(self.inner.lock().log_colors.clone());
flv
}
pub fn set_command_processor(&mut self, cmdproc: CommandProcessor) {
let mut inner = self.inner.lock();
inner.cmdproc = Some(cmdproc);
@@ -999,10 +1211,22 @@ impl UI {
type CallbackSink = Box<dyn FnOnce(&mut Cursive) + 'static + Send>;
/// Default log colors
fn default_log_colors() -> HashMap<Level, Color> {
let mut colors = HashMap::<Level, Color>::new();
colors.insert(Level::Trace, Color::Dark(BaseColor::Green));
colors.insert(Level::Debug, Color::Dark(BaseColor::Cyan));
colors.insert(Level::Info, Color::Dark(BaseColor::Blue));
colors.insert(Level::Warn, Color::Dark(BaseColor::Yellow));
colors.insert(Level::Error, Color::Dark(BaseColor::Red));
colors
}
#[derive(Clone)]
pub struct UISender {
inner: Arc<Mutex<UIInner>>,
cb_sink: Sender<CallbackSink>,
colors: HashMap<Level, Color>,
}
impl UISender {
@@ -1094,14 +1318,72 @@ impl UISender {
}
pub fn add_node_event(&self, log_color: Level, event: String) {
{
let color = {
let inner = self.inner.lock();
let color = *inner.log_colors.get(&log_color).unwrap();
cursive_flexi_logger_view::parse_lines_to_log(
color.into(),
format!("{}: {}", UI::cli_ts(UI::get_start_time()), event),
);
*inner.log_colors.get(&log_color).unwrap()
};
let _ = self.push_styled_lines(
color.into(),
format!("{}: {}\n", UI::cli_ts(UI::get_start_time()), event),
);
}
pub fn push_styled(&self, styled_string: StyledString) -> std::io::Result<()> {
let res = self.cb_sink.send(Box::new(move |s| {
UI::push_styled_line(s, styled_string);
}));
if res.is_err() {
return Err(std::io::Error::from(std::io::ErrorKind::BrokenPipe));
}
let _ = self.cb_sink.send(Box::new(UI::update_cb));
Ok(())
}
pub fn push_styled_lines(&self, starting_style: Style, lines: String) -> std::io::Result<()> {
let res = self.cb_sink.send(Box::new(move |s| {
UI::push_ansi_lines(s, starting_style, lines);
}));
if res.is_err() {
return Err(std::io::Error::from(std::io::ErrorKind::BrokenPipe));
}
Ok(())
}
}
impl LogWriter for UISender {
fn write(&self, _now: &mut DeferredNow, record: &Record) -> std::io::Result<()> {
let color = *self.colors.get(&record.level()).unwrap();
let args = format!("{}", &record.args());
let mut line = StyledString::new();
let mut indent = 0;
let levstr = format!("{}: ", record.level());
indent += levstr.len();
line.append_styled(levstr, color);
let filestr = format!(
"{}:{} ",
record.file().unwrap_or("(unnamed)"),
record.line().unwrap_or(0),
);
indent += filestr.len();
line.append_plain(filestr);
for argline in args.lines() {
line.append_styled(argline, color);
line.append_plain("\n");
self.push_styled(line)?;
line = StyledString::new();
line.append_plain(" ".repeat(indent));
}
Ok(())
}
fn flush(&self) -> std::io::Result<()> {
// we are not buffering
Ok(())
}
fn max_log_level(&self) -> log::LevelFilter {
log::LevelFilter::max()
}
}
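`UISender` now implements `flexi_logger`'s `LogWriter` directly (replacing `cursive-flexi-logger-view`), and its `write` impl pads continuation lines so multi-line records stay aligned under the `LEVEL: file:line` prefix. The padding arithmetic, restated standalone (hypothetical `format_record`, for illustration only):

```rust
// Standalone restatement of the indentation scheme in LogWriter::write above.
fn format_record(level: &str, file: &str, line: u32, args: &str) -> String {
    let prefix = format!("{level}: {file}:{line} ");
    let pad = " ".repeat(prefix.len());
    let mut out = String::new();
    for (i, argline) in args.lines().enumerate() {
        // First line carries the prefix; the rest are padded to match.
        out.push_str(if i == 0 { &prefix } else { &pad });
        out.push_str(argline);
        out.push('\n');
    }
    out
}

fn main() {
    let s = format_record("INFO", "ui.rs", 42, "first\nsecond");
    // "INFO: ui.rs:42 " is 15 characters, so the second line is
    // indented by 15 spaces.
    assert!(s.lines().nth(1).unwrap().starts_with(&" ".repeat(15)));
}
```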


@@ -63,10 +63,9 @@ veilid-tools = { version = "0.2.5", path = "../veilid-tools", features = [
"tracing",
], default-features = false }
paste = "1.0.14"
once_cell = "1.18.0"
owning_ref = "0.4.1"
once_cell = "1.19.0"
backtrace = "0.3.69"
num-traits = "0.2.16"
num-traits = "0.2.17"
shell-words = "1.1.0"
static_assertions = "1.1.0"
cfg-if = "1.0.0"
@@ -75,32 +74,32 @@ lazy_static = "1.4.0"
directories = "5.0.1"
# Logging
tracing = { version = "0.1.37", features = ["log", "attributes"] }
tracing-subscriber = "0.3.17"
tracing = { version = "0.1.40", features = ["log", "attributes"] }
tracing-subscriber = "0.3.18"
tracing-error = "0.2.0"
eyre = "0.6.8"
thiserror = "1.0.48"
eyre = "0.6.11"
thiserror = "1.0.50"
# Data structures
enumset = { version = "1.1.2", features = ["serde"] }
enumset = { version = "1.1.3", features = ["serde"] }
keyvaluedb = "0.1.1"
range-set-blaze = "0.1.9"
range-set-blaze = "0.1.13"
weak-table = "0.3.2"
hashlink = { package = "veilid-hashlink", version = "0.1.0", features = [
"serde_impl",
] }
# System
futures-util = { version = "0.3.28", default-features = false, features = [
futures-util = { version = "0.3.29", default-features = false, features = [
"alloc",
] }
flume = { version = "0.11.0", features = ["async"] }
parking_lot = "0.12.1"
lock_api = "0.4.10"
lock_api = "0.4.11"
stop-token = { version = "0.7.0", default-features = false }
# Crypto
ed25519-dalek = { version = "2.0.0", default-features = false, features = [
ed25519-dalek = { version = "2.1.0", default-features = false, features = [
"alloc",
"rand_core",
"digest",
@ -112,29 +111,29 @@ x25519-dalek = { version = "2.0.0", default-features = false, features = [
"zeroize",
"precomputed-tables",
] }
curve25519-dalek = { version = "4.1.0", default-features = false, features = [
curve25519-dalek = { version = "4.1.1", default-features = false, features = [
"alloc",
"zeroize",
"precomputed-tables",
] }
blake3 = { version = "1.4.1" }
blake3 = { version = "1.5.0" }
chacha20poly1305 = "0.10.1"
chacha20 = "0.9.1"
argon2 = "0.5.2"
# Network
async-std-resolver = { version = "0.23.0", optional = true }
trust-dns-resolver = { version = "0.23.0", optional = true }
async-std-resolver = { version = "0.23.2", optional = true }
trust-dns-resolver = { version = "0.23.2", optional = true }
enum-as-inner = "=0.6.0" # temporary fix for trust-dns-resolver v0.22.0
# Serialization
capnp = { version = "0.18.1", default-features = false, features = ["alloc"] }
serde = { version = "1.0.188", features = ["derive"] }
serde_json = { version = "1.0.107" }
capnp = { version = "0.18.10", default-features = false, features = ["alloc"] }
serde = { version = "1.0.193", features = ["derive", "rc"] }
serde_json = { version = "1.0.108" }
serde-big-array = "0.5.1"
json = "0.12.4"
data-encoding = { version = "2.4.0" }
schemars = "0.8.13"
data-encoding = { version = "2.5.0" }
schemars = "0.8.16"
lz4_flex = { version = "0.11.1", default-features = false, features = [
"safe-encode",
"safe-decode",
@ -145,19 +144,19 @@ lz4_flex = { version = "0.11.1", default-features = false, features = [
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
# Tools
config = { version = "0.13.3", features = ["yaml"] }
config = { version = "0.13.4", features = ["yaml"] }
bugsalot = { package = "veilid-bugsalot", version = "0.1.0" }
chrono = "0.4.31"
libc = "0.2.148"
libc = "0.2.151"
nix = "0.27.1"
# System
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
tokio = { version = "1.32.0", features = ["full"], optional = true }
tokio-util = { version = "0.7.8", features = ["compat"], optional = true }
tokio = { version = "1.35.0", features = ["full"], optional = true }
tokio-util = { version = "0.7.10", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.14", features = ["net"], optional = true }
async-io = { version = "1.13.0" }
futures-util = { version = "0.3.28", default-features = false, features = [
futures-util = { version = "0.3.29", default-features = false, features = [
"async-await",
"sink",
"std",
@ -165,7 +164,7 @@ futures-util = { version = "0.3.28", default-features = false, features = [
] }
# Data structures
keyring-manager = "0.5.0"
keyring-manager = "0.5.1"
keyvaluedb-sqlite = "0.1.1"
# Network
@ -174,11 +173,11 @@ async-tungstenite = { package = "veilid-async-tungstenite", version = "0.23.0",
] }
igd = { package = "veilid-igd", version = "0.1.1" }
async-tls = { package = "veilid-async-tls", version = "0.12.0" }
webpki = "0.22.1"
webpki-roots = "0.25.2"
rustls = "0.21.8"
rustls-pemfile = "1.0.3"
socket2 = { version = "0.5.4", features = ["all"] }
webpki = "0.22.4"
webpki-roots = "0.25.3"
rustls = "0.21.10"
rustls-pemfile = "1.0.4"
socket2 = { version = "0.5.5", features = ["all"] }
# Dependencies for WASM builds only
[target.'cfg(target_arch = "wasm32")'.dependencies]
@ -188,7 +187,7 @@ veilid-tools = { version = "0.2.5", path = "../veilid-tools", default-features =
] }
# Tools
getrandom = { version = "0.2.10", features = ["js"] }
getrandom = { version = "0.2.11", features = ["js"] }
# System
async_executors = { version = "0.7.0", default-features = false, features = [
@ -196,15 +195,15 @@ async_executors = { version = "0.7.0", default-features = false, features = [
"timer",
] }
async-lock = "2.8.0"
wasm-bindgen = "0.2.87"
js-sys = "0.3.64"
wasm-bindgen-futures = "0.4.37"
wasm-bindgen = "0.2.89"
js-sys = "0.3.66"
wasm-bindgen-futures = "0.4.39"
send_wrapper = { version = "0.6.0", features = ["futures"] }
serde_bytes = { version = "0.11", default_features = false, features = [
"alloc",
] }
tsify = { version = "0.4.5", features = ["js"] }
serde-wasm-bindgen = "0.6.0"
serde-wasm-bindgen = "0.6.3"
# Network
ws_stream_wasm = "0.7.4"
@ -218,7 +217,7 @@ keyvaluedb-web = "0.1.1"
### Configuration for WASM32 'web-sys' crate
[target.'cfg(target_arch = "wasm32")'.dependencies.web-sys]
version = "0.3.64"
version = "0.3.66"
features = [
'Document',
'HtmlDocument',
@ -258,13 +257,13 @@ tracing-oslog = { version = "0.1.2", optional = true }
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
simplelog = { version = "0.12.1", features = ["test"] }
serial_test = "2.0.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
serial_test = { version = "2.0.0", default-features = false, features = [
"async",
] }
wasm-bindgen-test = "0.3.37"
wasm-bindgen-test = "0.3.39"
console_error_panic_hook = "0.1.7"
wee_alloc = "0.4.5"
wasm-logger = "0.2.0"
@ -274,7 +273,9 @@ wasm-logger = "0.2.0"
[build-dependencies]
capnpc = "0.18.0"
glob = "0.3.1"
filetime = "0.2.22"
filetime = "0.2.23"
sha2 = "0.10.8"
hex = "0.4.3"
[package.metadata.wasm-pack.profile.release]
wasm-opt = ["-O", "--enable-mutable-globals"]

View File

@ -1,7 +1,10 @@
use filetime::{set_file_mtime, FileTime};
use glob::glob;
use sha2::{Digest, Sha256};
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::Write;
use std::{
env, fs, io,
env, io,
path::Path,
process::{Command, Stdio},
};
@ -29,24 +32,61 @@ fn get_capnp_version_string() -> String {
s[20..].to_owned()
}
fn is_input_file_outdated<P1, P2>(input: P1, output: P2) -> io::Result<bool>
fn is_input_file_outdated<P, Q>(input: P, output: Q) -> io::Result<bool>
where
P1: AsRef<Path>,
P2: AsRef<Path>,
P: AsRef<Path>,
Q: AsRef<Path>,
{
let out_meta = fs::metadata(output);
if let Ok(meta) = out_meta {
let output_mtime = meta.modified()?;
let Some(out_bh) = get_build_hash(output) else {
// output file not found or no build hash, we are outdated
return Ok(true);
};
// if input file is more recent than our output, we are outdated
let input_meta = fs::metadata(input)?;
let input_mtime = input_meta.modified()?;
let in_bh = make_build_hash(input);
Ok(input_mtime > output_mtime)
} else {
// output file not found, we are outdated
Ok(true)
Ok(out_bh != in_bh)
}
fn calculate_hash(lines: std::io::Lines<std::io::BufReader<std::fs::File>>) -> Vec<u8> {
let mut hasher = Sha256::new();
// Build hash of lines, ignoring EOL conventions
for l in lines {
let l = l.unwrap();
hasher.update(l.as_bytes());
hasher.update(b"\n");
}
let out = hasher.finalize();
out.to_vec()
}
fn get_build_hash<Q: AsRef<Path>>(output_path: Q) -> Option<Vec<u8>> {
let lines = std::io::BufReader::new(std::fs::File::open(output_path).ok()?).lines();
for l in lines {
let l = l.unwrap();
if let Some(rest) = l.strip_prefix("//BUILDHASH:") {
return Some(hex::decode(rest).unwrap());
}
}
None
}
fn make_build_hash<P: AsRef<Path>>(input_path: P) -> Vec<u8> {
let input_path = input_path.as_ref();
let lines = std::io::BufReader::new(std::fs::File::open(input_path).unwrap()).lines();
calculate_hash(lines)
}
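To see the EOL-insensitivity that calculate_hash provides, here is a quick sanity sketch (not part of this diff; it assumes make_build_hash above is in scope): two copies of a schema differing only in line endings must produce the same build hash, because lines() strips the terminator and calculate_hash re-appends a uniform b"\n".
// Hypothetical sanity check: CRLF and LF checkouts hash identically.
let dir = std::env::temp_dir();
let crlf = dir.join("schema_crlf.capnp");
let lf = dir.join("schema_lf.capnp");
std::fs::write(&crlf, b"struct A {}\r\nstruct B {}\r\n").unwrap();
std::fs::write(&lf, b"struct A {}\nstruct B {}\n").unwrap();
assert_eq!(make_build_hash(&crlf), make_build_hash(&lf));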
fn append_hash<P: AsRef<Path>, Q: AsRef<Path>>(input_path: P, output_path: Q) {
let input_path = input_path.as_ref();
let output_path = output_path.as_ref();
let lines = std::io::BufReader::new(std::fs::File::open(input_path).unwrap()).lines();
let h = calculate_hash(lines);
let mut out_file = OpenOptions::new()
.write(true)
.append(true)
.open(output_path)
.unwrap();
writeln!(out_file, "\n//BUILDHASH:{}", hex::encode(h)).unwrap();
}
fn do_capnp_build() {
@ -86,8 +126,8 @@ fn do_capnp_build() {
.run()
.expect("compiling schema");
// If successful, update modification time
set_file_mtime("proto/veilid_capnp.rs", FileTime::now()).unwrap();
// If successful, append a hash of the input to the output file
append_hash("proto/veilid.capnp", "proto/veilid_capnp.rs");
}
// Fix for missing __extenddftf2 on Android x86_64 Emulator
@ -97,11 +137,13 @@ fn fix_android_emulator() {
if target_arch == "x86_64" && target_os == "android" {
let missing_library = "clang_rt.builtins-x86_64-android";
let android_home = env::var("ANDROID_HOME").expect("ANDROID_HOME not set");
let lib_path = glob(&format!("{android_home}/ndk/25.1.8937393/**/lib{missing_library}.a"))
.expect("failed to glob")
.next()
.expect("Need libclang_rt.builtins-x86_64-android.a")
.unwrap();
let lib_path = glob(&format!(
"{android_home}/ndk/25.1.8937393/**/lib{missing_library}.a"
))
.expect("failed to glob")
.next()
.expect("Need libclang_rt.builtins-x86_64-android.a")
.unwrap();
let lib_dir = lib_path.parent().unwrap();
println!("cargo:rustc-link-search={}", lib_dir.display());
println!("cargo:rustc-link-lib=static={missing_library}");
@ -117,7 +159,7 @@ fn main() {
}
if is_input_file_outdated("./proto/veilid.capnp", "./proto/veilid_capnp.rs").unwrap() {
println!("cargo:warning=rebuilding proto/veilid_capnp.rs because it is older than proto/veilid.capnp");
println!("cargo:warning=rebuilding proto/veilid_capnp.rs because it has changed from the last generation of proto/veilid.capnp");
do_capnp_build();
}

View File

@ -356,12 +356,12 @@ struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
subkeys @1 :List(SubkeyRange); # subkey ranges to watch (up to 512 subranges); if empty, watch everything
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (the expiration returned may be less; 0 for max)
count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)
watcher @4 :PublicKey; # the watcher performing the watch, can be the owner or a schema member
signature @5 :Signature; # signature of the watcher, must be one of the schema members or the key owner. signature covers: key, subkeys, expiration, count
watcher @4 :PublicKey; # optional: the watcher performing the watch, can be the owner or a schema member
signature @5 :Signature; # optional: signature of the watcher, must be one of the schema members or the key owner. signature covers: key, subkeys, expiration, count
}
struct OperationWatchValueA @0xa726cab7064ba893 {
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch failed)
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch was rejected). If the watch is being cancelled (count = 0), this will be the non-zero former expiration time.
peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
}
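For reference, the watcher signature named above covers (key, subkeys, expiration, count). A condensed Rust sketch of the byte layout, restating make_signature_data from the RPC WatchValueQ diff further below (subkey bounds are u32, expiration is u64, all little-endian):
// Signed payload layout for OperationWatchValueQ (sketch only; the canonical
// construction is make_signature_data in the watch_value operation below):
// [key.kind (4)] [key.value (32)] [(start:4)(end:4) per subkey range]
// [expiration (8)] [count (4)]
let mut sig_data = Vec::new();
sig_data.extend_from_slice(&key.kind.0);
sig_data.extend_from_slice(&key.value.bytes);
for sk in subkeys.ranges() {
    sig_data.extend_from_slice(&sk.start().to_le_bytes());
    sig_data.extend_from_slice(&sk.end().to_le_bytes());
}
sig_data.extend_from_slice(&expiration.to_le_bytes());
sig_data.extend_from_slice(&count.to_le_bytes());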

View File

@ -7680,7 +7680,7 @@ pub mod node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(2), ::core::option::Option::None)
}
#[inline]
pub fn set_envelope_support(&mut self, value: ::capnp::primitive_list::Reader<'a,u8>) -> ::capnp::Result<()> {
pub fn set_envelope_support(&mut self, value: ::capnp::primitive_list::Reader<'_,u8>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(2), value, false)
}
#[inline]
@ -7696,7 +7696,7 @@ pub mod node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(3), ::core::option::Option::None)
}
#[inline]
pub fn set_crypto_support(&mut self, value: ::capnp::primitive_list::Reader<'a,u32>) -> ::capnp::Result<()> {
pub fn set_crypto_support(&mut self, value: ::capnp::primitive_list::Reader<'_,u32>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(3), value, false)
}
#[inline]
@ -7712,7 +7712,7 @@ pub mod node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(4), ::core::option::Option::None)
}
#[inline]
pub fn set_capabilities(&mut self, value: ::capnp::primitive_list::Reader<'a,u32>) -> ::capnp::Result<()> {
pub fn set_capabilities(&mut self, value: ::capnp::primitive_list::Reader<'_,u32>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(4), value, false)
}
#[inline]
@ -7728,7 +7728,7 @@ pub mod node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(5), ::core::option::Option::None)
}
#[inline]
pub fn set_dial_info_detail_list(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::dial_info_detail::Owned>) -> ::capnp::Result<()> {
pub fn set_dial_info_detail_list(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::dial_info_detail::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(5), value, false)
}
#[inline]
@ -8096,7 +8096,7 @@ pub mod signed_direct_node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::typed_signature::Owned>) -> ::capnp::Result<()> {
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::typed_signature::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -8388,7 +8388,7 @@ pub mod signed_relayed_node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_relay_ids(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::typed_key::Owned>) -> ::capnp::Result<()> {
pub fn set_relay_ids(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::typed_key::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -8428,7 +8428,7 @@ pub mod signed_relayed_node_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(3), ::core::option::Option::None)
}
#[inline]
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::typed_signature::Owned>) -> ::capnp::Result<()> {
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::typed_signature::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(3), value, false)
}
#[inline]
@ -9004,7 +9004,7 @@ pub mod peer_info {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_node_ids(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::typed_key::Owned>) -> ::capnp::Result<()> {
pub fn set_node_ids(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::typed_key::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false)
}
#[inline]
@ -9277,7 +9277,7 @@ pub mod routed_operation {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::signature512::Owned>) -> ::capnp::Result<()> {
pub fn set_signatures(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::signature512::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false)
}
#[inline]
@ -10547,7 +10547,7 @@ pub mod operation_find_node_q {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_capabilities(&mut self, value: ::capnp::primitive_list::Reader<'a,u32>) -> ::capnp::Result<()> {
pub fn set_capabilities(&mut self, value: ::capnp::primitive_list::Reader<'_,u32>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -10777,7 +10777,7 @@ pub mod operation_find_node_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false)
}
#[inline]
@ -12994,7 +12994,7 @@ pub mod operation_get_value_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -13619,7 +13619,7 @@ pub mod operation_set_value_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -13912,7 +13912,7 @@ pub mod operation_watch_value_q {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_subkeys(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::subkey_range::Owned>) -> ::capnp::Result<()> {
pub fn set_subkeys(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::subkey_range::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -14273,7 +14273,7 @@ pub mod operation_watch_value_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false)
}
#[inline]
@ -14536,7 +14536,7 @@ pub mod operation_value_changed {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_subkeys(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::subkey_range::Owned>) -> ::capnp::Result<()> {
pub fn set_subkeys(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::subkey_range::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -15047,7 +15047,7 @@ pub mod operation_supply_block_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(0), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(0), value, false)
}
#[inline]
@ -15516,7 +15516,7 @@ pub mod operation_find_block_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(1), ::core::option::Option::None)
}
#[inline]
pub fn set_suppliers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_suppliers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(1), value, false)
}
#[inline]
@ -15532,7 +15532,7 @@ pub mod operation_find_block_a {
::capnp::traits::FromPointerBuilder::get_from_pointer(self.builder.get_pointer_field(2), ::core::option::Option::None)
}
#[inline]
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'a,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
pub fn set_peers(&mut self, value: ::capnp::struct_list::Reader<'_,crate::veilid_capnp::peer_info::Owned>) -> ::capnp::Result<()> {
::capnp::traits::SetPointerBuilder::set_pointer_builder(self.builder.reborrow().get_pointer_field(2), value, false)
}
#[inline]
@ -21292,3 +21292,5 @@ pub mod operation {
pub type WhichBuilder<'a,> = Which<::capnp::Result<crate::veilid_capnp::question::Builder<'a>>,::capnp::Result<crate::veilid_capnp::statement::Builder<'a>>,::capnp::Result<crate::veilid_capnp::answer::Builder<'a>>>;
}
}
//BUILDHASH:ab4fd70d40c9e543f799ce326dd41c61c7ea78132fb53f164156073d9786a9f6

View File

@ -120,6 +120,8 @@ impl ServicesContext {
// Set up storage manager
trace!("init storage manager");
let update_callback = self.update_callback.clone();
let storage_manager = StorageManager::new(
self.config.clone(),
self.crypto.clone().unwrap(),
@ -127,7 +129,7 @@ impl ServicesContext {
#[cfg(feature = "unstable-blockstore")]
self.block_store.clone().unwrap(),
);
if let Err(e) = storage_manager.init().await {
if let Err(e) = storage_manager.init(update_callback).await {
error!("failed to init storage manager: {}", e);
self.shutdown().await;
return Err(e);

View File

@ -57,7 +57,7 @@ impl ProtectedStore {
inner.keyring_manager = match maybe_km {
Ok(v) => Some(v),
Err(e) => {
log_pstore!(error "Failed to create secure keyring manager: {}", e);
info!("Secure key storage service unavailable, falling back to direct disk-based storage: {}", e);
None
}
};

View File

@ -40,6 +40,6 @@ pub async fn ptr_lookup(_ip_addr: IpAddr) -> EyreResult<String> {
bail!("wasm does not support ptr lookup")
}
pub fn env_variable_is_defined<S: AsRef<str>>(s: S) -> bool {
pub fn env_variable_is_defined<S: AsRef<str>>(_s: S) -> bool {
false
}

View File

@ -268,6 +268,13 @@ impl AddressFilter {
.or_insert(ts);
}
pub fn clear_punishments(&self) {
let mut inner = self.inner.lock();
inner.punishments_by_ip4.clear();
inner.punishments_by_ip6_prefix.clear();
inner.punishments_by_node_id.clear();
}
pub fn punish_ip_addr(&self, addr: IpAddr) {
log_net!(debug ">>> PUNISHED: {}", addr);
let ts = get_aligned_timestamp();

View File

@ -331,7 +331,7 @@ impl ConnectionManager {
}
// Attempt new connection
let mut retry_count = 0; // Someday, if we need this
let mut retry_count = 1;
let prot_conn = network_result_try!(loop {
let result_net_res = ProtocolNetworkConnection::connect(
@ -350,12 +350,18 @@ impl ConnectionManager {
}
Err(e) => {
if retry_count == 0 {
return Err(e).wrap_err("failed to connect");
return Err(e).wrap_err(format!(
"failed to connect: {:?} -> {:?}",
preferred_local_address, dial_info
));
}
}
};
log_net!(debug "get_or_create_connection retries left: {}", retry_count);
retry_count -= 1;
// Release the preferred local address if things can't connect due to a low-level collision we don't have a record of
preferred_local_address = None;
sleep(500).await;
});

View File

@ -1096,7 +1096,7 @@ impl NetworkManager {
Ok(true)
}
pub fn debug_restart_network(&self) {
pub fn restart_network(&self) {
self.net().restart_network();
}
}

View File

@ -401,7 +401,7 @@ impl IGDManager {
let desc = this.get_description(k.llpt, k.local_port);
match gw.add_port(convert_llpt(k.llpt), v.mapped_port, SocketAddr::new(local_ip, k.local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) {
Ok(()) => {
log_net!(debug "renewed mapped port {:?} -> {:?}", v, k);
log_net!("renewed mapped port {:?} -> {:?}", v, k);
inner.port_maps.insert(k, PortMapValue {
ext_ip: v.ext_ip,

View File

@ -31,11 +31,11 @@ pub const PEEK_DETECT_LEN: usize = 64;
cfg_if! {
if #[cfg(all(feature = "unstable-blockstore", feature="unstable-tunnels"))] {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 8;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 9;
} else if #[cfg(any(feature = "unstable-blockstore", feature="unstable-tunnels"))] {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 7;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 8;
} else {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 6;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 7;
}
}
pub const PUBLIC_INTERNET_CAPABILITIES: [Capability; PUBLIC_INTERNET_CAPABILITIES_LEN] = [
@ -46,19 +46,21 @@ pub const PUBLIC_INTERNET_CAPABILITIES: [Capability; PUBLIC_INTERNET_CAPABILITIE
CAP_RELAY,
CAP_VALIDATE_DIAL_INFO,
CAP_DHT,
CAP_DHT_WATCH,
CAP_APPMESSAGE,
#[cfg(feature = "unstable-blockstore")]
CAP_BLOCKSTORE,
];
#[cfg(feature = "unstable-blockstore")]
const LOCAL_NETWORK_CAPABILITIES_LEN: usize = 4;
const LOCAL_NETWORK_CAPABILITIES_LEN: usize = 5;
#[cfg(not(feature = "unstable-blockstore"))]
const LOCAL_NETWORK_CAPABILITIES_LEN: usize = 3;
const LOCAL_NETWORK_CAPABILITIES_LEN: usize = 4;
pub const LOCAL_NETWORK_CAPABILITIES: [Capability; LOCAL_NETWORK_CAPABILITIES_LEN] = [
CAP_RELAY,
CAP_DHT,
CAP_DHT_WATCH,
CAP_APPMESSAGE,
#[cfg(feature = "unstable-blockstore")]
CAP_BLOCKSTORE,
@ -551,12 +553,9 @@ impl Network {
.wrap_err("connect failure")?
}
ProtocolType::WS | ProtocolType::WSS => {
WebsocketProtocolHandler::connect(
None,
&dial_info,
connect_timeout_ms)
.await
.wrap_err("connect failure")?
WebsocketProtocolHandler::connect(None, &dial_info, connect_timeout_ms)
.await
.wrap_err("connect failure")?
}
});

View File

@ -12,11 +12,11 @@ use std::io;
cfg_if! {
if #[cfg(all(feature = "unstable-blockstore", feature="unstable-tunnels"))] {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 6;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 7;
} else if #[cfg(any(feature = "unstable-blockstore", feature="unstable-tunnels"))] {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 5;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 6;
} else {
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 4;
const PUBLIC_INTERNET_CAPABILITIES_LEN: usize = 5;
}
}
pub const PUBLIC_INTERNET_CAPABILITIES: [Capability; PUBLIC_INTERNET_CAPABILITIES_LEN] = [
@ -27,6 +27,7 @@ pub const PUBLIC_INTERNET_CAPABILITIES: [Capability; PUBLIC_INTERNET_CAPABILITIE
//CAP_RELAY,
//CAP_VALIDATE_DIAL_INFO,
CAP_DHT,
CAP_DHT_WATCH,
CAP_APPMESSAGE,
#[cfg(feature = "unstable-blockstore")]
CAP_BLOCKSTORE,
@ -40,6 +41,7 @@ pub const PUBLIC_INTERNET_CAPABILITIES: [Capability; PUBLIC_INTERNET_CAPABILITIE
// pub const LOCAL_NETWORK_CAPABILITIES: [Capability; LOCAL_NETWORK_CAPABILITIES_LEN] = [
// //CAP_RELAY,
// CAP_DHT,
// CAP_DHT_WATCH,
// CAP_APPMESSAGE,
// #[cfg(feature = "unstable-blockstore")]
// CAP_BLOCKSTORE,

View File

@ -647,6 +647,11 @@ impl BucketEntryInner {
return false;
}
// If we have had any lost answers recently, this is not reliable
if self.peer_stats.rpc_stats.recent_lost_answers > 0 {
return false;
}
match self.peer_stats.rpc_stats.first_consecutive_seen_ts {
// If we have not seen a node consecutively, it can't be reliable
None => false,

View File

@ -38,6 +38,9 @@ pub use types::*;
//////////////////////////////////////////////////////////////////////////
/// How many nodes in our routing table we require for a functional PublicInternet RoutingDomain
pub const MIN_PUBLIC_INTERNET_ROUTING_DOMAIN_NODE_COUNT: usize = 4;
/// How frequently we tick the relay management routine
pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1;
@ -276,6 +279,12 @@ impl RoutingTable {
inner.route_spec_store = Some(route_spec_store);
}
// Inform storage manager we are up
self.network_manager
.storage_manager()
.set_routing_table(Some(self.clone()))
.await;
debug!("finished routing table init");
Ok(())
}
@ -284,6 +293,12 @@ impl RoutingTable {
pub async fn terminate(&self) {
debug!("starting routing table terminate");
// Stop storage manager from using us
self.network_manager
.storage_manager()
.set_routing_table(None)
.await;
// Stop tasks
self.cancel_tasks().await;

View File

@ -152,6 +152,10 @@ impl RouteSpecStore {
/// Purge the route spec store
pub async fn purge(&self) -> VeilidAPIResult<()> {
// Briefly pause routing table ticker while changes are made
let _tick_guard = self.unlocked_inner.routing_table.pause_tasks().await;
self.unlocked_inner.routing_table.cancel_tasks().await;
{
let inner = &mut *self.inner.lock();
inner.content = Default::default();
@ -1462,14 +1466,14 @@ impl RouteSpecStore {
Ok(out)
}
/// Import a remote private route for compilation
/// Import a remote private route set blob for compilation
/// It is safe to import the same route more than once and it will return the same route id
/// Returns a route set id
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", skip(self, blob), ret, err)
)]
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> VeilidAPIResult<RouteId> {
pub fn import_remote_private_route_blob(&self, blob: Vec<u8>) -> VeilidAPIResult<RouteId> {
let cur_ts = get_aligned_timestamp();
// decode the pr blob
@ -1502,6 +1506,46 @@ impl RouteSpecStore {
Ok(id)
}
/// Add a single remote private route for compilation
/// It is safe to add the same route more than once and it will return the same route id
/// Returns a route set id
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", skip(self, blob), ret, err)
)]
pub fn add_remote_private_route(
&self,
private_route: PrivateRoute,
) -> VeilidAPIResult<RouteId> {
let cur_ts = get_aligned_timestamp();
// Make a single route set
let private_routes = vec![private_route];
// make the route id
let id = self.generate_remote_route_id(&private_routes)?;
// validate the private routes
let inner = &mut *self.inner.lock();
for private_route in &private_routes {
// ensure private route has first hop
if !matches!(private_route.hops, PrivateRouteHops::FirstHop(_)) {
apibail_generic!("private route must have first hop");
}
// ensure this isn't also an allocated route
// if inner.content.get_id_by_key(&private_route.public_key.value).is_some() {
// bail!("should not import allocated route");
// }
}
inner
.cache
.cache_remote_private_route(cur_ts, id, private_routes);
Ok(id)
}
/// Release a remote private route that is no longer in use
#[cfg_attr(
feature = "verbose-tracing",

View File

@ -3,7 +3,7 @@ use super::*;
/// The core representation of the RouteSpecStore that can be serialized
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub(super) struct RouteSpecStoreContent {
/// All of the route sets we have allocated so far indexed by key
/// All of the route sets we have allocated so far indexed by key (many to one)
id_by_key: HashMap<PublicKey, RouteId>,
/// All of the route sets we have allocated so far
details: HashMap<RouteId, RouteSetSpecDetail>,

View File

@ -137,10 +137,8 @@ impl RoutingDomainEditor {
None
};
// Debug print
log_rtab!(debug "[{:?}] COMMIT: {:?}", self.routing_domain, self.changes);
// Apply changes
log_rtab!("[{:?}] COMMIT: {:?}", self.routing_domain, self.changes);
let mut peer_info_changed = false;
{
let mut inner = self.routing_table.inner.write();
@ -181,7 +179,7 @@ impl RoutingDomainEditor {
peer_info_changed = true;
}
RoutingDomainChange::SetRelayNodeKeepalive { ts } => {
debug!("[{:?}] relay node keepalive: {:?}", self.routing_domain, ts);
trace!("[{:?}] relay node keepalive: {:?}", self.routing_domain, ts);
detail.common_mut().set_relay_node_last_keepalive(ts);
}
RoutingDomainChange::AddDialInfoDetail { dial_info_detail } => {

View File

@ -962,7 +962,10 @@ impl RoutingTableInner {
None => has_valid_own_node_info,
Some(entry) => entry.with_inner(|e| {
e.signed_node_info(routing_domain)
.map(|sni| sni.has_any_signature())
.map(|sni| {
sni.has_any_signature()
&& !matches!(sni.node_info().network_class(), NetworkClass::Invalid)
})
.unwrap_or(false)
}),
}

View File

@ -285,7 +285,8 @@ impl RoutingTable {
{
Ok(NodeContactMethod::Direct(v)) => v,
Ok(v) => {
log_rtab!(warn "invalid contact method for bootstrap: {:?}", v);
log_rtab!(warn "invalid contact method for bootstrap, restarting network: {:?}", v);
routing_table.network_manager().restart_network();
return;
}
Err(e) => {

View File

@ -157,7 +157,7 @@ impl RoutingTable {
for ck in VALID_CRYPTO_KINDS {
let eckey = (RoutingDomain::PublicInternet, ck);
let cnt = entry_counts.get(&eckey).copied().unwrap_or_default();
if cnt == 0 {
if cnt < MIN_PUBLIC_INTERNET_ROUTING_DOMAIN_NODE_COUNT {
needs_bootstrap = true;
} else if cnt < min_peer_count {
needs_peer_minimum_refresh = true;

View File

@ -2,7 +2,6 @@ use super::*;
use futures_util::stream::{FuturesUnordered, StreamExt};
use futures_util::FutureExt;
use stop_token::future::FutureExt as StopFutureExt;
const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2;
@ -103,10 +102,10 @@ impl RoutingTable {
}
/// Test set of routes and remove the ones that don't test clean
#[instrument(level = "trace", skip(self, stop_token), err)]
#[instrument(level = "trace", skip(self, _stop_token), err)]
async fn test_route_set(
&self,
stop_token: StopToken,
_stop_token: StopToken,
routes_needing_testing: Vec<RouteId>,
) -> EyreResult<()> {
if routes_needing_testing.is_empty() {
@ -158,7 +157,7 @@ impl RoutingTable {
}
// Wait for test_route futures to complete in parallel
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
while unord.next().await.is_some() {}
}
// Process failed routes

View File

@ -8,6 +8,7 @@ pub const CAP_SIGNAL: Capability = FourCC(*b"SGNL");
pub const CAP_RELAY: Capability = FourCC(*b"RLAY");
pub const CAP_VALIDATE_DIAL_INFO: Capability = FourCC(*b"DIAL");
pub const CAP_DHT: Capability = FourCC(*b"DHTV");
pub const CAP_DHT_WATCH: Capability = FourCC(*b"DHTW");
pub const CAP_APPMESSAGE: Capability = FourCC(*b"APPM");
#[cfg(feature = "unstable-blockstore")]
pub const CAP_BLOCKSTORE: Capability = FourCC(*b"BLOC");

View File

@ -1,6 +1,8 @@
use super::*;
use crate::storage_manager::SignedValueData;
const MAX_VALUE_CHANGED_SUBKEYS_LEN: usize = 512;
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationValueChanged {
key: TypedKey,
@ -16,13 +18,21 @@ impl RPCOperationValueChanged {
subkeys: ValueSubkeyRangeSet,
count: u32,
value: SignedValueData,
) -> Self {
Self {
) -> Result<Self, RPCError> {
// Needed because RangeSetBlaze uses different types here all the time
#[allow(clippy::unnecessary_cast)]
let subkeys_len = subkeys.ranges_len() as usize;
if subkeys_len > MAX_VALUE_CHANGED_SUBKEYS_LEN {
return Err(RPCError::protocol("ValueChanged subkeys length too long"));
}
Ok(Self {
key,
subkeys,
count,
value,
}
})
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
@ -62,6 +72,10 @@ impl RPCOperationValueChanged {
let key = decode_typed_key(&k_reader)?;
let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?;
if sk_reader.len() as usize > MAX_VALUE_CHANGED_SUBKEYS_LEN {
return Err(RPCError::protocol("ValueChanged subkeys length too long"));
}
let mut subkeys = ValueSubkeyRangeSet::new();
for skr in sk_reader.iter() {
let vskr = (skr.get_start(), skr.get_end());

View File

@ -20,41 +20,52 @@ impl RPCOperationWatchValueQ {
subkeys: ValueSubkeyRangeSet,
expiration: u64,
count: u32,
watcher: PublicKey,
signature: Signature,
watcher: KeyPair,
vcrypto: CryptoSystemVersion,
) -> Result<Self, RPCError> {
// Needed because RangeSetBlaze uses different types here all the time
#[allow(clippy::unnecessary_cast)]
let subkeys_len = subkeys.len() as usize;
let subkeys_len = subkeys.ranges_len() as usize;
if subkeys_len > MAX_WATCH_VALUE_Q_SUBKEYS_LEN {
return Err(RPCError::protocol("WatchValueQ subkeys length too long"));
}
let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count);
let signature = vcrypto
.sign(&watcher.key, &watcher.secret, &signature_data)
.map_err(RPCError::protocol)?;
Ok(Self {
key,
subkeys,
expiration,
count,
watcher,
watcher: watcher.key,
signature,
})
}
// signature covers: key, subkeys, expiration, count, using watcher key
fn make_signature_data(&self) -> Vec<u8> {
fn make_signature_data(
key: &TypedKey,
subkeys: &ValueSubkeyRangeSet,
expiration: u64,
count: u32,
) -> Vec<u8> {
// Needed because RangeSetBlaze uses different types here all the time
#[allow(clippy::unnecessary_cast)]
let subkeys_len = self.subkeys.len() as usize;
let subkeys_len = subkeys.ranges_len() as usize;
let mut sig_data = Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (subkeys_len * 8) + 8 + 4);
sig_data.extend_from_slice(&self.key.kind.0);
sig_data.extend_from_slice(&self.key.value.bytes);
for sk in self.subkeys.ranges() {
sig_data.extend_from_slice(&key.kind.0);
sig_data.extend_from_slice(&key.value.bytes);
for sk in subkeys.ranges() {
sig_data.extend_from_slice(&sk.start().to_le_bytes());
sig_data.extend_from_slice(&sk.end().to_le_bytes());
}
sig_data.extend_from_slice(&self.expiration.to_le_bytes());
sig_data.extend_from_slice(&self.count.to_le_bytes());
sig_data.extend_from_slice(&expiration.to_le_bytes());
sig_data.extend_from_slice(&count.to_le_bytes());
sig_data
}
@ -63,11 +74,11 @@ impl RPCOperationWatchValueQ {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
let sig_data = self.make_signature_data();
let sig_data =
Self::make_signature_data(&self.key, &self.subkeys, self.expiration, self.count);
vcrypto
.verify(&self.watcher, &sig_data, &self.signature)
.map_err(RPCError::protocol)?;
Ok(())
}
@ -95,12 +106,10 @@ impl RPCOperationWatchValueQ {
pub fn watcher(&self) -> &PublicKey {
&self.watcher
}
#[allow(dead_code)]
pub fn signature(&self) -> &Signature {
&self.signature
}
#[allow(dead_code)]
pub fn destructure(
self,
@ -176,7 +185,7 @@ impl RPCOperationWatchValueQ {
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys
.len()
.ranges_len()
.try_into()
.map_err(RPCError::map_internal("invalid subkey range list length"))?,
);

View File

@ -6,7 +6,7 @@ pub(crate) enum Destination {
/// Send to node directly
Direct {
/// The node to send to
target: NodeRef,
node: NodeRef,
/// Require safety route or not
safety_selection: SafetySelection,
},
@ -15,7 +15,7 @@ pub(crate) enum Destination {
/// The relay to send to
relay: NodeRef,
/// The final destination the relay should send to
target: NodeRef,
node: NodeRef,
/// Require safety route or not
safety_selection: SafetySelection,
},
@ -29,15 +29,15 @@ pub(crate) enum Destination {
}
impl Destination {
pub fn target(&self) -> Option<NodeRef> {
pub fn node(&self) -> Option<NodeRef> {
match self {
Destination::Direct {
target,
node: target,
safety_selection: _,
} => Some(target.clone()),
Destination::Relay {
relay: _,
target,
node: target,
safety_selection: _,
} => Some(target.clone()),
Destination::PrivateRoute {
@ -46,18 +46,18 @@ impl Destination {
} => None,
}
}
pub fn direct(target: NodeRef) -> Self {
let sequencing = target.sequencing();
pub fn direct(node: NodeRef) -> Self {
let sequencing = node.sequencing();
Self::Direct {
target,
node,
safety_selection: SafetySelection::Unsafe(sequencing),
}
}
pub fn relay(relay: NodeRef, target: NodeRef) -> Self {
let sequencing = relay.sequencing().max(target.sequencing());
pub fn relay(relay: NodeRef, node: NodeRef) -> Self {
let sequencing = relay.sequencing().max(node.sequencing());
Self::Relay {
relay,
target,
node,
safety_selection: SafetySelection::Unsafe(sequencing),
}
}
@ -71,19 +71,19 @@ impl Destination {
pub fn with_safety(self, safety_selection: SafetySelection) -> Self {
match self {
Destination::Direct {
target,
node,
safety_selection: _,
} => Self::Direct {
target,
node,
safety_selection,
},
Destination::Relay {
relay,
target,
node,
safety_selection: _,
} => Self::Relay {
relay,
target,
node,
safety_selection,
},
Destination::PrivateRoute {
@ -99,12 +99,12 @@ impl Destination {
pub fn get_safety_selection(&self) -> &SafetySelection {
match self {
Destination::Direct {
target: _,
node: _,
safety_selection,
} => safety_selection,
Destination::Relay {
relay: _,
target: _,
node: _,
safety_selection,
} => safety_selection,
Destination::PrivateRoute {
@ -113,13 +113,38 @@ impl Destination {
} => safety_selection,
}
}
pub fn get_target(&self, rss: RouteSpecStore) -> Result<Target, RPCError> {
match self {
Destination::Direct {
node,
safety_selection: _,
}
| Destination::Relay {
relay: _,
node,
safety_selection: _,
} => Ok(Target::NodeId(node.best_node_id())),
Destination::PrivateRoute {
private_route,
safety_selection: _,
} => {
// Add the remote private route if we're going to keep the id
let route_id = rss
.add_remote_private_route(private_route.clone())
.map_err(RPCError::protocol)?;
Ok(Target::PrivateRoute(route_id))
}
}
}
}
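A usage sketch of the new get_target, as the set_value handler later in this diff uses it to record where ValueChanged notifications should be sent back (rss is the RouteSpecStore; a private-route destination is imported and kept alive as a route id):
// Mirrors process_set_value_q below (sketch, not new code):
let dest = network_result_try!(self.get_respond_to_destination(&msg));
let target = dest.get_target(rss)?; // imports the remote private route if present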
impl fmt::Display for Destination {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Destination::Direct {
target,
node,
safety_selection,
} => {
let sr = if matches!(safety_selection, SafetySelection::Safe(_)) {
@ -128,11 +153,11 @@ impl fmt::Display for Destination {
""
};
write!(f, "{}{}", target, sr)
write!(f, "{}{}", node, sr)
}
Destination::Relay {
relay,
target,
node,
safety_selection,
} => {
let sr = if matches!(safety_selection, SafetySelection::Safe(_)) {
@ -141,7 +166,7 @@ impl fmt::Display for Destination {
""
};
write!(f, "{}@{}{}", target, relay, sr)
write!(f, "{}@{}{}", node, relay, sr)
}
Destination::PrivateRoute {
private_route,
@ -160,6 +185,45 @@ impl fmt::Display for Destination {
}
impl RPCProcessor {
/// Convert a 'Target' into a 'Destination'
pub async fn resolve_target_to_destination(
&self,
target: Target,
safety_selection: SafetySelection,
) -> Result<rpc_processor::Destination, RPCError> {
match target {
Target::NodeId(node_id) => {
// Resolve node
let mut nr = match self.resolve_node(node_id, safety_selection).await? {
Some(nr) => nr,
None => {
return Err(RPCError::network("could not resolve node id"));
}
};
// Apply sequencing to match safety selection
nr.set_sequencing(safety_selection.get_sequencing());
Ok(rpc_processor::Destination::Direct {
node: nr,
safety_selection,
})
}
Target::PrivateRoute(rsid) => {
// Get remote private route
let rss = self.routing_table().route_spec_store();
let Some(private_route) = rss.best_remote_private_route(&rsid) else {
return Err(RPCError::network("could not get remote private route"));
};
Ok(rpc_processor::Destination::PrivateRoute {
private_route,
safety_selection,
})
}
}
}
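resolve_target_to_destination is the inverse of get_target above. A hypothetical caller sketch (the safety selection value here is an assumption for illustration):
// Turn an api-level Target back into an rpc-level Destination (sketch):
let dest = rpc_processor
    .resolve_target_to_destination(target, SafetySelection::Unsafe(Sequencing::NoPreference))
    .await?;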
/// Convert the 'Destination' into a 'RespondTo' for a response
pub(super) fn get_destination_respond_to(
&self,
@ -170,7 +234,7 @@ impl RPCProcessor {
match dest {
Destination::Direct {
target,
node: target,
safety_selection,
} => match safety_selection {
SafetySelection::Unsafe(_) => {
@ -198,7 +262,7 @@ impl RPCProcessor {
},
Destination::Relay {
relay,
target,
node: target,
safety_selection,
} => match safety_selection {
SafetySelection::Unsafe(_) => {

View File

@ -219,7 +219,10 @@ where
Ok(())
}
pub async fn run(self: Arc<Self>) -> TimeoutOr<Result<Option<R>, RPCError>> {
pub async fn run(
self: Arc<Self>,
init_fanout_queue: Vec<NodeRef>,
) -> TimeoutOr<Result<Option<R>, RPCError>> {
// Get timeout in milliseconds
let timeout_ms = match us_to_ms(self.timeout_us.as_u64()).map_err(RPCError::internal) {
Ok(v) => v,
@ -229,8 +232,12 @@ where
};
// Initialize closest nodes list
if let Err(e) = self.clone().init_closest_nodes() {
return TimeoutOr::value(Err(e));
if init_fanout_queue.is_empty() {
if let Err(e) = self.clone().init_closest_nodes() {
return TimeoutOr::value(Err(e));
}
} else {
self.clone().add_to_fanout_queue(&init_fanout_queue);
}
// Do a quick check to see if we're already done
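The new init_fanout_queue parameter lets a caller seed the fanout with nodes it already knows about instead of starting from its own closest-nodes search; an empty Vec preserves the previous behavior, as in the rpc_processor change below. A sketch (caller-side names hypothetical):
// Old behavior: bootstrap the fanout from our closest nodes to the key
let _ = fanout_call.clone().run(vec![]).await;
// Seeded: start from caller-provided candidates
let _ = fanout_call.run(known_nodes).await; // known_nodes: Vec<NodeRef>, hypothetical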

View File

@ -196,12 +196,16 @@ struct WaitableReply {
#[derive(Clone, Debug, Default)]
pub struct Answer<T> {
pub latency: TimestampDuration, // how long it took to get this answer
pub answer: T, // the answer itself
/// How long it took to get this answer
pub latency: TimestampDuration,
/// The private route requested to receive the reply
pub reply_private_route: Option<PublicKey>,
/// The answer itself
pub answer: T,
}
impl<T> Answer<T> {
pub fn new(latency: TimestampDuration, answer: T) -> Self {
Self { latency, answer }
pub fn new(latency: TimestampDuration, reply_private_route: Option<PublicKey>, answer: T) -> Self {
Self { latency, reply_private_route, answer }
}
}
@ -512,7 +516,7 @@ impl RPCProcessor {
check_done,
);
fanout_call.run().await
fanout_call.run(vec![]).await
}
/// Search the DHT for a specific node corresponding to a key unless we have that node in our routing table already, and return the node reference
@ -728,12 +732,12 @@ impl RPCProcessor {
// To where are we sending the request
match dest {
Destination::Direct {
target: ref node_ref,
node: ref node_ref,
safety_selection,
}
| Destination::Relay {
relay: ref node_ref,
target: _,
node: _,
safety_selection,
} => {
// Send to a node without a private route
@ -742,7 +746,7 @@ impl RPCProcessor {
// Get the actual destination node id accounting for relays
let (node_ref, destination_node_ref) = if let Destination::Relay {
relay: _,
ref target,
node: ref target,
safety_selection: _,
} = dest
{
@ -850,12 +854,12 @@ impl RPCProcessor {
let routing_table = self.routing_table();
let target = match dest {
Destination::Direct {
target,
node: target,
safety_selection: _,
} => target.clone(),
Destination::Relay {
relay: _,
target,
node: target,
safety_selection: _,
} => target.clone(),
Destination::PrivateRoute {
@ -1497,7 +1501,7 @@ impl RPCProcessor {
) {
address_filter.punish_node_id(sender_node_id);
return Ok(NetworkResult::invalid_message(
"sender peerinfo has invalid peer scope",
format!("sender peerinfo has invalid peer scope: {:?}",sender_peer_info.signed_node_info())
));
}
opt_sender_nr = match self.routing_table().register_node_with_peer_info(

View File

@ -23,6 +23,9 @@ impl RPCProcessor {
// Send the app call question
let waitable_reply = network_result_try!(self.question(dest, question, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
@ -45,7 +48,11 @@ impl RPCProcessor {
tracing::Span::current().record("ret.latency", latency.as_u64());
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.len", a_message.len());
Ok(NetworkResult::value(Answer::new(latency, a_message)))
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
a_message,
)))
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]

View File

@ -43,6 +43,9 @@ impl RPCProcessor {
// Send the find_node request
let waitable_reply = network_result_try!(self.question(dest, find_node_q, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
@ -74,7 +77,11 @@ impl RPCProcessor {
}
}
Ok(NetworkResult::value(Answer::new(latency, peers)))
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
peers,
)))
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]

View File

@ -35,9 +35,9 @@ impl RPCProcessor {
) ->RPCNetworkResult<Answer<GetValueAnswer>> {
// Ensure destination never has a private route
// and get the target noderef so we can validate the response
let Some(target) = dest.target() else {
let Some(target) = dest.node() else {
return Err(RPCError::internal(
"Never send set value requests over private routes",
"Never send get value requests over private routes",
));
};
@ -82,6 +82,9 @@ impl RPCProcessor {
.await?
);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
@ -156,6 +159,7 @@ impl RPCProcessor {
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
GetValueAnswer {
value,
peers,
@ -207,7 +211,7 @@ impl RPCProcessor {
// Get the nodes that we know about that are closer to the the key than our own node
let routing_table = self.routing_table();
let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH]));
#[cfg(feature="debug-dht")]
{
@ -226,16 +230,29 @@ impl RPCProcessor {
log_rpc!(debug "{}", debug_string);
}
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let subkey_result = network_result_try!(storage_manager
.inbound_get_value(key, subkey, want_descriptor)
.await
.map_err(RPCError::internal)?);
// See if we would have accepted this as a set
let set_value_count = {
let c = self.config.get();
c.network.dht.set_value_count as usize
};
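// A set_value would only have landed on one of the set_value_count closest
// nodes; if we already know that many nodes closer to the key, we would not
// have stored this record, so skip the local lookup and just return peers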
let (subkey_result_value, subkey_result_descriptor) = if closer_to_key_peers.len() >= set_value_count {
// Not close enough
(None, None)
} else {
// Close enough, let's get it
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let subkey_result = network_result_try!(storage_manager
.inbound_get_value(key, subkey, want_descriptor)
.await
.map_err(RPCError::internal)?);
(subkey_result.value, subkey_result.descriptor)
};
#[cfg(feature="debug-dht")]
{
let debug_string_value = subkey_result.value.as_ref().map(|v| {
let debug_string_value = subkey_result_value.as_ref().map(|v| {
format!(" len={} seq={} writer={}",
v.value_data().data().len(),
v.value_data().seq(),
@ -248,7 +265,7 @@ impl RPCProcessor {
key,
subkey,
debug_string_value,
if subkey_result.descriptor.is_some() {
if subkey_result_descriptor.is_some() {
" +desc"
} else {
""
@ -262,9 +279,9 @@ impl RPCProcessor {
// Make GetValue answer
let get_value_a = RPCOperationGetValueA::new(
subkey_result.value,
subkey_result_value.map(|x| (*x).clone()),
closer_to_key_peers,
subkey_result.descriptor,
subkey_result_descriptor.map(|x| (*x).clone()),
)?;
// Send GetValue answer

View File

@ -39,7 +39,7 @@ impl RPCProcessor {
) ->RPCNetworkResult<Answer<SetValueAnswer>> {
// Ensure destination never has a private route
// and get the target noderef so we can validate the response
let Some(target) = dest.target() else {
let Some(target) = dest.node() else {
return Err(RPCError::internal(
"Never send set value requests over private routes",
));
@ -96,6 +96,8 @@ impl RPCProcessor {
.await?
);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -174,6 +176,7 @@ impl RPCProcessor {
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
SetValueAnswer { set, value, peers },
)))
}
@ -185,6 +188,8 @@ impl RPCProcessor {
) ->RPCNetworkResult<()> {
// Ignore if disabled
let routing_table = self.routing_table();
let rss = routing_table.route_spec_store();
let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
if !opi
.signed_node_info()
@ -219,6 +224,10 @@ impl RPCProcessor {
// Destructure
let (key, subkey, value, descriptor) = set_value_q.destructure();
// Get target for ValueChanged notifications
let dest = network_result_try!(self.get_respond_to_destination(&msg));
let target = dest.get_target(rss)?;
// Get the nodes that we know about that are closer to the the key than our own node
let routing_table = self.routing_table();
let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
@ -254,7 +263,7 @@ impl RPCProcessor {
// Save the subkey, creating a new record if necessary
let storage_manager = self.storage_manager();
let new_value = network_result_try!(storage_manager
.inbound_set_value(key, subkey, value, descriptor)
.inbound_set_value(key, subkey, Arc::new(value), descriptor.map(Arc::new), target)
.await
.map_err(RPCError::internal)?);
@ -289,7 +298,7 @@ impl RPCProcessor {
}
// Make SetValue answer
let set_value_a = RPCOperationSetValueA::new(set, new_value, closer_to_key_peers)?;
let set_value_a = RPCOperationSetValueA::new(set, new_value.map(|x| (*x).clone()), closer_to_key_peers)?;
// Send SetValue answer
self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(Box::new(set_value_a))))

View File

@ -27,7 +27,7 @@ impl RPCProcessor {
SafetySelection::Unsafe(_) => {
let (opt_target_nr, routing_domain) = match &dest {
Destination::Direct {
target,
node: target,
safety_selection: _,
} => {
let routing_domain = match target.best_routing_domain() {
@ -52,7 +52,7 @@ impl RPCProcessor {
}
Destination::Relay {
relay,
target,
node: target,
safety_selection: _,
} => {
let routing_domain = match relay.best_routing_domain() {
@ -113,6 +113,9 @@ impl RPCProcessor {
// Note what kind of ping this was and to what peer scope
let send_data_method = waitable_reply.send_data_method.clone();
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
@ -144,7 +147,7 @@ impl RPCProcessor {
let mut opt_sender_info = None;
match dest {
Destination::Direct {
target,
node: target,
safety_selection,
} => {
if matches!(safety_selection, SafetySelection::Unsafe(_)) {
@ -180,7 +183,7 @@ impl RPCProcessor {
}
Destination::Relay {
relay: _,
target: _,
node: _,
safety_selection: _,
}
| Destination::PrivateRoute {
@ -190,7 +193,11 @@ impl RPCProcessor {
// sender info is irrelevant over relays and routes
}
};
Ok(NetworkResult::value(Answer::new(latency, opt_sender_info)))
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
opt_sender_info,
)))
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]

View File

@ -2,13 +2,67 @@ use super::*;
impl RPCProcessor {
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), err))]
// Sends a value changed statement
// Can be sent via all methods including relays and routes
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", skip(self, value), err)
)]
pub async fn rpc_call_value_changed(
self,
dest: Destination,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
count: u32,
value: SignedValueData,
) -> RPCNetworkResult<()> {
let value_changed = RPCOperationValueChanged::new(key, subkeys, count, value)?;
let statement =
RPCStatement::new(RPCStatementDetail::ValueChanged(Box::new(value_changed)));
// Send the value changed request
self.statement(dest, statement).await
}
pub(crate) async fn process_value_changed(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
// Ignore if disabled
let routing_table = self.routing_table();
let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
if !opi.signed_node_info().node_info().has_capability(CAP_DHT) {
return Ok(NetworkResult::service_unavailable("dht is not available"));
// Get the statement
let (_, _, _, kind) = msg.operation.destructure();
let (key, subkeys, count, value) = match kind {
RPCOperationKind::Statement(s) => match s.destructure() {
RPCStatementDetail::ValueChanged(s) => s.destructure(),
_ => panic!("not a value changed statement"),
},
_ => panic!("not a statement"),
};
#[cfg(feature = "debug-dht")]
{
let debug_string_value = format!(
" len={} seq={} writer={}",
value.value_data().data().len(),
value.value_data().seq(),
value.value_data().writer(),
);
let debug_string_stmt = format!(
"IN <== ValueChanged({} #{:?}+{}{}) <= {}",
key,
subkeys,
count,
debug_string_value,
msg.header.direct_sender_node_id()
);
log_rpc!(debug "{}", debug_string_stmt);
}
Err(RPCError::unimplemented("process_value_changed"))
// Save the subkey, creating a new record if necessary
let storage_manager = self.storage_manager();
storage_manager
.inbound_value_changed(key, subkeys, count, Arc::new(value))
.await
.map_err(RPCError::internal)?;
Ok(NetworkResult::value(()))
}
}

View File

@ -1,14 +1,275 @@
use super::*;
#[derive(Clone, Debug)]
pub struct WatchValueAnswer {
pub expiration_ts: Timestamp,
pub peers: Vec<PeerInfo>,
}
impl RPCProcessor {
/// Sends a watch value request and wait for response
/// Can be sent via all methods including relays
/// Safety routes may be used, but never private routes.
/// Because this request reveals the identity of the node itself,
/// replying to it over a private route would leak that identity
/// and defeat the purpose of the private route.
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", skip(self),
fields(ret.expiration,
ret.latency,
ret.peers.len
),err)
)]
pub async fn rpc_call_watch_value(
self,
dest: Destination,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
watcher: KeyPair,
) -> RPCNetworkResult<Answer<WatchValueAnswer>> {
// Ensure destination never has a private route
// and get the target noderef so we can validate the response
let Some(target) = dest.node() else {
return Err(RPCError::internal(
"Never send watch value requests over private routes",
));
};
// Get the target node id
let Some(vcrypto) = self.crypto.get(key.kind) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target.node_ids().get(key.kind) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> WatchValueQ({} {}@{}+{}) => {} (watcher={})",
key, subkeys, expiration, count, dest, watcher.key
);
// Send the watchvalue question
let watch_value_q = RPCOperationWatchValueQ::new(
key,
subkeys.clone(),
expiration.as_u64(),
count,
watcher,
vcrypto.clone(),
)?;
let question = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
RPCQuestionDetail::WatchValueQ(Box::new(watch_value_q)),
);
#[cfg(feature = "debug-dht")]
log_rpc!(debug "{}", debug_string);
let waitable_reply =
network_result_try!(self.question(dest.clone(), question, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.reply_private_route;
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
TimeoutOr::Value(v) => v,
};
// Get the right answer type
let (_, _, _, kind) = msg.operation.destructure();
let watch_value_a = match kind {
RPCOperationKind::Answer(a) => match a.destructure() {
RPCAnswerDetail::WatchValueA(a) => a,
_ => return Ok(NetworkResult::invalid_message("not a watchvalue answer")),
},
_ => return Ok(NetworkResult::invalid_message("not an answer")),
};
let (expiration, peers) = watch_value_a.destructure();
#[cfg(feature = "debug-dht")]
{
let debug_string_answer = format!(
"OUT <== WatchValueA({} #{:?}@{} peers={}) <= {}",
key,
subkeys,
expiration,
peers.len(),
dest
);
log_rpc!(debug "{}", debug_string_answer);
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
.collect();
log_rpc!(debug "Peers: {:#?}", peer_ids);
}
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
Ok(v) => v,
Err(e) => {
return Ok(NetworkResult::invalid_message(format!(
"missing cryptosystem in peers node ids: {}",
e
)));
}
};
if !valid {
return Ok(NetworkResult::invalid_message("non-closer peers returned"));
}
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.latency", latency.as_u64());
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.expiration", latency.as_u64());
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.peers.len", peers.len());
Ok(NetworkResult::value(Answer::new(
latency,
reply_private_route,
WatchValueAnswer {
expiration_ts: Timestamp::new(expiration),
peers,
},
)))
}
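The closer-peer validation above protects the requester from a responder that pads its answer with arbitrary nodes: every returned peer must be strictly closer to the record key than the node that was queried. A minimal sketch of the idea, assuming a generic XOR-style distance function; the real check lives in RoutingTable::verify_peers_closer and the names below are illustrative:

// Illustrative sketch only, not the RoutingTable API: returns true if
// every peer id is strictly closer to `key` than the queried node.
// `distance` stands in for the DHT's XOR distance metric.
fn peers_are_closer<D: Ord>(
    distance: impl Fn(&CryptoKey, &CryptoKey) -> D,
    queried_node: &CryptoKey,
    key: &CryptoKey,
    peer_ids: &[CryptoKey],
) -> bool {
    let own_distance = distance(queried_node, key);
    peer_ids.iter().all(|p| distance(p, key) < own_distance)
}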
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err))]
pub(crate) async fn process_watch_value_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
let routing_table = self.routing_table();
let rss = routing_table.route_spec_store();
// Ensure this never came over a private route, safety route is okay though
match &msg.header.detail {
RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {}
RPCMessageHeaderDetail::PrivateRouted(_) => {
return Ok(NetworkResult::invalid_message(
"not processing watch value request over private route",
))
}
}
// Ignore if disabled
let routing_table = self.routing_table();
let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
if !opi.signed_node_info().node_info().has_capability(CAP_DHT) {
return Ok(NetworkResult::service_unavailable("dht is not available"));
}
Err(RPCError::unimplemented("process_watch_value_q"))
if !opi
.signed_node_info()
.node_info()
.has_capability(CAP_DHT_WATCH)
{
return Ok(NetworkResult::service_unavailable(
"dht watch is not available",
));
}
// Get the question
let kind = msg.operation.kind().clone();
let watch_value_q = match kind {
RPCOperationKind::Question(q) => match q.destructure() {
(_, RPCQuestionDetail::WatchValueQ(q)) => q,
_ => panic!("not a watchvalue question"),
},
_ => panic!("not a question"),
};
// Destructure
let (key, subkeys, expiration, count, watcher, _signature) = watch_value_q.destructure();
// Get target for ValueChanged notifications
let dest = network_result_try!(self.get_respond_to_destination(&msg));
let target = dest.get_target(rss)?;
#[cfg(feature = "debug-dht")]
{
let debug_string = format!(
"IN <=== WatchValueQ({} {}@{}+{}) <== {} (watcher={})",
key,
subkeys,
expiration,
count,
msg.header.direct_sender_node_id(),
watcher
);
log_rpc!(debug "{}", debug_string);
}
// Get the nodes that we know about that are closer to the key than our own node
let closer_to_key_peers = network_result_try!(
routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH])
);
// See if we would have accepted this as a set
let set_value_count = {
let c = self.config.get();
c.network.dht.set_value_count as usize
};
let ret_expiration = if closer_to_key_peers.len() >= set_value_count {
// Not close enough
#[cfg(feature = "debug-dht")]
log_rpc!(debug "Not close enough for watch value");
Timestamp::default()
} else {
// Close enough, let's watch it
// See if we have this record ourselves, if so, accept the watch
let storage_manager = self.storage_manager();
network_result_try!(storage_manager
.inbound_watch_value(
key,
subkeys.clone(),
Timestamp::new(expiration),
count,
target,
watcher
)
.await
.map_err(RPCError::internal)?)
};
#[cfg(feature = "debug-dht")]
{
let debug_string_answer = format!(
"IN ===> WatchValueA({} #{} expiration={} peers={}) ==> {}",
key,
subkeys,
ret_expiration,
closer_to_key_peers.len(),
msg.header.direct_sender_node_id()
);
log_rpc!(debug "{}", debug_string_answer);
}
// Make WatchValue answer
let watch_value_a = RPCOperationWatchValueA::new(
ret_expiration.as_u64(),
if ret_expiration.as_u64() == 0 {
closer_to_key_peers
} else {
vec![]
},
)?;
// Send WatchValue answer
self.answer(
msg,
RPCAnswer::new(RPCAnswerDetail::WatchValueA(Box::new(watch_value_a))),
)
.await
}
}
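Note the answer contract this handler enforces: a zero expiration means the watch was refused, and only then is the closer-peer list populated so the watcher can retry nearer nodes; an accepted watch returns a nonzero expiration and an empty peer list. A requester-side sketch of decoding that pair (the enum and helper are hypothetical, not part of the RPC types):

// Sketch: interpret a WatchValueA answer. Zero expiration means the
// node declined and the peers indicate where to try next; nonzero
// means the watch is active until that timestamp.
enum WatchOutcome {
    Accepted { expires: Timestamp },
    Declined { try_next: Vec<PeerInfo> },
}

fn interpret_watch_answer(expiration: u64, peers: Vec<PeerInfo>) -> WatchOutcome {
    if expiration == 0 {
        WatchOutcome::Declined { try_next: peers }
    } else {
        WatchOutcome::Accepted {
            expires: Timestamp::new(expiration),
        }
    }
}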

View File

@ -3,25 +3,33 @@ use super::*;
/// The context of the outbound_get_value operation
struct OutboundGetValueContext {
/// The latest value of the subkey, may be the value passed in
pub value: Option<SignedValueData>,
/// The consensus count for the value we have received
pub value_count: usize,
pub value: Option<Arc<SignedValueData>>,
/// The nodes that have returned the value so far (up to the consensus count)
pub value_nodes: Vec<NodeRef>,
/// The descriptor if we got a fresh one or empty if no descriptor was needed
pub descriptor: Option<SignedValueDescriptor>,
pub descriptor: Option<Arc<SignedValueDescriptor>>,
/// The parsed schema from the descriptor if we have one
pub schema: Option<DHTSchema>,
}
/// The result of the outbound_get_value operation
pub(super) struct OutboundGetValueResult {
/// The subkey that was retrieved
pub subkey_result: SubkeyResult,
/// And where it was retrieved from
pub value_nodes: Vec<NodeRef>,
}
impl StorageManager {
/// Perform a 'get value' query on the network
pub async fn outbound_get_value(
pub(super) async fn outbound_get_value(
&self,
rpc_processor: RPCProcessor,
key: TypedKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
last_subkey_result: SubkeyResult,
) -> VeilidAPIResult<SubkeyResult> {
) -> VeilidAPIResult<OutboundGetValueResult> {
let routing_table = rpc_processor.routing_table();
// Get the DHT parameters for 'GetValue'
@ -43,7 +51,7 @@ impl StorageManager {
};
let context = Arc::new(Mutex::new(OutboundGetValueContext {
value: last_subkey_result.value,
value_count: 0,
value_nodes: vec![],
descriptor: last_subkey_result.descriptor.clone(),
schema,
}));
@ -61,7 +69,7 @@ impl StorageManager {
Destination::direct(next_node.clone()).with_safety(safety_selection),
key,
subkey,
last_descriptor,
last_descriptor.map(|x| (*x).clone()),
)
.await?
);
@ -72,7 +80,7 @@ impl StorageManager {
let mut ctx = context.lock();
if ctx.descriptor.is_none() && ctx.schema.is_none() {
ctx.schema = Some(descriptor.schema().map_err(RPCError::invalid_format)?);
ctx.descriptor = Some(descriptor);
ctx.descriptor = Some(Arc::new(descriptor));
}
}
@ -116,20 +124,20 @@ impl StorageManager {
return Ok(NetworkResult::invalid_message("value data mismatch"));
}
// Increase the consensus count for the existing value
ctx.value_count += 1;
ctx.value_nodes.push(next_node);
} else if new_seq > prior_seq {
// If the sequence number is greater, start over with the new value
ctx.value = Some(value);
ctx.value = Some(Arc::new(value));
// One node has shown us this value so far
ctx.value_count = 1;
ctx.value_nodes = vec![next_node];
} else {
// If the sequence number is older, ignore it
}
} else {
// If we have no prior value, keep it
ctx.value = Some(value);
ctx.value = Some(Arc::new(value));
// One node has shown us this value so far
ctx.value_count = 1;
ctx.value_nodes = vec![next_node];
}
}
@ -145,7 +153,9 @@ impl StorageManager {
let check_done = |_closest_nodes: &[NodeRef]| {
// If we have reached sufficient consensus, return done
let ctx = context.lock();
if ctx.value.is_some() && ctx.descriptor.is_some() && ctx.value_count >= consensus_count
if ctx.value.is_some()
&& ctx.descriptor.is_some()
&& ctx.value_nodes.len() >= consensus_count
{
return Some(());
}
@ -159,52 +169,61 @@ impl StorageManager {
key_count,
fanout,
timeout_us,
capability_fanout_node_info_filter(vec![CAP_DHT]),
capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
call_routine,
check_done,
);
match fanout_call.run().await {
match fanout_call.run(vec![]).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.value_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "GetValue Fanout Timeout Consensus");
} else {
log_stor!(debug "GetValue Fanout Timeout Non-Consensus: {}", ctx.value_count);
log_stor!(debug "GetValue Fanout Timeout Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
Ok(OutboundGetValueResult {
subkey_result: SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
},
value_nodes: ctx.value_nodes.clone(),
})
}
// If we finished with consensus (enough nodes returning the same value)
TimeoutOr::Value(Ok(Some(()))) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.value_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "GetValue Fanout Consensus");
} else {
log_stor!(debug "GetValue Fanout Non-Consensus: {}", ctx.value_count);
log_stor!(debug "GetValue Fanout Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
Ok(OutboundGetValueResult {
subkey_result: SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
},
value_nodes: ctx.value_nodes.clone(),
})
}
// If we finished without consensus (ran out of nodes before getting consensus)
TimeoutOr::Value(Ok(None)) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.value_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "GetValue Fanout Exhausted Consensus");
} else {
log_stor!(debug "GetValue Fanout Exhausted Non-Consensus: {}", ctx.value_count);
log_stor!(debug "GetValue Fanout Exhausted Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
Ok(OutboundGetValueResult {
subkey_result: SubkeyResult {
value: ctx.value.clone(),
descriptor: ctx.descriptor.clone(),
},
value_nodes: ctx.value_nodes.clone(),
})
}
// Failed
@ -224,18 +243,26 @@ impl StorageManager {
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<SubkeyResult>> {
let mut inner = self.lock().await?;
let res = match inner
.handle_get_remote_value(key, subkey, want_descriptor)
.await
{
Ok(res) => res,
Err(VeilidAPIError::Internal { message }) => {
apibail_internal!(message);
}
Err(e) => {
return Ok(NetworkResult::invalid_message(e));
// See if this is a remote or local value
let (_is_local, last_subkey_result) = {
// See if the subkey we are getting has a last known local value
let mut last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?;
// If this is local, it must have a descriptor already
if last_subkey_result.descriptor.is_some() {
if !want_descriptor {
last_subkey_result.descriptor = None;
}
(true, last_subkey_result)
} else {
// See if the subkey we are getting has a last known remote value
let last_subkey_result = inner
.handle_get_remote_value(key, subkey, want_descriptor)
.await?;
(false, last_subkey_result)
}
};
Ok(NetworkResult::value(res))
Ok(NetworkResult::value(last_subkey_result))
}
}
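The precedence here is deliberate: locally opened records are always created with a descriptor, so the presence of one is what distinguishes a local record from a cached remote one, and the descriptor is stripped from the reply when the caller did not ask for it. A condensed sketch of that flow (get_local and get_remote are hypothetical stand-ins for the record store calls):

// Sketch of the local-first lookup, assuming hypothetical helpers.
async fn lookup_subkey(
    key: TypedKey,
    subkey: ValueSubkey,
    want_descriptor: bool,
) -> VeilidAPIResult<SubkeyResult> {
    // Always request the descriptor locally; it marks the record as local
    let mut local = get_local(key, subkey, true).await?;
    if local.descriptor.is_some() {
        if !want_descriptor {
            local.descriptor = None; // caller didn't ask for it
        }
        return Ok(local);
    }
    // Not a local record; fall back to the remote store
    get_remote(key, subkey, want_descriptor).await
}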

View File

@ -8,6 +8,7 @@ mod set_value;
mod storage_manager_inner;
mod tasks;
mod types;
mod watch_value;
use keys::*;
use limited_size::*;
@ -30,6 +31,20 @@ const MAX_RECORD_DATA_SIZE: usize = 1_048_576;
const FLUSH_RECORD_STORES_INTERVAL_SECS: u32 = 1;
/// Frequency to check for offline subkeys writes to send to the network
const OFFLINE_SUBKEY_WRITES_INTERVAL_SECS: u32 = 1;
/// Frequency to send ValueChanged notifications to the network
const SEND_VALUE_CHANGES_INTERVAL_SECS: u32 = 1;
/// Frequency to check for dead nodes and routes for active watches
const CHECK_ACTIVE_WATCHES_INTERVAL_SECS: u32 = 1;
#[derive(Debug, Clone)]
/// A single 'value changed' message to send
struct ValueChangedInfo {
target: Target,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
count: u32,
value: Arc<SignedValueData>,
}
struct StorageManagerUnlockedInner {
config: VeilidConfig,
@ -41,6 +56,11 @@ struct StorageManagerUnlockedInner {
// Background processes
flush_record_stores_task: TickTask<EyreReport>,
offline_subkey_writes_task: TickTask<EyreReport>,
send_value_changes_task: TickTask<EyreReport>,
check_active_watches_task: TickTask<EyreReport>,
// Anonymous watch keys
anonymous_watch_keys: TypedKeyPairGroup,
}
#[derive(Clone)]
@ -56,6 +76,14 @@ impl StorageManager {
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
) -> StorageManagerUnlockedInner {
// Generate keys to use for anonymous watches
let mut anonymous_watch_keys = TypedKeyPairGroup::new();
for ck in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(ck).unwrap();
let kp = vcrypto.generate_keypair();
anonymous_watch_keys.add(TypedKeyPair::new(ck, kp));
}
StorageManagerUnlockedInner {
config,
crypto,
@ -64,6 +92,10 @@ impl StorageManager {
block_store,
flush_record_stores_task: TickTask::new(FLUSH_RECORD_STORES_INTERVAL_SECS),
offline_subkey_writes_task: TickTask::new(OFFLINE_SUBKEY_WRITES_INTERVAL_SECS),
send_value_changes_task: TickTask::new(SEND_VALUE_CHANGES_INTERVAL_SECS),
check_active_watches_task: TickTask::new(CHECK_ACTIVE_WATCHES_INTERVAL_SECS),
anonymous_watch_keys,
}
}
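The anonymous watch keys give this node a stable per-crypto-kind watcher identity for records it can read but has no writer keypair for; without one, a remote node could not match a later cancel or renewal to the original watch. A sketch of how a watcher key might be chosen (the selection helper and the .get accessor on TypedKeyPairGroup are assumptions):

// Sketch: prefer the record's writer as the watcher, so member watch
// limits apply; otherwise fall back to this node's anonymous keypair
// for the record's crypto kind. Accessor names are illustrative.
fn select_watcher(
    opt_writer: Option<KeyPair>,
    anonymous_watch_keys: &TypedKeyPairGroup,
    kind: CryptoKind,
) -> Option<KeyPair> {
    opt_writer.or_else(|| anonymous_watch_keys.get(kind).map(|tkp| tkp.value))
}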
fn new_inner(unlocked_inner: Arc<StorageManagerUnlockedInner>) -> StorageManagerInner {
@ -94,11 +126,11 @@ impl StorageManager {
}
#[instrument(level = "debug", skip_all, err)]
pub async fn init(&self) -> EyreResult<()> {
pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
debug!("startup storage manager");
let mut inner = self.inner.lock().await;
inner.init(self.clone()).await?;
inner.init(self.clone(), update_callback).await?;
Ok(())
}
@ -121,7 +153,12 @@ impl StorageManager {
pub async fn set_rpc_processor(&self, opt_rpc_processor: Option<RPCProcessor>) {
let mut inner = self.inner.lock().await;
inner.rpc_processor = opt_rpc_processor
inner.opt_rpc_processor = opt_rpc_processor
}
pub async fn set_routing_table(&self, opt_routing_table: Option<RoutingTable>) {
let mut inner = self.inner.lock().await;
inner.opt_routing_table = opt_routing_table
}
async fn lock(&self) -> VeilidAPIResult<AsyncMutexGuardArc<StorageManagerInner>> {
@ -133,7 +170,7 @@ impl StorageManager {
}
fn online_writes_ready_inner(inner: &StorageManagerInner) -> Option<RPCProcessor> {
if let Some(rpc_processor) = { inner.rpc_processor.clone() } {
if let Some(rpc_processor) = { inner.opt_rpc_processor.clone() } {
if let Some(network_class) = rpc_processor
.routing_table()
.get_network_class(RoutingDomain::PublicInternet)
@ -186,9 +223,7 @@ impl StorageManager {
.map(|r| r.unwrap())
}
/// Open an existing local record if it exists,
/// and if it doesn't exist locally, try to pull it from the network and
/// open it and return the opened descriptor
/// Open an existing local record if it exists, and if it doesn't exist locally, try to pull it from the network and open it and return the opened descriptor
pub async fn open_record(
&self,
key: TypedKey,
@ -208,7 +243,7 @@ impl StorageManager {
// No record yet, try to get it from the network
// Get rpc processor and drop mutex so we don't block while getting the value from the network
let Some(rpc_processor) = inner.rpc_processor.clone() else {
let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
apibail_try_again!("offline, try again later");
};
@ -218,7 +253,7 @@ impl StorageManager {
// No last descriptor, no last value
// Use the safety selection we opened the record with
let subkey: ValueSubkey = 0;
let subkey_result = self
let result = self
.outbound_get_value(
rpc_processor,
key,
@ -229,7 +264,7 @@ impl StorageManager {
.await?;
// If we got nothing back, the key wasn't found
if subkey_result.value.is_none() && subkey_result.descriptor.is_none() {
if result.subkey_result.value.is_none() && result.subkey_result.descriptor.is_none() {
// No result
apibail_key_not_found!(key);
};
@ -250,25 +285,61 @@ impl StorageManager {
// Open the new record
inner
.open_new_record(key, writer, subkey, subkey_result, safety_selection)
.open_new_record(key, writer, subkey, result.subkey_result, safety_selection)
.await
}
/// Close an opened local record
pub async fn close_record(&self, key: TypedKey) -> VeilidAPIResult<()> {
let mut inner = self.lock().await?;
inner.close_record(key)
let (opt_opened_record, opt_rpc_processor) = {
let mut inner = self.lock().await?;
(inner.close_record(key)?, inner.opt_rpc_processor.clone())
};
// Send a one-time cancel request for the watch if we have one and we're online
if let Some(opened_record) = opt_opened_record {
if let Some(active_watch) = opened_record.active_watch() {
if let Some(rpc_processor) = opt_rpc_processor {
// Use the safety selection we opened the record with
// Use the writer we opened with as the 'watcher' as well
let opt_owvresult = self
.outbound_watch_value(
rpc_processor,
key,
ValueSubkeyRangeSet::full(),
Timestamp::new(0),
0,
opened_record.safety_selection(),
opened_record.writer().cloned(),
Some(active_watch.watch_node),
)
.await?;
if let Some(owvresult) = opt_owvresult {
if owvresult.expiration_ts.as_u64() != 0 {
log_stor!(debug
"close record watch cancel got unexpected expiration: {}",
owvresult.expiration_ts
);
}
} else {
log_stor!(debug "close record watch cancel unsuccessful");
}
} else {
log_stor!(debug "skipping last-ditch watch cancel because we are offline");
}
}
}
Ok(())
}
/// Delete a local record
pub async fn delete_record(&self, key: TypedKey) -> VeilidAPIResult<()> {
let mut inner = self.lock().await?;
// Ensure the record is closed
if inner.opened_records.contains_key(&key) {
inner.close_record(key)?;
}
self.close_record(key).await?;
// Get record from the local store
let mut inner = self.lock().await?;
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
@ -278,9 +349,6 @@ impl StorageManager {
}
/// Get the value of a subkey from an opened local record
/// This may refresh the record, and will do so if forced to or if the subkey is not yet available locally
/// Returns Ok(None) if no value was found
/// Returns Ok(Some(value)) if a value was found online or locally
pub async fn get_value(
&self,
key: TypedKey,
@ -301,17 +369,17 @@ impl StorageManager {
// Return the existing value if we have one unless we are forcing a refresh
if !force_refresh {
if let Some(last_subkey_result_value) = last_subkey_result.value {
return Ok(Some(last_subkey_result_value.into_value_data()));
return Ok(Some(last_subkey_result_value.value_data().clone()));
}
}
// Refresh if we can
// Get rpc processor and drop mutex so we don't block while getting the value from the network
let Some(rpc_processor) = inner.rpc_processor.clone() else {
let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
// Return the existing value if we have one if we aren't online
if let Some(last_subkey_result_value) = last_subkey_result.value {
return Ok(Some(last_subkey_result_value.into_value_data()));
return Ok(Some(last_subkey_result_value.value_data().clone()));
}
apibail_try_again!("offline, try again later");
};
@ -325,7 +393,7 @@ impl StorageManager {
.value
.as_ref()
.map(|v| v.value_data().seq());
let subkey_result = self
let result = self
.outbound_get_value(
rpc_processor,
key,
@ -336,25 +404,30 @@ impl StorageManager {
.await?;
// See if we got a value back
let Some(subkey_result_value) = subkey_result.value else {
let Some(subkey_result_value) = result.subkey_result.value else {
// If we got nothing back then we also had nothing beforehand, return nothing
return Ok(None);
};
// Keep the list of nodes that returned a value for later reference
let mut inner = self.lock().await?;
inner.set_value_nodes(key, result.value_nodes)?;
// If we got a new value back then write it to the opened record
if Some(subkey_result_value.value_data().seq()) != opt_last_seq {
let mut inner = self.lock().await?;
inner
.handle_set_local_value(key, subkey, subkey_result_value.clone())
.handle_set_local_value(
key,
subkey,
subkey_result_value.clone(),
WatchUpdateMode::UpdateAll,
)
.await?;
}
Ok(Some(subkey_result_value.into_value_data()))
Ok(Some(subkey_result_value.value_data().clone()))
}
/// Set the value of a subkey on an opened local record
/// Puts changes to the network immediately and may refresh the record if there is a newer subkey available online
/// Returns Ok(None) if the value was set
/// Returns Ok(Some(newer value)) if a newer value was found online
pub async fn set_value(
&self,
key: TypedKey,
@ -414,13 +487,13 @@ impl StorageManager {
}
// Sign the new value data with the writer
let signed_value_data = SignedValueData::make_signature(
let signed_value_data = Arc::new(SignedValueData::make_signature(
value_data,
descriptor.owner(),
subkey,
vcrypto,
writer.secret,
)?;
)?);
// Get rpc processor and drop mutex so we don't block while getting the value from the network
let Some(rpc_processor) = Self::online_writes_ready_inner(&inner) else {
@ -428,7 +501,12 @@ impl StorageManager {
// Offline, just write it locally and return immediately
inner
.handle_set_local_value(key, subkey, signed_value_data.clone())
.handle_set_local_value(
key,
subkey,
signed_value_data.clone(),
WatchUpdateMode::UpdateAll,
)
.await?;
log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
@ -450,7 +528,7 @@ impl StorageManager {
drop(inner);
// Use the safety selection we opened the record with
let final_signed_value_data = self
let result = self
.outbound_set_value(
rpc_processor,
key,
@ -461,38 +539,213 @@ impl StorageManager {
)
.await?;
// Whatever record we got back, store it locally, might be newer than the one we asked to save
// Keep the list of nodes that returned a value for later reference
let mut inner = self.lock().await?;
inner.set_value_nodes(key, result.value_nodes)?;
// Whatever record we got back, store it locally, might be newer than the one we asked to save
inner
.handle_set_local_value(key, subkey, final_signed_value_data.clone())
.handle_set_local_value(
key,
subkey,
result.signed_value_data.clone(),
WatchUpdateMode::UpdateAll,
)
.await?;
// Return the new value if it differs from what was asked to set
if final_signed_value_data.value_data() != signed_value_data.value_data() {
return Ok(Some(final_signed_value_data.into_value_data()));
if result.signed_value_data.value_data() != signed_value_data.value_data() {
return Ok(Some(result.signed_value_data.value_data().clone()));
}
// If the original value was set, return None
Ok(None)
}
/// Add a watch to a DHT value
pub async fn watch_values(
&self,
_key: TypedKey,
_subkeys: ValueSubkeyRangeSet,
_expiration: Timestamp,
_count: u32,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
) -> VeilidAPIResult<Timestamp> {
let _inner = self.lock().await?;
unimplemented!();
let inner = self.lock().await?;
// Rewrite subkey range if empty to full
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {
subkeys
};
// Get the safety selection and the writer we opened this record with
let (safety_selection, opt_writer, opt_watch_node) = {
let Some(opened_record) = inner.opened_records.get(&key) else {
apibail_generic!("record not open");
};
(
opened_record.safety_selection(),
opened_record.writer().cloned(),
opened_record.active_watch().map(|aw| aw.watch_node.clone()),
)
};
// Get rpc processor and drop mutex so we don't block while requesting the watch from the network
let Some(rpc_processor) = inner.opt_rpc_processor.clone() else {
apibail_try_again!("offline, try again later");
};
// Drop the lock for network access
drop(inner);
// Use the safety selection we opened the record with
// Use the writer we opened with as the 'watcher' as well
let opt_owvresult = self
.outbound_watch_value(
rpc_processor,
key,
subkeys.clone(),
expiration,
count,
safety_selection,
opt_writer,
opt_watch_node,
)
.await?;
// If we did not get a valid response, return a zero timestamp
let Some(owvresult) = opt_owvresult else {
return Ok(Timestamp::new(0));
};
// Clear any existing watch if the watch succeeded or got cancelled
let mut inner = self.lock().await?;
let Some(opened_record) = inner.opened_records.get_mut(&key) else {
apibail_generic!("record not open");
};
opened_record.clear_active_watch();
// Get the minimum expiration timestamp we will accept
let (rpc_timeout_us, max_watch_expiration_us) = {
let c = self.unlocked_inner.config.get();
(
TimestampDuration::from(ms_to_us(c.network.rpc.timeout_ms)),
TimestampDuration::from(ms_to_us(c.network.dht.max_watch_expiration_ms)),
)
};
let cur_ts = get_timestamp();
let min_expiration_ts = cur_ts + rpc_timeout_us.as_u64();
let max_expiration_ts = if expiration.as_u64() == 0 {
cur_ts + max_watch_expiration_us.as_u64()
} else {
expiration.as_u64()
};
// If the expiration time is less than our minimum expiration time, consider this watch cancelled
let mut expiration_ts = owvresult.expiration_ts;
if expiration_ts.as_u64() < min_expiration_ts {
return Ok(Timestamp::new(0));
}
// If the expiration time is greater than our maximum expiration time, clamp our local watch so we ignore extra ValueChanged messages
if expiration_ts.as_u64() > max_expiration_ts {
expiration_ts = Timestamp::new(max_expiration_ts);
}
// If we requested a cancellation, then consider this watch cancelled
if count == 0 {
return Ok(Timestamp::new(0));
}
// Keep a record of the watch
opened_record.set_active_watch(ActiveWatch {
expiration_ts,
watch_node: owvresult.watch_node,
opt_value_changed_route: owvresult.opt_value_changed_route,
subkeys,
count,
});
Ok(owvresult.expiration_ts)
}
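The clamping above reduces to a pure function of the requested and returned expirations: anything expiring within one RPC timeout of now is useless, since it could lapse before the answer even settles, and anything past the local maximum is trimmed so stray ValueChanged messages after that point are ignored. A worked sketch with all inputs in microseconds (illustrative, not the actual helper):

// Sketch of the expiration policy. Returns None when the watch should
// be treated as cancelled. requested == 0 means "as long as possible".
fn clamp_watch_expiration(
    cur_ts: u64,
    rpc_timeout_us: u64,
    max_watch_expiration_us: u64,
    requested: u64,
    returned: u64,
) -> Option<Timestamp> {
    let min_ts = cur_ts + rpc_timeout_us;
    let max_ts = if requested == 0 {
        cur_ts + max_watch_expiration_us
    } else {
        requested
    };
    if returned < min_ts {
        return None; // expires too soon, treat as cancelled
    }
    Some(Timestamp::new(returned.min(max_ts)))
}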
pub async fn cancel_watch_values(
&self,
_key: TypedKey,
_subkeys: ValueSubkeyRangeSet,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
) -> VeilidAPIResult<bool> {
let _inner = self.lock().await?;
unimplemented!();
let (subkeys, active_watch) = {
let inner = self.lock().await?;
let Some(opened_record) = inner.opened_records.get(&key) else {
apibail_generic!("record not open");
};
// See what watch we have currently if any
let Some(active_watch) = opened_record.active_watch() else {
// If we didn't have an active watch, then we can just return false because there's nothing to do here
return Ok(false);
};
// Rewrite subkey range if empty to full
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {
subkeys
};
// Reduce the subkey range
let new_subkeys = active_watch.subkeys.difference(&subkeys);
(new_subkeys, active_watch)
};
// If we have no subkeys left, then set the count to zero to indicate a full cancellation
let count = if subkeys.is_empty() {
0
} else {
active_watch.count
};
// Update the watch
let expiration_ts = self
.watch_values(key, subkeys, active_watch.expiration_ts, count)
.await?;
// A zero expiration time means the watch is done or nothing is left, and the watch is no longer active
if expiration_ts.as_u64() == 0 {
return Ok(false);
}
Ok(true)
}
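Cancellation is expressed entirely through the watch call itself: the cancelled subkeys are subtracted from the active range and the watch is re-issued, with a count of zero only when nothing remains. For example, an active watch over subkeys 0..=3 with 1..=2 cancelled renews over {0, 3} at the original count, while cancelling 0..=3 sends count = 0, a full cancel. The reduction in isolation (illustrative helper):

// Sketch: reduce an active watch by a cancelled subkey range.
// A count of zero signals full cancellation to the remote node.
fn reduce_watch(
    active: &ValueSubkeyRangeSet,
    cancel: &ValueSubkeyRangeSet,
    old_count: u32,
) -> (ValueSubkeyRangeSet, u32) {
    let remaining = active.difference(cancel);
    let count = if remaining.is_empty() { 0 } else { old_count };
    (remaining, count)
}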
// Send single value change out to the network
#[instrument(level = "trace", skip(self), err)]
async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> {
let rpc_processor = {
let inner = self.inner.lock().await;
if let Some(rpc_processor) = &inner.opt_rpc_processor {
rpc_processor.clone()
} else {
apibail_try_again!("network is not available");
}
};
let dest = rpc_processor
.resolve_target_to_destination(
vc.target.clone(),
SafetySelection::Unsafe(Sequencing::NoPreference),
)
.await
.map_err(VeilidAPIError::from)?;
network_result_value_or_log!(rpc_processor
.rpc_call_value_changed(dest, vc.key, vc.subkeys.clone(), vc.count, (*vc.value).clone())
.await
.map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {
});
Ok(())
}
}

View File

@ -13,16 +13,40 @@ struct DeadRecord<D>
where
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
{
// The key used in the record_index
/// The key used in the record_index
key: RecordTableKey,
// The actual record
/// The actual record
record: Record<D>,
// True if this record is accounted for in the total storage
// and needs to have the statistics updated or not when purged
/// True if this record is accounted for in the total storage
/// and needs to have the statistics updated or not when purged
in_total_storage: bool,
}
pub struct RecordStore<D>
/// An individual watch
#[derive(Debug, Clone)]
struct WatchedRecordWatch {
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
target: Target,
watcher: CryptoKey,
changed: ValueSubkeyRangeSet,
}
#[derive(Debug, Default, Clone)]
/// A record being watched for changes
struct WatchedRecord {
/// The list of active watchers
watchers: Vec<WatchedRecordWatch>,
}
pub(super) enum WatchUpdateMode {
NoUpdate,
UpdateAll,
ExcludeTarget(Target),
}
pub(super) struct RecordStore<D>
where
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
{
@ -46,6 +70,10 @@ where
dead_records: Vec<DeadRecord<D>>,
/// The list of records that have changed since last flush to disk (optimization for batched writes)
changed_records: HashSet<RecordTableKey>,
/// The list of records being watched for changes
watched_records: HashMap<RecordTableKey, WatchedRecord>,
/// The list of watched records that have changed values since last notification
changed_watched_values: HashSet<RecordTableKey>,
/// A mutex to ensure we handle this concurrently
purge_dead_records_mutex: Arc<AsyncMutex<()>>,
@ -55,9 +83,9 @@ where
#[derive(Default, Debug)]
pub struct SubkeyResult {
/// The subkey value if we got one
pub value: Option<SignedValueData>,
pub value: Option<Arc<SignedValueData>>,
/// The descriptor if we got a fresh one or empty if no descriptor was needed
pub descriptor: Option<SignedValueDescriptor>,
pub descriptor: Option<Arc<SignedValueDescriptor>>,
}
impl<D> RecordStore<D>
@ -93,7 +121,9 @@ where
),
dead_records: Vec::new(),
changed_records: HashSet::new(),
watched_records: HashMap::new(),
purge_dead_records_mutex: Arc::new(AsyncMutex::new(())),
changed_watched_values: HashSet::new(),
}
}
@ -174,10 +204,6 @@ where
});
}
fn mark_record_changed(&mut self, key: RecordTableKey) {
self.changed_records.insert(key);
}
fn add_to_subkey_cache(&mut self, key: SubkeyTableKey, record_data: RecordData) {
let record_data_total_size = record_data.total_size();
// Write to subkey cache
@ -292,7 +318,6 @@ where
}
async fn flush_changed_records(&mut self) {
// touch records
if self.changed_records.is_empty() {
return;
}
@ -314,7 +339,7 @@ where
}
}
pub async fn tick(&mut self) -> EyreResult<()> {
pub async fn flush(&mut self) -> EyreResult<()> {
self.flush_changed_records().await;
self.purge_dead_records(true).await;
Ok(())
@ -396,7 +421,9 @@ where
record.touch(get_aligned_timestamp());
}
if out.is_some() {
self.mark_record_changed(rtk);
// Marks as changed because the record was touched and we want to keep the
// LRU ordering serialized
self.changed_records.insert(rtk);
}
out
@ -431,16 +458,14 @@ where
record.touch(get_aligned_timestamp());
}
if out.is_some() {
self.mark_record_changed(rtk);
// Marks as changed because the record was touched and we want to keep the
// LRU ordering serialized
self.changed_records.insert(rtk);
}
out
}
// pub fn get_descriptor(&mut self, key: TypedKey) -> Option<SignedValueDescriptor> {
// self.with_record(key, |record| record.descriptor().clone())
// }
pub async fn get_subkey(
&mut self,
key: TypedKey,
@ -580,11 +605,40 @@ where
}))
}
async fn update_watched_value(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
opt_ignore_target: Option<Target>,
) {
let rtk = RecordTableKey { key };
let Some(wr) = self.watched_records.get_mut(&rtk) else {
return;
};
// Update all watchers
let mut changed = false;
for w in &mut wr.watchers {
// If this watcher is watching the changed subkey then add to the watcher's changed list
// Don't bother marking changes for value sets coming from the same watching node/target because they
// are already going to be aware of the changes in that case
if Some(&w.target) != opt_ignore_target.as_ref()
&& w.subkeys.contains(subkey)
&& w.changed.insert(subkey)
{
changed = true;
}
}
if changed {
self.changed_watched_values.insert(rtk);
}
}
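The ignore-target check encodes one rule: the node that just set a subkey should not be notified of its own write, while every other watcher of that subkey should be. The predicate in isolation (illustrative, mirroring the condition above):

// Sketch: should this watcher be told that `subkey` changed?
// Skip the change's originator; require the subkey to be in range.
fn should_notify(
    watcher_target: &Target,
    watched_subkeys: &ValueSubkeyRangeSet,
    subkey: ValueSubkey,
    opt_ignore_target: Option<&Target>,
) -> bool {
    Some(watcher_target) != opt_ignore_target && watched_subkeys.contains(subkey)
}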
pub async fn set_subkey(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
signed_value_data: SignedValueData,
signed_value_data: Arc<SignedValueData>,
watch_update_mode: WatchUpdateMode,
) -> VeilidAPIResult<()> {
// Check size limit for data
if signed_value_data.value_data().data().len() > self.limits.max_subkey_size {
@ -672,9 +726,241 @@ where
// Update storage space
self.total_storage_space.commit().unwrap();
// Update watched value
let (do_update, opt_ignore_target) = match watch_update_mode {
WatchUpdateMode::NoUpdate => (false, None),
WatchUpdateMode::UpdateAll => (true, None),
WatchUpdateMode::ExcludeTarget(target) => (true, Some(target)),
};
if do_update {
self.update_watched_value(key, subkey, opt_ignore_target)
.await;
}
Ok(())
}
/// Add a record watch for changes
pub async fn watch_record(
&mut self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
mut expiration: Timestamp,
count: u32,
target: Target,
watcher: CryptoKey,
) -> VeilidAPIResult<Option<Timestamp>> {
// If subkeys is empty or count is zero then we're cancelling a watch completely
if subkeys.is_empty() || count == 0 {
return self.cancel_watch(key, target, watcher).await;
}
// See if expiration timestamp is too far in the future or not enough in the future
let cur_ts = get_timestamp();
let max_ts = cur_ts + self.limits.max_watch_expiration.as_u64();
let min_ts = cur_ts + self.limits.min_watch_expiration.as_u64();
if expiration.as_u64() == 0 || expiration.as_u64() > max_ts {
// Clamp the expiration to the max time (or set a zero expiration to the max)
expiration = Timestamp::new(max_ts);
} else if expiration.as_u64() < min_ts {
// Don't add watches with too low of an expiration time
return Ok(None);
}
// Get the record being watched
let Some(is_member) = self.with_record(key, |record| {
// Check if the watcher specified is a schema member
let schema = record.schema();
(*record.owner()) == watcher || schema.is_member(&watcher)
}) else {
// Record not found
return Ok(None);
};
// See if we are updating an existing watch
// with the watcher matched on target
let mut watch_count = 0;
let rtk = RecordTableKey { key };
if let Some(watch) = self.watched_records.get_mut(&rtk) {
for w in &mut watch.watchers {
if w.watcher == watcher {
watch_count += 1;
// Only one watch for an anonymous watcher
// Allow members to have one watch per target
if !is_member || w.target == target {
// Updating an existing watch
w.subkeys = subkeys;
w.expiration = expiration;
w.count = count;
return Ok(Some(expiration));
}
}
}
}
// Adding a new watcher to a watch
// Check watch table for limits
if is_member {
// Member watch
if watch_count >= self.limits.member_watch_limit {
// Too many watches
return Ok(None);
}
} else {
// Public watch
if watch_count >= self.limits.public_watch_limit {
// Too many watches
return Ok(None);
}
}
// Ok this is an acceptable new watch, add it
let watch = self.watched_records.entry(rtk).or_default();
watch.watchers.push(WatchedRecordWatch {
subkeys,
expiration,
count,
target,
watcher,
changed: ValueSubkeyRangeSet::new(),
});
Ok(Some(expiration))
}
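The two limits interact with the matching rules above: an anonymous (non-member) watcher gets at most one watch per record, updated in place regardless of target, while a schema member may hold one watch per distinct target, up to member_watch_limit. The admission check reduces to this (illustrative helper, limits as in RecordStoreLimits):

// Sketch: can this watcher add another watch to the record?
// `watch_count` counts the watcher's existing watches on this record.
fn admit_new_watch(is_member: bool, watch_count: usize, limits: &RecordStoreLimits) -> bool {
    let limit = if is_member {
        limits.member_watch_limit
    } else {
        limits.public_watch_limit
    };
    watch_count < limit
}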
/// Cancel a record watch for changes
async fn cancel_watch(
&mut self,
key: TypedKey,
target: Target,
watcher: CryptoKey,
) -> VeilidAPIResult<Option<Timestamp>> {
// Get the record being watched
let Some(is_member) = self.with_record(key, |record| {
// Check if the watcher specified is a schema member
let schema = record.schema();
(*record.owner()) == watcher || schema.is_member(&watcher)
}) else {
// Record not found
return Ok(None);
};
// See if we are cancelling an existing watch
// with the watcher matched on target
let rtk = RecordTableKey { key };
let mut is_empty = false;
let mut ret_timestamp = None;
if let Some(watch) = self.watched_records.get_mut(&rtk) {
let mut dead_watcher = None;
for (wn, w) in watch.watchers.iter_mut().enumerate() {
if w.watcher == watcher {
// Only one watch for an anonymous watcher
// Allow members to have one watch per target
if !is_member || w.target == target {
// Canceling an existing watch
dead_watcher = Some(wn);
ret_timestamp = Some(w.expiration);
break;
}
}
}
if let Some(dw) = dead_watcher {
watch.watchers.remove(dw);
if watch.watchers.is_empty() {
is_empty = true;
}
}
}
if is_empty {
self.watched_records.remove(&rtk);
}
Ok(ret_timestamp)
}
pub async fn take_value_changes(&mut self, changes: &mut Vec<ValueChangedInfo>) {
// ValueChangedInfo, but without the subkey data, which would require a double mutable borrow to fetch here
struct EarlyValueChangedInfo {
target: Target,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
count: u32,
}
let mut evcis = vec![];
let mut empty_watched_records = vec![];
for rtk in self.changed_watched_values.drain() {
if let Some(watch) = self.watched_records.get_mut(&rtk) {
// Process watch notifications
let mut dead_watchers = vec![];
for (wn, w) in watch.watchers.iter_mut().enumerate() {
// Get the subkeys that have changed
let subkeys = w.changed.clone();
w.changed.clear();
// Reduce the remaining notification count;
// if the count goes to zero, mark this watcher dead
w.count -= 1;
let count = w.count;
if count == 0 {
dead_watchers.push(wn);
}
evcis.push(EarlyValueChangedInfo {
target: w.target.clone(),
key: rtk.key,
subkeys,
count,
});
}
// Remove in reverse so we don't have to offset the index to remove the right key
for dw in dead_watchers.iter().rev().copied() {
watch.watchers.remove(dw);
if watch.watchers.is_empty() {
empty_watched_records.push(rtk);
}
}
}
}
for ewr in empty_watched_records {
self.watched_records.remove(&ewr);
}
for evci in evcis {
// Get the first subkey data
let Some(first_subkey) = evci.subkeys.first() else {
log_stor!(error "first subkey should exist for value change notification");
continue;
};
let subkey_result = match self.get_subkey(evci.key, first_subkey, false).await {
Ok(Some(skr)) => skr,
Ok(None) => {
log_stor!(error "subkey should have data for value change notification");
continue;
}
Err(e) => {
log_stor!(error "error getting subkey data for value change notification: {}", e);
continue;
}
};
let Some(value) = subkey_result.value else {
log_stor!(error "first subkey should have had value for value change notification");
continue;
};
changes.push(ValueChangedInfo {
target: evci.target,
key: evci.key,
subkeys: evci.subkeys,
count: evci.count,
value,
});
}
}
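Each delivered notification consumes one unit of the watch's count, so a watch requested with count = 3 yields exactly three ValueChanged messages, and the last one carries count = 0 so the client knows the watch has ended. A compressed sketch of that lifecycle (hypothetical helper):

// Sketch: one notification tick for a single watcher. Returns the
// count to send in the ValueChanged message, or None if the watcher
// was already spent. A returned 0 marks the final message.
fn notify_tick(remaining: &mut u32) -> Option<u32> {
    if *remaining == 0 {
        return None;
    }
    *remaining -= 1;
    Some(*remaining)
}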
/// LRU out some records until we reclaim the amount of space requested
/// This will force a garbage collection of the space immediately
/// If zero is passed in here, a garbage collection will be performed of dead records
@ -694,7 +980,7 @@ where
reclaimed
}
pub(super) fn debug_records(&self) -> String {
pub fn debug_records(&self) -> String {
// Dump fields in an abbreviated way
let mut out = String::new();
@ -726,16 +1012,12 @@ where
out
}
pub(super) fn debug_record_info(&self, key: TypedKey) -> String {
pub fn debug_record_info(&self, key: TypedKey) -> String {
self.peek_record(key, |r| format!("{:#?}", r))
.unwrap_or("Not found".to_owned())
}
pub(super) async fn debug_record_subkey_info(
&self,
key: TypedKey,
subkey: ValueSubkey,
) -> String {
pub async fn debug_record_subkey_info(&self, key: TypedKey, subkey: ValueSubkey) -> String {
match self.peek_subkey(key, subkey, true).await {
Ok(Some(v)) => {
format!("{:#?}", v)

View File

@ -1,3 +1,5 @@
use super::*;
/// Configuration for the record store
#[derive(Debug, Default, Copy, Clone)]
pub struct RecordStoreLimits {
@ -13,4 +15,12 @@ pub struct RecordStoreLimits {
pub max_subkey_cache_memory_mb: Option<usize>,
/// Limit on the amount of storage space to use for subkey data and record data
pub max_storage_space_mb: Option<usize>,
/// Max number of anonymous watches
pub public_watch_limit: usize,
/// Max number of watches per schema member
pub member_watch_limit: usize,
/// Max expiration duration per watch
pub max_watch_expiration: TimestampDuration,
/// Min expiration duration per watch
pub min_watch_expiration: TimestampDuration,
}
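For orientation, the new watch fields slot in alongside the existing storage limits; a sketch of constructing one follows (the numbers are illustrative, not defaults; the real values are derived from VeilidConfig in storage_manager_inner below):

// Illustrative only: RecordStoreLimits with the new watch fields set,
// leaning on the struct's Default for the pre-existing fields.
let limits = RecordStoreLimits {
    public_watch_limit: 32,
    member_watch_limit: 8,
    max_watch_expiration: TimestampDuration::new(ms_to_us(600_000)),
    min_watch_expiration: TimestampDuration::new(ms_to_us(10_000)),
    ..Default::default()
};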

View File

@ -3,26 +3,34 @@ use super::*;
/// The context of the outbound_set_value operation
struct OutboundSetValueContext {
/// The latest value of the subkey, may be the value passed in
pub value: SignedValueData,
/// The consensus count for the value we have received
pub set_count: usize,
pub value: Arc<SignedValueData>,
/// The nodes that have set the value so far (up to the consensus count)
pub value_nodes: Vec<NodeRef>,
/// The number of non-sets since the last set we have received
pub missed_since_last_set: usize,
/// The parsed schema from the descriptor if we have one
pub schema: DHTSchema,
}
/// The result of the outbound_set_value operation
pub(super) struct OutboundSetValueResult {
/// The value that was set
pub signed_value_data: Arc<SignedValueData>,
/// And where it was set to
pub value_nodes: Vec<NodeRef>,
}
impl StorageManager {
/// Perform a 'set value' query on the network
pub async fn outbound_set_value(
pub(super) async fn outbound_set_value(
&self,
rpc_processor: RPCProcessor,
key: TypedKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
value: SignedValueData,
descriptor: SignedValueDescriptor,
) -> VeilidAPIResult<SignedValueData> {
value: Arc<SignedValueData>,
descriptor: Arc<SignedValueDescriptor>,
) -> VeilidAPIResult<OutboundSetValueResult> {
let routing_table = rpc_processor.routing_table();
// Get the DHT parameters for 'SetValue'
@ -40,7 +48,7 @@ impl StorageManager {
let schema = descriptor.schema()?;
let context = Arc::new(Mutex::new(OutboundSetValueContext {
value,
set_count: 0,
value_nodes: vec![],
missed_since_last_set: 0,
schema,
}));
@ -67,8 +75,8 @@ impl StorageManager {
Destination::direct(next_node.clone()).with_safety(safety_selection),
key,
subkey,
value,
descriptor.clone(),
(*value).clone(),
(*descriptor).clone(),
send_descriptor,
)
.await?
@ -97,9 +105,9 @@ impl StorageManager {
let new_seq = value.value_data().seq();
if new_seq > prior_seq {
// If the sequence number is greater, keep it
ctx.value = value;
ctx.value = Arc::new(value);
// One node has shown us this value so far
ctx.set_count = 1;
ctx.value_nodes = vec![next_node];
ctx.missed_since_last_set = 0;
} else {
// If the sequence number is older, or an equal sequence number,
@ -110,7 +118,7 @@ impl StorageManager {
} else {
// It was set on this node and no newer value was found and returned,
// so increase our consensus count
ctx.set_count += 1;
ctx.value_nodes.push(next_node);
ctx.missed_since_last_set = 0;
}
} else {
@ -131,13 +139,13 @@ impl StorageManager {
let ctx = context.lock();
// If we have reached sufficient consensus, return done
if ctx.set_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
return Some(());
}
// If we have missed more than our consensus count since our last set, return done
// This keeps the traversal from searching too many nodes when we aren't converging
// Only do this if we have gotten at least half our desired sets.
if ctx.set_count >= ((consensus_count + 1) / 2)
if ctx.value_nodes.len() >= ((consensus_count + 1) / 2)
&& ctx.missed_since_last_set >= consensus_count
{
return Some(());
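To make the early exit concrete: with a consensus_count of 4, the traversal stops once four nodes have accepted the set, or once at least two nodes (half, rounded up, via (4 + 1) / 2) have accepted and four candidates in a row have since failed to. That keeps the fanout from wandering deep into the keyspace when the network has clearly stopped converging. The predicate in isolation (illustrative):

// Sketch of the set-value completion check.
fn set_is_done(set_count: usize, missed_since_last_set: usize, consensus_count: usize) -> bool {
    set_count >= consensus_count
        || (set_count >= (consensus_count + 1) / 2
            && missed_since_last_set >= consensus_count)
}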
@ -152,45 +160,54 @@ impl StorageManager {
key_count,
fanout,
timeout_us,
capability_fanout_node_info_filter(vec![CAP_DHT]),
capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
call_routine,
check_done,
);
match fanout_call.run().await {
match fanout_call.run(vec![]).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.set_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "SetValue Fanout Timeout Consensus");
} else {
log_stor!(debug "SetValue Fanout Timeout Non-Consensus: {}", ctx.set_count);
log_stor!(debug "SetValue Fanout Timeout Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(ctx.value.clone())
Ok(OutboundSetValueResult {
signed_value_data: ctx.value.clone(),
value_nodes: ctx.value_nodes.clone(),
})
}
// If we finished with or without consensus (the completion check returned early)
TimeoutOr::Value(Ok(Some(()))) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.set_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "SetValue Fanout Consensus");
} else {
log_stor!(debug "SetValue Fanout Non-Consensus: {}", ctx.set_count);
log_stor!(debug "SetValue Fanout Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(ctx.value.clone())
Ok(OutboundSetValueResult {
signed_value_data: ctx.value.clone(),
value_nodes: ctx.value_nodes.clone(),
})
}
// If we ran out of nodes before getting consensus
TimeoutOr::Value(Ok(None)) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.set_count >= consensus_count {
if ctx.value_nodes.len() >= consensus_count {
log_stor!(debug "SetValue Fanout Exhausted Consensus");
} else {
log_stor!(debug "SetValue Fanout Exhausted Non-Consensus: {}", ctx.set_count);
log_stor!(debug "SetValue Fanout Exhausted Non-Consensus: {}", ctx.value_nodes.len());
}
Ok(ctx.value.clone())
Ok(OutboundSetValueResult {
signed_value_data: ctx.value.clone(),
value_nodes: ctx.value_nodes.clone(),
})
}
// Failed
TimeoutOr::Value(Err(e)) => {
@ -208,9 +225,10 @@ impl StorageManager {
&self,
key: TypedKey,
subkey: ValueSubkey,
value: SignedValueData,
descriptor: Option<SignedValueDescriptor>,
) -> VeilidAPIResult<NetworkResult<Option<SignedValueData>>> {
value: Arc<SignedValueData>,
descriptor: Option<Arc<SignedValueDescriptor>>,
target: Target,
) -> VeilidAPIResult<NetworkResult<Option<Arc<SignedValueData>>>> {
let mut inner = self.lock().await?;
// See if this is a remote or local value
@ -273,10 +291,18 @@ impl StorageManager {
// Do the set and return no new value
let res = if is_local {
inner.handle_set_local_value(key, subkey, value).await
inner
.handle_set_local_value(key, subkey, value, WatchUpdateMode::ExcludeTarget(target))
.await
} else {
inner
.handle_set_remote_value(key, subkey, value, actual_descriptor)
.handle_set_remote_value(
key,
subkey,
value,
actual_descriptor,
WatchUpdateMode::ExcludeTarget(target),
)
.await
};
match res {

View File

@ -25,9 +25,13 @@ pub(super) struct StorageManagerInner {
/// Storage manager metadata that is persistent, including copy of offline subkey writes
pub metadata_db: Option<TableDB>,
/// RPC processor if it is available
pub rpc_processor: Option<RPCProcessor>,
pub opt_rpc_processor: Option<RPCProcessor>,
/// Routing table if it is available
pub opt_routing_table: Option<RoutingTable>,
/// Background processing task (not part of attachment manager tick tree so it happens when detached too)
pub tick_future: Option<SendPinBoxFuture<()>>,
/// Update callback to send ValueChanged notification to
pub update_callback: Option<UpdateCallback>,
}
fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
@ -39,6 +43,12 @@ fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
max_records: None,
max_subkey_cache_memory_mb: Some(c.network.dht.local_max_subkey_cache_memory_mb as usize),
max_storage_space_mb: None,
public_watch_limit: c.network.dht.public_watch_limit as usize,
member_watch_limit: c.network.dht.member_watch_limit as usize,
max_watch_expiration: TimestampDuration::new(ms_to_us(
c.network.dht.max_watch_expiration_ms,
)),
min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)),
}
}
@ -51,6 +61,12 @@ fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
max_records: Some(c.network.dht.remote_max_records as usize),
max_subkey_cache_memory_mb: Some(c.network.dht.remote_max_subkey_cache_memory_mb as usize),
max_storage_space_mb: Some(c.network.dht.remote_max_storage_space_mb as usize),
public_watch_limit: c.network.dht.public_watch_limit as usize,
member_watch_limit: c.network.dht.member_watch_limit as usize,
max_watch_expiration: TimestampDuration::new(ms_to_us(
c.network.dht.max_watch_expiration_ms,
)),
min_watch_expiration: TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms)),
}
}
@ -64,12 +80,18 @@ impl StorageManagerInner {
remote_record_store: Default::default(),
offline_subkey_writes: Default::default(),
metadata_db: Default::default(),
rpc_processor: Default::default(),
opt_rpc_processor: Default::default(),
opt_routing_table: Default::default(),
tick_future: Default::default(),
update_callback: None,
}
}
pub async fn init(&mut self, outer_self: StorageManager) -> EyreResult<()> {
pub async fn init(
&mut self,
outer_self: StorageManager,
update_callback: UpdateCallback,
) -> EyreResult<()> {
let metadata_db = self
.unlocked_inner
.table_store
@ -109,13 +131,15 @@ impl StorageManagerInner {
}
});
self.tick_future = Some(tick_future);
self.update_callback = Some(update_callback);
self.initialized = true;
Ok(())
}
pub async fn terminate(&mut self) {
self.update_callback = None;
// Stop ticker
let tick_future = self.tick_future.take();
if let Some(f) = tick_future {
@ -124,12 +148,12 @@ impl StorageManagerInner {
// Final flush on record stores
if let Some(mut local_record_store) = self.local_record_store.take() {
if let Err(e) = local_record_store.tick().await {
if let Err(e) = local_record_store.flush().await {
log_stor!(error "termination local record store tick failed: {}", e);
}
}
if let Some(mut remote_record_store) = self.remote_record_store.take() {
if let Err(e) = remote_record_store.tick().await {
if let Err(e) = remote_record_store.flush().await {
log_stor!(error "termination remote record store tick failed: {}", e);
}
}
@ -195,16 +219,19 @@ impl StorageManagerInner {
let owner = vcrypto.generate_keypair();
// Make a signed value descriptor for this dht value
let signed_value_descriptor = SignedValueDescriptor::make_signature(
let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(
owner.key,
schema_data,
vcrypto.clone(),
owner.secret,
)?;
)?);
// Add new local value record
let cur_ts = get_aligned_timestamp();
let local_record_detail = LocalRecordDetail { safety_selection };
let local_record_detail = LocalRecordDetail {
safety_selection,
value_nodes: vec![],
};
let record =
Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;
@ -243,7 +270,10 @@ impl StorageManagerInner {
let local_record = Record::new(
cur_ts,
remote_record.descriptor().clone(),
LocalRecordDetail { safety_selection },
LocalRecordDetail {
safety_selection,
value_nodes: vec![],
},
)?;
local_record_store.new_record(key, local_record).await?;
@ -261,7 +291,7 @@ impl StorageManagerInner {
continue;
};
local_record_store
.set_subkey(key, subkey, subkey_data)
.set_subkey(key, subkey, subkey_data, WatchUpdateMode::NoUpdate)
.await?;
}
@ -379,7 +409,10 @@ impl StorageManagerInner {
let record = Record::<LocalRecordDetail>::new(
get_aligned_timestamp(),
signed_value_descriptor,
LocalRecordDetail { safety_selection },
LocalRecordDetail {
safety_selection,
value_nodes: vec![],
},
)?;
local_record_store.new_record(key, record).await?;
@ -387,7 +420,7 @@ impl StorageManagerInner {
if let Some(signed_value_data) = subkey_result.value {
// Write subkey to local store
local_record_store
.set_subkey(key, subkey, signed_value_data)
.set_subkey(key, subkey, signed_value_data, WatchUpdateMode::NoUpdate)
.await?;
}
@ -400,14 +433,65 @@ impl StorageManagerInner {
Ok(descriptor)
}
pub fn close_record(&mut self, key: TypedKey) -> VeilidAPIResult<()> {
let Some(_opened_record) = self.opened_records.remove(&key) else {
apibail_generic!("record not open");
pub fn get_value_nodes(&self, key: TypedKey) -> VeilidAPIResult<Option<Vec<NodeRef>>> {
// Get local record store
let Some(local_record_store) = self.local_record_store.as_ref() else {
apibail_not_initialized!();
};
// Get routing table to see if we still know about these nodes
let Some(routing_table) = self.opt_rpc_processor.as_ref().map(|r| r.routing_table()) else {
apibail_try_again!("offline, try again later");
};
let opt_value_nodes = local_record_store.peek_record(key, |r| {
let d = r.detail();
d.value_nodes
.iter()
.copied()
.filter_map(|x| {
routing_table
.lookup_node_ref(TypedKey::new(key.kind, x))
.ok()
.flatten()
})
.collect()
});
Ok(opt_value_nodes)
}
pub fn set_value_nodes(
&mut self,
key: TypedKey,
value_nodes: Vec<NodeRef>,
) -> VeilidAPIResult<()> {
// Get local record store
let Some(local_record_store) = self.local_record_store.as_mut() else {
apibail_not_initialized!();
};
local_record_store.with_record_mut(key, |r| {
let d = r.detail_mut();
d.value_nodes = value_nodes
.into_iter()
.filter_map(|x| x.node_ids().get(key.kind).map(|k| k.value))
.collect();
});
Ok(())
}
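Only bare node keys are persisted with the record; NodeRefs are re-resolved through the routing table on every read, so entries for nodes we no longer know about silently drop out rather than going stale. The resolution step in isolation (illustrative wrapper around lookup_node_ref):

// Sketch: resolve persisted value-node keys back to live NodeRefs,
// dropping any the routing table no longer knows about.
fn resolve_value_nodes(
    routing_table: &RoutingTable,
    kind: CryptoKind,
    stored: &[CryptoKey],
) -> Vec<NodeRef> {
    stored
        .iter()
        .copied()
        .filter_map(|k| routing_table.lookup_node_ref(TypedKey::new(kind, k)).ok().flatten())
        .collect()
}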
pub async fn handle_get_local_value(
pub fn close_record(&mut self, key: TypedKey) -> VeilidAPIResult<Option<OpenedRecord>> {
let Some(local_record_store) = self.local_record_store.as_mut() else {
apibail_not_initialized!();
};
if local_record_store.peek_record(key, |_| {}).is_none() {
return Err(VeilidAPIError::key_not_found(key));
}
Ok(self.opened_records.remove(&key))
}
pub(super) async fn handle_get_local_value(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
@ -430,11 +514,12 @@ impl StorageManagerInner {
})
}
pub async fn handle_set_local_value(
pub(super) async fn handle_set_local_value(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
signed_value_data: SignedValueData,
signed_value_data: Arc<SignedValueData>,
watch_update_mode: WatchUpdateMode,
) -> VeilidAPIResult<()> {
// See if it's in the local record store
let Some(local_record_store) = self.local_record_store.as_mut() else {
@ -443,13 +528,31 @@ impl StorageManagerInner {
// Write subkey to local store
local_record_store
.set_subkey(key, subkey, signed_value_data)
.set_subkey(key, subkey, signed_value_data, watch_update_mode)
.await?;
Ok(())
}
pub async fn handle_get_remote_value(
pub(super) async fn handle_watch_local_value(
&mut self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
target: Target,
watcher: CryptoKey,
) -> VeilidAPIResult<Option<Timestamp>> {
// See if it's in the local record store
let Some(local_record_store) = self.local_record_store.as_mut() else {
apibail_not_initialized!();
};
local_record_store
.watch_record(key, subkeys, expiration, count, target, watcher)
.await
}
pub(super) async fn handle_get_remote_value(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
@ -472,12 +575,13 @@ impl StorageManagerInner {
})
}
pub async fn handle_set_remote_value(
pub(super) async fn handle_set_remote_value(
&mut self,
key: TypedKey,
subkey: ValueSubkey,
signed_value_data: SignedValueData,
signed_value_descriptor: SignedValueDescriptor,
signed_value_data: Arc<SignedValueData>,
signed_value_descriptor: Arc<SignedValueDescriptor>,
watch_update_mode: WatchUpdateMode,
) -> VeilidAPIResult<()> {
// See if it's in the remote record store
let Some(remote_record_store) = self.remote_record_store.as_mut() else {
@ -499,18 +603,37 @@ impl StorageManagerInner {
// Write subkey to remote store
remote_record_store
.set_subkey(key, subkey, signed_value_data)
.set_subkey(key, subkey, signed_value_data, watch_update_mode)
.await?;
Ok(())
}
pub(super) async fn handle_watch_remote_value(
&mut self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
target: Target,
watcher: CryptoKey,
) -> VeilidAPIResult<Option<Timestamp>> {
// See if it's in the remote record store
let Some(remote_record_store) = self.remote_record_store.as_mut() else {
apibail_not_initialized!();
};
remote_record_store
.watch_record(key, subkeys, expiration, count, target, watcher)
.await
}
/// # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
fn get_key<D>(vcrypto: CryptoSystemVersion, record: &Record<D>) -> TypedKey
where
D: fmt::Debug + Clone + Serialize,
{
let compiled = record.descriptor().schema_data();
let descriptor = record.descriptor();
let compiled = descriptor.schema_data();
let mut hash_data = Vec::<u8>::with_capacity(PUBLIC_KEY_LENGTH + 4 + compiled.len());
hash_data.extend_from_slice(&vcrypto.kind().0);
hash_data.extend_from_slice(&record.owner().bytes);

View File

@ -0,0 +1,71 @@
use super::*;
impl StorageManager {
// Check whether active watches have dead nodes or routes, or have expired
#[instrument(level = "trace", skip(self), err)]
pub(super) async fn check_active_watches_task_routine(
self,
stop_token: StopToken,
_last_ts: Timestamp,
_cur_ts: Timestamp,
) -> EyreResult<()> {
{
let mut inner = self.inner.lock().await;
let Some(routing_table) = inner.opt_routing_table.clone() else {
return Ok(());
};
let rss = routing_table.route_spec_store();
let opt_update_callback = inner.update_callback.clone();
let cur_ts = get_aligned_timestamp();
for (k, v) in inner.opened_records.iter_mut() {
// If no active watch, then skip this
let Some(active_watch) = v.active_watch() else {
continue;
};
// See if the active watch's node is dead
let mut is_dead = false;
if matches!(
active_watch.watch_node.state(cur_ts),
BucketEntryState::Dead
) {
// Watched node is dead
is_dead = true;
}
// See if the private route we're using is dead
if !is_dead {
if let Some(value_changed_route) = active_watch.opt_value_changed_route {
if rss.get_route_id_for_key(&value_changed_route).is_none() {
// Route we would receive value changes on is dead
is_dead = true;
}
}
}
// See if the watch is expired
if !is_dead && active_watch.expiration_ts <= cur_ts {
// Watch has expired
is_dead = true;
}
if is_dead {
v.clear_active_watch();
if let Some(update_callback) = opt_update_callback.clone() {
// Send valuechange with dead count and no subkeys
update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange {
key: *k,
subkeys: ValueSubkeyRangeSet::new(),
count: 0,
value: ValueData::default(),
})));
}
}
}
}
Ok(())
}
}

View File

@ -11,10 +11,10 @@ impl StorageManager {
) -> EyreResult<()> {
let mut inner = self.inner.lock().await;
if let Some(local_record_store) = &mut inner.local_record_store {
local_record_store.tick().await?;
local_record_store.flush().await?;
}
if let Some(remote_record_store) = &mut inner.remote_record_store {
remote_record_store.tick().await?;
remote_record_store.flush().await?;
}
Ok(())
}

View File

@ -1,5 +1,7 @@
pub mod check_active_watches;
pub mod flush_record_stores;
pub mod offline_subkey_writes;
pub mod send_value_changes;
use super::*;
@ -47,23 +49,82 @@ impl StorageManager {
)
});
}
// Set send value changes tick task
debug!("starting send value changes task");
{
let this = self.clone();
self.unlocked_inner
.send_value_changes_task
.set_routine(move |s, l, t| {
Box::pin(
this.clone()
.send_value_changes_task_routine(
s,
Timestamp::new(l),
Timestamp::new(t),
)
.instrument(trace_span!(
parent: None,
"StorageManager send value changes task routine"
)),
)
});
}
// Set check active watches tick task
debug!("starting check active watches task");
{
let this = self.clone();
self.unlocked_inner
.check_active_watches_task
.set_routine(move |s, l, t| {
Box::pin(
this.clone()
.check_active_watches_task_routine(
s,
Timestamp::new(l),
Timestamp::new(t),
)
.instrument(trace_span!(
parent: None,
"StorageManager check active watches task routine"
)),
)
});
}
}
pub async fn tick(&self) -> EyreResult<()> {
// Run the rolling transfers task
// Run the flush stores task
self.unlocked_inner.flush_record_stores_task.tick().await?;
// Run offline subkey writes task if there's work to be done
if self.online_writes_ready().await?.is_some() && self.has_offline_subkey_writes().await? {
self.unlocked_inner
.offline_subkey_writes_task
.tick()
.await?;
// Check active watches
self.unlocked_inner.check_active_watches_task.tick().await?;
// Run online-only tasks
if self.online_writes_ready().await?.is_some() {
// Run offline subkey writes task if there's work to be done
if self.has_offline_subkey_writes().await? {
self.unlocked_inner
.offline_subkey_writes_task
.tick()
.await?;
}
// Send value changed notifications
self.unlocked_inner.send_value_changes_task.tick().await?;
}
Ok(())
}
pub(crate) async fn cancel_tasks(&self) {
debug!("stopping check active watches task");
if let Err(e) = self.unlocked_inner.check_active_watches_task.stop().await {
warn!("check_active_watches_task not stopped: {}", e);
}
debug!("stopping send value changes task");
if let Err(e) = self.unlocked_inner.send_value_changes_task.stop().await {
warn!("send_value_changes_task not stopped: {}", e);
}
debug!("stopping flush record stores task");
if let Err(e) = self.unlocked_inner.flush_record_stores_task.stop().await {
warn!("flush_record_stores_task not stopped: {}", e);

View File

@ -0,0 +1,59 @@
use super::*;
use futures_util::StreamExt;
use stop_token::future::FutureExt;
impl StorageManager {
// Send value change notifications across the network
#[instrument(level = "trace", skip(self), err)]
pub(super) async fn send_value_changes_task_routine(
self,
stop_token: StopToken,
_last_ts: Timestamp,
_cur_ts: Timestamp,
) -> EyreResult<()> {
let mut value_changes: Vec<ValueChangedInfo> = vec![];
{
let mut inner = self.inner.lock().await;
if let Some(local_record_store) = &mut inner.local_record_store {
local_record_store
.take_value_changes(&mut value_changes)
.await;
}
if let Some(remote_record_store) = &mut inner.remote_record_store {
remote_record_store
.take_value_changes(&mut value_changes)
.await;
}
}
// Send all value changes in parallel
let mut unord = FuturesUnordered::new();
// Add a future for each value change
for vc in value_changes {
let this = self.clone();
unord.push(async move {
if let Err(e) = this.send_value_change(vc).await {
log_stor!(debug "Failed to send value change: {}", e);
}
});
}
while !unord.is_empty() {
match unord.next().timeout_at(stop_token.clone()).await {
Ok(Some(_)) => {
// Some ValueChanged completed
}
Ok(None) => {
// We're empty
}
Err(_) => {
// Timeout means we drop the rest because we were asked to stop
return Ok(());
}
}
}
Ok(())
}
}

View File

@ -2,8 +2,11 @@ use super::*;
/// Information required to handle locally opened records
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct LocalRecordDetail {
pub(in crate::storage_manager) struct LocalRecordDetail {
/// The last 'safety selection' used when creating/opening this record.
/// Even when closed, this safety selection applies to re-publication attempts by the system.
pub safety_selection: SafetySelection,
/// The nodes that we have seen this record cached on recently
#[serde(default)]
pub value_nodes: Vec<PublicKey>,
}

View File

@ -8,10 +8,10 @@ mod signed_value_descriptor;
use super::*;
pub use local_record_detail::*;
pub use opened_record::*;
pub use record::*;
pub use record_data::*;
pub use remote_record_detail::*;
pub(super) use local_record_detail::*;
pub(super) use opened_record::*;
pub(super) use record::*;
pub(super) use record_data::*;
pub(super) use remote_record_detail::*;
pub use signed_value_data::*;
pub use signed_value_descriptor::*;

View File

@ -1,9 +1,23 @@
use super::*;
#[derive(Clone, Debug)]
pub(in crate::storage_manager) struct ActiveWatch {
/// The expiration of a successful watch
pub expiration_ts: Timestamp,
/// Which node accepted the watch
pub watch_node: NodeRef,
/// Which private route is responsible for receiving ValueChanged notifications
pub opt_value_changed_route: Option<PublicKey>,
/// Which subkeys we are watching
pub subkeys: ValueSubkeyRangeSet,
/// How many notifications are left
pub count: u32,
}
/// The state associated with a local record when it is opened
/// This is not serialized to storage as it is ephemeral for the lifetime of the opened record
#[derive(Clone, Debug, Default)]
pub struct OpenedRecord {
pub(in crate::storage_manager) struct OpenedRecord {
/// The key pair used to perform writes to subkey on this opened record
/// Without this, set_value() will fail regardless of which key or subkey is being written to
/// as all writes are signed
@ -11,6 +25,9 @@ pub struct OpenedRecord {
/// The safety selection in current use
safety_selection: SafetySelection,
/// Active watch we have on this record
active_watch: Option<ActiveWatch>,
}
impl OpenedRecord {
@ -18,6 +35,7 @@ impl OpenedRecord {
Self {
writer,
safety_selection,
active_watch: None,
}
}
@ -28,4 +46,16 @@ impl OpenedRecord {
pub fn safety_selection(&self) -> SafetySelection {
self.safety_selection
}
pub fn set_active_watch(&mut self, active_watch: ActiveWatch) {
self.active_watch = Some(active_watch);
}
pub fn clear_active_watch(&mut self) {
self.active_watch = None;
}
pub fn active_watch(&self) -> Option<ActiveWatch> {
self.active_watch.clone()
}
}

View File

@ -1,11 +1,11 @@
use super::*;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Record<D>
pub(in crate::storage_manager) struct Record<D>
where
D: fmt::Debug + Clone + Serialize,
D: fmt::Debug + Serialize + Clone,
{
descriptor: SignedValueDescriptor,
descriptor: Arc<SignedValueDescriptor>,
subkey_count: usize,
stored_subkeys: ValueSubkeyRangeSet,
last_touched_ts: Timestamp,
@ -15,11 +15,11 @@ where
impl<D> Record<D>
where
D: fmt::Debug + Clone + Serialize,
D: fmt::Debug + Serialize + Clone,
{
pub fn new(
cur_ts: Timestamp,
descriptor: SignedValueDescriptor,
descriptor: Arc<SignedValueDescriptor>,
detail: D,
) -> VeilidAPIResult<Self> {
let schema = descriptor.schema()?;
@ -34,8 +34,8 @@ where
})
}
pub fn descriptor(&self) -> &SignedValueDescriptor {
&self.descriptor
pub fn descriptor(&self) -> Arc<SignedValueDescriptor> {
self.descriptor.clone()
}
pub fn owner(&self) -> &PublicKey {
self.descriptor.owner()
@ -74,14 +74,14 @@ where
}
pub fn total_size(&self) -> usize {
(mem::size_of::<Self>() - mem::size_of::<SignedValueDescriptor>())
(mem::size_of::<Self>() - mem::size_of::<Arc<SignedValueDescriptor>>())
+ self.descriptor.total_size()
+ self.record_data_size
}
// pub fn detail(&self) -> &D {
// &self.detail
// }
pub fn detail(&self) -> &D {
&self.detail
}
pub fn detail_mut(&mut self) -> &mut D {
&mut self.detail
}

View File

@ -1,22 +1,21 @@
use super::*;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct RecordData {
signed_value_data: SignedValueData,
pub(in crate::storage_manager) struct RecordData {
signed_value_data: Arc<SignedValueData>,
}
impl RecordData {
pub fn new(signed_value_data: SignedValueData) -> Self {
pub fn new(signed_value_data: Arc<SignedValueData>) -> Self {
Self { signed_value_data }
}
pub fn signed_value_data(&self) -> &SignedValueData {
&self.signed_value_data
pub fn signed_value_data(&self) -> Arc<SignedValueData> {
self.signed_value_data.clone()
}
pub fn data_size(&self) -> usize {
self.signed_value_data.data_size()
}
pub fn total_size(&self) -> usize {
(mem::size_of::<Self>() - mem::size_of::<SignedValueData>())
+ self.signed_value_data.total_size()
mem::size_of::<Self>() + self.signed_value_data.total_size()
}
}

View File

@ -1,4 +1,4 @@
use super::*;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct RemoteRecordDetail {}
pub(in crate::storage_manager) struct RemoteRecordDetail {}

View File

@ -48,10 +48,6 @@ impl SignedValueData {
&self.value_data
}
pub fn into_value_data(self) -> ValueData {
self.value_data
}
pub fn signature(&self) -> &Signature {
&self.signature
}

View File

@ -0,0 +1,287 @@
use super::*;
/// The context of the outbound_watch_value operation
struct OutboundWatchValueContext {
/// A successful watch
pub opt_watch_value_result: Option<OutboundWatchValueResult>,
}
/// The result of the outbound_watch_value operation
#[derive(Debug, Clone)]
pub(super) struct OutboundWatchValueResult {
/// The expiration of a successful watch
pub expiration_ts: Timestamp,
/// Which node accepted the watch
pub watch_node: NodeRef,
/// Which private route is responsible for receiving ValueChanged notifications
pub opt_value_changed_route: Option<PublicKey>,
}
impl StorageManager {
/// Perform a 'watch value' query on the network
#[allow(clippy::too_many_arguments)]
pub(super) async fn outbound_watch_value(
&self,
rpc_processor: RPCProcessor,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
safety_selection: SafetySelection,
opt_watcher: Option<KeyPair>,
opt_watch_node: Option<NodeRef>,
) -> VeilidAPIResult<Option<OutboundWatchValueResult>> {
let routing_table = rpc_processor.routing_table();
// Get the DHT parameters for 'WatchValue', some of which are the same as for 'GetValue' operations
let (key_count, timeout_us) = {
let c = self.unlocked_inner.config.get();
(
c.network.dht.max_find_node_count as usize,
TimestampDuration::from(ms_to_us(c.network.dht.get_value_timeout_ms)),
)
};
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = if let Some(watch_node) = opt_watch_node {
vec![watch_node]
} else {
let inner = self.inner.lock().await;
inner.get_value_nodes(key)?.unwrap_or_default()
};
// Get the appropriate watcher key
let watcher = opt_watcher.unwrap_or_else(|| {
self.unlocked_inner
.anonymous_watch_keys
.get(key.kind)
.unwrap()
.value
});
// Make do-watch-value answer context
let context = Arc::new(Mutex::new(OutboundWatchValueContext {
opt_watch_value_result: None,
}));
// Routine to call to generate fanout
let call_routine = |next_node: NodeRef| {
let rpc_processor = rpc_processor.clone();
let context = context.clone();
let subkeys = subkeys.clone();
async move {
let wva = network_result_try!(
rpc_processor
.clone()
.rpc_call_watch_value(
Destination::direct(next_node.clone()).with_safety(safety_selection),
key,
subkeys,
expiration,
count,
watcher
)
.await?
);
// Keep answer if we got one
if wva.answer.expiration_ts.as_u64() > 0 {
if count > 0 {
// If we asked for a nonzero notification count, then this is an accepted watch
log_stor!(debug "Watch accepted: expiration_ts={}", debug_ts(wva.answer.expiration_ts.as_u64()));
} else {
// If we asked for a zero notification count, then this is a cancelled watch
log_stor!(debug "Watch cancelled");
}
let mut ctx = context.lock();
ctx.opt_watch_value_result = Some(OutboundWatchValueResult {
expiration_ts: wva.answer.expiration_ts,
watch_node: next_node.clone(),
opt_value_changed_route: wva.reply_private_route,
});
}
// Return peers if we have some
#[cfg(feature = "network-result-extra")]
log_stor!(debug "WatchValue fanout call returned peers {}", wva.answer.peers.len());
Ok(NetworkResult::value(wva.answer.peers))
}
};
// Routine to call to check if we're done at each step
let check_done = |_closest_nodes: &[NodeRef]| {
// If a watch has succeeded, return done
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
return Some(());
}
None
};
// Call the fanout
// Use a fixed fanout concurrency of 1 because we only want one watch
let fanout_call = FanoutCall::new(
routing_table.clone(),
key,
key_count,
1,
timeout_us,
capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
call_routine,
check_done,
);
match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed without a successful watch)
TimeoutOr::Timeout => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_stor!(debug "WatchValue Fanout Timeout Success");
} else {
log_stor!(debug "WatchValue Fanout Timeout Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
// If we finished with done
TimeoutOr::Value(Ok(Some(()))) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_stor!(debug "WatchValue Fanout Success");
} else {
log_stor!(debug "WatchValue Fanout Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
// If we ran out of nodes
TimeoutOr::Value(Ok(None)) => {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_stor!(debug "WatchValue Fanout Exhausted Success");
} else {
log_stor!(debug "WatchValue Fanout Exhausted Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
// Failed
TimeoutOr::Value(Err(e)) => {
// If we finished with an error, return that
log_stor!(debug "WatchValue Fanout Error: {}", e);
Err(e.into())
}
}
}
/// Handle a received 'Watch Value' query
pub async fn inbound_watch_value(
&self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
target: Target,
watcher: CryptoKey,
) -> VeilidAPIResult<NetworkResult<Timestamp>> {
let mut inner = self.lock().await?;
// See if this is a remote or local value
let (_is_local, opt_expiration_ts) = {
// See if the subkey we are watching has a local value
let opt_expiration_ts = inner
.handle_watch_local_value(
key,
subkeys.clone(),
expiration,
count,
target.clone(),
watcher,
)
.await?;
if opt_expiration_ts.is_some() {
(true, opt_expiration_ts)
} else {
// See if the subkey we are watching is a remote value
let opt_expiration_ts = inner
.handle_watch_remote_value(key, subkeys, expiration, count, target, watcher)
.await?;
(false, opt_expiration_ts)
}
};
Ok(NetworkResult::value(opt_expiration_ts.unwrap_or_default()))
}
/// Handle a received 'Value Changed' statement
pub async fn inbound_value_changed(
&self,
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
mut count: u32,
value: Arc<SignedValueData>,
) -> VeilidAPIResult<()> {
// Update local record store with new value
let (res, opt_update_callback) = {
let mut inner = self.lock().await?;
let res = if let Some(first_subkey) = subkeys.first() {
inner
.handle_set_local_value(
key,
first_subkey,
value.clone(),
WatchUpdateMode::NoUpdate,
)
.await
} else {
VeilidAPIResult::Ok(())
};
let Some(opened_record) = inner.opened_records.get_mut(&key) else {
// Don't send update or update the ActiveWatch if this record is closed
return res;
};
let Some(mut active_watch) = opened_record.active_watch() else {
// No active watch means no callback
return res;
};
if count > active_watch.count {
// If count is greater than our requested count then this is invalid, cancel the watch
log_stor!(debug "watch count went backward: {}: {}/{}", key, count, active_watch.count);
// Force count to zero
count = 0;
opened_record.clear_active_watch();
} else if count == 0 {
// If count is zero, we're done, cancel the watch and the app can renew it if it wants
log_stor!(debug "watch count finished: {}", key);
opened_record.clear_active_watch();
} else {
log_stor!(
"watch count decremented: {}: {}/{}",
key,
count,
active_watch.count
);
active_watch.count = count;
opened_record.set_active_watch(active_watch);
}
(res, inner.update_callback.clone())
};
// Announce ValueChanged VeilidUpdate
if let Some(update_callback) = opt_update_callback {
update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange {
key,
subkeys,
count,
value: value.value_data().clone(),
})));
}
res
}
}

View File

@ -229,6 +229,9 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.dht.remote_max_records" => Ok(Box::new(4096u32)),
"network.dht.remote_max_subkey_cache_memory_mb" => Ok(Box::new(64u32)),
"network.dht.remote_max_storage_space_mb" => Ok(Box::new(64u32)),
"network.dht.public_watch_limit" => Ok(Box::new(32u32)),
"network.dht.member_watch_limit" => Ok(Box::new(8u32)),
"network.dht.max_watch_expiration_ms" => Ok(Box::new(600_000u32)),
"network.upnp" => Ok(Box::new(false)),
"network.detect_address_changes" => Ok(Box::new(true)),
"network.restricted_nat_retries" => Ok(Box::new(0u32)),

View File

@ -19,12 +19,12 @@ pub async fn test_startup_shutdown_from_config() {
table_store: VeilidConfigTableStore {
directory: get_table_store_path(),
delete: true,
..Default::default()
// ..Default::default()
},
block_store: VeilidConfigBlockStore {
directory: get_block_store_path(),
delete: true,
..Default::default()
//..Default::default()
},
protected_store: VeilidConfigProtectedStore {
allow_insecure_fallback: true,

View File

@ -30,8 +30,8 @@ pub async fn run_all_tests() {
veilid_api::tests::test_serialize_json::test_all().await;
info!("TEST: routing_table::test_serialize_routing_table");
routing_table::tests::test_serialize_routing_table::test_all().await;
info!("TEST: test_dht");
test_dht::test_all().await;
// info!("TEST: test_dht");
// test_dht::test_all().await;
info!("Finished unit tests");
}
@ -131,6 +131,6 @@ cfg_if! {
run_test!(routing_table, test_serialize_routing_table);
run_test!(test_dht);
// run_test!(test_dht);
}
}

View File

@ -295,7 +295,7 @@ impl VeilidAPI {
/// Returns a route id that can be used to send private messages to the node creating this route.
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> VeilidAPIResult<RouteId> {
let rss = self.routing_table()?.route_spec_store();
rss.import_remote_private_route(blob)
rss.import_remote_private_route_blob(blob)
}
/// Release either a locally allocated or remotely imported private route

View File

@ -3,16 +3,20 @@
use super::*;
use data_encoding::BASE64URL_NOPAD;
use hashlink::LinkedHashMap;
use network_manager::*;
use once_cell::sync::Lazy;
use routing_table::*;
#[derive(Default, Debug)]
#[derive(Default)]
struct DebugCache {
imported_routes: Vec<RouteId>,
opened_record_contexts: Lazy<LinkedHashMap<TypedKey, RoutingContext>>,
}
static DEBUG_CACHE: Mutex<DebugCache> = Mutex::new(DebugCache {
imported_routes: Vec::new(),
opened_record_contexts: Lazy::new(LinkedHashMap::new),
});
fn format_opt_ts(ts: Option<TimestampDuration>) -> String {
@ -283,6 +287,7 @@ fn get_destination(
fn get_number(text: &str) -> Option<usize> {
usize::from_str(text).ok()
}
fn get_typed_key(text: &str) -> Option<TypedKey> {
TypedKey::from_str(text).ok()
}
@ -301,6 +306,18 @@ fn get_crypto_system_version(crypto: Crypto) -> impl FnOnce(&str) -> Option<Cryp
}
}
fn get_dht_key_no_safety(text: &str) -> Option<TypedKey> {
let key = if let Some(key) = get_public_key(text) {
TypedKey::new(best_crypto_kind(), key)
} else if let Some(key) = get_typed_key(text) {
key
} else {
return None;
};
Some(key)
}
fn get_dht_key(
routing_table: RoutingTable,
) -> impl FnOnce(&str) -> Option<(TypedKey, Option<SafetySelection>)> {
@ -515,6 +532,34 @@ async fn async_get_debug_argument_at<T, G: FnOnce(&str) -> SendPinBoxFuture<Opti
Ok(val)
}
fn get_opened_dht_record_context(
args: &[String],
context: &str,
key: &str,
arg: usize,
) -> VeilidAPIResult<(TypedKey, RoutingContext)> {
let dc = DEBUG_CACHE.lock();
let key = match get_debug_argument_at(args, arg, context, key, get_dht_key_no_safety)
.ok()
.or_else(|| {
// If unspecified, use the most recent key opened or created
dc.opened_record_contexts.back().map(|kv| kv.0).copied()
}) {
Some(k) => k,
None => {
apibail_missing_argument!("no keys are opened", "key");
}
};
// Get routing context for record
let Some(rc) = dc.opened_record_contexts.get(&key).cloned() else {
apibail_missing_argument!("key is not opened", "key");
};
Ok((key, rc))
}
pub fn print_data(data: &[u8], truncate_len: Option<usize>) -> String {
// check if message body is ascii printable
let mut printable = true;
@ -764,7 +809,7 @@ impl VeilidAPI {
}
let netman = self.network_manager()?;
netman.debug_restart_network();
netman.restart_network();
Ok("Network restarted".to_owned())
} else {
@ -888,7 +933,7 @@ impl VeilidAPI {
match &dest {
Destination::Direct {
target,
node: target,
safety_selection: _,
} => Ok(format!(
"Destination: {:#?}\nTarget Entry:\n{}\n",
@ -897,7 +942,7 @@ impl VeilidAPI {
)),
Destination::Relay {
relay,
target,
node: target,
safety_selection: _,
} => Ok(format!(
"Destination: {:#?}\nTarget Entry:\n{}\nRelay Entry:\n{}\n",
@ -1262,7 +1307,7 @@ impl VeilidAPI {
.map_err(VeilidAPIError::generic)?;
let rss = self.routing_table()?.route_spec_store();
let route_id = rss
.import_remote_private_route(blob_dec)
.import_remote_private_route_blob(blob_dec)
.map_err(VeilidAPIError::generic)?;
let mut dc = DEBUG_CACHE.lock();
@ -1410,25 +1455,86 @@ impl VeilidAPI {
Err(e) => return Ok(format!("Can't open DHT record: {}", e)),
Ok(v) => v,
};
match rc.close_dht_record(*record.key()).await {
Err(e) => return Ok(format!("Can't close DHT record: {}", e)),
Ok(v) => v,
};
// Save routing context for record
let mut dc = DEBUG_CACHE.lock();
dc.opened_record_contexts.insert(*record.key(), rc);
debug!("DHT Record Created:\n{:#?}", record);
Ok(format!("{:?}", record))
Ok(format!("Created: {:?} : {:?}", record.key(), record))
}
async fn debug_record_get(&self, args: Vec<String>) -> VeilidAPIResult<String> {
async fn debug_record_open(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let netman = self.network_manager()?;
let routing_table = netman.routing_table();
let (key, ss) = get_debug_argument_at(
&args,
1,
"debug_record_get",
"debug_record_open",
"key",
get_dht_key(routing_table),
)?;
let writer =
get_debug_argument_at(&args, 2, "debug_record_open", "writer", get_keypair).ok();
// Get routing context with optional safety
let rc = self.routing_context()?;
let rc = if let Some(ss) = ss {
match rc.with_safety(ss) {
Err(e) => return Ok(format!("Can't use safety selection: {}", e)),
Ok(v) => v,
}
} else {
rc
};
// Do a record open
let record = match rc.open_dht_record(key, writer).await {
Err(e) => return Ok(format!("Can't open DHT record: {}", e)),
Ok(v) => v,
};
// Save routing context for record
let mut dc = DEBUG_CACHE.lock();
dc.opened_record_contexts.insert(*record.key(), rc);
Ok(format!("Opened: {} : {:?}", key, record))
}
async fn debug_record_close(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let (key, rc) = get_opened_dht_record_context(&args, "debug_record_close", "key", 1)?;
// Do a record close
if let Err(e) = rc.close_dht_record(key).await {
return Ok(format!("Can't close DHT record: {}", e));
};
Ok(format!("Closed: {:?}", key))
}
async fn debug_record_set(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let (key, rc) = get_opened_dht_record_context(&args, "debug_record_set", "key", 1)?;
let subkey = get_debug_argument_at(&args, 2, "debug_record_set", "subkey", get_number)?;
let data = get_debug_argument_at(&args, 3, "debug_record_set", "data", get_data)?;
// Do a record set
let value = match rc.set_dht_value(key, subkey as ValueSubkey, data).await {
Err(e) => {
return Ok(format!("Can't set DHT value: {}", e));
}
Ok(v) => v,
};
let out = if let Some(value) = value {
format!("Newer value found: {:?}", value)
} else {
"Success".to_owned()
};
Ok(out)
}
async fn debug_record_get(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let (key, rc) = get_opened_dht_record_context(&args, "debug_record_get", "key", 1)?;
let subkey = get_debug_argument_at(&args, 2, "debug_record_get", "subkey", get_number)?;
let force_refresh = if args.len() >= 4 {
Some(get_debug_argument_at(
@ -1452,36 +1558,12 @@ impl VeilidAPI {
false
};
// Get routing context with optional privacy
let rc = self.routing_context()?;
let rc = if let Some(ss) = ss {
match rc.with_safety(ss) {
Err(e) => return Ok(format!("Can't use safety selection: {}", e)),
Ok(v) => v,
}
} else {
rc
};
// Do a record get
let _record = match rc.open_dht_record(key, None).await {
Err(e) => return Ok(format!("Can't open DHT record: {}", e)),
Ok(v) => v,
};
let value = match rc
.get_dht_value(key, subkey as ValueSubkey, force_refresh)
.await
{
Err(e) => {
match rc.close_dht_record(key).await {
Err(e) => {
return Ok(format!(
"Can't get DHT value and can't close DHT record: {}",
e
))
}
Ok(v) => v,
};
return Ok(format!("Can't get DHT value: {}", e));
}
Ok(v) => v,
@ -1491,75 +1573,19 @@ impl VeilidAPI {
} else {
"No value data returned".to_owned()
};
match rc.close_dht_record(key).await {
Err(e) => return Ok(format!("Can't close DHT record: {}", e)),
Ok(v) => v,
};
Ok(out)
}
async fn debug_record_set(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let netman = self.network_manager()?;
let routing_table = netman.routing_table();
let (key, ss) = get_debug_argument_at(
&args,
1,
"debug_record_set",
"key",
get_dht_key(routing_table),
)?;
let subkey = get_debug_argument_at(&args, 2, "debug_record_set", "subkey", get_number)?;
let writer = get_debug_argument_at(&args, 3, "debug_record_set", "writer", get_keypair)?;
let data = get_debug_argument_at(&args, 4, "debug_record_set", "data", get_data)?;
// Get routing context with optional privacy
let rc = self.routing_context()?;
let rc = if let Some(ss) = ss {
match rc.with_safety(ss) {
Err(e) => return Ok(format!("Can't use safety selection: {}", e)),
Ok(v) => v,
}
} else {
rc
};
// Do a record get
let _record = match rc.open_dht_record(key, Some(writer)).await {
Err(e) => return Ok(format!("Can't open DHT record: {}", e)),
Ok(v) => v,
};
let value = match rc.set_dht_value(key, subkey as ValueSubkey, data).await {
Err(e) => {
match rc.close_dht_record(key).await {
Err(e) => {
return Ok(format!(
"Can't set DHT value and can't close DHT record: {}",
e
))
}
Ok(v) => v,
};
return Ok(format!("Can't set DHT value: {}", e));
}
Ok(v) => v,
};
let out = if let Some(value) = value {
format!("{:?}", value)
} else {
"No value data returned".to_owned()
};
match rc.close_dht_record(key).await {
Err(e) => return Ok(format!("Can't close DHT record: {}", e)),
Ok(v) => v,
};
Ok(out)
}
async fn debug_record_delete(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let key = get_debug_argument_at(&args, 1, "debug_record_delete", "key", get_typed_key)?;
let key = get_debug_argument_at(
&args,
1,
"debug_record_delete",
"key",
get_dht_key_no_safety,
)?;
// Do a record delete
// Do a record delete (can use any routing context here)
let rc = self.routing_context()?;
match rc.delete_dht_record(key).await {
Err(e) => return Ok(format!("Can't delete DHT record: {}", e)),
@ -1571,7 +1597,8 @@ impl VeilidAPI {
async fn debug_record_info(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let storage_manager = self.storage_manager()?;
let key = get_debug_argument_at(&args, 1, "debug_record_info", "key", get_typed_key)?;
let key =
get_debug_argument_at(&args, 1, "debug_record_info", "key", get_dht_key_no_safety)?;
let subkey =
get_debug_argument_at(&args, 2, "debug_record_info", "subkey", get_number).ok();
@ -1595,6 +1622,56 @@ impl VeilidAPI {
Ok(out)
}
async fn debug_record_watch(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let (key, rc) = get_opened_dht_record_context(&args, "debug_record_watch", "key", 1)?;
let subkeys = get_debug_argument_at(&args, 2, "debug_record_watch", "subkeys", get_subkeys)
.ok()
.unwrap_or_default();
let expiration =
get_debug_argument_at(&args, 3, "debug_record_watch", "expiration", parse_duration)
.ok()
.unwrap_or_default();
let count = get_debug_argument_at(&args, 4, "debug_record_watch", "count", get_number)
.ok()
.unwrap_or(usize::MAX) as u32;
// Do a record watch
let ts = match rc
.watch_dht_values(key, subkeys, Timestamp::new(expiration), count)
.await
{
Err(e) => {
return Ok(format!("Can't watch DHT value: {}", e));
}
Ok(v) => v,
};
if ts.as_u64() == 0 {
return Ok("Failed to watch value".to_owned());
}
Ok(format!("Success: expiration={:?}", debug_ts(ts.as_u64())))
}
async fn debug_record_cancel(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let (key, rc) = get_opened_dht_record_context(&args, "debug_record_cancel", "key", 1)?;
let subkeys = get_debug_argument_at(&args, 2, "debug_record_cancel", "subkeys", get_subkeys)
.ok()
.unwrap_or_default();
// Do a record watch cancel
let still_active = match rc.cancel_dht_watch(key, subkeys).await {
Err(e) => {
return Ok(format!("Can't cancel DHT watch: {}", e));
}
Ok(v) => v,
};
Ok(if still_active {
"Watch partially cancelled".to_owned()
} else {
"Watch cancelled".to_owned()
})
}
async fn debug_record(&self, args: String) -> VeilidAPIResult<String> {
let args: Vec<String> =
shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
@ -1607,6 +1684,10 @@ impl VeilidAPI {
self.debug_record_purge(args).await
} else if command == "create" {
self.debug_record_create(args).await
} else if command == "open" {
self.debug_record_open(args).await
} else if command == "close" {
self.debug_record_close(args).await
} else if command == "get" {
self.debug_record_get(args).await
} else if command == "set" {
@ -1615,6 +1696,10 @@ impl VeilidAPI {
self.debug_record_delete(args).await
} else if command == "info" {
self.debug_record_info(args).await
} else if command == "watch" {
self.debug_record_watch(args).await
} else if command == "cancel" {
self.debug_record_cancel(args).await
} else {
Ok(">>> Unknown command\n".to_owned())
}
@ -1625,10 +1710,20 @@ impl VeilidAPI {
let network_manager = self.network_manager()?;
let address_filter = network_manager.address_filter();
let out = format!("Address Filter Punishments:\n{:#?}", address_filter);
let out = format!("Address filter punishments:\n{:#?}", address_filter);
Ok(out)
}
async fn debug_punish_clear(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
//
let network_manager = self.network_manager()?;
let address_filter = network_manager.address_filter();
address_filter.clear_punishments();
Ok("Address Filter punishments cleared\n".to_owned())
}
async fn debug_punish(&self, args: String) -> VeilidAPIResult<String> {
let args: Vec<String> =
shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
@ -1665,6 +1760,7 @@ appcall <destination> <data>
appreply [#id] <data>
relay <relay> [public|local]
punish list
clear
route allocate [ord|*ord] [rel] [<count>] [in|out]
release <route>
publish <route> [full]
@ -1676,10 +1772,14 @@ route allocate [ord|*ord] [rel] [<count>] [in|out]
record list <local|remote>
purge <local|remote> [bytes]
create <dhtschema> [<cryptokind> [<safety>]]
set <key>[+<safety>] <subkey> <writer> <data>
get <key>[+<safety>] <subkey> [force]
open <key>[+<safety>] [<writer>]
close [<key>]
set [<key>] <subkey> <data>
get [<key>] <subkey> [force]
delete <key>
info <key> [subkey]
info [<key>] [subkey]
watch [<key>] [<subkeys>] [<expiration>] [<count>]
cancel [<key>] [<subkeys>]
--------------------------------------------------------------------
<key> is: VLD0:GsgXCRPrzSK6oBNgxhNpm-rTYFd02R0ySx6j9vbQBG4
* also <node>, <relay>, <target>, <route>
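For instance, a hypothetical debug session exercising the new open/watch/cancel flow, using the sample key shown above (watch, cancel, and close fall back to the most recently opened record when the key is omitted):

record open VLD0:GsgXCRPrzSK6oBNgxhNpm-rTYFd02R0ySx6j9vbQBG4
record watch
record cancel
record close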

View File

@ -1,10 +1,9 @@
use super::*;
use routing_table::NodeRefBase;
///////////////////////////////////////////////////////////////////////////////////////
/// Valid destinations for a message sent over a routing context
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Target {
/// Node by its public key
NodeId(TypedKey),
@ -123,40 +122,10 @@ impl RoutingContext {
async fn get_destination(&self, target: Target) -> VeilidAPIResult<rpc_processor::Destination> {
let rpc_processor = self.api.rpc_processor()?;
match target {
Target::NodeId(node_id) => {
// Resolve node
let mut nr = match rpc_processor
.resolve_node(node_id, self.unlocked_inner.safety_selection)
.await
{
Ok(Some(nr)) => nr,
Ok(None) => apibail_invalid_target!("could not resolve node id"),
Err(e) => return Err(e.into()),
};
// Apply sequencing to match safety selection
nr.set_sequencing(self.sequencing());
Ok(rpc_processor::Destination::Direct {
target: nr,
safety_selection: self.unlocked_inner.safety_selection,
})
}
Target::PrivateRoute(rsid) => {
// Get remote private route
let rss = self.api.routing_table()?.route_spec_store();
let Some(private_route) = rss.best_remote_private_route(&rsid) else {
apibail_invalid_target!("could not get remote private route");
};
Ok(rpc_processor::Destination::PrivateRoute {
private_route,
safety_selection: self.unlocked_inner.safety_selection,
})
}
}
rpc_processor
.resolve_target_to_destination(target, self.unlocked_inner.safety_selection)
.await
.map_err(VeilidAPIError::invalid_target)
}
////////////////////////////////////////////////////////////////
@ -314,13 +283,25 @@ impl RoutingContext {
storage_manager.set_value(key, subkey, data).await
}
/// Watches changes to an opened or created value
/// Add a watch to a DHT value that informs the user via a VeilidUpdate::ValueChange callback when any of the record's watched subkeys change.
/// One remote node will be selected to perform the watch. It will offer an expiration time based on the suggested value and will attempt to
/// continue reporting changes via the callback. Nodes that agree to perform watches are put on our 'ping' list to ensure they are still reachable;
/// otherwise the watch will be cancelled and will have to be re-established.
///
/// Changes to subkeys within the subkey range are returned via a ValueChanged callback
/// If the subkey range is empty, all subkey changes are considered
/// Expiration can be infinite to keep the watch for the maximum amount of time
/// There is only one watch permitted per record. If a change to a watch is desired, the first one will be overwritten.
/// * `key` is the record key to watch. It must first be opened for reading or writing.
/// * `subkeys` is the range of subkeys to watch. The range must not exceed 512 discrete non-overlapping or adjacent subranges. If no range is specified, this is equivalent to watching the entire range of subkeys.
/// * `expiration` is the desired timestamp of when to automatically terminate the watch, in microseconds. If this value is less than `network.rpc.timeout_ms` milliseconds in the future, this function will return an error immediately.
/// * `count` is the maximum number of change notifications the watch will send. A zero value here is equivalent to a cancellation.
///
/// Return value upon success is the amount of time allowed for the watch
/// Returns a timestamp of when the watch will expire. All watches are guaranteed to expire at some point in the future, and the returned timestamp will
/// be no later than the requested expiration, but -may- be before the requested expiration.
///
/// DHT watches are accepted with the following conditions:
/// * First-come first-served basis for arbitrary unauthenticated readers, up to network.dht.public_watch_limit per record
/// * If a member (either the owner or a SMPL schema member) has opened the key for writing (even if no writing is performed) then the watch will be signed and guaranteed up to network.dht.member_watch_limit watches per writer
///
/// Members can be specified via the SMPL schema and do not need to allocate writable subkeys in order to offer a member watch capability.
pub async fn watch_dht_values(
&self,
key: TypedKey,
@ -338,6 +319,8 @@ impl RoutingContext {
/// Cancels a watch early
///
/// This is a convenience function that cancels watching all subkeys in a range
/// Returns Ok(true) if there is any remaining watch for this record
/// Returns Ok(false) if the entire watch has been cancelled
pub async fn cancel_dht_watch(
&self,
key: TypedKey,
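Taken together, the documentation above suggests caller code along these lines. This is a hedged sketch, not an example shipped with the crate: it assumes an already-started `VeilidAPI` handle named `api`, an existing record key, default safety selection, and that a zero expiration means "no preference" (as the debug commands above assume); only the open/watch/cancel/close calls themselves come from this diff.

async fn watch_sketch(api: &VeilidAPI, key: TypedKey) -> VeilidAPIResult<()> {
    let rc = api.routing_context()?;
    // Open for reading; pass Some(writer) instead to qualify for member watches
    let _desc = rc.open_dht_record(key, None).await?;
    // Watch every subkey, asking for up to 10 change notifications
    let expiration_ts = rc
        .watch_dht_values(key, ValueSubkeyRangeSet::full(), Timestamp::new(0), 10)
        .await?;
    if expiration_ts.as_u64() == 0 {
        // No node accepted the watch; the caller may retry later
    }
    // ... later, cancel whatever remains of the watch
    let _still_active = rc.cancel_dht_watch(key, ValueSubkeyRangeSet::full()).await?;
    rc.close_dht_record(key).await?;
    Ok(())
}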

View File

@ -144,6 +144,9 @@ pub fn fix_veilidconfiginner() -> VeilidConfigInner {
remote_max_records: 17,
remote_max_subkey_cache_memory_mb: 18,
remote_max_storage_space_mb: 19,
public_watch_limit: 20,
member_watch_limit: 21,
max_watch_expiration_ms: 22,
},
upnp: true,
detect_address_changes: false,
@ -205,7 +208,7 @@ pub fn fix_veilidconfiginner() -> VeilidConfigInner {
pub fn fix_veilidvaluechange() -> VeilidValueChange {
VeilidValueChange {
key: fix_typedkey(),
subkeys: vec![1, 2, 3, 4],
subkeys: ValueSubkeyRangeSet::new(),
count: 5,
value: ValueData::new_with_seq(23, b"ValueData".to_vec(), fix_cryptokey()).unwrap(),
}

View File

@ -53,6 +53,11 @@ impl DHTSchemaDFLT {
// Subkey out of range
false
}
/// Check if a key is a schema member
pub fn is_member(&self, _key: &PublicKey) -> bool {
false
}
}
impl TryFrom<&[u8]> for DHTSchemaDFLT {

View File

@ -59,6 +59,14 @@ impl DHTSchema {
DHTSchema::SMPL(s) => s.check_subkey_value_data(owner, subkey, value_data),
}
}
/// Check if a key is a schema member
pub fn is_member(&self, key: &PublicKey) -> bool {
match self {
DHTSchema::DFLT(d) => d.is_member(key),
DHTSchema::SMPL(s) => s.is_member(key),
}
}
}
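The dispatch above is what lets the watch limits differentiate callers. A hypothetical helper showing how membership maps onto the new config limits (this function and its parameters are illustrative, not part of the change):

fn watch_limit_for(
    schema: &DHTSchema,
    owner: &PublicKey,
    watcher: &PublicKey,
    public_watch_limit: u32,  // e.g. network.dht.public_watch_limit = 32
    member_watch_limit: u32,  // e.g. network.dht.member_watch_limit = 8
) -> u32 {
    // Owners and SMPL schema members get the signed member-watch allotment;
    // everyone else shares the per-record public allotment.
    if watcher == owner || schema.is_member(watcher) {
        member_watch_limit
    } else {
        public_watch_limit
    }
}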
impl Default for DHTSchema {

View File

@ -93,6 +93,16 @@ impl DHTSchemaSMPL {
// Subkey out of range
false
}
/// Check if a key is a schema member
pub fn is_member(&self, key: &PublicKey) -> bool {
for m in &self.members {
if m.m_key == *key {
return true;
}
}
false
}
}
impl TryFrom<&[u8]> for DHTSchemaSMPL {

View File

@ -16,6 +16,11 @@ impl ValueSubkeyRangeSet {
data: Default::default(),
}
}
pub fn full() -> Self {
let mut data = RangeSetBlaze::new();
data.ranges_insert(u32::MIN..=u32::MAX);
Self { data }
}
pub fn new_with_data(data: RangeSetBlaze<ValueSubkey>) -> Self {
Self { data }
}
@ -24,6 +29,23 @@ impl ValueSubkeyRangeSet {
data.insert(value);
Self { data }
}
pub fn intersect(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
Self::new_with_data(&self.data & &other.data)
}
pub fn difference(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
Self::new_with_data(&self.data - &other.data)
}
pub fn union(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
Self::new_with_data(&self.data | &other.data)
}
pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> {
&self.data
}
pub fn into_data(self) -> RangeSetBlaze<ValueSubkey> {
self.data
}
}
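The new combinators turn subkey bookkeeping into plain set algebra. A hedged sketch (the RangeSetBlaze construction follows the range_set_blaze crate's FromIterator over inclusive ranges and is assumed here rather than shown in this diff):

fn subkey_set_algebra() {
    let watched = ValueSubkeyRangeSet::new_with_data(RangeSetBlaze::from_iter([0u32..=3, 8..=8]));
    let changed = ValueSubkeyRangeSet::new_with_data(RangeSetBlaze::from_iter([2u32..=9]));
    let notify = watched.intersect(&changed);   // 2..=3 and 8..=8: changes we watch
    let ignored = changed.difference(&watched); // 4..=7 and 9..=9: changes we don't
    let combined = watched.union(&changed);     // 0..=9
    let _ = (notify, ignored, combined);
}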
impl FromStr for ValueSubkeyRangeSet {

View File

@ -99,7 +99,7 @@ pub struct VeilidStateConfig {
pub struct VeilidValueChange {
#[schemars(with = "String")]
pub key: TypedKey,
pub subkeys: Vec<ValueSubkey>,
pub subkeys: ValueSubkeyRangeSet,
pub count: u32,
pub value: ValueData,
}
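With subkeys now a ValueSubkeyRangeSet, a consumer of the update callback might look like this sketch (the handler name and re-watch policy are illustrative; the count == 0 with empty subkeys case matches the dead-watch notification emitted by the check-active-watches task above):

fn on_update(update: VeilidUpdate) {
    if let VeilidUpdate::ValueChange(vc) = update {
        if vc.count == 0 && vc.subkeys.data().is_empty() {
            // Watch died or expired; re-issue watch_dht_values() if still interested
        } else {
            // vc.subkeys says which subkeys changed, vc.value carries the
            // changed data, and vc.count is the number of notifications left
        }
    }
}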

View File

@ -1,6 +1,6 @@
use std::path::PathBuf;
use directories::ProjectDirs;
use crate::*;
use directories::ProjectDirs;
use std::path::PathBuf;
////////////////////////////////////////////////////////////////////////////////////////////////
pub type ConfigCallbackReturn = VeilidAPIResult<Box<dyn core::any::Any + Send>>;
@ -263,11 +263,12 @@ impl Default for VeilidConfigTLS {
}
pub fn get_default_ssl_directory(sub_path: &str) -> String {
let default_path = PathBuf::from("/etc/veilid-server/ssl").join(sub_path);
#[cfg(unix)]
if default_path.exists() {
return default_path.to_string_lossy().into();
{
let default_path = PathBuf::from("/etc/veilid-server/ssl").join(sub_path);
if default_path.exists() {
return default_path.to_string_lossy().into();
}
}
ProjectDirs::from("org", "Veilid", "Veilid")
@ -301,6 +302,9 @@ pub struct VeilidConfigDHT {
pub remote_max_records: u32,
pub remote_max_subkey_cache_memory_mb: u32,
pub remote_max_storage_space_mb: u32,
pub public_watch_limit: u32,
pub member_watch_limit: u32,
pub max_watch_expiration_ms: u32,
}
impl Default for VeilidConfigDHT {
@ -326,6 +330,9 @@ impl Default for VeilidConfigDHT {
remote_max_records: 128,
remote_max_subkey_cache_memory_mb: 256,
remote_max_storage_space_mb: 256,
public_watch_limit: 32,
member_watch_limit: 8,
max_watch_expiration_ms: 600000,
}
}
}
@ -758,6 +765,9 @@ impl VeilidConfig {
get_config!(inner.network.dht.remote_max_records);
get_config!(inner.network.dht.remote_max_subkey_cache_memory_mb);
get_config!(inner.network.dht.remote_max_storage_space_mb);
get_config!(inner.network.dht.public_watch_limit);
get_config!(inner.network.dht.member_watch_limit);
get_config!(inner.network.dht.max_watch_expiration_ms);
get_config!(inner.network.rpc.concurrency);
get_config!(inner.network.rpc.queue_size);
get_config!(inner.network.rpc.max_timestamp_behind_ms);

View File

@ -138,7 +138,10 @@ Future<VeilidConfig> getDefaultVeilidConfig(String programName) async {
remoteSubkeyCacheSize: getRemoteSubkeyCacheSize(),
remoteMaxRecords: getRemoteMaxRecords(),
remoteMaxSubkeyCacheMemoryMb: await getRemoteMaxSubkeyCacheMemoryMb(),
remoteMaxStorageSpaceMb: getRemoteMaxStorageSpaceMb()),
remoteMaxStorageSpaceMb: getRemoteMaxStorageSpaceMb(),
publicWatchLimit: 32,
memberWatchLimit: 8,
maxWatchExpirationMs: 600000),
upnp: true,
detectAddressChanges: true,
restrictedNatRetries: 0,

View File

@ -22,11 +22,13 @@ extension ValidateDFLT on DHTSchemaDFLT {
}
return true;
}
int subkeyCount() => oCnt;
}
extension ValidateSMPL on DHTSchemaSMPL {
bool validate() {
final totalsv = members.fold(0, (acc, v) => acc + v.mCnt) + oCnt;
final totalsv = subkeyCount();
if (totalsv > 65535) {
return false;
}
@ -35,6 +37,28 @@ extension ValidateSMPL on DHTSchemaSMPL {
}
return true;
}
int subkeyCount() => members.fold(0, (acc, v) => acc + v.mCnt) + oCnt;
}
extension Validate on DHTSchema {
bool validate() {
if (this is DHTSchemaDFLT) {
return (this as DHTSchemaDFLT).validate();
} else if (this is DHTSchemaSMPL) {
return (this as DHTSchemaSMPL).validate();
}
throw TypeError();
}
int subkeyCount() {
if (this is DHTSchemaDFLT) {
return (this as DHTSchemaDFLT).subkeyCount();
} else if (this is DHTSchemaSMPL) {
return (this as DHTSchemaSMPL).subkeyCount();
}
throw TypeError();
}
}
//////////////////////////////////////
@ -115,6 +139,14 @@ class ValueSubkeyRange with _$ValueSubkeyRange {
_$ValueSubkeyRangeFromJson(json as Map<String, dynamic>);
}
extension ValueSubkeyRangeExt on ValueSubkeyRange {
bool contains(int v) => low <= v && v <= high;
}
extension ListValueSubkeyRangeExt on List<ValueSubkeyRange> {
bool containsSubkey(int v) => indexWhere((e) => e.contains(v)) != -1;
}
//////////////////////////////////////
/// ValueData
@ -256,7 +288,7 @@ abstract class VeilidRoutingContext {
Future<void> deleteDHTRecord(TypedKey key);
Future<ValueData?> getDHTValue(TypedKey key, int subkey, bool forceRefresh);
Future<ValueData?> setDHTValue(TypedKey key, int subkey, Uint8List data);
Future<Timestamp> watchDHTValues(TypedKey key, List<ValueSubkeyRange> subkeys,
Timestamp expiration, int count);
Future<bool> cancelDHTWatch(TypedKey key, List<ValueSubkeyRange> subkeys);
Future<Timestamp> watchDHTValues(TypedKey key,
{List<ValueSubkeyRange>? subkeys, Timestamp? expiration, int? count});
Future<bool> cancelDHTWatch(TypedKey key, {List<ValueSubkeyRange>? subkeys});
}

View File

@ -107,22 +107,22 @@ class _$DHTSchemaCopyWithImpl<$Res, $Val extends DHTSchema>
}
/// @nodoc
abstract class _$$DHTSchemaDFLTImplCopyWith<$Res>
abstract class _$$DHTSchemaDFLTCopyWith<$Res>
implements $DHTSchemaCopyWith<$Res> {
factory _$$DHTSchemaDFLTImplCopyWith(
_$DHTSchemaDFLTImpl value, $Res Function(_$DHTSchemaDFLTImpl) then) =
__$$DHTSchemaDFLTImplCopyWithImpl<$Res>;
factory _$$DHTSchemaDFLTCopyWith(
_$DHTSchemaDFLT value, $Res Function(_$DHTSchemaDFLT) then) =
__$$DHTSchemaDFLTCopyWithImpl<$Res>;
@override
@useResult
$Res call({int oCnt});
}
/// @nodoc
class __$$DHTSchemaDFLTImplCopyWithImpl<$Res>
extends _$DHTSchemaCopyWithImpl<$Res, _$DHTSchemaDFLTImpl>
implements _$$DHTSchemaDFLTImplCopyWith<$Res> {
__$$DHTSchemaDFLTImplCopyWithImpl(
_$DHTSchemaDFLTImpl _value, $Res Function(_$DHTSchemaDFLTImpl) _then)
class __$$DHTSchemaDFLTCopyWithImpl<$Res>
extends _$DHTSchemaCopyWithImpl<$Res, _$DHTSchemaDFLT>
implements _$$DHTSchemaDFLTCopyWith<$Res> {
__$$DHTSchemaDFLTCopyWithImpl(
_$DHTSchemaDFLT _value, $Res Function(_$DHTSchemaDFLT) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -130,7 +130,7 @@ class __$$DHTSchemaDFLTImplCopyWithImpl<$Res>
$Res call({
Object? oCnt = null,
}) {
return _then(_$DHTSchemaDFLTImpl(
return _then(_$DHTSchemaDFLT(
oCnt: null == oCnt
? _value.oCnt
: oCnt // ignore: cast_nullable_to_non_nullable
@ -141,12 +141,12 @@ class __$$DHTSchemaDFLTImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT {
const _$DHTSchemaDFLTImpl({required this.oCnt, final String? $type})
class _$DHTSchemaDFLT implements DHTSchemaDFLT {
const _$DHTSchemaDFLT({required this.oCnt, final String? $type})
: $type = $type ?? 'DFLT';
factory _$DHTSchemaDFLTImpl.fromJson(Map<String, dynamic> json) =>
_$$DHTSchemaDFLTImplFromJson(json);
factory _$DHTSchemaDFLT.fromJson(Map<String, dynamic> json) =>
_$$DHTSchemaDFLTFromJson(json);
@override
final int oCnt;
@ -163,7 +163,7 @@ class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$DHTSchemaDFLTImpl &&
other is _$DHTSchemaDFLT &&
(identical(other.oCnt, oCnt) || other.oCnt == oCnt));
}
@ -174,8 +174,8 @@ class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith =>
__$$DHTSchemaDFLTImplCopyWithImpl<_$DHTSchemaDFLTImpl>(this, _$identity);
_$$DHTSchemaDFLTCopyWith<_$DHTSchemaDFLT> get copyWith =>
__$$DHTSchemaDFLTCopyWithImpl<_$DHTSchemaDFLT>(this, _$identity);
@override
@optionalTypeArgs
@ -241,43 +241,43 @@ class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT {
@override
Map<String, dynamic> toJson() {
return _$$DHTSchemaDFLTImplToJson(
return _$$DHTSchemaDFLTToJson(
this,
);
}
}
abstract class DHTSchemaDFLT implements DHTSchema {
const factory DHTSchemaDFLT({required final int oCnt}) = _$DHTSchemaDFLTImpl;
const factory DHTSchemaDFLT({required final int oCnt}) = _$DHTSchemaDFLT;
factory DHTSchemaDFLT.fromJson(Map<String, dynamic> json) =
_$DHTSchemaDFLTImpl.fromJson;
_$DHTSchemaDFLT.fromJson;
@override
int get oCnt;
@override
@JsonKey(ignore: true)
_$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith =>
_$$DHTSchemaDFLTCopyWith<_$DHTSchemaDFLT> get copyWith =>
throw _privateConstructorUsedError;
}
/// @nodoc
abstract class _$$DHTSchemaSMPLImplCopyWith<$Res>
abstract class _$$DHTSchemaSMPLCopyWith<$Res>
implements $DHTSchemaCopyWith<$Res> {
factory _$$DHTSchemaSMPLImplCopyWith(
_$DHTSchemaSMPLImpl value, $Res Function(_$DHTSchemaSMPLImpl) then) =
__$$DHTSchemaSMPLImplCopyWithImpl<$Res>;
factory _$$DHTSchemaSMPLCopyWith(
_$DHTSchemaSMPL value, $Res Function(_$DHTSchemaSMPL) then) =
__$$DHTSchemaSMPLCopyWithImpl<$Res>;
@override
@useResult
$Res call({int oCnt, List<DHTSchemaMember> members});
}
/// @nodoc
class __$$DHTSchemaSMPLImplCopyWithImpl<$Res>
extends _$DHTSchemaCopyWithImpl<$Res, _$DHTSchemaSMPLImpl>
implements _$$DHTSchemaSMPLImplCopyWith<$Res> {
__$$DHTSchemaSMPLImplCopyWithImpl(
_$DHTSchemaSMPLImpl _value, $Res Function(_$DHTSchemaSMPLImpl) _then)
class __$$DHTSchemaSMPLCopyWithImpl<$Res>
extends _$DHTSchemaCopyWithImpl<$Res, _$DHTSchemaSMPL>
implements _$$DHTSchemaSMPLCopyWith<$Res> {
__$$DHTSchemaSMPLCopyWithImpl(
_$DHTSchemaSMPL _value, $Res Function(_$DHTSchemaSMPL) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -286,7 +286,7 @@ class __$$DHTSchemaSMPLImplCopyWithImpl<$Res>
Object? oCnt = null,
Object? members = null,
}) {
return _then(_$DHTSchemaSMPLImpl(
return _then(_$DHTSchemaSMPL(
oCnt: null == oCnt
? _value.oCnt
: oCnt // ignore: cast_nullable_to_non_nullable
@ -301,16 +301,16 @@ class __$$DHTSchemaSMPLImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
const _$DHTSchemaSMPLImpl(
class _$DHTSchemaSMPL implements DHTSchemaSMPL {
const _$DHTSchemaSMPL(
{required this.oCnt,
required final List<DHTSchemaMember> members,
final String? $type})
: _members = members,
$type = $type ?? 'SMPL';
factory _$DHTSchemaSMPLImpl.fromJson(Map<String, dynamic> json) =>
_$$DHTSchemaSMPLImplFromJson(json);
factory _$DHTSchemaSMPL.fromJson(Map<String, dynamic> json) =>
_$$DHTSchemaSMPLFromJson(json);
@override
final int oCnt;
@ -334,7 +334,7 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$DHTSchemaSMPLImpl &&
other is _$DHTSchemaSMPL &&
(identical(other.oCnt, oCnt) || other.oCnt == oCnt) &&
const DeepCollectionEquality().equals(other._members, _members));
}
@ -347,8 +347,8 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith =>
__$$DHTSchemaSMPLImplCopyWithImpl<_$DHTSchemaSMPLImpl>(this, _$identity);
_$$DHTSchemaSMPLCopyWith<_$DHTSchemaSMPL> get copyWith =>
__$$DHTSchemaSMPLCopyWithImpl<_$DHTSchemaSMPL>(this, _$identity);
@override
@optionalTypeArgs
@ -414,7 +414,7 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
@override
Map<String, dynamic> toJson() {
return _$$DHTSchemaSMPLImplToJson(
return _$$DHTSchemaSMPLToJson(
this,
);
}
@ -423,17 +423,17 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
abstract class DHTSchemaSMPL implements DHTSchema {
const factory DHTSchemaSMPL(
{required final int oCnt,
required final List<DHTSchemaMember> members}) = _$DHTSchemaSMPLImpl;
required final List<DHTSchemaMember> members}) = _$DHTSchemaSMPL;
factory DHTSchemaSMPL.fromJson(Map<String, dynamic> json) =
_$DHTSchemaSMPLImpl.fromJson;
_$DHTSchemaSMPL.fromJson;
@override
int get oCnt;
List<DHTSchemaMember> get members;
@override
@JsonKey(ignore: true)
_$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith =>
_$$DHTSchemaSMPLCopyWith<_$DHTSchemaSMPL> get copyWith =>
throw _privateConstructorUsedError;
}
@ -491,22 +491,22 @@ class _$DHTSchemaMemberCopyWithImpl<$Res, $Val extends DHTSchemaMember>
}
/// @nodoc
abstract class _$$DHTSchemaMemberImplCopyWith<$Res>
abstract class _$$_DHTSchemaMemberCopyWith<$Res>
implements $DHTSchemaMemberCopyWith<$Res> {
factory _$$DHTSchemaMemberImplCopyWith(_$DHTSchemaMemberImpl value,
$Res Function(_$DHTSchemaMemberImpl) then) =
__$$DHTSchemaMemberImplCopyWithImpl<$Res>;
factory _$$_DHTSchemaMemberCopyWith(
_$_DHTSchemaMember value, $Res Function(_$_DHTSchemaMember) then) =
__$$_DHTSchemaMemberCopyWithImpl<$Res>;
@override
@useResult
$Res call({FixedEncodedString43 mKey, int mCnt});
}
/// @nodoc
class __$$DHTSchemaMemberImplCopyWithImpl<$Res>
extends _$DHTSchemaMemberCopyWithImpl<$Res, _$DHTSchemaMemberImpl>
implements _$$DHTSchemaMemberImplCopyWith<$Res> {
__$$DHTSchemaMemberImplCopyWithImpl(
_$DHTSchemaMemberImpl _value, $Res Function(_$DHTSchemaMemberImpl) _then)
class __$$_DHTSchemaMemberCopyWithImpl<$Res>
extends _$DHTSchemaMemberCopyWithImpl<$Res, _$_DHTSchemaMember>
implements _$$_DHTSchemaMemberCopyWith<$Res> {
__$$_DHTSchemaMemberCopyWithImpl(
_$_DHTSchemaMember _value, $Res Function(_$_DHTSchemaMember) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -515,7 +515,7 @@ class __$$DHTSchemaMemberImplCopyWithImpl<$Res>
Object? mKey = null,
Object? mCnt = null,
}) {
return _then(_$DHTSchemaMemberImpl(
return _then(_$_DHTSchemaMember(
mKey: null == mKey
? _value.mKey
: mKey // ignore: cast_nullable_to_non_nullable
@ -530,12 +530,12 @@ class __$$DHTSchemaMemberImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$DHTSchemaMemberImpl implements _DHTSchemaMember {
const _$DHTSchemaMemberImpl({required this.mKey, required this.mCnt})
class _$_DHTSchemaMember implements _DHTSchemaMember {
const _$_DHTSchemaMember({required this.mKey, required this.mCnt})
: assert(mCnt > 0 && mCnt <= 65535, 'value out of range');
factory _$DHTSchemaMemberImpl.fromJson(Map<String, dynamic> json) =>
_$$DHTSchemaMemberImplFromJson(json);
factory _$_DHTSchemaMember.fromJson(Map<String, dynamic> json) =>
_$$_DHTSchemaMemberFromJson(json);
@override
final FixedEncodedString43 mKey;
@ -551,7 +551,7 @@ class _$DHTSchemaMemberImpl implements _DHTSchemaMember {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$DHTSchemaMemberImpl &&
other is _$_DHTSchemaMember &&
(identical(other.mKey, mKey) || other.mKey == mKey) &&
(identical(other.mCnt, mCnt) || other.mCnt == mCnt));
}
@ -563,13 +563,12 @@ class _$DHTSchemaMemberImpl implements _DHTSchemaMember {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith =>
__$$DHTSchemaMemberImplCopyWithImpl<_$DHTSchemaMemberImpl>(
this, _$identity);
_$$_DHTSchemaMemberCopyWith<_$_DHTSchemaMember> get copyWith =>
__$$_DHTSchemaMemberCopyWithImpl<_$_DHTSchemaMember>(this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$DHTSchemaMemberImplToJson(
return _$$_DHTSchemaMemberToJson(
this,
);
}
@ -578,10 +577,10 @@ class _$DHTSchemaMemberImpl implements _DHTSchemaMember {
abstract class _DHTSchemaMember implements DHTSchemaMember {
const factory _DHTSchemaMember(
{required final FixedEncodedString43 mKey,
required final int mCnt}) = _$DHTSchemaMemberImpl;
required final int mCnt}) = _$_DHTSchemaMember;
factory _DHTSchemaMember.fromJson(Map<String, dynamic> json) =
_$DHTSchemaMemberImpl.fromJson;
_$_DHTSchemaMember.fromJson;
@override
FixedEncodedString43 get mKey;
@ -589,7 +588,7 @@ abstract class _DHTSchemaMember implements DHTSchemaMember {
int get mCnt;
@override
@JsonKey(ignore: true)
_$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith =>
_$$_DHTSchemaMemberCopyWith<_$_DHTSchemaMember> get copyWith =>
throw _privateConstructorUsedError;
}
@ -673,11 +672,11 @@ class _$DHTRecordDescriptorCopyWithImpl<$Res, $Val extends DHTRecordDescriptor>
}
/// @nodoc
abstract class _$$DHTRecordDescriptorImplCopyWith<$Res>
abstract class _$$_DHTRecordDescriptorCopyWith<$Res>
implements $DHTRecordDescriptorCopyWith<$Res> {
factory _$$DHTRecordDescriptorImplCopyWith(_$DHTRecordDescriptorImpl value,
$Res Function(_$DHTRecordDescriptorImpl) then) =
__$$DHTRecordDescriptorImplCopyWithImpl<$Res>;
factory _$$_DHTRecordDescriptorCopyWith(_$_DHTRecordDescriptor value,
$Res Function(_$_DHTRecordDescriptor) then) =
__$$_DHTRecordDescriptorCopyWithImpl<$Res>;
@override
@useResult
$Res call(
@ -691,11 +690,11 @@ abstract class _$$DHTRecordDescriptorImplCopyWith<$Res>
}
/// @nodoc
class __$$DHTRecordDescriptorImplCopyWithImpl<$Res>
extends _$DHTRecordDescriptorCopyWithImpl<$Res, _$DHTRecordDescriptorImpl>
implements _$$DHTRecordDescriptorImplCopyWith<$Res> {
__$$DHTRecordDescriptorImplCopyWithImpl(_$DHTRecordDescriptorImpl _value,
$Res Function(_$DHTRecordDescriptorImpl) _then)
class __$$_DHTRecordDescriptorCopyWithImpl<$Res>
extends _$DHTRecordDescriptorCopyWithImpl<$Res, _$_DHTRecordDescriptor>
implements _$$_DHTRecordDescriptorCopyWith<$Res> {
__$$_DHTRecordDescriptorCopyWithImpl(_$_DHTRecordDescriptor _value,
$Res Function(_$_DHTRecordDescriptor) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -706,7 +705,7 @@ class __$$DHTRecordDescriptorImplCopyWithImpl<$Res>
Object? schema = null,
Object? ownerSecret = freezed,
}) {
return _then(_$DHTRecordDescriptorImpl(
return _then(_$_DHTRecordDescriptor(
key: null == key
? _value.key
: key // ignore: cast_nullable_to_non_nullable
@ -729,15 +728,15 @@ class __$$DHTRecordDescriptorImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$DHTRecordDescriptorImpl implements _DHTRecordDescriptor {
const _$DHTRecordDescriptorImpl(
class _$_DHTRecordDescriptor implements _DHTRecordDescriptor {
const _$_DHTRecordDescriptor(
{required this.key,
required this.owner,
required this.schema,
this.ownerSecret});
factory _$DHTRecordDescriptorImpl.fromJson(Map<String, dynamic> json) =>
_$$DHTRecordDescriptorImplFromJson(json);
factory _$_DHTRecordDescriptor.fromJson(Map<String, dynamic> json) =>
_$$_DHTRecordDescriptorFromJson(json);
@override
final Typed<FixedEncodedString43> key;
@ -757,7 +756,7 @@ class _$DHTRecordDescriptorImpl implements _DHTRecordDescriptor {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$DHTRecordDescriptorImpl &&
other is _$_DHTRecordDescriptor &&
(identical(other.key, key) || other.key == key) &&
(identical(other.owner, owner) || other.owner == owner) &&
(identical(other.schema, schema) || other.schema == schema) &&
@ -772,13 +771,13 @@ class _$DHTRecordDescriptorImpl implements _DHTRecordDescriptor {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith =>
__$$DHTRecordDescriptorImplCopyWithImpl<_$DHTRecordDescriptorImpl>(
_$$_DHTRecordDescriptorCopyWith<_$_DHTRecordDescriptor> get copyWith =>
__$$_DHTRecordDescriptorCopyWithImpl<_$_DHTRecordDescriptor>(
this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$DHTRecordDescriptorImplToJson(
return _$$_DHTRecordDescriptorToJson(
this,
);
}
@ -789,10 +788,10 @@ abstract class _DHTRecordDescriptor implements DHTRecordDescriptor {
{required final Typed<FixedEncodedString43> key,
required final FixedEncodedString43 owner,
required final DHTSchema schema,
final FixedEncodedString43? ownerSecret}) = _$DHTRecordDescriptorImpl;
final FixedEncodedString43? ownerSecret}) = _$_DHTRecordDescriptor;
factory _DHTRecordDescriptor.fromJson(Map<String, dynamic> json) =
_$DHTRecordDescriptorImpl.fromJson;
_$_DHTRecordDescriptor.fromJson;
@override
Typed<FixedEncodedString43> get key;
@ -804,7 +803,7 @@ abstract class _DHTRecordDescriptor implements DHTRecordDescriptor {
FixedEncodedString43? get ownerSecret;
@override
@JsonKey(ignore: true)
_$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith =>
_$$_DHTRecordDescriptorCopyWith<_$_DHTRecordDescriptor> get copyWith =>
throw _privateConstructorUsedError;
}
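
Because `ownerSecret` is nullable, the generated copyWith above can clear it. A hypothetical helper, not part of this diff, that strips the secret before the descriptor leaves trusted code:

// Hypothetical: uses the generated freezed copyWith shown above.
DHTRecordDescriptor withoutSecret(DHTRecordDescriptor desc) =>
    desc.copyWith(ownerSecret: null);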
@ -862,22 +861,22 @@ class _$ValueSubkeyRangeCopyWithImpl<$Res, $Val extends ValueSubkeyRange>
}
/// @nodoc
abstract class _$$ValueSubkeyRangeImplCopyWith<$Res>
abstract class _$$_ValueSubkeyRangeCopyWith<$Res>
implements $ValueSubkeyRangeCopyWith<$Res> {
factory _$$ValueSubkeyRangeImplCopyWith(_$ValueSubkeyRangeImpl value,
$Res Function(_$ValueSubkeyRangeImpl) then) =
__$$ValueSubkeyRangeImplCopyWithImpl<$Res>;
factory _$$_ValueSubkeyRangeCopyWith(
_$_ValueSubkeyRange value, $Res Function(_$_ValueSubkeyRange) then) =
__$$_ValueSubkeyRangeCopyWithImpl<$Res>;
@override
@useResult
$Res call({int low, int high});
}
/// @nodoc
class __$$ValueSubkeyRangeImplCopyWithImpl<$Res>
extends _$ValueSubkeyRangeCopyWithImpl<$Res, _$ValueSubkeyRangeImpl>
implements _$$ValueSubkeyRangeImplCopyWith<$Res> {
__$$ValueSubkeyRangeImplCopyWithImpl(_$ValueSubkeyRangeImpl _value,
$Res Function(_$ValueSubkeyRangeImpl) _then)
class __$$_ValueSubkeyRangeCopyWithImpl<$Res>
extends _$ValueSubkeyRangeCopyWithImpl<$Res, _$_ValueSubkeyRange>
implements _$$_ValueSubkeyRangeCopyWith<$Res> {
__$$_ValueSubkeyRangeCopyWithImpl(
_$_ValueSubkeyRange _value, $Res Function(_$_ValueSubkeyRange) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -886,7 +885,7 @@ class __$$ValueSubkeyRangeImplCopyWithImpl<$Res>
Object? low = null,
Object? high = null,
}) {
return _then(_$ValueSubkeyRangeImpl(
return _then(_$_ValueSubkeyRange(
low: null == low
? _value.low
: low // ignore: cast_nullable_to_non_nullable
@ -901,13 +900,13 @@ class __$$ValueSubkeyRangeImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$ValueSubkeyRangeImpl implements _ValueSubkeyRange {
const _$ValueSubkeyRangeImpl({required this.low, required this.high})
class _$_ValueSubkeyRange implements _ValueSubkeyRange {
const _$_ValueSubkeyRange({required this.low, required this.high})
    : assert(low >= 0 && low <= high, 'low out of range'),
      assert(high >= 0, 'high out of range');
factory _$ValueSubkeyRangeImpl.fromJson(Map<String, dynamic> json) =>
_$$ValueSubkeyRangeImplFromJson(json);
factory _$_ValueSubkeyRange.fromJson(Map<String, dynamic> json) =>
_$$_ValueSubkeyRangeFromJson(json);
@override
final int low;
@ -923,7 +922,7 @@ class _$ValueSubkeyRangeImpl implements _ValueSubkeyRange {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$ValueSubkeyRangeImpl &&
other is _$_ValueSubkeyRange &&
(identical(other.low, low) || other.low == low) &&
(identical(other.high, high) || other.high == high));
}
@ -935,13 +934,12 @@ class _$ValueSubkeyRangeImpl implements _ValueSubkeyRange {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$ValueSubkeyRangeImplCopyWith<_$ValueSubkeyRangeImpl> get copyWith =>
__$$ValueSubkeyRangeImplCopyWithImpl<_$ValueSubkeyRangeImpl>(
this, _$identity);
_$$_ValueSubkeyRangeCopyWith<_$_ValueSubkeyRange> get copyWith =>
__$$_ValueSubkeyRangeCopyWithImpl<_$_ValueSubkeyRange>(this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$ValueSubkeyRangeImplToJson(
return _$$_ValueSubkeyRangeToJson(
this,
);
}
@ -949,11 +947,10 @@ class _$ValueSubkeyRangeImpl implements _ValueSubkeyRange {
abstract class _ValueSubkeyRange implements ValueSubkeyRange {
const factory _ValueSubkeyRange(
{required final int low,
required final int high}) = _$ValueSubkeyRangeImpl;
{required final int low, required final int high}) = _$_ValueSubkeyRange;
factory _ValueSubkeyRange.fromJson(Map<String, dynamic> json) =
_$ValueSubkeyRangeImpl.fromJson;
_$_ValueSubkeyRange.fromJson;
@override
int get low;
@ -961,7 +958,7 @@ abstract class _ValueSubkeyRange implements ValueSubkeyRange {
int get high;
@override
@JsonKey(ignore: true)
_$$ValueSubkeyRangeImplCopyWith<_$ValueSubkeyRangeImpl> get copyWith =>
_$$_ValueSubkeyRangeCopyWith<_$_ValueSubkeyRange> get copyWith =>
throw _privateConstructorUsedError;
}
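
A subkey range is a closed interval, assuming the intended invariant is low >= 0 && low <= high as the asserts suggest. A minimal sketch with illustrative values:

// Illustrative only: the first four subkeys of a record.
const firstFour = ValueSubkeyRange(low: 0, high: 3);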
@ -1028,11 +1025,10 @@ class _$ValueDataCopyWithImpl<$Res, $Val extends ValueData>
}
/// @nodoc
abstract class _$$ValueDataImplCopyWith<$Res>
implements $ValueDataCopyWith<$Res> {
factory _$$ValueDataImplCopyWith(
_$ValueDataImpl value, $Res Function(_$ValueDataImpl) then) =
__$$ValueDataImplCopyWithImpl<$Res>;
abstract class _$$_ValueDataCopyWith<$Res> implements $ValueDataCopyWith<$Res> {
factory _$$_ValueDataCopyWith(
_$_ValueData value, $Res Function(_$_ValueData) then) =
__$$_ValueDataCopyWithImpl<$Res>;
@override
@useResult
$Res call(
@ -1042,11 +1038,11 @@ abstract class _$$ValueDataImplCopyWith<$Res>
}
/// @nodoc
class __$$ValueDataImplCopyWithImpl<$Res>
extends _$ValueDataCopyWithImpl<$Res, _$ValueDataImpl>
implements _$$ValueDataImplCopyWith<$Res> {
__$$ValueDataImplCopyWithImpl(
_$ValueDataImpl _value, $Res Function(_$ValueDataImpl) _then)
class __$$_ValueDataCopyWithImpl<$Res>
extends _$ValueDataCopyWithImpl<$Res, _$_ValueData>
implements _$$_ValueDataCopyWith<$Res> {
__$$_ValueDataCopyWithImpl(
_$_ValueData _value, $Res Function(_$_ValueData) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -1056,7 +1052,7 @@ class __$$ValueDataImplCopyWithImpl<$Res>
Object? data = null,
Object? writer = null,
}) {
return _then(_$ValueDataImpl(
return _then(_$_ValueData(
seq: null == seq
? _value.seq
: seq // ignore: cast_nullable_to_non_nullable
@ -1075,15 +1071,15 @@ class __$$ValueDataImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$ValueDataImpl implements _ValueData {
const _$ValueDataImpl(
class _$_ValueData implements _ValueData {
const _$_ValueData(
{required this.seq,
@Uint8ListJsonConverter.jsIsArray() required this.data,
required this.writer})
: assert(seq >= 0, 'seq out of range');
factory _$ValueDataImpl.fromJson(Map<String, dynamic> json) =>
_$$ValueDataImplFromJson(json);
factory _$_ValueData.fromJson(Map<String, dynamic> json) =>
_$$_ValueDataFromJson(json);
@override
final int seq;
@ -1102,7 +1098,7 @@ class _$ValueDataImpl implements _ValueData {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$ValueDataImpl &&
other is _$_ValueData &&
(identical(other.seq, seq) || other.seq == seq) &&
const DeepCollectionEquality().equals(other.data, data) &&
(identical(other.writer, writer) || other.writer == writer));
@ -1116,12 +1112,12 @@ class _$ValueDataImpl implements _ValueData {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith =>
__$$ValueDataImplCopyWithImpl<_$ValueDataImpl>(this, _$identity);
_$$_ValueDataCopyWith<_$_ValueData> get copyWith =>
__$$_ValueDataCopyWithImpl<_$_ValueData>(this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$ValueDataImplToJson(
return _$$_ValueDataToJson(
this,
);
}
@ -1131,10 +1127,10 @@ abstract class _ValueData implements ValueData {
const factory _ValueData(
{required final int seq,
@Uint8ListJsonConverter.jsIsArray() required final Uint8List data,
required final FixedEncodedString43 writer}) = _$ValueDataImpl;
required final FixedEncodedString43 writer}) = _$_ValueData;
factory _ValueData.fromJson(Map<String, dynamic> json) =
_$ValueDataImpl.fromJson;
_$_ValueData.fromJson;
@override
int get seq;
@ -1145,7 +1141,7 @@ abstract class _ValueData implements ValueData {
FixedEncodedString43 get writer;
@override
@JsonKey(ignore: true)
_$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith =>
_$$_ValueDataCopyWith<_$_ValueData> get copyWith =>
throw _privateConstructorUsedError;
}
@ -1219,11 +1215,11 @@ class _$SafetySpecCopyWithImpl<$Res, $Val extends SafetySpec>
}
/// @nodoc
abstract class _$$SafetySpecImplCopyWith<$Res>
abstract class _$$_SafetySpecCopyWith<$Res>
implements $SafetySpecCopyWith<$Res> {
factory _$$SafetySpecImplCopyWith(
_$SafetySpecImpl value, $Res Function(_$SafetySpecImpl) then) =
__$$SafetySpecImplCopyWithImpl<$Res>;
factory _$$_SafetySpecCopyWith(
_$_SafetySpec value, $Res Function(_$_SafetySpec) then) =
__$$_SafetySpecCopyWithImpl<$Res>;
@override
@useResult
$Res call(
@ -1234,11 +1230,11 @@ abstract class _$$SafetySpecImplCopyWith<$Res>
}
/// @nodoc
class __$$SafetySpecImplCopyWithImpl<$Res>
extends _$SafetySpecCopyWithImpl<$Res, _$SafetySpecImpl>
implements _$$SafetySpecImplCopyWith<$Res> {
__$$SafetySpecImplCopyWithImpl(
_$SafetySpecImpl _value, $Res Function(_$SafetySpecImpl) _then)
class __$$_SafetySpecCopyWithImpl<$Res>
extends _$SafetySpecCopyWithImpl<$Res, _$_SafetySpec>
implements _$$_SafetySpecCopyWith<$Res> {
__$$_SafetySpecCopyWithImpl(
_$_SafetySpec _value, $Res Function(_$_SafetySpec) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -1249,7 +1245,7 @@ class __$$SafetySpecImplCopyWithImpl<$Res>
Object? sequencing = null,
Object? preferredRoute = freezed,
}) {
return _then(_$SafetySpecImpl(
return _then(_$_SafetySpec(
hopCount: null == hopCount
? _value.hopCount
: hopCount // ignore: cast_nullable_to_non_nullable
@ -1272,15 +1268,15 @@ class __$$SafetySpecImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$SafetySpecImpl implements _SafetySpec {
const _$SafetySpecImpl(
class _$_SafetySpec implements _SafetySpec {
const _$_SafetySpec(
{required this.hopCount,
required this.stability,
required this.sequencing,
this.preferredRoute});
factory _$SafetySpecImpl.fromJson(Map<String, dynamic> json) =>
_$$SafetySpecImplFromJson(json);
factory _$_SafetySpec.fromJson(Map<String, dynamic> json) =>
_$$_SafetySpecFromJson(json);
@override
final int hopCount;
@ -1300,7 +1296,7 @@ class _$SafetySpecImpl implements _SafetySpec {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$SafetySpecImpl &&
other is _$_SafetySpec &&
(identical(other.hopCount, hopCount) ||
other.hopCount == hopCount) &&
(identical(other.stability, stability) ||
@ -1319,12 +1315,12 @@ class _$SafetySpecImpl implements _SafetySpec {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith =>
__$$SafetySpecImplCopyWithImpl<_$SafetySpecImpl>(this, _$identity);
_$$_SafetySpecCopyWith<_$_SafetySpec> get copyWith =>
__$$_SafetySpecCopyWithImpl<_$_SafetySpec>(this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$SafetySpecImplToJson(
return _$$_SafetySpecToJson(
this,
);
}
@ -1335,10 +1331,10 @@ abstract class _SafetySpec implements SafetySpec {
{required final int hopCount,
required final Stability stability,
required final Sequencing sequencing,
final String? preferredRoute}) = _$SafetySpecImpl;
final String? preferredRoute}) = _$_SafetySpec;
factory _SafetySpec.fromJson(Map<String, dynamic> json) =
_$SafetySpecImpl.fromJson;
_$_SafetySpec.fromJson;
@override
int get hopCount;
@ -1350,7 +1346,7 @@ abstract class _SafetySpec implements SafetySpec {
String? get preferredRoute;
@override
@JsonKey(ignore: true)
_$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith =>
_$$_SafetySpecCopyWith<_$_SafetySpec> get copyWith =>
throw _privateConstructorUsedError;
}
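
A sketch of constructing a spec from these fields; the enum member names are assumed from the Veilid Dart API and the numbers are illustrative:

// Hypothetical values: a two-hop safety route preferring reliable,
// ordered delivery; preferredRoute is left unset.
const spec = SafetySpec(
  hopCount: 2,
  stability: Stability.reliable,
  sequencing: Sequencing.ensureOrdered,
);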
@ -1408,22 +1404,21 @@ class _$RouteBlobCopyWithImpl<$Res, $Val extends RouteBlob>
}
/// @nodoc
abstract class _$$RouteBlobImplCopyWith<$Res>
implements $RouteBlobCopyWith<$Res> {
factory _$$RouteBlobImplCopyWith(
_$RouteBlobImpl value, $Res Function(_$RouteBlobImpl) then) =
__$$RouteBlobImplCopyWithImpl<$Res>;
abstract class _$$_RouteBlobCopyWith<$Res> implements $RouteBlobCopyWith<$Res> {
factory _$$_RouteBlobCopyWith(
_$_RouteBlob value, $Res Function(_$_RouteBlob) then) =
__$$_RouteBlobCopyWithImpl<$Res>;
@override
@useResult
$Res call({String routeId, @Uint8ListJsonConverter() Uint8List blob});
}
/// @nodoc
class __$$RouteBlobImplCopyWithImpl<$Res>
extends _$RouteBlobCopyWithImpl<$Res, _$RouteBlobImpl>
implements _$$RouteBlobImplCopyWith<$Res> {
__$$RouteBlobImplCopyWithImpl(
_$RouteBlobImpl _value, $Res Function(_$RouteBlobImpl) _then)
class __$$_RouteBlobCopyWithImpl<$Res>
extends _$RouteBlobCopyWithImpl<$Res, _$_RouteBlob>
implements _$$_RouteBlobCopyWith<$Res> {
__$$_RouteBlobCopyWithImpl(
_$_RouteBlob _value, $Res Function(_$_RouteBlob) _then)
: super(_value, _then);
@pragma('vm:prefer-inline')
@ -1432,7 +1427,7 @@ class __$$RouteBlobImplCopyWithImpl<$Res>
Object? routeId = null,
Object? blob = null,
}) {
return _then(_$RouteBlobImpl(
return _then(_$_RouteBlob(
routeId: null == routeId
? _value.routeId
: routeId // ignore: cast_nullable_to_non_nullable
@ -1447,12 +1442,12 @@ class __$$RouteBlobImplCopyWithImpl<$Res>
/// @nodoc
@JsonSerializable()
class _$RouteBlobImpl implements _RouteBlob {
const _$RouteBlobImpl(
class _$_RouteBlob implements _RouteBlob {
const _$_RouteBlob(
{required this.routeId, @Uint8ListJsonConverter() required this.blob});
factory _$RouteBlobImpl.fromJson(Map<String, dynamic> json) =>
_$$RouteBlobImplFromJson(json);
factory _$_RouteBlob.fromJson(Map<String, dynamic> json) =>
_$$_RouteBlobFromJson(json);
@override
final String routeId;
@ -1469,7 +1464,7 @@ class _$RouteBlobImpl implements _RouteBlob {
bool operator ==(dynamic other) {
return identical(this, other) ||
(other.runtimeType == runtimeType &&
other is _$RouteBlobImpl &&
other is _$_RouteBlob &&
(identical(other.routeId, routeId) || other.routeId == routeId) &&
const DeepCollectionEquality().equals(other.blob, blob));
}
@ -1482,12 +1477,12 @@ class _$RouteBlobImpl implements _RouteBlob {
@JsonKey(ignore: true)
@override
@pragma('vm:prefer-inline')
_$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith =>
__$$RouteBlobImplCopyWithImpl<_$RouteBlobImpl>(this, _$identity);
_$$_RouteBlobCopyWith<_$_RouteBlob> get copyWith =>
__$$_RouteBlobCopyWithImpl<_$_RouteBlob>(this, _$identity);
@override
Map<String, dynamic> toJson() {
return _$$RouteBlobImplToJson(
return _$$_RouteBlobToJson(
this,
);
}
@ -1495,12 +1490,11 @@ class _$RouteBlobImpl implements _RouteBlob {
abstract class _RouteBlob implements RouteBlob {
const factory _RouteBlob(
{required final String routeId,
@Uint8ListJsonConverter() required final Uint8List blob}) =
_$RouteBlobImpl;
{required final String routeId,
@Uint8ListJsonConverter() required final Uint8List blob}) = _$_RouteBlob;
factory _RouteBlob.fromJson(Map<String, dynamic> json) =
_$RouteBlobImpl.fromJson;
_$_RouteBlob.fromJson;
@override
String get routeId;
@ -1509,6 +1503,6 @@ abstract class _RouteBlob implements RouteBlob {
Uint8List get blob;
@override
@JsonKey(ignore: true)
_$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith =>
_$$_RouteBlobCopyWith<_$_RouteBlob> get copyWith =>
throw _privateConstructorUsedError;
}

View File

@ -6,20 +6,20 @@ part of 'routing_context.dart';
// JsonSerializableGenerator
// **************************************************************************
_$DHTSchemaDFLTImpl _$$DHTSchemaDFLTImplFromJson(Map<String, dynamic> json) =>
_$DHTSchemaDFLTImpl(
_$DHTSchemaDFLT _$$DHTSchemaDFLTFromJson(Map<String, dynamic> json) =>
_$DHTSchemaDFLT(
oCnt: json['o_cnt'] as int,
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$DHTSchemaDFLTImplToJson(_$DHTSchemaDFLTImpl instance) =>
Map<String, dynamic> _$$DHTSchemaDFLTToJson(_$DHTSchemaDFLT instance) =>
<String, dynamic>{
'o_cnt': instance.oCnt,
'kind': instance.$type,
};
_$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) =>
_$DHTSchemaSMPLImpl(
_$DHTSchemaSMPL _$$DHTSchemaSMPLFromJson(Map<String, dynamic> json) =>
_$DHTSchemaSMPL(
oCnt: json['o_cnt'] as int,
members: (json['members'] as List<dynamic>)
.map(DHTSchemaMember.fromJson)
@ -27,30 +27,28 @@ _$DHTSchemaSMPLImpl _$$DHTSchemaSMPLImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$DHTSchemaSMPLImplToJson(_$DHTSchemaSMPLImpl instance) =>
Map<String, dynamic> _$$DHTSchemaSMPLToJson(_$DHTSchemaSMPL instance) =>
<String, dynamic>{
'o_cnt': instance.oCnt,
'members': instance.members.map((e) => e.toJson()).toList(),
'kind': instance.$type,
};
_$DHTSchemaMemberImpl _$$DHTSchemaMemberImplFromJson(
Map<String, dynamic> json) =>
_$DHTSchemaMemberImpl(
_$_DHTSchemaMember _$$_DHTSchemaMemberFromJson(Map<String, dynamic> json) =>
_$_DHTSchemaMember(
mKey: FixedEncodedString43.fromJson(json['m_key']),
mCnt: json['m_cnt'] as int,
);
Map<String, dynamic> _$$DHTSchemaMemberImplToJson(
_$DHTSchemaMemberImpl instance) =>
Map<String, dynamic> _$$_DHTSchemaMemberToJson(_$_DHTSchemaMember instance) =>
<String, dynamic>{
'm_key': instance.mKey.toJson(),
'm_cnt': instance.mCnt,
};
_$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson(
_$_DHTRecordDescriptor _$$_DHTRecordDescriptorFromJson(
Map<String, dynamic> json) =>
_$DHTRecordDescriptorImpl(
_$_DHTRecordDescriptor(
key: Typed<FixedEncodedString43>.fromJson(json['key']),
owner: FixedEncodedString43.fromJson(json['owner']),
schema: DHTSchema.fromJson(json['schema']),
@ -59,8 +57,8 @@ _$DHTRecordDescriptorImpl _$$DHTRecordDescriptorImplFromJson(
: FixedEncodedString43.fromJson(json['owner_secret']),
);
Map<String, dynamic> _$$DHTRecordDescriptorImplToJson(
_$DHTRecordDescriptorImpl instance) =>
Map<String, dynamic> _$$_DHTRecordDescriptorToJson(
_$_DHTRecordDescriptor instance) =>
<String, dynamic>{
'key': instance.key.toJson(),
'owner': instance.owner.toJson(),
@ -68,43 +66,40 @@ Map<String, dynamic> _$$DHTRecordDescriptorImplToJson(
'owner_secret': instance.ownerSecret?.toJson(),
};
_$ValueSubkeyRangeImpl _$$ValueSubkeyRangeImplFromJson(
Map<String, dynamic> json) =>
_$ValueSubkeyRangeImpl(
_$_ValueSubkeyRange _$$_ValueSubkeyRangeFromJson(Map<String, dynamic> json) =>
_$_ValueSubkeyRange(
low: json['low'] as int,
high: json['high'] as int,
);
Map<String, dynamic> _$$ValueSubkeyRangeImplToJson(
_$ValueSubkeyRangeImpl instance) =>
Map<String, dynamic> _$$_ValueSubkeyRangeToJson(_$_ValueSubkeyRange instance) =>
<String, dynamic>{
'low': instance.low,
'high': instance.high,
};
_$ValueDataImpl _$$ValueDataImplFromJson(Map<String, dynamic> json) =>
_$ValueDataImpl(
_$_ValueData _$$_ValueDataFromJson(Map<String, dynamic> json) => _$_ValueData(
seq: json['seq'] as int,
data: const Uint8ListJsonConverter.jsIsArray().fromJson(json['data']),
writer: FixedEncodedString43.fromJson(json['writer']),
);
Map<String, dynamic> _$$ValueDataImplToJson(_$ValueDataImpl instance) =>
Map<String, dynamic> _$$_ValueDataToJson(_$_ValueData instance) =>
<String, dynamic>{
'seq': instance.seq,
'data': const Uint8ListJsonConverter.jsIsArray().toJson(instance.data),
'writer': instance.writer.toJson(),
};
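
For reference, a sketch of the map shape the generated _$$_ValueDataToJson above produces; the values are made up and the writer string is a placeholder:

// Illustrative only, not from this diff. The jsIsArray converter emits
// the value bytes as a plain JSON array.
final wireShape = <String, dynamic>{
  'seq': 0,
  'data': [104, 105], // raw value bytes
  'writer': '<43-character encoded public key>', // placeholder
};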
_$SafetySpecImpl _$$SafetySpecImplFromJson(Map<String, dynamic> json) =>
_$SafetySpecImpl(
_$_SafetySpec _$$_SafetySpecFromJson(Map<String, dynamic> json) =>
_$_SafetySpec(
hopCount: json['hop_count'] as int,
stability: Stability.fromJson(json['stability']),
sequencing: Sequencing.fromJson(json['sequencing']),
preferredRoute: json['preferred_route'] as String?,
);
Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) =>
Map<String, dynamic> _$$_SafetySpecToJson(_$_SafetySpec instance) =>
<String, dynamic>{
'hop_count': instance.hopCount,
'stability': instance.stability.toJson(),
@ -112,13 +107,12 @@ Map<String, dynamic> _$$SafetySpecImplToJson(_$SafetySpecImpl instance) =>
'preferred_route': instance.preferredRoute,
};
_$RouteBlobImpl _$$RouteBlobImplFromJson(Map<String, dynamic> json) =>
_$RouteBlobImpl(
_$_RouteBlob _$$_RouteBlobFromJson(Map<String, dynamic> json) => _$_RouteBlob(
routeId: json['route_id'] as String,
blob: const Uint8ListJsonConverter().fromJson(json['blob']),
);
Map<String, dynamic> _$$RouteBlobImplToJson(_$RouteBlobImpl instance) =>
Map<String, dynamic> _$$_RouteBlobToJson(_$_RouteBlob instance) =>
<String, dynamic>{
'route_id': instance.routeId,
'blob': const Uint8ListJsonConverter().toJson(instance.blob),

View File

@ -263,26 +263,30 @@ class VeilidConfigTLS with _$VeilidConfigTLS {
////////////
@freezed
class VeilidConfigDHT with _$VeilidConfigDHT {
const factory VeilidConfigDHT(
{required int resolveNodeTimeoutMs,
required int resolveNodeCount,
required int resolveNodeFanout,
required int maxFindNodeCount,
required int getValueTimeoutMs,
required int getValueCount,
required int getValueFanout,
required int setValueTimeoutMs,
required int setValueCount,
required int setValueFanout,
required int minPeerCount,
required int minPeerRefreshTimeMs,
required int validateDialInfoReceiptTimeMs,
required int localSubkeyCacheSize,
required int localMaxSubkeyCacheMemoryMb,
required int remoteSubkeyCacheSize,
required int remoteMaxRecords,
required int remoteMaxSubkeyCacheMemoryMb,
required int remoteMaxStorageSpaceMb}) = _VeilidConfigDHT;
const factory VeilidConfigDHT({
required int resolveNodeTimeoutMs,
required int resolveNodeCount,
required int resolveNodeFanout,
required int maxFindNodeCount,
required int getValueTimeoutMs,
required int getValueCount,
required int getValueFanout,
required int setValueTimeoutMs,
required int setValueCount,
required int setValueFanout,
required int minPeerCount,
required int minPeerRefreshTimeMs,
required int validateDialInfoReceiptTimeMs,
required int localSubkeyCacheSize,
required int localMaxSubkeyCacheMemoryMb,
required int remoteSubkeyCacheSize,
required int remoteMaxRecords,
required int remoteMaxSubkeyCacheMemoryMb,
required int remoteMaxStorageSpaceMb,
required int publicWatchLimit,
required int memberWatchLimit,
required int maxWatchExpirationMs,
}) = _VeilidConfigDHT;
factory VeilidConfigDHT.fromJson(dynamic json) =>
_$VeilidConfigDHTFromJson(json as Map<String, dynamic>);
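
Since every field is required, code constructing the config directly must now supply the three new watch limits as well. A sketch with purely illustrative numbers, not recommended defaults:

// Sketch only. The last three fields are the ones this change adds;
// judging by their names they bound public and per-member watches and
// cap how long a watch may live.
const dht = VeilidConfigDHT(
  resolveNodeTimeoutMs: 10000,
  resolveNodeCount: 1,
  resolveNodeFanout: 4,
  maxFindNodeCount: 20,
  getValueTimeoutMs: 10000,
  getValueCount: 3,
  getValueFanout: 4,
  setValueTimeoutMs: 10000,
  setValueCount: 5,
  setValueFanout: 4,
  minPeerCount: 20,
  minPeerRefreshTimeMs: 60000,
  validateDialInfoReceiptTimeMs: 2000,
  localSubkeyCacheSize: 128,
  localMaxSubkeyCacheMemoryMb: 256,
  remoteSubkeyCacheSize: 128,
  remoteMaxRecords: 65536,
  remoteMaxSubkeyCacheMemoryMb: 256,
  remoteMaxStorageSpaceMb: 0,
  publicWatchLimit: 32,
  memberWatchLimit: 8,
  maxWatchExpirationMs: 600000,
);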

File diff suppressed because it is too large

View File

@ -6,31 +6,31 @@ part of 'veilid_config.dart';
// JsonSerializableGenerator
// **************************************************************************
_$VeilidFFIConfigLoggingTerminalImpl
_$$VeilidFFIConfigLoggingTerminalImplFromJson(Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingTerminalImpl(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
);
_$_VeilidFFIConfigLoggingTerminal _$$_VeilidFFIConfigLoggingTerminalFromJson(
Map<String, dynamic> json) =>
_$_VeilidFFIConfigLoggingTerminal(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingTerminalImplToJson(
_$VeilidFFIConfigLoggingTerminalImpl instance) =>
Map<String, dynamic> _$$_VeilidFFIConfigLoggingTerminalToJson(
_$_VeilidFFIConfigLoggingTerminal instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
};
_$VeilidFFIConfigLoggingOtlpImpl _$$VeilidFFIConfigLoggingOtlpImplFromJson(
_$_VeilidFFIConfigLoggingOtlp _$$_VeilidFFIConfigLoggingOtlpFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingOtlpImpl(
_$_VeilidFFIConfigLoggingOtlp(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
grpcEndpoint: json['grpc_endpoint'] as String,
serviceName: json['service_name'] as String,
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson(
_$VeilidFFIConfigLoggingOtlpImpl instance) =>
Map<String, dynamic> _$$_VeilidFFIConfigLoggingOtlpToJson(
_$_VeilidFFIConfigLoggingOtlp instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
@ -38,60 +38,57 @@ Map<String, dynamic> _$$VeilidFFIConfigLoggingOtlpImplToJson(
'service_name': instance.serviceName,
};
_$VeilidFFIConfigLoggingApiImpl _$$VeilidFFIConfigLoggingApiImplFromJson(
_$_VeilidFFIConfigLoggingApi _$$_VeilidFFIConfigLoggingApiFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingApiImpl(
_$_VeilidFFIConfigLoggingApi(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingApiImplToJson(
_$VeilidFFIConfigLoggingApiImpl instance) =>
Map<String, dynamic> _$$_VeilidFFIConfigLoggingApiToJson(
_$_VeilidFFIConfigLoggingApi instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
};
_$VeilidFFIConfigLoggingImpl _$$VeilidFFIConfigLoggingImplFromJson(
_$_VeilidFFIConfigLogging _$$_VeilidFFIConfigLoggingFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigLoggingImpl(
_$_VeilidFFIConfigLogging(
terminal: VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']),
otlp: VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']),
api: VeilidFFIConfigLoggingApi.fromJson(json['api']),
);
Map<String, dynamic> _$$VeilidFFIConfigLoggingImplToJson(
_$VeilidFFIConfigLoggingImpl instance) =>
Map<String, dynamic> _$$_VeilidFFIConfigLoggingToJson(
_$_VeilidFFIConfigLogging instance) =>
<String, dynamic>{
'terminal': instance.terminal.toJson(),
'otlp': instance.otlp.toJson(),
'api': instance.api.toJson(),
};
_$VeilidFFIConfigImpl _$$VeilidFFIConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidFFIConfigImpl(
_$_VeilidFFIConfig _$$_VeilidFFIConfigFromJson(Map<String, dynamic> json) =>
_$_VeilidFFIConfig(
logging: VeilidFFIConfigLogging.fromJson(json['logging']),
);
Map<String, dynamic> _$$VeilidFFIConfigImplToJson(
_$VeilidFFIConfigImpl instance) =>
Map<String, dynamic> _$$_VeilidFFIConfigToJson(_$_VeilidFFIConfig instance) =>
<String, dynamic>{
'logging': instance.logging.toJson(),
};
_$VeilidWASMConfigLoggingPerformanceImpl
_$$VeilidWASMConfigLoggingPerformanceImplFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingPerformanceImpl(
_$_VeilidWASMConfigLoggingPerformance
_$$_VeilidWASMConfigLoggingPerformanceFromJson(Map<String, dynamic> json) =>
_$_VeilidWASMConfigLoggingPerformance(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
logsInTimings: json['logs_in_timings'] as bool,
logsInConsole: json['logs_in_console'] as bool,
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson(
_$VeilidWASMConfigLoggingPerformanceImpl instance) =>
Map<String, dynamic> _$$_VeilidWASMConfigLoggingPerformanceToJson(
_$_VeilidWASMConfigLoggingPerformance instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
@ -99,58 +96,55 @@ Map<String, dynamic> _$$VeilidWASMConfigLoggingPerformanceImplToJson(
'logs_in_console': instance.logsInConsole,
};
_$VeilidWASMConfigLoggingApiImpl _$$VeilidWASMConfigLoggingApiImplFromJson(
_$_VeilidWASMConfigLoggingApi _$$_VeilidWASMConfigLoggingApiFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingApiImpl(
_$_VeilidWASMConfigLoggingApi(
enabled: json['enabled'] as bool,
level: VeilidConfigLogLevel.fromJson(json['level']),
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingApiImplToJson(
_$VeilidWASMConfigLoggingApiImpl instance) =>
Map<String, dynamic> _$$_VeilidWASMConfigLoggingApiToJson(
_$_VeilidWASMConfigLoggingApi instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'level': instance.level.toJson(),
};
_$VeilidWASMConfigLoggingImpl _$$VeilidWASMConfigLoggingImplFromJson(
_$_VeilidWASMConfigLogging _$$_VeilidWASMConfigLoggingFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigLoggingImpl(
_$_VeilidWASMConfigLogging(
performance:
VeilidWASMConfigLoggingPerformance.fromJson(json['performance']),
api: VeilidWASMConfigLoggingApi.fromJson(json['api']),
);
Map<String, dynamic> _$$VeilidWASMConfigLoggingImplToJson(
_$VeilidWASMConfigLoggingImpl instance) =>
Map<String, dynamic> _$$_VeilidWASMConfigLoggingToJson(
_$_VeilidWASMConfigLogging instance) =>
<String, dynamic>{
'performance': instance.performance.toJson(),
'api': instance.api.toJson(),
};
_$VeilidWASMConfigImpl _$$VeilidWASMConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidWASMConfigImpl(
_$_VeilidWASMConfig _$$_VeilidWASMConfigFromJson(Map<String, dynamic> json) =>
_$_VeilidWASMConfig(
logging: VeilidWASMConfigLogging.fromJson(json['logging']),
);
Map<String, dynamic> _$$VeilidWASMConfigImplToJson(
_$VeilidWASMConfigImpl instance) =>
Map<String, dynamic> _$$_VeilidWASMConfigToJson(_$_VeilidWASMConfig instance) =>
<String, dynamic>{
'logging': instance.logging.toJson(),
};
_$VeilidConfigHTTPSImpl _$$VeilidConfigHTTPSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigHTTPSImpl(
_$_VeilidConfigHTTPS _$$_VeilidConfigHTTPSFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigHTTPS(
enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String,
path: json['path'] as String,
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson(
_$VeilidConfigHTTPSImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigHTTPSToJson(
_$_VeilidConfigHTTPS instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'listen_address': instance.listenAddress,
@ -158,17 +152,15 @@ Map<String, dynamic> _$$VeilidConfigHTTPSImplToJson(
'url': instance.url,
};
_$VeilidConfigHTTPImpl _$$VeilidConfigHTTPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigHTTPImpl(
_$_VeilidConfigHTTP _$$_VeilidConfigHTTPFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigHTTP(
enabled: json['enabled'] as bool,
listenAddress: json['listen_address'] as String,
path: json['path'] as String,
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigHTTPImplToJson(
_$VeilidConfigHTTPImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigHTTPToJson(_$_VeilidConfigHTTP instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'listen_address': instance.listenAddress,
@ -176,31 +168,29 @@ Map<String, dynamic> _$$VeilidConfigHTTPImplToJson(
'url': instance.url,
};
_$VeilidConfigApplicationImpl _$$VeilidConfigApplicationImplFromJson(
_$_VeilidConfigApplication _$$_VeilidConfigApplicationFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigApplicationImpl(
_$_VeilidConfigApplication(
https: VeilidConfigHTTPS.fromJson(json['https']),
http: VeilidConfigHTTP.fromJson(json['http']),
);
Map<String, dynamic> _$$VeilidConfigApplicationImplToJson(
_$VeilidConfigApplicationImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigApplicationToJson(
_$_VeilidConfigApplication instance) =>
<String, dynamic>{
'https': instance.https.toJson(),
'http': instance.http.toJson(),
};
_$VeilidConfigUDPImpl _$$VeilidConfigUDPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigUDPImpl(
_$_VeilidConfigUDP _$$_VeilidConfigUDPFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigUDP(
enabled: json['enabled'] as bool,
socketPoolSize: json['socket_pool_size'] as int,
listenAddress: json['listen_address'] as String,
publicAddress: json['public_address'] as String?,
);
Map<String, dynamic> _$$VeilidConfigUDPImplToJson(
_$VeilidConfigUDPImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigUDPToJson(_$_VeilidConfigUDP instance) =>
<String, dynamic>{
'enabled': instance.enabled,
'socket_pool_size': instance.socketPoolSize,
@ -208,9 +198,8 @@ Map<String, dynamic> _$$VeilidConfigUDPImplToJson(
'public_address': instance.publicAddress,
};
_$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTCPImpl(
_$_VeilidConfigTCP _$$_VeilidConfigTCPFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigTCP(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: json['max_connections'] as int,
@ -218,8 +207,7 @@ _$VeilidConfigTCPImpl _$$VeilidConfigTCPImplFromJson(
publicAddress: json['public_address'] as String?,
);
Map<String, dynamic> _$$VeilidConfigTCPImplToJson(
_$VeilidConfigTCPImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigTCPToJson(_$_VeilidConfigTCP instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -228,8 +216,8 @@ Map<String, dynamic> _$$VeilidConfigTCPImplToJson(
'public_address': instance.publicAddress,
};
_$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) =>
_$VeilidConfigWSImpl(
_$_VeilidConfigWS _$$_VeilidConfigWSFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigWS(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: json['max_connections'] as int,
@ -238,8 +226,7 @@ _$VeilidConfigWSImpl _$$VeilidConfigWSImplFromJson(Map<String, dynamic> json) =>
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigWSImplToJson(
_$VeilidConfigWSImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigWSToJson(_$_VeilidConfigWS instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -249,9 +236,8 @@ Map<String, dynamic> _$$VeilidConfigWSImplToJson(
'url': instance.url,
};
_$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigWSSImpl(
_$_VeilidConfigWSS _$$_VeilidConfigWSSFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigWSS(
connect: json['connect'] as bool,
listen: json['listen'] as bool,
maxConnections: json['max_connections'] as int,
@ -260,8 +246,7 @@ _$VeilidConfigWSSImpl _$$VeilidConfigWSSImplFromJson(
url: json['url'] as String?,
);
Map<String, dynamic> _$$VeilidConfigWSSImplToJson(
_$VeilidConfigWSSImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigWSSToJson(_$_VeilidConfigWSS instance) =>
<String, dynamic>{
'connect': instance.connect,
'listen': instance.listen,
@ -271,17 +256,17 @@ Map<String, dynamic> _$$VeilidConfigWSSImplToJson(
'url': instance.url,
};
_$VeilidConfigProtocolImpl _$$VeilidConfigProtocolImplFromJson(
_$_VeilidConfigProtocol _$$_VeilidConfigProtocolFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigProtocolImpl(
_$_VeilidConfigProtocol(
udp: VeilidConfigUDP.fromJson(json['udp']),
tcp: VeilidConfigTCP.fromJson(json['tcp']),
ws: VeilidConfigWS.fromJson(json['ws']),
wss: VeilidConfigWSS.fromJson(json['wss']),
);
Map<String, dynamic> _$$VeilidConfigProtocolImplToJson(
_$VeilidConfigProtocolImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigProtocolToJson(
_$_VeilidConfigProtocol instance) =>
<String, dynamic>{
'udp': instance.udp.toJson(),
'tcp': instance.tcp.toJson(),
@ -289,25 +274,22 @@ Map<String, dynamic> _$$VeilidConfigProtocolImplToJson(
'wss': instance.wss.toJson(),
};
_$VeilidConfigTLSImpl _$$VeilidConfigTLSImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTLSImpl(
_$_VeilidConfigTLS _$$_VeilidConfigTLSFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigTLS(
certificatePath: json['certificate_path'] as String,
privateKeyPath: json['private_key_path'] as String,
connectionInitialTimeoutMs: json['connection_initial_timeout_ms'] as int,
);
Map<String, dynamic> _$$VeilidConfigTLSImplToJson(
_$VeilidConfigTLSImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigTLSToJson(_$_VeilidConfigTLS instance) =>
<String, dynamic>{
'certificate_path': instance.certificatePath,
'private_key_path': instance.privateKeyPath,
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
};
_$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigDHTImpl(
_$_VeilidConfigDHT _$$_VeilidConfigDHTFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigDHT(
resolveNodeTimeoutMs: json['resolve_node_timeout_ms'] as int,
resolveNodeCount: json['resolve_node_count'] as int,
resolveNodeFanout: json['resolve_node_fanout'] as int,
@ -330,10 +312,12 @@ _$VeilidConfigDHTImpl _$$VeilidConfigDHTImplFromJson(
remoteMaxSubkeyCacheMemoryMb:
json['remote_max_subkey_cache_memory_mb'] as int,
remoteMaxStorageSpaceMb: json['remote_max_storage_space_mb'] as int,
publicWatchLimit: json['public_watch_limit'] as int,
memberWatchLimit: json['member_watch_limit'] as int,
maxWatchExpirationMs: json['max_watch_expiration_ms'] as int,
);
Map<String, dynamic> _$$VeilidConfigDHTImplToJson(
_$VeilidConfigDHTImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigDHTToJson(_$_VeilidConfigDHT instance) =>
<String, dynamic>{
'resolve_node_timeout_ms': instance.resolveNodeTimeoutMs,
'resolve_node_count': instance.resolveNodeCount,
@ -356,11 +340,13 @@ Map<String, dynamic> _$$VeilidConfigDHTImplToJson(
'remote_max_subkey_cache_memory_mb':
instance.remoteMaxSubkeyCacheMemoryMb,
'remote_max_storage_space_mb': instance.remoteMaxStorageSpaceMb,
'public_watch_limit': instance.publicWatchLimit,
'member_watch_limit': instance.memberWatchLimit,
'max_watch_expiration_ms': instance.maxWatchExpirationMs,
};
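
On the wire the new fields surface as three snake_case keys, so any hand-written config being deserialized must now include them. An illustrative fragment:

// Illustrative fragment only; all pre-existing dht keys remain required.
final newDhtKeys = <String, dynamic>{
  'public_watch_limit': 32,
  'member_watch_limit': 8,
  'max_watch_expiration_ms': 600000,
};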
_$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigRPCImpl(
_$_VeilidConfigRPC _$$_VeilidConfigRPCFromJson(Map<String, dynamic> json) =>
_$_VeilidConfigRPC(
concurrency: json['concurrency'] as int,
queueSize: json['queue_size'] as int,
timeoutMs: json['timeout_ms'] as int,
@ -370,8 +356,7 @@ _$VeilidConfigRPCImpl _$$VeilidConfigRPCImplFromJson(
maxTimestampAheadMs: json['max_timestamp_ahead_ms'] as int?,
);
Map<String, dynamic> _$$VeilidConfigRPCImplToJson(
_$VeilidConfigRPCImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigRPCToJson(_$_VeilidConfigRPC instance) =>
<String, dynamic>{
'concurrency': instance.concurrency,
'queue_size': instance.queueSize,
@ -382,9 +367,9 @@ Map<String, dynamic> _$$VeilidConfigRPCImplToJson(
'max_timestamp_ahead_ms': instance.maxTimestampAheadMs,
};
_$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson(
_$_VeilidConfigRoutingTable _$$_VeilidConfigRoutingTableFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigRoutingTableImpl(
_$_VeilidConfigRoutingTable(
nodeId: (json['node_id'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson)
.toList(),
@ -400,8 +385,8 @@ _$VeilidConfigRoutingTableImpl _$$VeilidConfigRoutingTableImplFromJson(
limitAttachedWeak: json['limit_attached_weak'] as int,
);
Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson(
_$VeilidConfigRoutingTableImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigRoutingTableToJson(
_$_VeilidConfigRoutingTable instance) =>
<String, dynamic>{
'node_id': instance.nodeId.map((e) => e.toJson()).toList(),
'node_id_secret': instance.nodeIdSecret.map((e) => e.toJson()).toList(),
@ -413,9 +398,9 @@ Map<String, dynamic> _$$VeilidConfigRoutingTableImplToJson(
'limit_attached_weak': instance.limitAttachedWeak,
};
_$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson(
_$_VeilidConfigNetwork _$$_VeilidConfigNetworkFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigNetworkImpl(
_$_VeilidConfigNetwork(
connectionInitialTimeoutMs: json['connection_initial_timeout_ms'] as int,
connectionInactivityTimeoutMs:
json['connection_inactivity_timeout_ms'] as int,
@ -441,8 +426,8 @@ _$VeilidConfigNetworkImpl _$$VeilidConfigNetworkImplFromJson(
networkKeyPassword: json['network_key_password'] as String?,
);
Map<String, dynamic> _$$VeilidConfigNetworkImplToJson(
_$VeilidConfigNetworkImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigNetworkToJson(
_$_VeilidConfigNetwork instance) =>
<String, dynamic>{
'connection_initial_timeout_ms': instance.connectionInitialTimeoutMs,
'connection_inactivity_timeout_ms':
@ -468,37 +453,37 @@ Map<String, dynamic> _$$VeilidConfigNetworkImplToJson(
'network_key_password': instance.networkKeyPassword,
};
_$VeilidConfigTableStoreImpl _$$VeilidConfigTableStoreImplFromJson(
_$_VeilidConfigTableStore _$$_VeilidConfigTableStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigTableStoreImpl(
_$_VeilidConfigTableStore(
directory: json['directory'] as String,
delete: json['delete'] as bool,
);
Map<String, dynamic> _$$VeilidConfigTableStoreImplToJson(
_$VeilidConfigTableStoreImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigTableStoreToJson(
_$_VeilidConfigTableStore instance) =>
<String, dynamic>{
'directory': instance.directory,
'delete': instance.delete,
};
_$VeilidConfigBlockStoreImpl _$$VeilidConfigBlockStoreImplFromJson(
_$_VeilidConfigBlockStore _$$_VeilidConfigBlockStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigBlockStoreImpl(
_$_VeilidConfigBlockStore(
directory: json['directory'] as String,
delete: json['delete'] as bool,
);
Map<String, dynamic> _$$VeilidConfigBlockStoreImplToJson(
_$VeilidConfigBlockStoreImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigBlockStoreToJson(
_$_VeilidConfigBlockStore instance) =>
<String, dynamic>{
'directory': instance.directory,
'delete': instance.delete,
};
_$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson(
_$_VeilidConfigProtectedStore _$$_VeilidConfigProtectedStoreFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigProtectedStoreImpl(
_$_VeilidConfigProtectedStore(
allowInsecureFallback: json['allow_insecure_fallback'] as bool,
alwaysUseInsecureStorage: json['always_use_insecure_storage'] as bool,
directory: json['directory'] as String,
@ -509,8 +494,8 @@ _$VeilidConfigProtectedStoreImpl _$$VeilidConfigProtectedStoreImplFromJson(
json['new_device_encryption_key_password'] as String?,
);
Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson(
_$VeilidConfigProtectedStoreImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigProtectedStoreToJson(
_$_VeilidConfigProtectedStore instance) =>
<String, dynamic>{
'allow_insecure_fallback': instance.allowInsecureFallback,
'always_use_insecure_storage': instance.alwaysUseInsecureStorage,
@ -521,21 +506,21 @@ Map<String, dynamic> _$$VeilidConfigProtectedStoreImplToJson(
instance.newDeviceEncryptionKeyPassword,
};
_$VeilidConfigCapabilitiesImpl _$$VeilidConfigCapabilitiesImplFromJson(
_$_VeilidConfigCapabilities _$$_VeilidConfigCapabilitiesFromJson(
Map<String, dynamic> json) =>
_$VeilidConfigCapabilitiesImpl(
_$_VeilidConfigCapabilities(
disable:
(json['disable'] as List<dynamic>).map((e) => e as String).toList(),
);
Map<String, dynamic> _$$VeilidConfigCapabilitiesImplToJson(
_$VeilidConfigCapabilitiesImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigCapabilitiesToJson(
_$_VeilidConfigCapabilities instance) =>
<String, dynamic>{
'disable': instance.disable,
};
_$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) =>
_$VeilidConfigImpl(
_$_VeilidConfig _$$_VeilidConfigFromJson(Map<String, dynamic> json) =>
_$_VeilidConfig(
programName: json['program_name'] as String,
namespace: json['namespace'] as String,
capabilities: VeilidConfigCapabilities.fromJson(json['capabilities']),
@ -546,7 +531,7 @@ _$VeilidConfigImpl _$$VeilidConfigImplFromJson(Map<String, dynamic> json) =>
network: VeilidConfigNetwork.fromJson(json['network']),
);
Map<String, dynamic> _$$VeilidConfigImplToJson(_$VeilidConfigImpl instance) =>
Map<String, dynamic> _$$_VeilidConfigToJson(_$_VeilidConfig instance) =>
<String, dynamic>{
'program_name': instance.programName,
'namespace': instance.namespace,

View File

@ -671,8 +671,14 @@ class VeilidRoutingContextFFI extends VeilidRoutingContext {
}
@override
Future<Timestamp> watchDHTValues(TypedKey key, List<ValueSubkeyRange> subkeys,
Timestamp expiration, int count) async {
Future<Timestamp> watchDHTValues(TypedKey key,
{List<ValueSubkeyRange>? subkeys,
Timestamp? expiration,
int? count}) async {
subkeys ??= [];
expiration ??= Timestamp(value: BigInt.zero);
count ??= 0xFFFFFFFF;
_ctx.ensureValid();
final nativeKey = jsonEncode(key).toNativeUtf8();
final nativeSubkeys = jsonEncode(subkeys).toNativeUtf8();
@ -688,8 +694,10 @@ class VeilidRoutingContextFFI extends VeilidRoutingContext {
}
@override
Future<bool> cancelDHTWatch(
TypedKey key, List<ValueSubkeyRange> subkeys) async {
Future<bool> cancelDHTWatch(TypedKey key,
{List<ValueSubkeyRange>? subkeys}) async {
subkeys ??= [];
_ctx.ensureValid();
final nativeKey = jsonEncode(key).toNativeUtf8();
final nativeSubkeys = jsonEncode(subkeys).toNativeUtf8();
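
With the named optionals defaulted as above, a whole-record watch becomes a one-liner, and cancellation mirrors it. A hypothetical usage sketch, assuming `ctx` is an open VeilidRoutingContext and `recordKey` is a TypedKey for an open DHT record:

// Hypothetical usage of the new signatures; not part of this diff.
Future<void> demoWatch(VeilidRoutingContext ctx, TypedKey recordKey) async {
  // Omitted arguments use the defaults applied above: all subkeys,
  // the zero-expiration sentinel, and the maximum change count.
  await ctx.watchDHTValues(recordKey);

  // Or restrict the watch to the first four subkeys.
  await ctx.watchDHTValues(recordKey,
      subkeys: [ValueSubkeyRange(low: 0, high: 3)]);

  // Omitting subkeys cancels across everything that was watched.
  final active = await ctx.cancelDHTWatch(recordKey);
  print('cancelDHTWatch returned: $active'); // bool per the signature above
}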

View File

@ -185,8 +185,14 @@ class VeilidRoutingContextJS extends VeilidRoutingContext {
}
@override
Future<Timestamp> watchDHTValues(TypedKey key, List<ValueSubkeyRange> subkeys,
Timestamp expiration, int count) async {
Future<Timestamp> watchDHTValues(TypedKey key,
{List<ValueSubkeyRange>? subkeys,
Timestamp? expiration,
int? count}) async {
subkeys ??= [];
expiration ??= Timestamp(value: BigInt.zero);
count ??= 0xFFFFFFFF;
final id = _ctx.requireId();
final ts = await _wrapApiPromise<String>(js_util.callMethod(
wasm, 'routing_context_watch_dht_values', [
@ -200,7 +206,9 @@ class VeilidRoutingContextJS extends VeilidRoutingContext {
}
@override
Future<bool> cancelDHTWatch(TypedKey key, List<ValueSubkeyRange> subkeys) {
Future<bool> cancelDHTWatch(TypedKey key, {List<ValueSubkeyRange>? subkeys}) {
subkeys ??= [];
final id = _ctx.requireId();
return _wrapApiPromise(js_util.callMethod(
wasm,

File diff suppressed because it is too large

View File

@ -6,29 +6,29 @@ part of 'veilid_state.dart';
// JsonSerializableGenerator
// **************************************************************************
_$LatencyStatsImpl _$$LatencyStatsImplFromJson(Map<String, dynamic> json) =>
_$LatencyStatsImpl(
_$_LatencyStats _$$_LatencyStatsFromJson(Map<String, dynamic> json) =>
_$_LatencyStats(
fastest: TimestampDuration.fromJson(json['fastest']),
average: TimestampDuration.fromJson(json['average']),
slowest: TimestampDuration.fromJson(json['slowest']),
);
Map<String, dynamic> _$$LatencyStatsImplToJson(_$LatencyStatsImpl instance) =>
Map<String, dynamic> _$$_LatencyStatsToJson(_$_LatencyStats instance) =>
<String, dynamic>{
'fastest': instance.fastest.toJson(),
'average': instance.average.toJson(),
'slowest': instance.slowest.toJson(),
};
_$TransferStatsImpl _$$TransferStatsImplFromJson(Map<String, dynamic> json) =>
_$TransferStatsImpl(
_$_TransferStats _$$_TransferStatsFromJson(Map<String, dynamic> json) =>
_$_TransferStats(
total: BigInt.parse(json['total'] as String),
maximum: BigInt.parse(json['maximum'] as String),
average: BigInt.parse(json['average'] as String),
minimum: BigInt.parse(json['minimum'] as String),
);
Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) =>
Map<String, dynamic> _$$_TransferStatsToJson(_$_TransferStats instance) =>
<String, dynamic>{
'total': instance.total.toString(),
'maximum': instance.maximum.toString(),
@ -36,22 +36,21 @@ Map<String, dynamic> _$$TransferStatsImplToJson(_$TransferStatsImpl instance) =>
'minimum': instance.minimum.toString(),
};
_$TransferStatsDownUpImpl _$$TransferStatsDownUpImplFromJson(
_$_TransferStatsDownUp _$$_TransferStatsDownUpFromJson(
Map<String, dynamic> json) =>
_$TransferStatsDownUpImpl(
_$_TransferStatsDownUp(
down: TransferStats.fromJson(json['down']),
up: TransferStats.fromJson(json['up']),
);
Map<String, dynamic> _$$TransferStatsDownUpImplToJson(
_$TransferStatsDownUpImpl instance) =>
Map<String, dynamic> _$$_TransferStatsDownUpToJson(
_$_TransferStatsDownUp instance) =>
<String, dynamic>{
'down': instance.down.toJson(),
'up': instance.up.toJson(),
};
_$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
_$RPCStatsImpl(
_$_RPCStats _$$_RPCStatsFromJson(Map<String, dynamic> json) => _$_RPCStats(
messagesSent: json['messages_sent'] as int,
messagesRcvd: json['messages_rcvd'] as int,
questionsInFlight: json['questions_in_flight'] as int,
@ -68,7 +67,7 @@ _$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
failedToSend: json['failed_to_send'] as int,
);
Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
Map<String, dynamic> _$$_RPCStatsToJson(_$_RPCStats instance) =>
<String, dynamic>{
'messages_sent': instance.messagesSent,
'messages_rcvd': instance.messagesRcvd,
@ -80,8 +79,7 @@ Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
'failed_to_send': instance.failedToSend,
};
_$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
_$PeerStatsImpl(
_$_PeerStats _$$_PeerStatsFromJson(Map<String, dynamic> json) => _$_PeerStats(
timeAdded: Timestamp.fromJson(json['time_added']),
rpcStats: RPCStats.fromJson(json['rpc_stats']),
transfer: TransferStatsDownUp.fromJson(json['transfer']),
@ -90,7 +88,7 @@ _$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
: LatencyStats.fromJson(json['latency']),
);
Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
Map<String, dynamic> _$$_PeerStatsToJson(_$_PeerStats instance) =>
<String, dynamic>{
'time_added': instance.timeAdded.toJson(),
'rpc_stats': instance.rpcStats.toJson(),
@ -98,8 +96,8 @@ Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
'latency': instance.latency?.toJson(),
};
_$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) =>
_$PeerTableDataImpl(
_$_PeerTableData _$$_PeerTableDataFromJson(Map<String, dynamic> json) =>
_$_PeerTableData(
nodeIds: (json['node_ids'] as List<dynamic>)
.map(Typed<FixedEncodedString43>.fromJson)
.toList(),
@ -107,22 +105,21 @@ _$PeerTableDataImpl _$$PeerTableDataImplFromJson(Map<String, dynamic> json) =>
peerStats: PeerStats.fromJson(json['peer_stats']),
);
Map<String, dynamic> _$$PeerTableDataImplToJson(_$PeerTableDataImpl instance) =>
Map<String, dynamic> _$$_PeerTableDataToJson(_$_PeerTableData instance) =>
<String, dynamic>{
'node_ids': instance.nodeIds.map((e) => e.toJson()).toList(),
'peer_address': instance.peerAddress,
'peer_stats': instance.peerStats.toJson(),
};
_$VeilidLogImpl _$$VeilidLogImplFromJson(Map<String, dynamic> json) =>
_$VeilidLogImpl(
_$VeilidLog _$$VeilidLogFromJson(Map<String, dynamic> json) => _$VeilidLog(
logLevel: VeilidLogLevel.fromJson(json['log_level']),
message: json['message'] as String,
backtrace: json['backtrace'] as String?,
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidLogImplToJson(_$VeilidLogImpl instance) =>
Map<String, dynamic> _$$VeilidLogToJson(_$VeilidLog instance) =>
<String, dynamic>{
'log_level': instance.logLevel.toJson(),
'message': instance.message,
@ -130,9 +127,8 @@ Map<String, dynamic> _$$VeilidLogImplToJson(_$VeilidLogImpl instance) =>
'kind': instance.$type,
};
_$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson(
Map<String, dynamic> json) =>
_$VeilidAppMessageImpl(
_$VeilidAppMessage _$$VeilidAppMessageFromJson(Map<String, dynamic> json) =>
_$VeilidAppMessage(
message: const Uint8ListJsonConverter().fromJson(json['message']),
sender: json['sender'] == null
? null
@ -140,16 +136,15 @@ _$VeilidAppMessageImpl _$$VeilidAppMessageImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidAppMessageImplToJson(
_$VeilidAppMessageImpl instance) =>
Map<String, dynamic> _$$VeilidAppMessageToJson(_$VeilidAppMessage instance) =>
<String, dynamic>{
'message': const Uint8ListJsonConverter().toJson(instance.message),
'sender': instance.sender?.toJson(),
'kind': instance.$type,
};
_$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) =>
_$VeilidAppCallImpl(
_$VeilidAppCall _$$VeilidAppCallFromJson(Map<String, dynamic> json) =>
_$VeilidAppCall(
message: const Uint8ListJsonConverter().fromJson(json['message']),
callId: json['call_id'] as String,
sender: json['sender'] == null
@ -158,7 +153,7 @@ _$VeilidAppCallImpl _$$VeilidAppCallImplFromJson(Map<String, dynamic> json) =>
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) =>
Map<String, dynamic> _$$VeilidAppCallToJson(_$VeilidAppCall instance) =>
<String, dynamic>{
'message': const Uint8ListJsonConverter().toJson(instance.message),
'call_id': instance.callId,
@ -166,17 +161,17 @@ Map<String, dynamic> _$$VeilidAppCallImplToJson(_$VeilidAppCallImpl instance) =>
'kind': instance.$type,
};
_$VeilidUpdateAttachmentImpl _$$VeilidUpdateAttachmentImplFromJson(
_$VeilidUpdateAttachment _$$VeilidUpdateAttachmentFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateAttachmentImpl(
_$VeilidUpdateAttachment(
state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool,
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson(
_$VeilidUpdateAttachmentImpl instance) =>
Map<String, dynamic> _$$VeilidUpdateAttachmentToJson(
_$VeilidUpdateAttachment instance) =>
<String, dynamic>{
'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady,
@ -184,9 +179,9 @@ Map<String, dynamic> _$$VeilidUpdateAttachmentImplToJson(
'kind': instance.$type,
};
_$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson(
_$VeilidUpdateNetwork _$$VeilidUpdateNetworkFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateNetworkImpl(
_$VeilidUpdateNetwork(
started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String),
@ -195,8 +190,8 @@ _$VeilidUpdateNetworkImpl _$$VeilidUpdateNetworkImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson(
_$VeilidUpdateNetworkImpl instance) =>
Map<String, dynamic> _$$VeilidUpdateNetworkToJson(
_$VeilidUpdateNetwork instance) =>
<String, dynamic>{
'started': instance.started,
'bps_down': instance.bpsDown.toString(),
@ -205,23 +200,22 @@ Map<String, dynamic> _$$VeilidUpdateNetworkImplToJson(
'kind': instance.$type,
};
_$VeilidUpdateConfigImpl _$$VeilidUpdateConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateConfigImpl(
_$VeilidUpdateConfig _$$VeilidUpdateConfigFromJson(Map<String, dynamic> json) =>
_$VeilidUpdateConfig(
config: VeilidConfig.fromJson(json['config']),
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateConfigImplToJson(
_$VeilidUpdateConfigImpl instance) =>
Map<String, dynamic> _$$VeilidUpdateConfigToJson(
_$VeilidUpdateConfig instance) =>
<String, dynamic>{
'config': instance.config.toJson(),
'kind': instance.$type,
};
_$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson(
_$VeilidUpdateRouteChange _$$VeilidUpdateRouteChangeFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateRouteChangeImpl(
_$VeilidUpdateRouteChange(
deadRoutes: (json['dead_routes'] as List<dynamic>)
.map((e) => e as String)
.toList(),
@ -231,17 +225,17 @@ _$VeilidUpdateRouteChangeImpl _$$VeilidUpdateRouteChangeImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateRouteChangeImplToJson(
_$VeilidUpdateRouteChangeImpl instance) =>
Map<String, dynamic> _$$VeilidUpdateRouteChangeToJson(
_$VeilidUpdateRouteChange instance) =>
<String, dynamic>{
'dead_routes': instance.deadRoutes,
'dead_remote_routes': instance.deadRemoteRoutes,
'kind': instance.$type,
};
_$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson(
_$VeilidUpdateValueChange _$$VeilidUpdateValueChangeFromJson(
Map<String, dynamic> json) =>
_$VeilidUpdateValueChangeImpl(
_$VeilidUpdateValueChange(
key: Typed<FixedEncodedString43>.fromJson(json['key']),
subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson)
@ -251,8 +245,8 @@ _$VeilidUpdateValueChangeImpl _$$VeilidUpdateValueChangeImplFromJson(
$type: json['kind'] as String?,
);
Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson(
_$VeilidUpdateValueChangeImpl instance) =>
Map<String, dynamic> _$$VeilidUpdateValueChangeToJson(
_$VeilidUpdateValueChange instance) =>
<String, dynamic>{
'key': instance.key.toJson(),
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
@ -261,25 +255,25 @@ Map<String, dynamic> _$$VeilidUpdateValueChangeImplToJson(
'kind': instance.$type,
};
_$VeilidStateAttachmentImpl _$$VeilidStateAttachmentImplFromJson(
_$_VeilidStateAttachment _$$_VeilidStateAttachmentFromJson(
Map<String, dynamic> json) =>
_$VeilidStateAttachmentImpl(
_$_VeilidStateAttachment(
state: AttachmentState.fromJson(json['state']),
publicInternetReady: json['public_internet_ready'] as bool,
localNetworkReady: json['local_network_ready'] as bool,
);
Map<String, dynamic> _$$VeilidStateAttachmentImplToJson(
_$VeilidStateAttachmentImpl instance) =>
Map<String, dynamic> _$$_VeilidStateAttachmentToJson(
_$_VeilidStateAttachment instance) =>
<String, dynamic>{
'state': instance.state.toJson(),
'public_internet_ready': instance.publicInternetReady,
'local_network_ready': instance.localNetworkReady,
};
_$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson(
_$_VeilidStateNetwork _$$_VeilidStateNetworkFromJson(
Map<String, dynamic> json) =>
_$VeilidStateNetworkImpl(
_$_VeilidStateNetwork(
started: json['started'] as bool,
bpsDown: BigInt.parse(json['bps_down'] as String),
bpsUp: BigInt.parse(json['bps_up'] as String),
@ -287,8 +281,8 @@ _$VeilidStateNetworkImpl _$$VeilidStateNetworkImplFromJson(
(json['peers'] as List<dynamic>).map(PeerTableData.fromJson).toList(),
);
Map<String, dynamic> _$$VeilidStateNetworkImplToJson(
_$VeilidStateNetworkImpl instance) =>
Map<String, dynamic> _$$_VeilidStateNetworkToJson(
_$_VeilidStateNetwork instance) =>
<String, dynamic>{
'started': instance.started,
'bps_down': instance.bpsDown.toString(),
@ -296,26 +290,25 @@ Map<String, dynamic> _$$VeilidStateNetworkImplToJson(
'peers': instance.peers.map((e) => e.toJson()).toList(),
};
_$VeilidStateConfigImpl _$$VeilidStateConfigImplFromJson(
Map<String, dynamic> json) =>
_$VeilidStateConfigImpl(
_$_VeilidStateConfig _$$_VeilidStateConfigFromJson(Map<String, dynamic> json) =>
_$_VeilidStateConfig(
config: VeilidConfig.fromJson(json['config']),
);
Map<String, dynamic> _$$VeilidStateConfigImplToJson(
_$VeilidStateConfigImpl instance) =>
Map<String, dynamic> _$$_VeilidStateConfigToJson(
_$_VeilidStateConfig instance) =>
<String, dynamic>{
'config': instance.config.toJson(),
};
_$VeilidStateImpl _$$VeilidStateImplFromJson(Map<String, dynamic> json) =>
_$VeilidStateImpl(
_$_VeilidState _$$_VeilidStateFromJson(Map<String, dynamic> json) =>
_$_VeilidState(
attachment: VeilidStateAttachment.fromJson(json['attachment']),
network: VeilidStateNetwork.fromJson(json['network']),
config: VeilidStateConfig.fromJson(json['config']),
);
Map<String, dynamic> _$$VeilidStateImplToJson(_$VeilidStateImpl instance) =>
Map<String, dynamic> _$$_VeilidStateToJson(_$_VeilidState instance) =>
<String, dynamic>{
'attachment': instance.attachment.toJson(),
'network': instance.network.toJson(),

View File

@ -31,17 +31,17 @@ debug-load = ["dep:ctor", "dep:libc-print", "dep:android_log-sys", "dep:oslog"]
[dependencies]
veilid-core = { path = "../../veilid-core", default-features = false }
tracing = { version = "0.1.37", features = ["log", "attributes"] }
tracing-subscriber = "0.3.17"
tracing = { version = "0.1.40", features = ["log", "attributes"] }
tracing-subscriber = "0.3.18"
parking_lot = "0.12.1"
backtrace = "0.3.69"
serde_json = "1.0.107"
serde = "1.0.188"
futures-util = { version = "0.3.28", default-features = false, features = [
serde_json = "1.0.108"
serde = "1.0.193"
futures-util = { version = "0.3.29", default-features = false, features = [
"alloc",
] }
cfg-if = "1.0.0"
data-encoding = { version = "2.4.0" }
data-encoding = { version = "2.5.0" }
# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android
@ -51,14 +51,14 @@ opentelemetry = { version = "0.20" }
opentelemetry-otlp = { version = "0.13" }
opentelemetry-semantic-conventions = "0.12"
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
tokio = { version = "1.32.0", features = ["full"], optional = true }
tokio = { version = "1.35.0", features = ["full"], optional = true }
tokio-stream = { version = "0.1.14", features = ["net"], optional = true }
tokio-util = { version = "0.7.8", features = ["compat"], optional = true }
tokio-util = { version = "0.7.10", features = ["compat"], optional = true }
allo-isolate = "0.1.20"
ffi-support = "0.4.4"
lazy_static = "1.4.0"
hostname = "0.3.1"
ctor = { version = "0.2.5", optional = true }
ctor = { version = "0.2.6", optional = true }
libc-print = { version = "0.1.22", optional = true }

View File

@ -1,5 +1,16 @@
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
[[package]]
name = "appdirs"
version = "1.4.4"
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
python-versions = "*"
files = [
{file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
{file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
]
[[package]]
name = "attrs"
version = "23.1.0"
@ -42,94 +53,74 @@ files = [
[[package]]
name = "jsonschema"
version = "4.17.3"
version = "4.20.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
{file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
{file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"},
{file = "jsonschema-4.20.0.tar.gz", hash = "sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa"},
]
[package.dependencies]
attrs = ">=17.4.0"
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
attrs = ">=22.2.0"
jsonschema-specifications = ">=2023.03.6"
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
name = "jsonschema-specifications"
version = "2023.11.2"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.8"
files = [
{file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"},
{file = "jsonschema_specifications-2023.11.2.tar.gz", hash = "sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8"},
]
[package.dependencies]
referencing = ">=0.31.0"
[[package]]
name = "packaging"
version = "23.1"
version = "23.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
{file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
{file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
{file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
{file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
]
[[package]]
name = "pluggy"
version = "1.0.0"
version = "1.3.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.6"
python-versions = ">=3.8"
files = [
{file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
{file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
{file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
{file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pyrsistent"
version = "0.19.3"
description = "Persistent/Functional/Immutable data structures"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"},
{file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"},
{file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"},
{file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"},
{file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"},
{file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"},
{file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"},
{file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"},
{file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"},
{file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"},
{file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"},
{file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"},
{file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"},
{file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"},
{file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"},
{file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"},
{file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"},
{file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"},
{file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"},
{file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"},
{file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"},
{file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"},
{file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"},
{file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"},
{file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"},
{file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"},
{file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"},
]
[[package]]
name = "pytest"
version = "7.3.2"
version = "7.4.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"},
{file = "pytest-7.3.2.tar.gz", hash = "sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"},
{file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"},
{file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"},
]
[package.dependencies]
@ -143,13 +134,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no
[[package]]
name = "pytest-asyncio"
version = "0.21.0"
version = "0.21.1"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-asyncio-0.21.0.tar.gz", hash = "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b"},
{file = "pytest_asyncio-0.21.0-py3-none-any.whl", hash = "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"},
{file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"},
{file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"},
]
[package.dependencies]
@ -159,7 +150,130 @@ pytest = ">=7.0.0"
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "referencing"
version = "0.32.0"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"},
{file = "referencing-0.32.0.tar.gz", hash = "sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161"},
]
[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
[[package]]
name = "rpds-py"
version = "0.13.2"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
{file = "rpds_py-0.13.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1ceebd0ae4f3e9b2b6b553b51971921853ae4eebf3f54086be0565d59291e53d"},
{file = "rpds_py-0.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:46e1ed994a0920f350a4547a38471217eb86f57377e9314fbaaa329b71b7dfe3"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee353bb51f648924926ed05e0122b6a0b1ae709396a80eb583449d5d477fcdf7"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:530190eb0cd778363bbb7596612ded0bb9fef662daa98e9d92a0419ab27ae914"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d311e44dd16d2434d5506d57ef4d7036544fc3c25c14b6992ef41f541b10fb"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e72f750048b32d39e87fc85c225c50b2a6715034848dbb196bf3348aa761fa1"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db09b98c7540df69d4b47218da3fbd7cb466db0fb932e971c321f1c76f155266"},
{file = "rpds_py-0.13.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ac26f50736324beb0282c819668328d53fc38543fa61eeea2c32ea8ea6eab8d"},
{file = "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12ecf89bd54734c3c2c79898ae2021dca42750c7bcfb67f8fb3315453738ac8f"},
{file = "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a44c8440183b43167fd1a0819e8356692bf5db1ad14ce140dbd40a1485f2dea"},
{file = "rpds_py-0.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcef4f2d3dc603150421de85c916da19471f24d838c3c62a4f04c1eb511642c1"},
{file = "rpds_py-0.13.2-cp310-none-win32.whl", hash = "sha256:ee6faebb265e28920a6f23a7d4c362414b3f4bb30607141d718b991669e49ddc"},
{file = "rpds_py-0.13.2-cp310-none-win_amd64.whl", hash = "sha256:ac96d67b37f28e4b6ecf507c3405f52a40658c0a806dffde624a8fcb0314d5fd"},
{file = "rpds_py-0.13.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:b5f6328e8e2ae8238fc767703ab7b95785521c42bb2b8790984e3477d7fa71ad"},
{file = "rpds_py-0.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:729408136ef8d45a28ee9a7411917c9e3459cf266c7e23c2f7d4bb8ef9e0da42"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65cfed9c807c27dee76407e8bb29e6f4e391e436774bcc769a037ff25ad8646e"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aefbdc934115d2f9278f153952003ac52cd2650e7313750390b334518c589568"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48db29bd47814671afdd76c7652aefacc25cf96aad6daefa82d738ee87461e2"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c55d7f2d817183d43220738270efd3ce4e7a7b7cbdaefa6d551ed3d6ed89190"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6aadae3042f8e6db3376d9e91f194c606c9a45273c170621d46128f35aef7cd0"},
{file = "rpds_py-0.13.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5feae2f9aa7270e2c071f488fab256d768e88e01b958f123a690f1cc3061a09c"},
{file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:51967a67ea0d7b9b5cd86036878e2d82c0b6183616961c26d825b8c994d4f2c8"},
{file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d0c10d803549427f427085ed7aebc39832f6e818a011dcd8785e9c6a1ba9b3e"},
{file = "rpds_py-0.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:603d5868f7419081d616dab7ac3cfa285296735e7350f7b1e4f548f6f953ee7d"},
{file = "rpds_py-0.13.2-cp311-none-win32.whl", hash = "sha256:b8996ffb60c69f677245f5abdbcc623e9442bcc91ed81b6cd6187129ad1fa3e7"},
{file = "rpds_py-0.13.2-cp311-none-win_amd64.whl", hash = "sha256:5379e49d7e80dca9811b36894493d1c1ecb4c57de05c36f5d0dd09982af20211"},
{file = "rpds_py-0.13.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8a776a29b77fe0cc28fedfd87277b0d0f7aa930174b7e504d764e0b43a05f381"},
{file = "rpds_py-0.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a1472956c5bcc49fb0252b965239bffe801acc9394f8b7c1014ae9258e4572b"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f252dfb4852a527987a9156cbcae3022a30f86c9d26f4f17b8c967d7580d65d2"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f0d320e70b6b2300ff6029e234e79fe44e9dbbfc7b98597ba28e054bd6606a57"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ade2ccb937060c299ab0dfb2dea3d2ddf7e098ed63ee3d651ebfc2c8d1e8632a"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9d121be0217787a7d59a5c6195b0842d3f701007333426e5154bf72346aa658"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fa6bd071ec6d90f6e7baa66ae25820d57a8ab1b0a3c6d3edf1834d4b26fafa2"},
{file = "rpds_py-0.13.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c918621ee0a3d1fe61c313f2489464f2ae3d13633e60f520a8002a5e910982ee"},
{file = "rpds_py-0.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:25b28b3d33ec0a78e944aaaed7e5e2a94ac811bcd68b557ca48a0c30f87497d2"},
{file = "rpds_py-0.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:31e220a040b89a01505128c2f8a59ee74732f666439a03e65ccbf3824cdddae7"},
{file = "rpds_py-0.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:15253fff410873ebf3cfba1cc686a37711efcd9b8cb30ea21bb14a973e393f60"},
{file = "rpds_py-0.13.2-cp312-none-win32.whl", hash = "sha256:b981a370f8f41c4024c170b42fbe9e691ae2dbc19d1d99151a69e2c84a0d194d"},
{file = "rpds_py-0.13.2-cp312-none-win_amd64.whl", hash = "sha256:4c4e314d36d4f31236a545696a480aa04ea170a0b021e9a59ab1ed94d4c3ef27"},
{file = "rpds_py-0.13.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:80e5acb81cb49fd9f2d5c08f8b74ffff14ee73b10ca88297ab4619e946bcb1e1"},
{file = "rpds_py-0.13.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:efe093acc43e869348f6f2224df7f452eab63a2c60a6c6cd6b50fd35c4e075ba"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c2a61c0e4811012b0ba9f6cdcb4437865df5d29eab5d6018ba13cee1c3064a0"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:751758d9dd04d548ec679224cc00e3591f5ebf1ff159ed0d4aba6a0746352452"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ba8858933f0c1a979781272a5f65646fca8c18c93c99c6ddb5513ad96fa54b1"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bfdfbe6a36bc3059fff845d64c42f2644cf875c65f5005db54f90cdfdf1df815"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0379c1935c44053c98826bc99ac95f3a5355675a297ac9ce0dfad0ce2d50ca"},
{file = "rpds_py-0.13.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5593855b5b2b73dd8413c3fdfa5d95b99d657658f947ba2c4318591e745d083"},
{file = "rpds_py-0.13.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2a7bef6977043673750a88da064fd513f89505111014b4e00fbdd13329cd4e9a"},
{file = "rpds_py-0.13.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:3ab96754d23372009638a402a1ed12a27711598dd49d8316a22597141962fe66"},
{file = "rpds_py-0.13.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:e06cfea0ece444571d24c18ed465bc93afb8c8d8d74422eb7026662f3d3f779b"},
{file = "rpds_py-0.13.2-cp38-none-win32.whl", hash = "sha256:5493569f861fb7b05af6d048d00d773c6162415ae521b7010197c98810a14cab"},
{file = "rpds_py-0.13.2-cp38-none-win_amd64.whl", hash = "sha256:b07501b720cf060c5856f7b5626e75b8e353b5f98b9b354a21eb4bfa47e421b1"},
{file = "rpds_py-0.13.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:881df98f0a8404d32b6de0fd33e91c1b90ed1516a80d4d6dc69d414b8850474c"},
{file = "rpds_py-0.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d79c159adea0f1f4617f54aa156568ac69968f9ef4d1e5fefffc0a180830308e"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38d4f822ee2f338febcc85aaa2547eb5ba31ba6ff68d10b8ec988929d23bb6b4"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5d75d6d220d55cdced2f32cc22f599475dbe881229aeddba6c79c2e9df35a2b3"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d97e9ae94fb96df1ee3cb09ca376c34e8a122f36927230f4c8a97f469994bff"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67a429520e97621a763cf9b3ba27574779c4e96e49a27ff8a1aa99ee70beb28a"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:188435794405c7f0573311747c85a96b63c954a5f2111b1df8018979eca0f2f0"},
{file = "rpds_py-0.13.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:38f9bf2ad754b4a45b8210a6c732fe876b8a14e14d5992a8c4b7c1ef78740f53"},
{file = "rpds_py-0.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a6ba2cb7d676e9415b9e9ac7e2aae401dc1b1e666943d1f7bc66223d3d73467b"},
{file = "rpds_py-0.13.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:eaffbd8814bb1b5dc3ea156a4c5928081ba50419f9175f4fc95269e040eff8f0"},
{file = "rpds_py-0.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a4c1058cdae6237d97af272b326e5f78ee7ee3bbffa6b24b09db4d828810468"},
{file = "rpds_py-0.13.2-cp39-none-win32.whl", hash = "sha256:b5267feb19070bef34b8dea27e2b504ebd9d31748e3ecacb3a4101da6fcb255c"},
{file = "rpds_py-0.13.2-cp39-none-win_amd64.whl", hash = "sha256:ddf23960cb42b69bce13045d5bc66f18c7d53774c66c13f24cf1b9c144ba3141"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:97163a1ab265a1073a6372eca9f4eeb9f8c6327457a0b22ddfc4a17dcd613e74"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:25ea41635d22b2eb6326f58e608550e55d01df51b8a580ea7e75396bafbb28e9"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d59d4d451ba77f08cb4cd9268dec07be5bc65f73666302dbb5061989b17198"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7c564c58cf8f248fe859a4f0fe501b050663f3d7fbc342172f259124fb59933"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61dbc1e01dc0c5875da2f7ae36d6e918dc1b8d2ce04e871793976594aad8a57a"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdb82eb60d31b0c033a8e8ee9f3fc7dfbaa042211131c29da29aea8531b4f18f"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d204957169f0b3511fb95395a9da7d4490fb361763a9f8b32b345a7fe119cb45"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c45008ca79bad237cbc03c72bc5205e8c6f66403773929b1b50f7d84ef9e4d07"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:79bf58c08f0756adba691d480b5a20e4ad23f33e1ae121584cf3a21717c36dfa"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e86593bf8637659e6a6ed58854b6c87ec4e9e45ee8a4adfd936831cef55c2d21"},
{file = "rpds_py-0.13.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d329896c40d9e1e5c7715c98529e4a188a1f2df51212fd65102b32465612b5dc"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4a5375c5fff13f209527cd886dc75394f040c7d1ecad0a2cb0627f13ebe78a12"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:06d218e4464d31301e943b65b2c6919318ea6f69703a351961e1baaf60347276"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f41d32a2ddc5a94df4b829b395916a4b7f103350fa76ba6de625fcb9e773ac"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6bc568b05e02cd612be53900c88aaa55012e744930ba2eeb56279db4c6676eb3"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d94d78418203904730585efa71002286ac4c8ac0689d0eb61e3c465f9e608ff"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bed0252c85e21cf73d2d033643c945b460d6a02fc4a7d644e3b2d6f5f2956c64"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244e173bb6d8f3b2f0c4d7370a1aa341f35da3e57ffd1798e5b2917b91731fd3"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7f55cd9cf1564b7b03f238e4c017ca4794c05b01a783e9291065cb2858d86ce4"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:f03a1b3a4c03e3e0161642ac5367f08479ab29972ea0ffcd4fa18f729cd2be0a"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:f5f4424cb87a20b016bfdc157ff48757b89d2cc426256961643d443c6c277007"},
{file = "rpds_py-0.13.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c82bbf7e03748417c3a88c1b0b291288ce3e4887a795a3addaa7a1cfd9e7153e"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c0095b8aa3e432e32d372e9a7737e65b58d5ed23b9620fea7cb81f17672f1fa1"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4c2d26aa03d877c9730bf005621c92da263523a1e99247590abbbe252ccb7824"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96f2975fb14f39c5fe75203f33dd3010fe37d1c4e33177feef1107b5ced750e3"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4dcc5ee1d0275cb78d443fdebd0241e58772a354a6d518b1d7af1580bbd2c4e8"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61d42d2b08430854485135504f672c14d4fc644dd243a9c17e7c4e0faf5ed07e"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d3a61e928feddc458a55110f42f626a2a20bea942ccedb6fb4cee70b4830ed41"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7de12b69d95072394998c622cfd7e8cea8f560db5fca6a62a148f902a1029f8b"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87a90f5545fd61f6964e65eebde4dc3fa8660bb7d87adb01d4cf17e0a2b484ad"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:9c95a1a290f9acf7a8f2ebbdd183e99215d491beea52d61aa2a7a7d2c618ddc6"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:35f53c76a712e323c779ca39b9a81b13f219a8e3bc15f106ed1e1462d56fcfe9"},
{file = "rpds_py-0.13.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:96fb0899bb2ab353f42e5374c8f0789f54e0a94ef2f02b9ac7149c56622eaf31"},
{file = "rpds_py-0.13.2.tar.gz", hash = "sha256:f8eae66a1304de7368932b42d801c67969fd090ddb1a7a24f27b435ed4bed68f"},
]
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "03a349f63b3d28e64191b6dd845333914827806332b52f5c52ccbd2863c93b4b"
content-hash = "a7ed29c103fe92e0aff96ea5bb601e6e10e651c5c9806d87d2b05a675edaf2ec"

View File

@ -15,6 +15,7 @@ jsonschema = "^4.17.3"
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.2"
pytest-asyncio = "^0.21.0"
appdirs = "^1.4.4"
[build-system]
requires = ["poetry-core"]

View File

@ -1,5 +1,8 @@
import appdirs
import errno
import os
import socket
import sys
import re
from collections.abc import Callable
from functools import cache
@ -20,15 +23,46 @@ class VeilidTestConnectionError(Exception):
@cache
def server_info() -> tuple[str, int]:
"""Return the hostname and port of the test server."""
VEILID_SERVER = os.getenv("VEILID_SERVER")
if VEILID_SERVER is None:
VEILID_SERVER_NETWORK = os.getenv("VEILID_SERVER_NETWORK")
if VEILID_SERVER_NETWORK is None:
return "localhost", 5959
hostname, *rest = VEILID_SERVER.split(":")
hostname, *rest = VEILID_SERVER_NETWORK.split(":")
if rest:
return hostname, int(rest[0])
return hostname, 5959
def ipc_path_exists(path: str) -> bool:
"""Determine if an IPC socket exists in a platform independent way."""
if os.name == 'nt':
if not path.upper().startswith("\\\\.\\PIPE\\"):
return False
return path[9:] in os.listdir("\\\\.\\PIPE")
else:
return os.path.exists(path)
@cache
def ipc_info() -> str:
"""Return the path of the ipc socket of the test server."""
VEILID_SERVER_IPC = os.getenv("VEILID_SERVER_IPC")
if VEILID_SERVER_IPC is not None:
return VEILID_SERVER_IPC
if os.name == 'nt':
return '\\\\.\\PIPE\\veilid-server\\0'
ipc_0_path = "/var/db/veilid-server/ipc/0"
if os.path.exists(ipc_0_path):
return ipc_0_path
# hack to deal with rust's 'directories' crate case-inconsistency
if sys.platform.startswith('darwin'):
data_dir = appdirs.user_data_dir("Veilid","Veilid")
else:
data_dir = appdirs.user_data_dir("veilid","veilid")
ipc_0_path = os.path.join(data_dir, "ipc", "0")
return ipc_0_path
async def api_connector(callback: Callable) -> _JsonVeilidAPI:
"""Return an API connection if possible.
@ -37,9 +71,14 @@ async def api_connector(callback: Callable) -> _JsonVeilidAPI:
server's socket, raise an easy-to-catch VeilidTestConnectionError.
"""
ipc_path = ipc_info()
hostname, port = server_info()
try:
return await veilid.json_api_connect(hostname, port, callback)
if ipc_path_exists(ipc_path):
return await veilid.json_api_connect_ipc(ipc_path, callback)
else:
return await veilid.json_api_connect(hostname, port, callback)
except OSError as exc:
# This is a little goofy. The underlying Python library handles
# connection errors in 2 ways, depending on how many connections
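
As a usage note for the helpers above: server_info() and ipc_info() read their environment overrides only once (both are wrapped in functools.cache), so a test run must set these variables before the first connection attempt. A minimal sketch with placeholder values:

import os

# Placeholder values; set these before server_info()/ipc_info() run for
# the first time, since both helpers cache their results.
os.environ["VEILID_SERVER_NETWORK"] = "127.0.0.1:5959"      # "host" or "host:port"
os.environ["VEILID_SERVER_IPC"] = "/tmp/veilid-test/ipc/0"  # explicit IPC socket path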

View File

@ -166,8 +166,8 @@ async def test_routing_context_app_message_loopback_big_packets():
# import it as a remote route as well so we can send to it
prr = await api.import_remote_private_route(blob)
# do this test 1000 times
for _ in range(1000):
# do this test 100 times
for _ in range(100):
# send a random sized random app message to our own private route
message = random.randbytes(random.randint(0, 32768))
await rc.app_message(prr, message)
@ -230,7 +230,7 @@ async def test_routing_context_app_call_loopback_big_packets():
# import it as a remote route as well so we can send to it
prr = await api.import_remote_private_route(blob)
# do this test 10 times
for _ in range(10):
# send a random sized random app message to our own private route

View File

@ -110,7 +110,9 @@ class VeilidConfigDHT(ConfigBase):
remote_max_records: int
remote_max_subkey_cache_memory_mb: int
remote_max_storage_space_mb: int
public_watch_limit: int
member_watch_limit: int
max_watch_expiration_ms: int
@dataclass
class VeilidConfigTLS(ConfigBase):

View File

@ -1,6 +1,8 @@
import asyncio
import importlib.resources as importlib_resources
import json
import os
import socket
from typing import Awaitable, Callable, Optional, Self
from jsonschema import exceptions, validators
@ -149,6 +151,33 @@ class _JsonVeilidAPI(VeilidAPI):
)
return veilid_api
@classmethod
async def connect_ipc(
cls, ipc_path: str, update_callback: Callable[[VeilidUpdate], Awaitable]
) -> Self:
if os.name=='nt':
async def open_windows_pipe(path=None, *,
limit=65536, **kwds):
"""Similar to `open_unix_connection` but works with Windows Named Pipes."""
loop = asyncio.events.get_running_loop()
reader = asyncio.StreamReader(limit=limit, loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.create_pipe_connection(
lambda: protocol, path, **kwds)
writer = asyncio.StreamWriter(transport, protocol, reader, loop)
return reader, writer
reader, writer = await open_windows_pipe(ipc_path)
else:
reader, writer = await asyncio.open_unix_connection(ipc_path)
veilid_api = cls(reader, writer, update_callback)
veilid_api.handle_recv_messages_task = asyncio.create_task(
veilid_api.handle_recv_messages(), name="JsonVeilidAPI.handle_recv_messages"
)
return veilid_api
async def handle_recv_message_response(self, j: dict):
id = j["id"]
await self.lock.acquire()
@ -1173,3 +1202,8 @@ async def json_api_connect(
host: str, port: int, update_callback: Callable[[VeilidUpdate], Awaitable]
) -> _JsonVeilidAPI:
return await _JsonVeilidAPI.connect(host, port, update_callback)
async def json_api_connect_ipc(
ipc_path: str, update_callback: Callable[[VeilidUpdate], Awaitable]
) -> _JsonVeilidAPI:
return await _JsonVeilidAPI.connect_ipc(ipc_path, update_callback)
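
A minimal usage sketch of the new IPC entry point; the pipe path, the no-op update callback, and the get_state() call are illustrative assumptions, not part of this change:

import asyncio
import veilid

async def on_update(update: veilid.VeilidUpdate):
    # Asynchronous updates (logs, app messages, value changes) arrive here.
    pass

async def main():
    # Connect over the local IPC socket instead of TCP.
    api = await veilid.json_api_connect_ipc("/var/db/veilid-server/ipc/0", on_update)
    state = await api.get_state()
    print(state.network.started)

asyncio.run(main())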

View File

@ -2588,9 +2588,21 @@
"subkeys": {
"type": "array",
"items": {
"type": "integer",
"format": "uint32",
"minimum": 0.0
"type": "array",
"items": [
{
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
{
"type": "integer",
"format": "uint32",
"minimum": 0.0
}
],
"maxItems": 2,
"minItems": 2
}
},
"value": {
@ -3362,8 +3374,11 @@
"local_max_subkey_cache_memory_mb",
"local_subkey_cache_size",
"max_find_node_count",
"max_watch_expiration_ms",
"member_watch_limit",
"min_peer_count",
"min_peer_refresh_time_ms",
"public_watch_limit",
"remote_max_records",
"remote_max_storage_space_mb",
"remote_max_subkey_cache_memory_mb",
@ -3407,6 +3422,16 @@
"format": "uint32",
"minimum": 0.0
},
"max_watch_expiration_ms": {
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"member_watch_limit": {
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"min_peer_count": {
"type": "integer",
"format": "uint32",
@ -3417,6 +3442,11 @@
"format": "uint32",
"minimum": 0.0
},
"public_watch_limit": {
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"remote_max_records": {
"type": "integer",
"format": "uint32",

View File

@ -59,6 +59,7 @@ class Capability(StrEnum):
CAP_RELAY = "RLAY"
CAP_VALIDATE_DIAL_INFO = "DIAL"
CAP_DHT = "DHTV"
CAP_DHT_WATCH = "DHTW"
CAP_APPMESSAGE = "APPM"
CAP_BLOCKSTORE = "BLOC"

View File

@ -14,15 +14,19 @@ name = "veilid-server"
path = "src/main.rs"
[features]
default = ["rt-tokio", "veilid-core/default"]
default = ["rt-tokio", "veilid-core/default", "otlp-tonic"]
default-async-std = ["rt-async-std", "veilid-core/default-async-std"]
crypto-test = ["rt-tokio", "veilid-core/crypto-test"]
crypto-test-none = ["rt-tokio", "veilid-core/crypto-test-none"]
otlp-tonic = [ "opentelemetry-otlp/grpc-tonic", "opentelemetry-otlp/trace" ]
otlp-grpc = [ "opentelemetry-otlp/grpc-sys", "opentelemetry-otlp/trace" ]
rt-async-std = [
"veilid-core/rt-async-std",
"async-std",
"opentelemetry/rt-async-std",
"opentelemetry-otlp/grpc-sys",
]
rt-tokio = [
"veilid-core/rt-tokio",
@ -35,6 +39,7 @@ rt-tokio = [
tracking = ["veilid-core/tracking"]
network-result-extra = ["veilid-core/network-result-extra"]
verbose-tracing = ["veilid-core/verbose-tracing"]
debug-json-api = []
[dependencies]
veilid-core = { path = "../veilid-core", default-features = false }
@ -44,13 +49,13 @@ tracing-appender = "^0"
tracing-opentelemetry = "0.21"
# Buggy: tracing-error = "^0"
opentelemetry = { version = "0.20" }
opentelemetry-otlp = { version = "0.13" }
opentelemetry-otlp = { version = "0.13", default-features = false, optional=true }
opentelemetry-semantic-conventions = "0.12"
async-std = { version = "^1", features = ["unstable"], optional = true }
tokio = { version = "^1", features = ["full", "tracing"], optional = true }
tokio = { version = "1.35.0", features = ["full", "tracing"], optional = true }
tokio-stream = { version = "0.1.14", features = ["net"], optional = true }
tokio-util = { version = "0.7.10", features = ["compat"], optional = true }
console-subscriber = { version = "^0", optional = true }
tokio-stream = { version = "^0", features = ["net"], optional = true }
tokio-util = { version = "^0", features = ["compat"], optional = true }
async-tungstenite = { package = "veilid-async-tungstenite", version = "^0", features = [
"async-tls",
] }
@ -76,7 +81,7 @@ flume = { version = "^0", features = ["async"] }
rpassword = "^7"
hostname = "^0"
stop-token = { version = "^0", default-features = false }
sysinfo = { version = "^0.29.10", default-features = false }
sysinfo = { version = "^0.29.11", default-features = false }
wg = "0.3.2"
[target.'cfg(windows)'.dependencies]

View File

@ -6,6 +6,7 @@ use futures_util::{future::join_all, stream::FuturesUnordered, StreamExt};
use parking_lot::Mutex;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use stop_token::future::FutureExt as _;
use stop_token::*;
@ -18,10 +19,11 @@ use wg::AsyncWaitGroup;
const MAX_NON_JSON_LOGGING: usize = 50;
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use async_std::io::prelude::BufReadExt;
use async_std::io::WriteExt;
} else if #[cfg(feature="rt-tokio")] {
use futures_util::{AsyncBufReadExt, AsyncWriteExt};
} else
if #[cfg(feature="rt-tokio")] {
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncWriteExt;
} else {
@ -46,7 +48,7 @@ struct ClientApiInner {
settings: Settings,
stop: Option<StopSource>,
join_handle: Option<ClientApiAllFuturesJoinHandle>,
update_channels: HashMap<(SocketAddr, SocketAddr), flume::Sender<String>>,
update_channels: HashMap<u64, flume::Sender<String>>,
}
#[derive(Clone)]
@ -108,9 +110,46 @@ impl ClientApi {
trace!("ClientApi::stop: stopped");
}
async fn handle_incoming(self, bind_addr: SocketAddr) -> std::io::Result<()> {
async fn handle_ipc_incoming(self, ipc_path: PathBuf) -> std::io::Result<()> {
if ipc_path.exists() {
if let Err(e) = std::fs::remove_file(&ipc_path) {
error!("Binding failed because IPC path is in use: {}\nAnother copy of this application may be using the same IPC path.", e);
return Err(e);
}
}
let mut listener = IpcListener::bind(ipc_path.clone()).await?;
debug!("IPC Client API listening on: {:?}", ipc_path);
// Process the incoming accept stream
let mut incoming_stream = listener.incoming()?;
// Make wait group for all incoming connections
let awg = AsyncWaitGroup::new();
let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
while let Ok(Some(stream_result)) =
incoming_stream.next().timeout_at(stop_token.clone()).await
{
// Get the stream to process
let stream = stream_result?;
// Increment wait group
awg.add(1);
let t_awg = awg.clone();
// Process the connection
spawn(self.clone().handle_ipc_connection(stream, t_awg)).detach();
}
// Wait for all connections to terminate
awg.wait().await;
Ok(())
}
async fn handle_tcp_incoming(self, bind_addr: SocketAddr) -> std::io::Result<()> {
let listener = TcpListener::bind(bind_addr).await?;
debug!("Client API listening on: {:?}", bind_addr);
debug!("TCPClient API listening on: {:?}", bind_addr);
// Process the incoming accept stream
cfg_if! {
@ -137,7 +176,7 @@ impl ClientApi {
let t_awg = awg.clone();
// Process the connection
spawn(self.clone().handle_connection(stream, t_awg)).detach();
spawn(self.clone().handle_tcp_connection(stream, t_awg)).detach();
}
// Wait for all connections to terminate
@ -217,6 +256,9 @@ impl ClientApi {
// (trim all whitespace around input lines just to make things more permissive for API users)
let request: json_api::Request = deserialize_json(&sanitized_line)?;
#[cfg(feature = "debug-json-api")]
debug!("JSONAPI: Request: {:?}", request);
// See if this is a control message or a veilid-core message
let response = if let json_api::RequestOp::Control { args } = request.op {
// Process control messages
@ -231,6 +273,9 @@ impl ClientApi {
jrp.clone().process_request(request).await
};
#[cfg(feature = "debug-json-api")]
debug!("JSONAPI: Response: {:?}", response);
// Marshal json + newline => NDJSON
let response_string = serialize_json(json_api::RecvMessage::Response(response)) + "\n";
if let Err(e) = responses_tx.send_async(response_string).await {
@ -294,47 +339,11 @@ impl ClientApi {
VeilidAPIResult::Ok(None)
}
pub async fn handle_connection(self, stream: TcpStream, awg: AsyncWaitGroup) {
// Get address of peer
let peer_addr = match stream.peer_addr() {
Ok(v) => v,
Err(e) => {
eprintln!("can't get peer address: {}", e);
return;
}
};
// Get local address
let local_addr = match stream.local_addr() {
Ok(v) => v,
Err(e) => {
eprintln!("can't get local address: {}", e);
return;
}
};
// Get connection tuple
let conn_tuple = (local_addr, peer_addr);
debug!(
"Accepted Client API Connection: {:?} -> {:?}",
peer_addr, local_addr
);
// Make stop token to quit when stop() is requested externally
let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
// Split into reader and writer halves
// with line buffering on the reader
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use futures_util::AsyncReadExt;
let (reader, mut writer) = stream.split();
let reader = BufReader::new(reader);
} else {
let (reader, writer) = stream.into_split();
let reader = BufReader::new(reader);
}
}
pub async fn run_json_request_processor<R, W>(self, reader: R, writer: W, stop_token: StopToken)
where
R: AsyncBufReadExt + Unpin + Send,
W: AsyncWriteExt + Unpin + Send,
{
// Make request processor for this connection
let api = self.inner.lock().veilid_api.clone();
let jrp = json_api::JsonRequestProcessor::new(api);
@ -348,10 +357,11 @@ impl ClientApi {
let (responses_tx, responses_rx) = flume::unbounded();
// Start sending updates
let id = get_timestamp();
self.inner
.lock()
.update_channels
.insert(conn_tuple, responses_tx.clone());
.insert(id, responses_tx.clone());
// Request receive processor future
// Receives from socket and enqueues RequestLines
@ -401,13 +411,84 @@ impl ClientApi {
}
// Stop sending updates
self.inner.lock().update_channels.remove(&conn_tuple);
self.inner.lock().update_channels.remove(&id);
}
pub async fn handle_tcp_connection(self, stream: TcpStream, awg: AsyncWaitGroup) {
// Get address of peer
let peer_addr = match stream.peer_addr() {
Ok(v) => v,
Err(e) => {
eprintln!("can't get peer address: {}", e);
return;
}
};
// Get local address
let local_addr = match stream.local_addr() {
Ok(v) => v,
Err(e) => {
eprintln!("can't get local address: {}", e);
return;
}
};
// Log the accepted connection
debug!(
"Closed Client API Connection: {:?} -> {:?}",
"Accepted TCP Client API Connection: {:?} -> {:?}",
peer_addr, local_addr
);
// Make stop token to quit when stop() is requested externally
let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
// Split into reader and writer halves
// with line buffering on the reader
cfg_if! {
if #[cfg(feature="rt-async-std")] {
use futures_util::AsyncReadExt;
let (reader, writer) = stream.split();
let reader = BufReader::new(reader);
} else {
let (reader, writer) = stream.into_split();
let reader = BufReader::new(reader);
}
}
self.run_json_request_processor(reader, writer, stop_token)
.await;
debug!(
"Closed TCP Client API Connection: {:?} -> {:?}",
peer_addr, local_addr
);
awg.done();
}
pub async fn handle_ipc_connection(self, stream: IpcStream, awg: AsyncWaitGroup) {
// IPC connections have no peer address tuple to report; just log the accept
debug!("Accepted IPC Client API Connection");
// Make stop token to quit when stop() is requested externally
let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
// Split into reader and writer halves
// with line buffering on the reader
use futures_util::AsyncReadExt;
let (reader, writer) = stream.split();
cfg_if! {
if #[cfg(feature = "rt-tokio")] {
use tokio_util::compat::{FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt};
let reader = reader.compat();
let writer = writer.compat_write();
}
}
let reader = BufReader::new(reader);
self.run_json_request_processor(reader, writer, stop_token)
.await;
debug!("Closed IPC Client API Connection",);
awg.done();
}
@ -425,15 +506,29 @@ impl ClientApi {
}
#[instrument(level = "trace", skip(self))]
pub fn run(&self, bind_addrs: Vec<SocketAddr>) {
let bind_futures = bind_addrs.iter().copied().map(|addr| {
pub fn run(&self, ipc_path: Option<PathBuf>, tcp_bind_addrs: Vec<SocketAddr>) {
let mut bind_futures: Vec<SendPinBoxFuture<()>> = Vec::new();
// Local IPC
if let Some(ipc_path) = ipc_path {
let this = self.clone();
async move {
if let Err(e) = this.handle_incoming(addr).await {
warn!("Not binding client API to {}: {}", addr, e);
bind_futures.push(Box::pin(async move {
if let Err(e) = this.handle_ipc_incoming(ipc_path.clone()).await {
warn!("Not binding IPC client API to {:?}: {}", ipc_path, e);
}
}
});
}));
}
// Network sockets
for addr in tcp_bind_addrs.iter().copied() {
let this = self.clone();
bind_futures.push(Box::pin(async move {
if let Err(e) = this.handle_tcp_incoming(addr).await {
warn!("Not binding TCP client API to {}: {}", addr, e);
}
}));
}
let bind_futures_join = join_all(bind_futures);
self.inner.lock().join_handle = Some(spawn(bind_futures_join));
}

View File

@ -4,6 +4,9 @@
#![deny(unused_must_use)]
#![recursion_limit = "256"]
#[cfg(all(feature = "rt-async-std", windows))]
compile_error! {"async-std compilation for windows is currently unsupportedg"}
mod client_api;
mod server;
mod settings;
@ -77,6 +80,7 @@ pub struct CmdlineArgs {
/// Turn on OpenTelemetry tracing
///
/// This option uses the GRPC OpenTelemetry protocol, not HTTP. The format for the endpoint is host:port, like 'localhost:4317'
#[cfg(feature = "opentelemetry-otlp")]
#[arg(long, value_name = "endpoint")]
otlp: Option<String>,
@ -180,9 +184,6 @@ fn main() -> EyreResult<()> {
settingsrw.daemon.enabled = false;
}
if let Some(subnode_index) = args.subnode_index {
if subnode_index == 0 {
bail!("value of subnode_index should be between 1 and 65535");
}
settingsrw.testing.subnode_index = subnode_index;
};

Some files were not shown because too many files have changed in this diff