Merge branch 'main' into add-cicd-scripts

[ci skip]
This commit is contained in:
TC Johnson 2024-04-21 13:43:17 -05:00
commit c969df33d8
No known key found for this signature in database
31 changed files with 381 additions and 219 deletions

View File

@ -22,11 +22,11 @@ welcome!
Running the setup script requires:
* Rust
- Rust
#### Optionally, to build for Android:
* Android SDK and NDK
- Android SDK and NDK
You may decide to use Android Studio [here](https://developer.android.com/studio)
to maintain your Android dependencies. If so, use the dependency manager
@ -35,11 +35,11 @@ method is highly recommended as you may run into path problems with the 'flutter
command line without it. If you do so, you may skip to
[Run Veilid setup script](#Run Veilid setup script).
* build-tools;33.0.1
* ndk;25.1.8937393
* cmake;3.22.1
* platform-tools
* platforms;android-33
- build-tools;34.0.0
- ndk;26.3.11579264
- cmake;3.22.1
- platform-tools
- platforms;android-34
#### Setup Dependencies using the CLI
@ -56,9 +56,9 @@ the command line to install the requisite package versions:
```shell
sdkmanager --install "platform-tools"
sdkmanager --install "platforms;android-33"
sdkmanager --install "build-tools;33.0.1"
sdkmanager --install "ndk;25.1.8937393"
sdkmanager --install "platforms;android-34"
sdkmanager --install "build-tools;34.0.0"
sdkmanager --install "ndk;26.3.11579264"
sdkmanager --install "cmake;3.22.1"
```
@ -97,23 +97,23 @@ Development of Veilid on MacOS is possible on both Intel and ARM hardware.
Development requires:
* Xcode, preferably latest version
* Homebrew [here](https://brew.sh)
* Rust
- Xcode, preferably latest version
- Homebrew [here](https://brew.sh)
- Rust
#### Optionally, to build for Android:
* Android Studio
* Android SDK and NDK
- Android Studio
- Android SDK and NDK
You will need to use Android Studio [here](https://developer.android.com/studio)
to maintain your Android dependencies. Use the SDK Manager in the IDE to install the following packages (use package details view to select version):
* Android SDK Build Tools (33.0.1)
* NDK (Side-by-side) (25.1.8937393)
* Cmake (3.22.1)
* Android SDK 33
* Android SDK Command Line Tools (latest) (7.0/latest)
- Android SDK Build Tools (34.0.0)
- NDK (Side-by-side) (26.3.11579264)
- Cmake (3.22.1)
- Android SDK 34
- Android SDK Command Line Tools (latest) (7.0/latest)
#### Setup command line environment

View File

@ -68,7 +68,7 @@ deps-android:
RUN mkdir /Android; mkdir /Android/Sdk
RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
RUN cd /Android; unzip /Android/cmdline-tools.zip
RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33 cmdline-tools\;latest
RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;34.0.0 ndk\;26.3.11579264 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest
RUN rm -rf /Android/cmdline-tools
RUN apt-get clean
@ -155,7 +155,7 @@ build-linux-arm64:
build-android:
FROM +code-android
WORKDIR /veilid/veilid-core
ENV PATH=$PATH:/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin/
ENV PATH=$PATH:/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/
RUN cargo build --target aarch64-linux-android --release
RUN cargo build --target armv7-linux-androideabi --release
RUN cargo build --target i686-linux-android --release

View File

@ -20,13 +20,13 @@ Releases happen via a CI/CD pipeline. The release process flows as follows:
2.1 Update your local copy of `main` to mirror the newly merged upstream `main`
2.2 Ensure the [CHANGELOG](./CHANGELOG.md) is updated
2.2 Ensure the [CHANGELOG](./CHANGELOG.md) is updated. Include `[ci skip]` in the commit message so that the testing pipeline is skipped.
2.3 Activate your bumpversion Python venv (see bumpversion setup section for details)
2.4 Execute version_bump.sh with the appropriate parameter (patch, minor, or major). This results in all version entries being updated and a matching git tag created locally.
2.5 Add all changes `git add *`
2.5 Add all changes `git add .`
2.6 Git commit the changes with the following message: `Version update: v{current_version} → v{new_version}`

View File

@ -27,40 +27,45 @@ elif [ ! -z "$(command -v dnf)" ]; then
sudo dnf groupinstall -y 'Development Tools'
fi
# Install Rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y -c clippy --profile default
source "$HOME/.cargo/env"
#ask if they want to install optional android sdk (and install if yes)
while true; do
read -p "Do you want to install Android SDK (optional) Y/N) " response
read -p "Do you want to install Android SDK (optional) Y/N) " response
case $response in
[yY] ) echo Installing Android SDK...;
# Install Android SDK
mkdir $HOME/Android; mkdir $HOME/Android/Sdk
curl -o $HOME/Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
cd $HOME/Android; unzip $HOME/Android/cmdline-tools.zip
$HOME/Android/cmdline-tools/bin/sdkmanager --sdk_root=$HOME/Android/Sdk build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33 cmdline-tools\;latest emulator
cd $HOME
rm -rf $HOME/Android/cmdline-tools $HOME/Android/cmdline-tools.zip
case $response in
[yY])
echo Installing Android SDK...
# Install Android SDK
mkdir $HOME/Android
mkdir $HOME/Android/Sdk
curl -o $HOME/Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
cd $HOME/Android
unzip $HOME/Android/cmdline-tools.zip
$HOME/Android/cmdline-tools/bin/sdkmanager --sdk_root=$HOME/Android/Sdk build-tools\;34.0.0 ndk\;26.3.11579264 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest emulator
cd $HOME
rm -rf $HOME/Android/cmdline-tools $HOME/Android/cmdline-tools.zip
# Add environment variables
cat >> $HOME/.profile <<END
# Add environment variables
cat >>$HOME/.profile <<END
source "\$HOME/.cargo/env"
export PATH=\$PATH:\$HOME/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin:\$HOME/Android/Sdk/platform-tools:\$HOME/Android/Sdk/cmdline-tools/latest/bin
export PATH=\$PATH:\$HOME/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin:\$HOME/Android/Sdk/platform-tools:\$HOME/Android/Sdk/cmdline-tools/latest/bin
export ANDROID_HOME=\$HOME/Android/Sdk
END
break ;;
[nN] ) echo Skipping Android SDK;
cat >> $HOME/.profile <<END
break
;;
[nN])
echo Skipping Android SDK
cat >>$HOME/.profile <<END
source "\$HOME/.cargo/env"
END
break;;
break
;;
* ) echo invalid response;;
esac
*) echo invalid response ;;
esac
done
echo Complete! Exit and reopen the shell and continue with ./setup_linux.sh

View File

@ -6,7 +6,7 @@ if [ $(id -u) -eq 0 ]; then
exit
fi
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
if [[ "$(uname)" != "Linux" ]]; then
echo Not running Linux
@ -22,7 +22,8 @@ while true; do
read -p "Did you install Android SDK? Y/N " response
case $response in
[yY] ) echo Checking android setup...;
[yY])
echo Checking android setup...
# ensure ANDROID_HOME is defined and exists
if [ -d "$ANDROID_HOME" ]; then
@ -41,7 +42,7 @@ while true; do
fi
# ensure ndk is installed
ANDROID_NDK_HOME="$ANDROID_HOME/ndk/25.1.8937393"
ANDROID_NDK_HOME="$ANDROID_HOME/ndk/26.3.11579264"
if [ -f "$ANDROID_NDK_HOME/ndk-build" ]; then
echo '[X] Android NDK is installed at the location $ANDROID_NDK_HOME'
else
@ -66,21 +67,24 @@ while true; do
fi
# ensure adb is installed
if command -v adb &> /dev/null; then
if command -v adb &>/dev/null; then
echo '[X] adb is available in the path'
else
echo 'adb is not available in the path'
exit 1
fi
break;;
[nN] ) echo Skipping Android SDK config check...;
break;;
* ) echo invalid response;;
break
;;
[nN])
echo Skipping Android SDK config check...
break
;;
*) echo invalid response ;;
esac
done
# ensure rustup is installed
if command -v rustup &> /dev/null; then
if command -v rustup &>/dev/null; then
echo '[X] rustup is available in the path'
else
echo 'rustup is not available in the path'
@ -88,7 +92,7 @@ else
fi
# ensure cargo is installed
if command -v cargo &> /dev/null; then
if command -v cargo &>/dev/null; then
echo '[X] cargo is available in the path'
else
echo 'cargo is not available in the path'
@ -96,7 +100,7 @@ else
fi
# ensure pip3 is installed
if command -v pip3 &> /dev/null; then
if command -v pip3 &>/dev/null; then
echo '[X] pip3 is available in the path'
else
echo 'pip3 is not available in the path'
@ -117,14 +121,18 @@ while true; do
read -p "Will you be modifying the capnproto schema? Y/N (say N if unsure)" response
case $response in
[yY] ) echo Installing capnproto...;
[yY])
echo Installing capnproto...
# Install capnproto using the same mechanism as our earthly build
$SCRIPTDIR/../scripts/earthly/install_capnproto.sh
break;;
[nN] ) echo Skipping capnproto installation...;
break;;
* ) echo invalid response;;
break
;;
[nN])
echo Skipping capnproto installation...
break
;;
*) echo invalid response ;;
esac
done

View File

@ -1,7 +1,7 @@
#!/bin/bash
set -eo pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
if [ ! "$(uname)" == "Darwin" ]; then
echo Not running on MacOS
@ -12,7 +12,8 @@ while true; do
read -p "Did you install Android SDK? Y/N " response
case $response in
[yY] ) echo Checking android setup...;
[yY])
echo Checking android setup...
# ensure ANDROID_HOME is defined and exists
if [ -d "$ANDROID_HOME" ]; then
echo '[X] $ANDROID_HOME is defined and exists'
@ -30,10 +31,10 @@ while true; do
fi
# ensure Android SDK packages are installed
$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager build-tools\;33.0.1 ndk\;25.1.8937393 cmake\;3.22.1 platform-tools platforms\;android-33
$ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager build-tools\;34.0.0 ndk\;26.3.11579264 cmake\;3.22.1 platform-tools platforms\;android-34
# ensure ANDROID_NDK_HOME is defined and exists
ANDROID_NDK_HOME="$ANDROID_HOME/ndk/25.1.8937393"
ANDROID_NDK_HOME="$ANDROID_HOME/ndk/26.3.11579264"
if [ -d "$ANDROID_NDK_HOME" ]; then
echo '[X] Android NDK is defined and exists'
else
@ -66,22 +67,25 @@ while true; do
fi
# ensure adb is installed
if command -v adb &> /dev/null; then
if command -v adb &>/dev/null; then
echo '[X] adb is available in the path'
else
echo 'adb is not available in the path'
exit 1
fi
break;;
[nN] ) echo Skipping Android SDK config check...;
break;;
break
;;
[nN])
echo Skipping Android SDK config check...
break
;;
* ) echo invalid response;;
*) echo invalid response ;;
esac
done
# ensure brew is installed
if command -v brew &> /dev/null; then
if command -v brew &>/dev/null; then
echo '[X] brew is available in the path'
else
echo 'brew is not available in the path'
@ -89,7 +93,7 @@ else
fi
# ensure xcode is installed
if command -v xcode-select &> /dev/null; then
if command -v xcode-select &>/dev/null; then
echo '[X] XCode is available in the path'
else
echo 'XCode is not available in the path'
@ -97,7 +101,7 @@ else
fi
# ensure rustup is installed
if command -v rustup &> /dev/null; then
if command -v rustup &>/dev/null; then
echo '[X] rustup is available in the path'
else
echo 'rustup is not available in the path'
@ -105,7 +109,7 @@ else
fi
# ensure cargo is installed
if command -v cargo &> /dev/null; then
if command -v cargo &>/dev/null; then
echo '[X] cargo is available in the path'
else
echo 'cargo is not available in the path'
@ -113,7 +117,7 @@ else
fi
# ensure pip3 is installed
if command -v pip3 &> /dev/null; then
if command -v pip3 &>/dev/null; then
echo '[X] pip3 is available in the path'
else
echo 'pip3 is not available in the path'
@ -130,9 +134,9 @@ else
fi
# ensure we have command line tools
xcode-select --install 2> /dev/null || true
xcode-select --install 2>/dev/null || true
until [ -d /Library/Developer/CommandLineTools/usr/bin ]; do
sleep 5;
sleep 5
done
# install packages
@ -155,7 +159,7 @@ cargo install wasm-bindgen-cli wasm-pack cargo-edit
# install pip packages
pip3 install --upgrade bumpversion
if command -v pod &> /dev/null; then
if command -v pod &>/dev/null; then
echo '[X] CocoaPods is available in the path'
else
echo 'CocoaPods is not available in the path, installing it now'

View File

@ -1,8 +1,8 @@
[target.aarch64-linux-android]
linker = "/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android33-clang"
linker = "/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang"
[target.armv7-linux-androideabi]
linker = "/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi33-clang"
linker = "/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi33-clang"
[target.x86_64-linux-android]
linker = "/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android33-clang"
linker = "/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android34-clang"
[target.i686-linux-android]
linker = "/Android/Sdk/ndk/25.1.8937393/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android33-clang"
linker = "/Android/Sdk/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android34-clang"

View File

@ -1,10 +1,9 @@
use glob::glob;
use sha2::{Digest, Sha256};
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::Write;
use std::{
env, io,
io,
path::Path,
process::{Command, Stdio},
};
@ -126,26 +125,6 @@ fn do_capnp_build() {
append_hash("proto/veilid.capnp", "proto/veilid_capnp.rs");
}
// Fix for missing __extenddftf2 on Android x86_64 Emulator
fn fix_android_emulator() {
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap();
if target_arch == "x86_64" && target_os == "android" {
let missing_library = "clang_rt.builtins-x86_64-android";
let android_home = env::var("ANDROID_HOME").expect("ANDROID_HOME not set");
let lib_path = glob(&format!(
"{android_home}/ndk/25.1.8937393/**/lib{missing_library}.a"
))
.expect("failed to glob")
.next()
.expect("Need libclang_rt.builtins-x86_64-android.a")
.unwrap();
let lib_dir = lib_path.parent().unwrap();
println!("cargo:rustc-link-search={}", lib_dir.display());
println!("cargo:rustc-link-lib=static={missing_library}");
}
}
fn main() {
if std::env::var("DOCS_RS").is_ok()
|| std::env::var("CARGO_CFG_DOC").is_ok()
@ -158,6 +137,4 @@ fn main() {
println!("cargo:warning=rebuilding proto/veilid_capnp.rs because it has changed from the last generation of proto/veilid.capnp");
do_capnp_build();
}
fix_android_emulator();
}

View File

@ -284,14 +284,13 @@ where
};
// Initialize closest nodes list
if init_fanout_queue.is_empty() {
if let Err(e) = self.clone().init_closest_nodes() {
return TimeoutOr::value(Err(e));
}
} else {
self.clone().add_to_fanout_queue(&init_fanout_queue);
if let Err(e) = self.clone().init_closest_nodes() {
return TimeoutOr::value(Err(e));
}
// Ensure we include the most recent nodes
self.clone().add_to_fanout_queue(&init_fanout_queue);
// Do a quick check to see if we're already done
{
let mut ctx = self.context.lock();

View File

@ -43,6 +43,12 @@ impl StorageManager {
)
};
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
let inner = self.inner.lock().await;
inner.get_value_nodes(key)?.unwrap_or_default()
};
// Make do-get-value answer context
let schema = if let Some(d) = &last_get_result.opt_descriptor {
Some(d.schema()?)
@ -179,7 +185,7 @@ impl StorageManager {
check_done,
);
let kind = match fanout_call.run(vec![]).await {
let kind = match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => FanoutResultKind::Timeout,
// If we finished with or without consensus (enough nodes returning the same value)

View File

@ -82,6 +82,12 @@ impl StorageManager {
}
};
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
let inner = self.inner.lock().await;
inner.get_value_nodes(key)?.unwrap_or_default()
};
// Make do-inspect-value answer context
let opt_descriptor_info = if let Some(descriptor) = &local_inspect_result.opt_descriptor {
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
@ -253,7 +259,7 @@ impl StorageManager {
check_done,
);
let kind = match fanout_call.run(vec![]).await {
let kind = match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => FanoutResultKind::Timeout,
// If we finished with or without consensus (enough nodes returning the same value)

View File

@ -795,10 +795,19 @@ impl StorageManager {
"more subkeys returned locally than requested"
);
// Get the offline subkeys for this record still only returning the ones we're inspecting
let offline_subkey_writes = inner
.offline_subkey_writes
.get(&key)
.map(|o| o.subkeys.clone())
.unwrap_or_default()
.intersect(&subkeys);
// If this is the maximum scope we're interested in, return the report
if matches!(scope, DHTReportScope::Local) {
return Ok(DHTRecordReport::new(
local_inspect_result.subkeys,
offline_subkey_writes,
local_inspect_result.seqs,
vec![],
));
@ -864,6 +873,7 @@ impl StorageManager {
Ok(DHTRecordReport::new(
result.inspect_result.subkeys,
offline_subkey_writes,
local_inspect_result.seqs,
result.inspect_result.seqs,
))

View File

@ -1037,7 +1037,7 @@ where
let Some(member_check) = self.with_record(key, |record| {
let schema = record.schema();
let owner = *record.owner();
Box::new(move |watcher| owner == params.watcher || schema.is_member(&watcher))
Box::new(move |watcher| owner == watcher || schema.is_member(&watcher))
}) else {
// Record not found
return Ok(WatchResult::Rejected);

View File

@ -44,6 +44,12 @@ impl StorageManager {
)
};
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
let inner = self.inner.lock().await;
inner.get_value_nodes(key)?.unwrap_or_default()
};
// Make do-set-value answer context
let schema = descriptor.schema()?;
let context = Arc::new(Mutex::new(OutboundSetValueContext {
@ -100,11 +106,17 @@ impl StorageManager {
return Ok(NetworkResult::invalid_message("Schema validation failed"));
}
// If we got a value back it should be different than the one we are setting
if ctx.value.value_data() == value.value_data() {
// Move to the next node
return Ok(NetworkResult::invalid_message("same value returned"));
}
// We have a prior value, ensure this is a newer sequence number
let prior_seq = ctx.value.value_data().seq();
let new_seq = value.value_data().seq();
if new_seq > prior_seq {
// If the sequence number is greater, keep it
if new_seq >= prior_seq {
// If the sequence number is greater or equal, keep it
ctx.value = Arc::new(value);
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
@ -164,7 +176,7 @@ impl StorageManager {
check_done,
);
let kind = match fanout_call.run(vec![]).await {
let kind = match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => FanoutResultKind::Timeout,
// If we finished with or without consensus (enough nodes returning the same value)

View File

@ -10,13 +10,17 @@ impl StorageManager {
_last_ts: Timestamp,
_cur_ts: Timestamp,
) -> EyreResult<()> {
let offline_subkey_writes = {
let inner = self.lock().await?;
inner.offline_subkey_writes.clone()
let (mut offline_subkey_writes, opt_update_callback) = {
let mut inner = self.lock().await?;
let out = (
inner.offline_subkey_writes.clone(),
inner.update_callback.clone(),
);
inner.offline_subkey_writes.clear();
out
};
// make a safety selection that is conservative
for (key, osw) in offline_subkey_writes {
for (key, osw) in offline_subkey_writes.iter_mut() {
if poll!(stop_token.clone()).is_ready() {
log_stor!(debug "Offline subkey writes cancelled.");
break;
@ -25,10 +29,12 @@ impl StorageManager {
log_stor!(debug "Offline subkey writes stopped for network.");
break;
};
let mut written_subkeys = ValueSubkeyRangeSet::new();
for subkey in osw.subkeys.iter() {
let get_result = {
let mut inner = self.lock().await?;
inner.handle_get_local_value(key, subkey, true).await
inner.handle_get_local_value(*key, subkey, true).await
};
let Ok(get_result) = get_result else {
log_stor!(debug "Offline subkey write had no subkey result: {}:{}", key, subkey);
@ -43,22 +49,52 @@ impl StorageManager {
continue;
};
log_stor!(debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len());
if let Err(e) = self
let osvres = self
.outbound_set_value(
rpc_processor.clone(),
key,
*key,
subkey,
osw.safety_selection,
value,
descriptor,
)
.await
{
log_stor!(debug "failed to write offline subkey: {}", e);
.await;
match osvres {
Ok(osv) => {
if let Some(update_callback) = opt_update_callback.clone() {
// Send valuechange with dead count and no subkeys
update_callback(VeilidUpdate::ValueChange(Box::new(
VeilidValueChange {
key: *key,
subkeys: ValueSubkeyRangeSet::single(subkey),
count: u32::MAX,
value: Some(osv.signed_value_data.value_data().clone()),
},
)));
}
written_subkeys.insert(subkey);
}
Err(e) => {
log_stor!(debug "failed to write offline subkey: {}", e);
}
}
}
let mut inner = self.lock().await?;
inner.offline_subkey_writes.remove(&key);
osw.subkeys = osw.subkeys.difference(&written_subkeys);
}
// Add any subkeys back in that were not successfully written
let mut inner = self.lock().await?;
for (key, osw) in offline_subkey_writes {
if !osw.subkeys.is_empty() {
inner
.offline_subkey_writes
.entry(key)
.and_modify(|x| {
x.subkeys = x.subkeys.union(&osw.subkeys);
})
.or_insert(osw);
}
}
Ok(())

View File

@ -3,13 +3,13 @@ plugins {
}
android {
compileSdkVersion 33
buildToolsVersion "33.0.1"
compileSdkVersion 34
buildToolsVersion "34.0.0"
defaultConfig {
applicationId "com.veilid.veilid_core_android_tests"
minSdkVersion 24
targetSdkVersion 33
targetSdkVersion 34
versionCode 1
versionName "1.0"
@ -38,7 +38,7 @@ android {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
}
ndkVersion '25.1.8937393'
ndkVersion '26.3.11579264'
// Required to copy libc++_shared.so
externalNativeBuild {
@ -83,4 +83,3 @@ afterEvaluate {
tasks["generate${productFlavor}${buildType}Assets"].dependsOn(tasks["cargoBuild"])
}
}

View File

@ -12,6 +12,8 @@ pub struct DHTRecordReport {
/// This may be a subset of the requested range if it exceeds the schema limits
/// or has more than 512 subkeys
subkeys: ValueSubkeyRangeSet,
/// The subkeys that have been written offline that still need to be flushed
offline_subkeys: ValueSubkeyRangeSet,
/// The sequence numbers of each subkey requested from a locally stored DHT Record
local_seqs: Vec<ValueSeqNum>,
/// The sequence numbers of each subkey requested from the DHT over the network
@ -22,11 +24,13 @@ from_impl_to_jsvalue!(DHTRecordReport);
impl DHTRecordReport {
pub fn new(
subkeys: ValueSubkeyRangeSet,
offline_subkeys: ValueSubkeyRangeSet,
local_seqs: Vec<ValueSeqNum>,
network_seqs: Vec<ValueSeqNum>,
) -> Self {
Self {
subkeys,
offline_subkeys,
local_seqs,
network_seqs,
}
@ -35,6 +39,9 @@ impl DHTRecordReport {
pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
&self.subkeys
}
pub fn offline_subkeys(&self) -> &ValueSubkeyRangeSet {
&self.offline_subkeys
}
pub fn local_seqs(&self) -> &[ValueSeqNum] {
&self.local_seqs
}
@ -47,8 +54,9 @@ impl fmt::Debug for DHTRecordReport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"DHTRecordReport {{\n subkeys: {:?}\n local_seqs:\n{}\n remote_seqs:\n{}\n}}\n",
"DHTRecordReport {{\n subkeys: {:?}\n offline_subkeys: {:?}\n local_seqs:\n{}\n remote_seqs:\n{}\n}}\n",
&self.subkeys,
&self.offline_subkeys,
&debug_seqs(&self.local_seqs),
&debug_seqs(&self.network_seqs)
)

View File

@ -63,7 +63,7 @@ android {
}
}
ndkVersion '25.1.8937393'
ndkVersion '26.3.11579264'
// Required to copy libc++_shared.so
externalNativeBuild {

View File

@ -27,7 +27,7 @@ apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle"
android {
compileSdkVersion flutter.compileSdkVersion
ndkVersion '25.1.8937393'
ndkVersion '26.3.11579264'
compileOptions {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8

View File

@ -55,7 +55,7 @@ class HistoryWrapper {
}
});
},
focusNode: FocusNode(onKey: (FocusNode node, RawKeyEvent event) {
focusNode: FocusNode(onKeyEvent: (FocusNode node, KeyEvent event) {
if (event.logicalKey == LogicalKeyboardKey.arrowDown ||
event.logicalKey == LogicalKeyboardKey.arrowUp) {
return KeyEventResult.handled;

View File

@ -246,6 +246,7 @@ class RouteBlob with _$RouteBlob {
class DHTRecordReport with _$DHTRecordReport {
const factory DHTRecordReport({
required List<ValueSubkeyRange> subkeys,
required List<ValueSubkeyRange> offlineSubkeys,
required List<int> localSeqs,
required List<int> networkSeqs,
}) = _DHTRecordReport;

View File

@ -1363,6 +1363,8 @@ DHTRecordReport _$DHTRecordReportFromJson(Map<String, dynamic> json) {
/// @nodoc
mixin _$DHTRecordReport {
List<ValueSubkeyRange> get subkeys => throw _privateConstructorUsedError;
List<ValueSubkeyRange> get offlineSubkeys =>
throw _privateConstructorUsedError;
List<int> get localSeqs => throw _privateConstructorUsedError;
List<int> get networkSeqs => throw _privateConstructorUsedError;
@ -1380,6 +1382,7 @@ abstract class $DHTRecordReportCopyWith<$Res> {
@useResult
$Res call(
{List<ValueSubkeyRange> subkeys,
List<ValueSubkeyRange> offlineSubkeys,
List<int> localSeqs,
List<int> networkSeqs});
}
@ -1398,6 +1401,7 @@ class _$DHTRecordReportCopyWithImpl<$Res, $Val extends DHTRecordReport>
@override
$Res call({
Object? subkeys = null,
Object? offlineSubkeys = null,
Object? localSeqs = null,
Object? networkSeqs = null,
}) {
@ -1406,6 +1410,10 @@ class _$DHTRecordReportCopyWithImpl<$Res, $Val extends DHTRecordReport>
? _value.subkeys
: subkeys // ignore: cast_nullable_to_non_nullable
as List<ValueSubkeyRange>,
offlineSubkeys: null == offlineSubkeys
? _value.offlineSubkeys
: offlineSubkeys // ignore: cast_nullable_to_non_nullable
as List<ValueSubkeyRange>,
localSeqs: null == localSeqs
? _value.localSeqs
: localSeqs // ignore: cast_nullable_to_non_nullable
@ -1428,6 +1436,7 @@ abstract class _$$DHTRecordReportImplCopyWith<$Res>
@useResult
$Res call(
{List<ValueSubkeyRange> subkeys,
List<ValueSubkeyRange> offlineSubkeys,
List<int> localSeqs,
List<int> networkSeqs});
}
@ -1444,6 +1453,7 @@ class __$$DHTRecordReportImplCopyWithImpl<$Res>
@override
$Res call({
Object? subkeys = null,
Object? offlineSubkeys = null,
Object? localSeqs = null,
Object? networkSeqs = null,
}) {
@ -1452,6 +1462,10 @@ class __$$DHTRecordReportImplCopyWithImpl<$Res>
? _value._subkeys
: subkeys // ignore: cast_nullable_to_non_nullable
as List<ValueSubkeyRange>,
offlineSubkeys: null == offlineSubkeys
? _value._offlineSubkeys
: offlineSubkeys // ignore: cast_nullable_to_non_nullable
as List<ValueSubkeyRange>,
localSeqs: null == localSeqs
? _value._localSeqs
: localSeqs // ignore: cast_nullable_to_non_nullable
@ -1469,9 +1483,11 @@ class __$$DHTRecordReportImplCopyWithImpl<$Res>
class _$DHTRecordReportImpl implements _DHTRecordReport {
const _$DHTRecordReportImpl(
{required final List<ValueSubkeyRange> subkeys,
required final List<ValueSubkeyRange> offlineSubkeys,
required final List<int> localSeqs,
required final List<int> networkSeqs})
: _subkeys = subkeys,
_offlineSubkeys = offlineSubkeys,
_localSeqs = localSeqs,
_networkSeqs = networkSeqs;
@ -1486,6 +1502,14 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
return EqualUnmodifiableListView(_subkeys);
}
final List<ValueSubkeyRange> _offlineSubkeys;
@override
List<ValueSubkeyRange> get offlineSubkeys {
if (_offlineSubkeys is EqualUnmodifiableListView) return _offlineSubkeys;
// ignore: implicit_dynamic_type
return EqualUnmodifiableListView(_offlineSubkeys);
}
final List<int> _localSeqs;
@override
List<int> get localSeqs {
@ -1504,7 +1528,7 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
@override
String toString() {
return 'DHTRecordReport(subkeys: $subkeys, localSeqs: $localSeqs, networkSeqs: $networkSeqs)';
return 'DHTRecordReport(subkeys: $subkeys, offlineSubkeys: $offlineSubkeys, localSeqs: $localSeqs, networkSeqs: $networkSeqs)';
}
@override
@ -1513,6 +1537,8 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
(other.runtimeType == runtimeType &&
other is _$DHTRecordReportImpl &&
const DeepCollectionEquality().equals(other._subkeys, _subkeys) &&
const DeepCollectionEquality()
.equals(other._offlineSubkeys, _offlineSubkeys) &&
const DeepCollectionEquality()
.equals(other._localSeqs, _localSeqs) &&
const DeepCollectionEquality()
@ -1524,6 +1550,7 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
int get hashCode => Object.hash(
runtimeType,
const DeepCollectionEquality().hash(_subkeys),
const DeepCollectionEquality().hash(_offlineSubkeys),
const DeepCollectionEquality().hash(_localSeqs),
const DeepCollectionEquality().hash(_networkSeqs));
@ -1545,6 +1572,7 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
abstract class _DHTRecordReport implements DHTRecordReport {
const factory _DHTRecordReport(
{required final List<ValueSubkeyRange> subkeys,
required final List<ValueSubkeyRange> offlineSubkeys,
required final List<int> localSeqs,
required final List<int> networkSeqs}) = _$DHTRecordReportImpl;
@ -1554,6 +1582,8 @@ abstract class _DHTRecordReport implements DHTRecordReport {
@override
List<ValueSubkeyRange> get subkeys;
@override
List<ValueSubkeyRange> get offlineSubkeys;
@override
List<int> get localSeqs;
@override
List<int> get networkSeqs;

View File

@ -116,6 +116,9 @@ _$DHTRecordReportImpl _$$DHTRecordReportImplFromJson(
subkeys: (json['subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson)
.toList(),
offlineSubkeys: (json['offline_subkeys'] as List<dynamic>)
.map(ValueSubkeyRange.fromJson)
.toList(),
localSeqs:
(json['local_seqs'] as List<dynamic>).map((e) => e as int).toList(),
networkSeqs:
@ -126,6 +129,8 @@ Map<String, dynamic> _$$DHTRecordReportImplToJson(
_$DHTRecordReportImpl instance) =>
<String, dynamic>{
'subkeys': instance.subkeys.map((e) => e.toJson()).toList(),
'offline_subkeys':
instance.offlineSubkeys.map((e) => e.toJson()).toList(),
'local_seqs': instance.localSeqs,
'network_seqs': instance.networkSeqs,
};

View File

@ -2805,6 +2805,7 @@
"required": [
"local_seqs",
"network_seqs",
"offline_subkeys",
"subkeys"
],
"properties": {
@ -2826,6 +2827,27 @@
"minimum": 0.0
}
},
"offline_subkeys": {
"description": "The subkeys that have been written offline that still need to be flushed",
"type": "array",
"items": {
"type": "array",
"items": [
{
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
{
"type": "integer",
"format": "uint32",
"minimum": 0.0
}
],
"maxItems": 2,
"minItems": 2
}
},
"subkeys": {
"description": "The actual subkey range within the schema being reported on This may be a subset of the requested range if it exceeds the schema limits or has more than 512 subkeys",
"type": "array",

View File

@ -382,26 +382,30 @@ class DHTRecordDescriptor:
class DHTRecordReport:
subkeys: list[tuple[ValueSubkey, ValueSubkey]]
offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]]
local_seqs: list[ValueSeqNum]
network_seqs: list[ValueSeqNum]
def __init__(
    self,
    subkeys: list[tuple[ValueSubkey, ValueSubkey]],
    offline_subkeys: list[tuple[ValueSubkey, ValueSubkey]],
    local_seqs: list[ValueSeqNum],
    network_seqs: list[ValueSeqNum],
):
    """Build a DHT record report from its four subkey/sequence lists.

    Args:
        subkeys: Subkey ranges actually reported on (may be a subset
            of the requested range).
        offline_subkeys: Subkey ranges written offline that still need
            to be flushed to the network.
        local_seqs: Sequence numbers from the locally stored record.
        network_seqs: Sequence numbers observed over the network.
    """
    self.subkeys = subkeys
    # Bug fix: the original assigned the misspelled `self.offline_subkey`,
    # so the `offline_subkeys` attribute declared on the class (and read by
    # __repr__ / consumers) was never set, raising AttributeError on access.
    self.offline_subkeys = offline_subkeys
    self.local_seqs = local_seqs
    self.network_seqs = network_seqs
def __repr__(self) -> str:
return f"<{self.__class__.__name__}(subkeys={self.subkeys!r}, local_seqs={self.local_seqs!r}, network_seqs={self.network_seqs!r})>"
return f"<{self.__class__.__name__}(subkeys={self.subkeys!r}, offline_subkeys={self.offline_subkeys!r}, local_seqs={self.local_seqs!r}, network_seqs={self.network_seqs!r})>"
@classmethod
def from_json(cls, j: dict) -> Self:
return cls(
[[p[0], p[1]] for p in j["subkeys"]],
[[p[0], p[1]] for p in j["offline_subkeys"]],
[ValueSeqNum(s) for s in j["local_seqs"]],
[ValueSeqNum(s) for s in j["network_seqs"]],
)

View File

@ -26,7 +26,7 @@ cfg_if! {
use std::convert::TryInto;
use std::ffi::CStr;
use std::io;
use std::os::raw::{c_char, c_int};
use std::os::raw::c_int;
use tools::*;
fn get_interface_name(index: u32) -> io::Result<String> {
@ -37,6 +37,7 @@ fn get_interface_name(index: u32) -> io::Result<String> {
bail_io_error_other!("if_indextoname returned null");
}
} else {
use std::os::raw::c_char;
if unsafe { if_indextoname(index, ifnamebuf.as_mut_ptr() as *mut c_char) }.is_null() {
bail_io_error_other!("if_indextoname returned null");
}

View File

@ -4,7 +4,7 @@ plugins {
android {
compileSdkVersion 33
buildToolsVersion "33.0.1"
buildToolsVersion "34.0.0"
defaultConfig {
applicationId "com.veilid.veilid_tools_android_tests"
@ -38,7 +38,7 @@ android {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
}
ndkVersion '25.1.8937393'
ndkVersion '26.3.11579264'
// Required to copy libc++_shared.so
externalNativeBuild {

View File

@ -86,17 +86,23 @@ pub async fn test_one_frag_out_in() {
// Sending
info!("sending");
for _ in 0..10000 {
let random_len = (get_random_u32() % 1000) as usize + FRAGMENT_LEN;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = random_sockaddr();
let to_send = loop {
let random_len = (get_random_u32() % 1000) as usize + FRAGMENT_LEN;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = random_sockaddr();
let to_send = (message, remote_addr);
if !all_sent.contains(&to_send) {
break to_send;
}
};
// Send single message above fragmentation limit
all_sent.insert((message.clone(), remote_addr));
all_sent.insert(to_send.clone());
assert!(matches!(
assbuf_out
.split_message(message.clone(), remote_addr, sender)
.await,
assbuf_out.split_message(to_send.0, to_send.1, sender).await,
Ok(NetworkResult::Value(()))
));
}
@ -150,18 +156,24 @@ pub async fn test_many_frags_out_in() {
let mut total_sent_size = 0usize;
info!("sending");
for _ in 0..1000 {
let random_len = (get_random_u32() % 65536) as usize;
total_sent_size += random_len;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = random_sockaddr();
let to_send = loop {
let random_len = (get_random_u32() % 65536) as usize;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = random_sockaddr();
let to_send = (message, remote_addr);
if !all_sent.contains(&to_send) {
break to_send;
}
};
// Send single message
all_sent.insert((message.clone(), remote_addr));
all_sent.insert(to_send.clone());
total_sent_size += to_send.0.len();
assert!(matches!(
assbuf_out
.split_message(message.clone(), remote_addr, sender)
.await,
assbuf_out.split_message(to_send.0, to_send.1, sender).await,
Ok(NetworkResult::Value(()))
));
}
@ -215,18 +227,24 @@ pub async fn test_many_frags_out_in_single_host() {
let mut total_sent_size = 0usize;
info!("sending");
for _ in 0..1000 {
let random_len = (get_random_u32() % 65536) as usize;
total_sent_size += random_len;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 5678));
let to_send = loop {
let remote_addr = random_sockaddr();
let random_len = (get_random_u32() % 65536) as usize;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let to_send = (message.clone(), remote_addr);
if !all_sent.contains(&to_send) {
break to_send;
}
};
// Send single message
all_sent.insert((message.clone(), remote_addr));
all_sent.insert(to_send.clone());
total_sent_size += to_send.0.len();
assert!(matches!(
assbuf_out
.split_message(message.clone(), remote_addr, sender)
.await,
assbuf_out.split_message(to_send.0, to_send.1, sender).await,
Ok(NetworkResult::Value(()))
));
}
@ -288,21 +306,28 @@ pub async fn test_many_frags_with_drops() {
let mut total_fragged = 0usize;
info!("sending");
for _ in 0..1000 {
let random_len = (get_random_u32() % 65536) as usize;
if random_len > FRAGMENT_LEN {
total_fragged += 1;
}
total_sent_size += random_len;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = random_sockaddr();
let to_send = loop {
let remote_addr = random_sockaddr();
let random_len = (get_random_u32() % 65536) as usize;
if random_len > FRAGMENT_LEN {
total_fragged += 1;
}
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let to_send = (message.clone(), remote_addr);
if !all_sent.contains(&to_send) {
break to_send;
}
};
// Send single message
all_sent.insert((message.clone(), remote_addr));
all_sent.insert(to_send.clone());
total_sent_size += to_send.0.len();
assert!(matches!(
assbuf_out
.split_message(message.clone(), remote_addr, sender)
.await,
assbuf_out.split_message(to_send.0, to_send.1, sender).await,
Ok(NetworkResult::Value(()))
));
@ -358,18 +383,24 @@ pub async fn test_many_frags_reordered() {
let mut rng = rand::thread_rng();
info!("sending");
for _ in 0..1000 {
let random_len = (get_random_u32() % 65536) as usize;
total_sent_size += random_len;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 5678));
let to_send = loop {
let random_len = (get_random_u32() % 65536) as usize;
let mut message = vec![1u8; random_len];
random_bytes(&mut message);
let remote_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(1, 2, 3, 4), 5678));
let to_send = (message.clone(), remote_addr);
if !all_sent.contains(&to_send) {
break to_send;
}
};
// Send single message
all_sent.insert((message.clone(), remote_addr));
all_sent.insert(to_send.clone());
total_sent_size += to_send.0.len();
assert!(matches!(
assbuf_out
.split_message(message.clone(), remote_addr, sender)
.await,
assbuf_out.split_message(to_send.0, to_send.1, sender).await,
Ok(NetworkResult::Value(()))
));

View File

@ -272,7 +272,7 @@ pub fn change_log_ignore(layer: String, log_ignore: String) {
// Change all layers
for f in filters.values() {
f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change(
f.ignore_list(),
&f.ignore_list(),
log_ignore.clone(),
)));
}
@ -280,7 +280,7 @@ pub fn change_log_ignore(layer: String, log_ignore: String) {
// Change a specific layer
let f = filters.get(layer.as_str()).unwrap();
f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change(
f.ignore_list(),
&f.ignore_list(),
log_ignore.clone(),
)));
}

View File

@ -136,18 +136,16 @@ impl VeilidClient {
if layer.is_empty() {
// Change all layers
for f in filters.values() {
f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change(
f.ignore_list(),
changes.clone(),
)));
let mut ignore_list = f.ignore_list();
VeilidLayerFilter::apply_ignore_change_list(&mut ignore_list, &changes);
f.set_ignore_list(Some(ignore_list));
}
} else {
// Change a specific layer
let f = filters.get(layer.as_str()).unwrap();
f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change(
f.ignore_list(),
changes.clone(),
)));
let mut ignore_list = f.ignore_list();
VeilidLayerFilter::apply_ignore_change_list(&mut ignore_list, &changes);
f.set_ignore_list(Some(ignore_list));
}
}
/// Shut down Veilid and terminate the API.

View File

@ -21,7 +21,7 @@
},
"../pkg": {
"name": "veilid-wasm",
"version": "0.2.5",
"version": "0.3.1",
"dev": true,
"license": "MPL-2.0"
},