Improve relayed connections and earthfile

This commit is contained in:
Christien Rioux 2025-02-13 01:17:51 +00:00
parent 54867ec53c
commit 52b7d0b563
27 changed files with 288 additions and 3322 deletions

View File

@ -2,6 +2,7 @@ variables:
NO_DOCKER: 1
FORCE_COLOR: 1
EARTHLY_EXEC_CMD: "/bin/sh"
EARTHLY_DISABLE_REMOTE_REGISTRY_PROXY: 1
GIT_SUBMODULE_STRATEGY: normal
stages:
@ -35,7 +36,7 @@ format:
.earthly: &earthly_setup
- apk update && apk add git
- wget https://github.com/earthly/earthly/releases/download/v0.7.15/earthly-linux-amd64 -O /usr/local/bin/earthly
- wget https://github.com/earthly/earthly/releases/download/v0.8.15/earthly-linux-amd64 -O /usr/local/bin/earthly
- chmod +x /usr/local/bin/earthly
- earthly bootstrap
- echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
@ -106,7 +107,7 @@ semgrep:
- if: $CI_PIPELINE_SOURCE == "push" # Include all commits
- if: $CI_PIPELINE_SOURCE == "schedule" # Optionally include scheduled pipelines
- when: always # Ensure it runs if the pipeline is triggered for any other reason
variables:
# Connect to Semgrep AppSec Platform through your SEMGREP_APP_TOKEN.
@ -154,7 +155,7 @@ create_build_machines:
- bash scripts/cicd/build-orchestration/build-machine-ctrl.sh create amd64-rpm
rules:
- if: '$CI_COMMIT_TAG =~ /v\d.+/'
package_amd64_deb:
stage: build_packages
needs:
@ -264,7 +265,7 @@ dryrun_create_build_machines:
- bash scripts/cicd/build-orchestration/build-machine-ctrl.sh create amd64-rpm
rules:
- if: $CI_COMMIT_MESSAGE =~ /\[ci dryrun]/
dryrun_package_amd64_deb:
stage: build_packages
needs:
@ -290,7 +291,7 @@ dryrun_package_arm64_deb:
- bash scripts/cicd/build-machine/scp-arm64-debs-to-orchestrator.sh
rules:
- if: $CI_COMMIT_MESSAGE =~ /\[ci dryrun]/
dryrun_package_amd64_rpm:
stage: build_packages
needs:
@ -345,9 +346,9 @@ dryrun_deploy_repos:
stage: distribute
needs:
- dryrun_build_repositories
tags:
tags:
- repo-server
script:
script:
- ls -al $HOME/repo.tar
rules:
- if: $CI_COMMIT_MESSAGE =~ /\[ci dryrun]/
@ -378,7 +379,7 @@ nightly_create_build_machines:
- bash scripts/cicd/build-orchestration/build-machine-ctrl.sh create amd64-rpm
rules:
- if: $IS_NIGHTLY == "true"
nightly_package_amd64_deb:
stage: build_packages
needs:

179
Earthfile
View File

@ -1,4 +1,4 @@
VERSION 0.7
VERSION 0.8
########################################################################################################################
## ARGUMENTS
@ -14,7 +14,6 @@ VERSION 0.7
# Start with older Ubuntu to ensure GLIBC symbol versioning support for older linux
# Ensure we are using an amd64 platform because some of these targets use cross-platform tooling
FROM ubuntu:18.04
ENV ZIG_VERSION=0.13.0
ENV CMAKE_VERSION_MINOR=3.30
ENV CMAKE_VERSION_PATCH=3.30.1
@ -26,14 +25,40 @@ ENV CARGO_HOME=/usr/local/cargo
ENV PATH=$PATH:/usr/local/cargo/bin:/usr/local/zig
ENV LD_LIBRARY_PATH=/usr/local/lib
ENV RUST_BACKTRACE=1
ENV RETRY_COUNT=12
WORKDIR /veilid
IF [ $(arch) = "x86_64" ]
ENV DEFAULT_CARGO_TARGET = "x86_64-unknown-linux-gnu"
ELSE IF [ $(arch) = "aarch64" ]
ENV DEFAULT_CARGO_TARGET = "aarch64-unknown-linux-gnu"
ELSE
RUN echo "Unsupported host platform"
RUN false
END
# Install build prerequisites & setup required directories
deps-base:
RUN echo '\
Acquire::Retries "'$RETRY_COUNT'";\
Acquire::https::Timeout "240";\
Acquire::http::Timeout "240";\
APT::Get::Assume-Yes "true";\
APT::Install-Recommends "false";\
APT::Install-Suggests "false";\
Debug::Acquire::https "true";\
' > /etc/apt/apt.conf.d/99custom
RUN apt-get -y update
RUN apt-get install -y iproute2 curl build-essential libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip libncursesw5-dev libncurses5-dev
RUN curl -O https://cmake.org/files/v$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh
RUN apt-get install -y ca-certificates iproute2 curl build-essential libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip libncursesw5-dev libncurses5-dev
IF [ $(arch) = "x86_64" ]
RUN apt-get install -y gcc-aarch64-linux-gnu
ELSE IF [ $(arch) = "aarch64" ]
RUN apt-get install -y gcc-x86-64-linux-gnu
ELSE
RUN apt-get install -y gcc-aarch64-linux-gnu gcc-x86-64-linux-gnu
END
RUN curl --retry $RETRY_COUNT --retry-connrefused -O https://cmake.org/files/v$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh
RUN mkdir /opt/cmake
RUN sh cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh --skip-license --prefix=/opt/cmake
RUN ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
@ -41,27 +66,34 @@ deps-base:
# Install Rust
deps-rust:
FROM +deps-base
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain=$RUST_VERSION -y -c clippy --no-modify-path --profile minimal
RUN curl --retry $RETRY_COUNT --retry-connrefused --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain=$RUST_VERSION -y -c clippy --no-modify-path --profile minimal
RUN chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
# Linux
RUN rustup target add x86_64-unknown-linux-gnu
RUN rustup target add aarch64-unknown-linux-gnu
# Android
RUN rustup target add aarch64-linux-android
RUN rustup target add armv7-linux-androideabi
RUN rustup target add i686-linux-android
RUN rustup target add x86_64-linux-android
# WASM
RUN rustup target add wasm32-unknown-unknown
RUN cargo install wasm-pack
RUN cargo install -f wasm-bindgen-cli --version $WASM_BINDGEN_CLI_VERSION
RUN retry=0; until [ "$retry" -ge $RETRY_COUNT ]; do \
rustup target add \
# Linux
x86_64-unknown-linux-gnu \
aarch64-unknown-linux-gnu \
# Android
aarch64-linux-android \
armv7-linux-androideabi \
i686-linux-android \
x86_64-linux-android \
# WASM
wasm32-unknown-unknown \
&& break; \
retry=$((retry+1)); \
echo "retry #$retry..."; \
sleep 10; \
done
RUN cargo install wasm-pack wasm-tools --locked
RUN cargo install -f wasm-bindgen-cli --locked --version $WASM_BINDGEN_CLI_VERSION
# Caching tool
RUN cargo install cargo-chef
RUN cargo install cargo-chef --locked
# Install Linux cross-platform tooling
RUN curl -O https://ziglang.org/download/$ZIG_VERSION/zig-linux-$(arch)-$ZIG_VERSION.tar.xz
RUN curl --retry $RETRY_COUNT --retry-connrefused -O https://ziglang.org/download/$ZIG_VERSION/zig-linux-$(arch)-$ZIG_VERSION.tar.xz
RUN tar -C /usr/local -xJf zig-linux-$(arch)-$ZIG_VERSION.tar.xz
RUN mv /usr/local/zig-linux-$(arch)-$ZIG_VERSION /usr/local/zig
RUN cargo install cargo-zigbuild
@ -73,14 +105,16 @@ deps-rust:
# Install android tooling
deps-android:
FROM +deps-base
BUILD +deps-rust
WAIT
BUILD +deps-rust
END
COPY +deps-rust/cargo /usr/local/cargo
COPY +deps-rust/rustup /usr/local/rustup
COPY +deps-rust/cargo-zigbuild /usr/local/cargo/bin/cargo-zigbuild
COPY +deps-rust/zig /usr/local/zig
RUN apt-get install -y openjdk-9-jdk-headless
RUN mkdir /Android; mkdir /Android/Sdk
RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
RUN curl --retry $RETRY_COUNT --retry-connrefused -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
RUN cd /Android; unzip /Android/cmdline-tools.zip
RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;34.0.0 ndk\;27.0.12077973 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest
RUN rm -rf /Android/cmdline-tools
@ -89,30 +123,34 @@ deps-android:
# Just linux build not android
deps-linux:
FROM +deps-base
BUILD +deps-rust
WAIT
BUILD +deps-rust
END
COPY +deps-rust/cargo /usr/local/cargo
COPY +deps-rust/rustup /usr/local/rustup
COPY +deps-rust/cargo-zigbuild /usr/local/cargo/bin/cargo-zigbuild
COPY +deps-rust/zig /usr/local/zig
# Make a cache image with downloaded and built dependencies
build-linux-cache:
FROM +deps-linux
RUN mkdir veilid-cli veilid-core veilid-server veilid-tools veilid-wasm veilid-flutter veilid-flutter/rust
COPY --dir .cargo scripts Cargo.lock Cargo.toml .
COPY veilid-cli/Cargo.toml veilid-cli
COPY veilid-core/Cargo.toml veilid-core
COPY veilid-server/Cargo.toml veilid-server
COPY veilid-tools/Cargo.toml veilid-tools
COPY veilid-flutter/rust/Cargo.lock veilid-flutter/rust/Cargo.toml veilid-flutter/rust
COPY veilid-wasm/Cargo.toml veilid-wasm
RUN cat /veilid/scripts/earthly/cargo-linux/config.toml >> .cargo/config.toml
COPY --keep-ts --dir .cargo scripts Cargo.lock Cargo.toml .
COPY --keep-ts veilid-cli/Cargo.toml veilid-cli
COPY --keep-ts veilid-core/Cargo.toml veilid-core
COPY --keep-ts veilid-server/Cargo.toml veilid-server
COPY --keep-ts veilid-tools/Cargo.toml veilid-tools
COPY --keep-ts veilid-flutter/rust/Cargo.toml veilid-flutter/rust
COPY --keep-ts veilid-wasm/Cargo.toml veilid-wasm/wasm_remap_paths.sh veilid-wasm
RUN cargo chef prepare --recipe-path recipe.json
RUN cargo chef cook --recipe-path recipe.json
RUN echo $PROJECT_PATH
SAVE ARTIFACT target
RUN cargo chef cook --profile=test --tests --target $DEFAULT_CARGO_TARGET --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
RUN cargo chef cook --zigbuild --release --target x86_64-unknown-linux-gnu --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
RUN cargo chef cook --zigbuild --release --target aarch64-unknown-linux-gnu --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
RUN veilid-wasm/wasm_remap_paths.sh cargo chef cook --zigbuild --release --target wasm32-unknown-unknown --recipe-path recipe.json -p veilid-wasm
ARG CI_REGISTRY_IMAGE=registry.gitlab.com/veilid/veilid
SAVE IMAGE --push $CI_REGISTRY_IMAGE/build-cache:latest
# Import the whole veilid code repository from the earthly host
code-linux:
# This target will either use the full earthly cache of local use (+build-linux-cache), or will use a containerized
# version of the +build-linux-cache from the registry
@ -126,42 +164,31 @@ code-linux:
FROM $CI_REGISTRY_IMAGE/build-cache:latest
# FROM registry.gitlab.com/veilid/build-cache:latest
END
COPY --dir .cargo build_docs.sh files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
COPY --keep-ts --dir .cargo build_docs.sh files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
# Code + Linux + Android deps
code-android:
FROM +deps-android
COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
RUN cat /veilid/scripts/earthly/cargo-linux/config.toml >> /veilid/.cargo/config.toml
RUN cat /veilid/scripts/earthly/cargo-android/config.toml >> /veilid/.cargo/config.toml
COPY --keep-ts --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
COPY --keep-ts scripts/earthly/cargo-android/config.toml /veilid/.cargo/config.toml
# Clippy only
clippy:
FROM +code-linux
RUN cargo clippy
RUN cargo clippy --target x86_64-unknown-linux-gnu
RUN cargo clippy --manifest-path=veilid-wasm/Cargo.toml --target wasm32-unknown-unknown
# Build
build-release:
FROM +code-linux
RUN cargo build --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/release AS LOCAL ./target/earthly/release
build:
FROM +code-linux
RUN cargo build -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/debug AS LOCAL ./target/earthly/debug
build-linux-amd64:
FROM +code-linux
# Ensure we have enough memory
IF [ $(free -wmt | grep Total | awk '{print $2}') -lt 7500 ]
RUN echo "not enough container memory to build. increase build host memory."
RUN false
END
RUN cargo zigbuild --target x86_64-unknown-linux-gnu --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu
build-linux-amd64-debug:
FROM +code-linux
RUN cargo zigbuild --target x86_64-unknown-linux-gnu -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu
build-linux-arm64:
FROM +code-linux
RUN cargo zigbuild --target aarch64-unknown-linux-gnu --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
@ -184,7 +211,7 @@ build-android:
# Unit tests
unit-tests-clippy-linux:
FROM +code-linux
RUN cargo clippy
RUN cargo clippy --target $DEFAULT_CARGO_TARGET
unit-tests-clippy-wasm-linux:
FROM +code-linux
@ -196,13 +223,13 @@ unit-tests-docs-linux:
unit-tests-native-linux:
FROM +code-linux
RUN cargo test -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
RUN cargo test --tests --target $DEFAULT_CARGO_TARGET -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
unit-tests-wasm-linux:
FROM +code-linux
# Just run build now because actual unit tests require network access
# which should be moved to a separate integration test
RUN veilid-wasm/wasm_build.sh
RUN veilid-wasm/wasm_build.sh release
unit-tests-linux:
WAIT
@ -228,7 +255,7 @@ package-linux-amd64-deb:
#################################
### DEBIAN DPKG .DEB FILES
#################################
COPY --dir package /veilid
COPY --keep-ts --dir package /veilid
# veilid-server
RUN /veilid/package/debian/earthly_make_veilid_server_deb.sh amd64 x86_64-unknown-linux-gnu "$IS_NIGHTLY"
SAVE ARTIFACT --keep-ts /dpkg/out/*.deb AS LOCAL ./target/packages/
@ -239,15 +266,15 @@ package-linux-amd64-deb:
package-linux-amd64-rpm:
ARG IS_NIGHTLY="false"
FROM --platform amd64 rockylinux:9
FROM --platform linux/amd64 rockylinux:9
RUN yum install -y createrepo rpm-build rpm-sign yum-utils rpmdevtools
RUN rpmdev-setuptree
#################################
### RPMBUILD .RPM FILES
#################################
RUN mkdir -p /veilid/target
COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml package /veilid
COPY +build-linux-amd64/x86_64-unknown-linux-gnu /veilid/target/x86_64-unknown-linux-gnu
COPY --keep-ts --dir package /veilid
COPY --keep-ts +build-linux-amd64/x86_64-unknown-linux-gnu /veilid/target/x86_64-unknown-linux-gnu
RUN mkdir -p /rpm-work-dir/veilid-server
# veilid-server
RUN veilid/package/rpm/veilid-server/earthly_make_veilid_server_rpm.sh x86_64 x86_64-unknown-linux-gnu "$IS_NIGHTLY"
@ -263,7 +290,7 @@ package-linux-arm64-deb:
#################################
### DEBIAN DPKG .DEB FILES
#################################
COPY --dir package /veilid
COPY --keep-ts --dir package /veilid
# veilid-server
RUN /veilid/package/debian/earthly_make_veilid_server_deb.sh arm64 aarch64-unknown-linux-gnu "$IS_NIGHTLY"
SAVE ARTIFACT --keep-ts /dpkg/out/*.deb AS LOCAL ./target/packages/
@ -274,15 +301,15 @@ package-linux-arm64-deb:
package-linux-arm64-rpm:
ARG IS_NIGHTLY="false"
FROM --platform arm64 rockylinux:8
FROM --platform linux/arm64 rockylinux:9
RUN yum install -y createrepo rpm-build rpm-sign yum-utils rpmdevtools
RUN rpmdev-setuptree
#################################
### RPMBUILD .RPM FILES
#################################
RUN mkdir -p /veilid/target
COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml package /veilid
COPY +build-linux-arm64/aarch64-unknown-linux-gnu /veilid/target/aarch64-unknown-linux-gnu
COPY --keep-ts --dir package /veilid
COPY --keep-ts +build-linux-arm64/aarch64-unknown-linux-gnu /veilid/target/aarch64-unknown-linux-gnu
RUN mkdir -p /rpm-work-dir/veilid-server
# veilid-server
RUN veilid/package/rpm/veilid-server/earthly_make_veilid_server_rpm.sh aarch64 aarch64-unknown-linux-gnu "$IS_NIGHTLY"
@ -293,13 +320,25 @@ package-linux-arm64-rpm:
SAVE ARTIFACT --keep-ts /root/rpmbuild/RPMS/aarch64/*.rpm AS LOCAL ./target/packages/
package-linux-amd64:
BUILD +package-linux-amd64-deb
BUILD +package-linux-amd64-rpm
WAIT
BUILD +package-linux-amd64-deb
END
WAIT
BUILD +package-linux-amd64-rpm
END
package-linux-arm64:
BUILD +package-linux-arm64-deb
BUILD +package-linux-arm64-rpm
WAIT
BUILD +package-linux-arm64-deb
END
WAIT
BUILD +package-linux-arm64-rpm
END
package-linux:
BUILD +package-linux-amd64
BUILD +package-linux-arm64
WAIT
BUILD +package-linux-amd64
END
WAIT
BUILD +package-linux-arm64
END

View File

@ -9,7 +9,7 @@ fi
if [ ! -z "$(command -v apt)" ]; then
# Install APT dependencies
sudo apt update -y
sudo apt install -y openjdk-17-jdk-headless iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip llvm wabt python3-pip
sudo apt install -y openjdk-17-jdk-headless iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip llvm python3-pip
elif [ ! -z "$(command -v dnf)" ]; then
# DNF (formerly yum)
sudo dnf update -y
@ -22,7 +22,7 @@ elif [ ! -z "$(command -v dnf)" ]; then
# them in anyway
#
# Also Fedora doesn't come with pip
sudo dnf install -y java-17-openjdk-headless iproute curl cmake openssl-devel openssl git file pkg-config dbus-devel dbus-glib gobject-introspection-devel cairo-devel unzip llvm wabt python3-pip gcc-c++
sudo dnf install -y java-17-openjdk-headless iproute curl cmake openssl-devel openssl git file pkg-config dbus-devel dbus-glib gobject-introspection-devel cairo-devel unzip llvm python3-pip gcc-c++
# build-essentials
sudo dnf groupinstall -y 'Development Tools'
fi

View File

@ -111,7 +111,7 @@ fi
rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android wasm32-unknown-unknown
# install cargo packages
cargo install wasm-bindgen-cli wasm-pack cargo-edit
cargo install wasm-bindgen-cli wasm-pack cargo-edit wasm-tools
# install pip packages
pip3 install --upgrade bumpversion

View File

@ -140,13 +140,13 @@ else
BREW_COMMAND="sudo -H -u $BREW_USER brew"
fi
$BREW_COMMAND install capnp cmake wabt llvm jq
$BREW_COMMAND install capnp cmake llvm jq
# install targets
rustup target add aarch64-apple-darwin aarch64-apple-ios aarch64-apple-ios-sim x86_64-apple-darwin x86_64-apple-ios wasm32-unknown-unknown aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android
# install cargo packages
cargo install wasm-bindgen-cli wasm-pack cargo-edit
cargo install wasm-bindgen-cli wasm-pack cargo-edit wasm-tools
# attempt to install pip packages - this may result in an error, which we will try to catch
pip3 install --upgrade bumpversion || ( \

View File

@ -1,3 +1,6 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
[target.aarch64-linux-android]
linker = "/Android/Sdk/ndk/27.0.12077973/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang"
[target.armv7-linux-androideabi]

View File

@ -1,2 +0,0 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

View File

@ -32,7 +32,6 @@ struct AddressFilterInner {
dial_info_failures: BTreeMap<DialInfo, Timestamp>,
}
#[derive(Debug)]
pub(crate) struct AddressFilter {
registry: VeilidComponentRegistry,
inner: Mutex<AddressFilterInner>,
@ -44,6 +43,33 @@ pub(crate) struct AddressFilter {
dial_info_failure_duration_min: usize,
}
impl fmt::Debug for AddressFilter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AddressFilter")
//.field("registry", &self.registry)
.field("inner", &self.inner)
.field("max_connections_per_ip4", &self.max_connections_per_ip4)
.field(
"max_connections_per_ip6_prefix",
&self.max_connections_per_ip6_prefix,
)
.field(
"max_connections_per_ip6_prefix_size",
&self.max_connections_per_ip6_prefix_size,
)
.field(
"max_connection_frequency_per_min",
&self.max_connection_frequency_per_min,
)
.field("punishment_duration_min", &self.punishment_duration_min)
.field(
"dial_info_failure_duration_min",
&self.dial_info_failure_duration_min,
)
.finish()
}
}
impl_veilid_component_registry_accessor!(AddressFilter);
impl AddressFilter {

View File

@ -1116,29 +1116,34 @@ impl NetworkManager {
}
};
// Add the node without its peer info
let source_noderef = match routing_table.register_node_with_id(
routing_domain,
envelope.get_sender_typed_id(),
ts,
) {
Ok(v) => v,
Err(e) => {
// If the node couldn't be registered just skip this envelope,
log_net!(debug "failed to register node with existing connection: {}", e);
return Ok(false);
}
};
// Add the sender's node without its peer info
// Gets noderef filtered to the routing domain
let sender_noderef =
match routing_table.register_node_with_id(routing_domain, sender_id, ts) {
Ok(v) => v,
Err(e) => {
// If the node couldn't be registered just skip this envelope,
log_net!(debug "failed to register node with existing connection: {}", e);
return Ok(false);
}
};
// Filter the noderef further by its inbound flow
let sender_noderef = sender_noderef.filtered_clone(
NodeRefFilter::new()
.with_address_type(flow.address_type())
.with_protocol_type(flow.protocol_type()),
);
// Set the envelope version for the peer
source_noderef.add_envelope_version(envelope.get_version());
sender_noderef.add_envelope_version(envelope.get_version());
// Set the last flow for the peer
self.set_last_flow(source_noderef.unfiltered(), flow, ts);
self.set_last_flow(sender_noderef.unfiltered(), flow, ts);
// Pass message to RPC system
if let Err(e) =
rpc.enqueue_direct_message(envelope, source_noderef, flow, routing_domain, body)
rpc.enqueue_direct_message(envelope, sender_noderef, flow, routing_domain, body)
{
// Couldn't enqueue, but not the sender's fault
log_net!(debug "failed to enqueue direct message: {}", e);

View File

@ -114,6 +114,11 @@ impl DiscoveryContext {
async fn request_public_address(&self, node_ref: FilteredNodeRef) -> Option<SocketAddress> {
let rpc = self.rpc_processor();
// Ensure a fresh connection is made so it comes from our public address
// This will only clear the dialinfo filtered flows, as this is a FilteredNodeRef
// filtered down to the protocol/address type we are checking the public address for
node_ref.clear_last_flows();
let res = network_result_value_or_log!(match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
Ok(v) => v,
Err(e) => {
@ -301,6 +306,9 @@ impl DiscoveryContext {
redirect: bool,
) -> bool {
// ask the node to send us a dial info validation receipt
// no need to clear_last_flows here, because the dial_info is always returned via the
// send_out_of_band_receipt mechanism, which will always create a new flow
// and the outgoing rpc call is safely able to use existing flows
match self
.rpc_processor()
.rpc_call_validate_dial_info(node_ref.clone(), dial_info, redirect)

View File

@ -198,7 +198,7 @@ impl NetworkManager {
target_node_ref: FilteredNodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataMethod>> {
// Try to send data to the last socket we've seen this peer on
// Try to send data to the last flow we've seen this peer on
let Some(flow) = target_node_ref.last_flow() else {
return Ok(NetworkResult::no_connection_other(format!(
"Node is not reachable and has no existing connection: {}",
@ -250,7 +250,7 @@ impl NetworkManager {
.sequencing_filtered(Sequencing::NoPreference)
};
// First try to send data to the last socket we've seen this peer on
// First try to send data to the last flow we've seen this peer on
let data = if let Some(flow) = seq_target_node_ref.last_flow() {
match self.net().send_data_to_existing_flow(flow, data).await? {
SendDataToExistingFlowResult::Sent(unique_flow) => {
@ -299,7 +299,7 @@ impl NetworkManager {
target_node_ref: FilteredNodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataMethod>> {
// First try to send data to the last socket we've seen this peer on
// First try to send data to the last flow we've seen this peer on
let data = if let Some(flow) = target_node_ref.last_flow() {
match self.net().send_data_to_existing_flow(flow, data).await? {
SendDataToExistingFlowResult::Sent(unique_flow) => {
@ -351,7 +351,7 @@ impl NetworkManager {
// Since we have the best dial info already, we can find a connection to use by protocol type
let node_ref = node_ref.filtered_clone(NodeRefFilter::from(dial_info.make_filter()));
// First try to send data to the last socket we've seen this peer on
// First try to send data to the last flow we've seen this peer on
let data = if let Some(flow) = node_ref.last_flow() {
#[cfg(feature = "verbose-tracing")]
log_net!(debug

View File

@ -600,8 +600,16 @@ impl BucketEntryInner {
}
// Clears the table of last flows to ensure we create new ones and drop any existing ones
pub(super) fn clear_last_flows(&mut self) {
self.last_flows.clear();
// With a DialInfo::all filter specified, only clear the flows that match the filter
pub(super) fn clear_last_flows(&mut self, dial_info_filter: DialInfoFilter) {
if dial_info_filter != DialInfoFilter::all() {
self.last_flows.retain(|k, _v| {
!(dial_info_filter.protocol_type_set.contains(k.0)
&& dial_info_filter.address_type_set.contains(k.1))
})
} else {
self.last_flows.clear();
}
}
// Clears the table of last flows except the most recent one
@ -751,7 +759,7 @@ impl BucketEntryInner {
pub fn set_punished(&mut self, punished: Option<PunishmentReason>) {
self.punishment = punished;
if punished.is_some() {
self.clear_last_flows();
self.clear_last_flows(DialInfoFilter::all());
}
}

View File

@ -228,7 +228,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
}
fn clear_last_flows(&self) {
self.operate_mut(|_rti, e| e.clear_last_flows())
self.operate_mut(|_rti, e| e.clear_last_flows(self.dial_info_filter()))
}
fn set_last_flow(&self, flow: Flow, ts: Timestamp) {

View File

@ -341,7 +341,7 @@ impl RoutingTableInner {
for bucket in &self.buckets[&ck] {
for entry in bucket.entries() {
entry.1.with_mut_inner(|e| {
e.clear_last_flows();
e.clear_last_flows(DialInfoFilter::all());
});
}
}

View File

@ -2,10 +2,10 @@ use super::*;
/// Keepalive pings are done occasionally to ensure holepunched public dialinfo
/// remains valid, as well as to make sure we remain in any relay node's routing table
const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 1;
const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
/// Keepalive pings are done for active watch nodes to make sure they are still there
const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 1;
const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
/// Ping queue processing depth per validator
const MAX_PARALLEL_PINGS: usize = 8;

View File

@ -478,35 +478,9 @@ impl RPCProcessor {
}
};
// Reply directly to the request's source
let sender_node_id = detail.envelope.get_sender_typed_id();
// This may be a different node's reference than the 'sender' in the case of a relay
let peer_noderef = detail.peer_noderef.clone();
// If the sender_id is that of the peer, then this is a direct reply
// else it is a relayed reply through the peer
if peer_noderef.node_ids().contains(&sender_node_id) {
NetworkResult::value(Destination::direct(peer_noderef))
} else {
// Look up the sender node, we should have added it via senderNodeInfo before getting here.
let res = match self.routing_table().lookup_node_ref(sender_node_id) {
Ok(v) => v,
Err(e) => {
return NetworkResult::invalid_message(format!(
"failed to look up node info for respond to: {}",
e
))
}
};
if let Some(sender_noderef) = res {
NetworkResult::value(Destination::relay(peer_noderef, sender_noderef))
} else {
NetworkResult::invalid_message(
"not responding to sender that has no node info",
)
}
}
// Get the filtered noderef of the sender
let sender_noderef = detail.sender_noderef.clone();
NetworkResult::value(Destination::direct(sender_noderef))
}
RespondTo::PrivateRoute(pr) => {
match &request.header.detail {

View File

@ -4,11 +4,11 @@ use super::*;
pub(in crate::rpc_processor) struct RPCMessageHeaderDetailDirect {
/// The decoded header of the envelope
pub envelope: Envelope,
/// The noderef of the peer that sent the message (not the original sender).
/// The noderef of the original peer that sent the message (not the relay if it is relayed)
/// Ensures node doesn't get evicted from routing table until we're done with it
/// Should be filtered to the routing domain of the peer that we received from
pub peer_noderef: FilteredNodeRef,
/// The flow from the peer sent the message (not the original sender)
pub sender_noderef: FilteredNodeRef,
/// The flow from the peer that sent the message (possibly a relay)
pub flow: Flow,
/// The routing domain of the peer that we received from
pub routing_domain: RoutingDomain,
@ -65,13 +65,6 @@ impl MessageHeader {
RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_crypto_kind(),
}
}
// pub fn direct_peer_noderef(&self) -> NodeRef {
// match &self.detail {
// RPCMessageHeaderDetail::Direct(d) => d.peer_noderef.clone(),
// RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.peer_noderef.clone(),
// RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.peer_noderef.clone(),
// }
// }
pub fn routing_domain(&self) -> RoutingDomain {
match &self.detail {
RPCMessageHeaderDetail::Direct(d) => d.routing_domain,

View File

@ -1588,7 +1588,7 @@ impl RPCProcessor {
pub fn enqueue_direct_message(
&self,
envelope: Envelope,
peer_noderef: FilteredNodeRef,
sender_noderef: FilteredNodeRef,
flow: Flow,
routing_domain: RoutingDomain,
body: Vec<u8>,
@ -1599,14 +1599,14 @@ impl RPCProcessor {
.enter()
.wrap_err("not started up")?;
if peer_noderef.routing_domain_set() != routing_domain {
if sender_noderef.routing_domain_set() != routing_domain {
bail!("routing domain should match peer noderef filter");
}
let header = MessageHeader {
detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
envelope,
peer_noderef,
sender_noderef,
flow,
routing_domain,
}),

View File

@ -45,7 +45,7 @@ impl RPCProcessor {
let res = match msg.header.detail {
RPCMessageHeaderDetail::Direct(detail) => {
network_manager
.handle_in_band_receipt(receipt, detail.peer_noderef)
.handle_in_band_receipt(receipt, detail.sender_noderef)
.await
}
RPCMessageHeaderDetail::SafetyRouted(_) => {

View File

@ -164,7 +164,7 @@ impl RPCProcessor {
pr_pubkey: TypedKey,
) -> RPCNetworkResult<()> {
// Get sender id of the peer with the crypto kind of the route
let Some(sender_id) = detail.peer_noderef.node_ids().get(pr_pubkey.kind) else {
let Some(sender_id) = detail.sender_noderef.node_ids().get(pr_pubkey.kind) else {
return Ok(NetworkResult::invalid_message(
"route node doesnt have a required crypto kind for routed operation",
));

File diff suppressed because it is too large Load Diff

View File

@ -46,6 +46,8 @@ rt-tokio = [
tracking = ["veilid-core/tracking"]
debug-json-api = []
debug-locks = ["veilid-core/debug-locks"]
perfetto = ["tracing-perfetto"]
flame = ["tracing-flame"]
geolocation = ["veilid-core/geolocation"]
@ -90,7 +92,7 @@ hostname = "^0"
stop-token = { version = "^0", default-features = false }
sysinfo = { version = "^0.30.13", default-features = false }
wg = { version = "^0.9.1", features = ["future"] }
tracing-flame = "0.2.0"
tracing-flame = { version = "0.2.0", optional = true }
time = { version = "0.3.36", features = ["local-offset"] }
chrono = "0.4.38"
@ -103,7 +105,7 @@ daemonize = "^0.5.0"
signal-hook = "^0.3.17"
signal-hook-async-std = "^0.2.2"
nix = "^0.29.0"
tracing-perfetto = "0.1.1"
tracing-perfetto = { version = "0.1.5", optional = true }
[target.'cfg(target_os = "linux")'.dependencies]
tracing-journald = "^0.3.0"

View File

@ -245,6 +245,7 @@ fn main() -> EyreResult<()> {
NamedSocketAddrs::from_str(&otlp).wrap_err("failed to parse OTLP address")?;
settingsrw.logging.otlp.level = LogLevel::Trace;
}
#[cfg(feature = "flame")]
if let Some(flame) = args.flame {
let flame = if flame.is_empty() {
Settings::get_default_flame_path(
@ -260,7 +261,7 @@ fn main() -> EyreResult<()> {
settingsrw.logging.flame.enabled = true;
settingsrw.logging.flame.path = flame;
}
#[cfg(unix)]
#[cfg(all(unix, feature = "perfetto"))]
if let Some(perfetto) = args.perfetto {
let perfetto = if perfetto.is_empty() {
Settings::get_default_perfetto_path(

View File

@ -204,20 +204,20 @@ core:
tcp:
connect: true
listen: true
max_connections: 32
max_connections: 256
listen_address: ':5150'
#'public_address: ''
ws:
connect: true
listen: true
max_connections: 32
max_connections: 256
listen_address: ':5150'
path: 'ws'
# url: 'ws://localhost:5150/ws'
wss:
connect: true
listen: false
max_connections: 32
max_connections: 256
listen_address: ':5150'
path: 'ws'
# url: ''
@ -501,12 +501,14 @@ pub struct Terminal {
pub ignore_log_targets: Vec<String>,
}
#[cfg(feature = "flame")]
#[derive(Debug, Deserialize, Serialize)]
pub struct Flame {
pub enabled: bool,
pub path: String,
}
#[cfg(all(unix, feature = "perfetto"))]
#[derive(Debug, Deserialize, Serialize)]
pub struct Perfetto {
pub enabled: bool,
@ -541,6 +543,7 @@ pub struct Api {
pub ignore_log_targets: Vec<String>,
}
#[cfg(feature = "opentelemetry-otlp")]
#[derive(Debug, Deserialize, Serialize)]
pub struct Otlp {
pub enabled: bool,
@ -563,9 +566,13 @@ pub struct Logging {
pub terminal: Terminal,
pub file: File,
pub api: Api,
#[cfg(feature = "opentelemetry-otlp")]
pub otlp: Otlp,
#[cfg(feature = "flame")]
pub flame: Flame,
#[cfg(all(unix, feature = "perfetto"))]
pub perfetto: Perfetto,
#[cfg(feature = "rt-tokio")]
pub console: Console,
}
@ -904,6 +911,7 @@ impl Settings {
}
/// Determine default flamegraph output path
#[cfg(feature = "flame")]
pub fn get_default_flame_path(subnode_index: u16, subnode_count: u16) -> PathBuf {
let name = if subnode_count == 1 {
if subnode_index == 0 {
@ -922,7 +930,7 @@ impl Settings {
}
/// Determine default perfetto output path
#[cfg(unix)]
#[cfg(all(unix, feature = "perfetto"))]
pub fn get_default_perfetto_path(subnode_index: u16, subnode_count: u16) -> PathBuf {
let name = if subnode_count == 1 {
if subnode_index == 0 {
@ -1075,14 +1083,24 @@ impl Settings {
set_config_value!(inner.logging.api.enabled, value);
set_config_value!(inner.logging.api.level, value);
set_config_value!(inner.logging.api.ignore_log_targets, value);
set_config_value!(inner.logging.otlp.enabled, value);
set_config_value!(inner.logging.otlp.level, value);
set_config_value!(inner.logging.otlp.grpc_endpoint, value);
set_config_value!(inner.logging.otlp.ignore_log_targets, value);
set_config_value!(inner.logging.flame.enabled, value);
set_config_value!(inner.logging.flame.path, value);
set_config_value!(inner.logging.perfetto.enabled, value);
set_config_value!(inner.logging.perfetto.path, value);
#[cfg(feature = "opentelemetry-otlp")]
{
set_config_value!(inner.logging.otlp.enabled, value);
set_config_value!(inner.logging.otlp.level, value);
set_config_value!(inner.logging.otlp.grpc_endpoint, value);
set_config_value!(inner.logging.otlp.ignore_log_targets, value);
}
#[cfg(feature = "flame")]
{
set_config_value!(inner.logging.flame.enabled, value);
set_config_value!(inner.logging.flame.path, value);
}
#[cfg(all(unix, feature = "perfetto"))]
{
set_config_value!(inner.logging.perfetto.enabled, value);
set_config_value!(inner.logging.perfetto.path, value);
}
#[cfg(feature = "rt-tokio")]
set_config_value!(inner.logging.console.enabled, value);
set_config_value!(inner.testing.subnode_index, value);
#[cfg(feature = "virtual-network")]
@ -1730,10 +1748,16 @@ mod tests {
s.logging.otlp.grpc_endpoint,
NamedSocketAddrs::from_str("localhost:4317").unwrap()
);
assert!(!s.logging.flame.enabled);
assert_eq!(s.logging.flame.path, "");
assert!(!s.logging.perfetto.enabled);
assert_eq!(s.logging.perfetto.path, "");
#[cfg(feature = "flame")]
{
assert!(!s.logging.flame.enabled);
assert_eq!(s.logging.flame.path, "");
}
#[cfg(all(unix, feature = "perfetto"))]
{
assert!(!s.logging.perfetto.enabled);
assert_eq!(s.logging.perfetto.path, "");
}
assert!(!s.logging.console.enabled);
assert_eq!(s.testing.subnode_index, 0);
#[cfg(feature = "virtual-network")]
@ -1885,7 +1909,7 @@ mod tests {
//
assert!(s.core.network.protocol.tcp.connect);
assert!(s.core.network.protocol.tcp.listen);
assert_eq!(s.core.network.protocol.tcp.max_connections, 32);
assert_eq!(s.core.network.protocol.tcp.max_connections, 256);
assert_eq!(s.core.network.protocol.tcp.listen_address.name, ":5150");
for addr in &s.core.network.protocol.tcp.listen_address.addrs {
assert!(valid_socket_addrs.contains(addr));
@ -1896,7 +1920,7 @@ mod tests {
//
assert!(s.core.network.protocol.ws.connect);
assert!(s.core.network.protocol.ws.listen);
assert_eq!(s.core.network.protocol.ws.max_connections, 32);
assert_eq!(s.core.network.protocol.ws.max_connections, 256);
assert_eq!(s.core.network.protocol.ws.listen_address.name, ":5150");
for addr in &s.core.network.protocol.ws.listen_address.addrs {
assert!(valid_socket_addrs.contains(addr));
@ -1910,7 +1934,7 @@ mod tests {
//
assert!(s.core.network.protocol.wss.connect);
assert!(!s.core.network.protocol.wss.listen);
assert_eq!(s.core.network.protocol.wss.max_connections, 32);
assert_eq!(s.core.network.protocol.wss.max_connections, 256);
assert_eq!(s.core.network.protocol.wss.listen_address.name, ":5150");
for addr in &s.core.network.protocol.wss.listen_address.addrs {
assert!(valid_socket_addrs.contains(addr));

View File

@ -16,14 +16,16 @@ use std::collections::BTreeMap;
use std::path::*;
use std::sync::Arc;
use tracing_appender::*;
#[cfg(feature = "flame")]
use tracing_flame::FlameLayer;
#[cfg(unix)]
#[cfg(all(unix, feature = "perfetto"))]
use tracing_perfetto::PerfettoLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::*;
struct VeilidLogsInner {
_file_guard: Option<non_blocking::WorkerGuard>,
#[cfg(feature = "flame")]
_flame_guard: Option<tracing_flame::FlushGuard<std::io::BufWriter<std::fs::File>>>,
filters: BTreeMap<&'static str, veilid_core::VeilidLayerFilter>,
}
@ -91,7 +93,9 @@ impl VeilidLogs {
}
// Flamegraph logger
#[cfg(feature = "flame")]
let mut flame_guard = None;
#[cfg(feature = "flame")]
if settingsr.logging.flame.enabled {
let filter = veilid_core::VeilidLayerFilter::new_no_default(
veilid_core::VeilidConfigLogLevel::Trace,
@ -114,7 +118,7 @@ impl VeilidLogs {
}
// Perfetto logger
#[cfg(unix)]
#[cfg(all(unix, feature = "perfetto"))]
if settingsr.logging.perfetto.enabled {
let filter = veilid_core::VeilidLayerFilter::new_no_default(
veilid_core::VeilidConfigLogLevel::Trace,
@ -259,6 +263,7 @@ impl VeilidLogs {
Ok(VeilidLogs {
inner: Arc::new(Mutex::new(VeilidLogsInner {
_file_guard: file_guard,
#[cfg(feature = "flame")]
_flame_guard: flame_guard,
filters,
})),

View File

@ -26,19 +26,11 @@ if [[ "$1" == "release" ]]; then
OUTPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/release/pkg
INPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/release
# Path to, but not including, the cargo workspace ("veilid")
WORKSPACE_PARENT=$(dirname $(dirname $(cargo locate-project --workspace --message-format=plain)))
# Do not include said path in wasm blob output
RUSTFLAGS="--remap-path-prefix=$WORKSPACE_PARENT=/home/user $RUSTFLAGS"
# Do not include user home directory in wasm blob output
RUSTFLAGS="--remap-path-prefix=$HOME=/home/user $RUSTFLAGS"
# Explicitly mark RUSTFLAGS as an environment variable, so it's passed to cargo
export RUSTFLAGS
cargo build --target wasm32-unknown-unknown --release
./wasm_remap_paths.sh cargo build --target wasm32-unknown-unknown --release
mkdir -p $OUTPUTDIR
wasm-bindgen --out-dir $OUTPUTDIR --target web --weak-refs $INPUTDIR/veilid_wasm.wasm
wasm-strip $OUTPUTDIR/veilid_wasm_bg.wasm
wasm-tools strip $OUTPUTDIR/veilid_wasm_bg.wasm -o $OUTPUTDIR/veilid_wasm_bg.wasm.stripped
mv $OUTPUTDIR/veilid_wasm_bg.wasm.stripped $OUTPUTDIR/veilid_wasm_bg.wasm
else
OUTPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/debug/pkg
INPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/debug
@ -54,7 +46,7 @@ else
# wasm-strip $OUTPUTDIR/veilid_wasm_bg.wasm
fi
popd &> /dev/null
popd &> /dev/null
# Print for use with scripts
echo SUCCESS:OUTPUTDIR=$(get_abs_filename $OUTPUTDIR)

16
veilid-wasm/wasm_remap_paths.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
set -eo pipefail
# Path to, but not including, the cargo workspace ("veilid")
WORKSPACE_PARENT=$(dirname $(dirname $(cargo locate-project --workspace --message-format=plain))); \
# Do not include said path in wasm blob output
RUSTFLAGS="--remap-path-prefix=$WORKSPACE_PARENT=/home/user $RUSTFLAGS"; \
# Do not include user home directory in wasm blob output
RUSTFLAGS="--remap-path-prefix=$HOME=/home/user $RUSTFLAGS"; \
# Explicitly mark RUSTFLAGS as an environment variable, so it's passed to cargo
export RUSTFLAGS
# Run the rest of the command line
$@