Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-07-28 09:24:12 -04:00)

Commit 52b7d0b563: Improve relayed connections and earthfile
Parent: 54867ec53c
27 changed files with 288 additions and 3322 deletions
@@ -2,6 +2,7 @@ variables:
 NO_DOCKER: 1
 FORCE_COLOR: 1
 EARTHLY_EXEC_CMD: "/bin/sh"
+EARTHLY_DISABLE_REMOTE_REGISTRY_PROXY: 1
 GIT_SUBMODULE_STRATEGY: normal

 stages:
@@ -35,7 +36,7 @@ format:

 .earthly: &earthly_setup
 - apk update && apk add git
-- wget https://github.com/earthly/earthly/releases/download/v0.7.15/earthly-linux-amd64 -O /usr/local/bin/earthly
+- wget https://github.com/earthly/earthly/releases/download/v0.8.15/earthly-linux-amd64 -O /usr/local/bin/earthly
 - chmod +x /usr/local/bin/earthly
 - earthly bootstrap
 - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
Earthfile (157 changed lines)
@@ -1,4 +1,4 @@
-VERSION 0.7
+VERSION 0.8

 ########################################################################################################################
 ## ARGUMENTS
@@ -14,7 +14,6 @@ VERSION 0.7
 # Start with older Ubuntu to ensure GLIBC symbol versioning support for older linux
 # Ensure we are using an amd64 platform because some of these targets use cross-platform tooling
 FROM ubuntu:18.04

 ENV ZIG_VERSION=0.13.0
 ENV CMAKE_VERSION_MINOR=3.30
 ENV CMAKE_VERSION_PATCH=3.30.1
@@ -26,14 +25,40 @@ ENV CARGO_HOME=/usr/local/cargo
 ENV PATH=$PATH:/usr/local/cargo/bin:/usr/local/zig
 ENV LD_LIBRARY_PATH=/usr/local/lib
 ENV RUST_BACKTRACE=1
+ENV RETRY_COUNT=12

 WORKDIR /veilid

+IF [ $(arch) = "x86_64" ]
+ENV DEFAULT_CARGO_TARGET = "x86_64-unknown-linux-gnu"
+ELSE IF [ $(arch) = "aarch64" ]
+ENV DEFAULT_CARGO_TARGET = "aarch64-unknown-linux-gnu"
+ELSE
+RUN echo "Unsupported host platform"
+RUN false
+END

 # Install build prerequisites & setup required directories
 deps-base:
+RUN echo '\
+Acquire::Retries "'$RETRY_COUNT'";\
+Acquire::https::Timeout "240";\
+Acquire::http::Timeout "240";\
+APT::Get::Assume-Yes "true";\
+APT::Install-Recommends "false";\
+APT::Install-Suggests "false";\
+Debug::Acquire::https "true";\
+' > /etc/apt/apt.conf.d/99custom
 RUN apt-get -y update
-RUN apt-get install -y iproute2 curl build-essential libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip libncursesw5-dev libncurses5-dev
-RUN curl -O https://cmake.org/files/v$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh
+RUN apt-get install -y ca-certificates iproute2 curl build-essential libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip libncursesw5-dev libncurses5-dev
+IF [ $(arch) = "x86_64" ]
+RUN apt-get install -y gcc-aarch64-linux-gnu
+ELSE IF [ $(arch) = "aarch64" ]
+RUN apt-get install -y gcc-x86-64-linux-gnu
+ELSE
+RUN apt-get install -y gcc-aarch64-linux-gnu gcc-x86-64-linux-gnu
+END
+RUN curl --retry $RETRY_COUNT --retry-connrefused -O https://cmake.org/files/v$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh
 RUN mkdir /opt/cmake
 RUN sh cmake-$CMAKE_VERSION_PATCH-linux-$(arch).sh --skip-license --prefix=/opt/cmake
 RUN ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
@@ -41,27 +66,34 @@ deps-base:
 # Install Rust
 deps-rust:
 FROM +deps-base
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain=$RUST_VERSION -y -c clippy --no-modify-path --profile minimal
+RUN curl --retry $RETRY_COUNT --retry-connrefused --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain=$RUST_VERSION -y -c clippy --no-modify-path --profile minimal
 RUN chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
 rustup --version; \
 cargo --version; \
 rustc --version;
+RUN retry=0; until [ "$retry" -ge $RETRY_COUNT ]; do \
+rustup target add \
 # Linux
-RUN rustup target add x86_64-unknown-linux-gnu
-RUN rustup target add aarch64-unknown-linux-gnu
+x86_64-unknown-linux-gnu \
+aarch64-unknown-linux-gnu \
 # Android
-RUN rustup target add aarch64-linux-android
-RUN rustup target add armv7-linux-androideabi
-RUN rustup target add i686-linux-android
-RUN rustup target add x86_64-linux-android
+aarch64-linux-android \
+armv7-linux-androideabi \
+i686-linux-android \
+x86_64-linux-android \
 # WASM
-RUN rustup target add wasm32-unknown-unknown
-RUN cargo install wasm-pack
-RUN cargo install -f wasm-bindgen-cli --version $WASM_BINDGEN_CLI_VERSION
+wasm32-unknown-unknown \
+&& break; \
+retry=$((retry+1)); \
+echo "retry #$retry..."; \
+sleep 10; \
+done
+RUN cargo install wasm-pack wasm-tools --locked
+RUN cargo install -f wasm-bindgen-cli --locked --version $WASM_BINDGEN_CLI_VERSION
 # Caching tool
-RUN cargo install cargo-chef
+RUN cargo install cargo-chef --locked
 # Install Linux cross-platform tooling
-RUN curl -O https://ziglang.org/download/$ZIG_VERSION/zig-linux-$(arch)-$ZIG_VERSION.tar.xz
+RUN curl --retry $RETRY_COUNT --retry-connrefused -O https://ziglang.org/download/$ZIG_VERSION/zig-linux-$(arch)-$ZIG_VERSION.tar.xz
 RUN tar -C /usr/local -xJf zig-linux-$(arch)-$ZIG_VERSION.tar.xz
 RUN mv /usr/local/zig-linux-$(arch)-$ZIG_VERSION /usr/local/zig
 RUN cargo install cargo-zigbuild
@@ -73,14 +105,16 @@ deps-rust:
 # Install android tooling
 deps-android:
 FROM +deps-base
+WAIT
 BUILD +deps-rust
+END
 COPY +deps-rust/cargo /usr/local/cargo
 COPY +deps-rust/rustup /usr/local/rustup
 COPY +deps-rust/cargo-zigbuild /usr/local/cargo/bin/cargo-zigbuild
 COPY +deps-rust/zig /usr/local/zig
 RUN apt-get install -y openjdk-9-jdk-headless
 RUN mkdir /Android; mkdir /Android/Sdk
-RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
+RUN curl --retry $RETRY_COUNT --retry-connrefused -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-9123335_latest.zip
 RUN cd /Android; unzip /Android/cmdline-tools.zip
 RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;34.0.0 ndk\;27.0.12077973 cmake\;3.22.1 platform-tools platforms\;android-34 cmdline-tools\;latest
 RUN rm -rf /Android/cmdline-tools
@@ -89,30 +123,34 @@ deps-android:
 # Just linux build not android
 deps-linux:
 FROM +deps-base
+WAIT
 BUILD +deps-rust
+END
 COPY +deps-rust/cargo /usr/local/cargo
 COPY +deps-rust/rustup /usr/local/rustup
 COPY +deps-rust/cargo-zigbuild /usr/local/cargo/bin/cargo-zigbuild
 COPY +deps-rust/zig /usr/local/zig

+# Make a cache image with downloaded and built dependencies
 build-linux-cache:
 FROM +deps-linux
 RUN mkdir veilid-cli veilid-core veilid-server veilid-tools veilid-wasm veilid-flutter veilid-flutter/rust
-COPY --dir .cargo scripts Cargo.lock Cargo.toml .
-COPY veilid-cli/Cargo.toml veilid-cli
-COPY veilid-core/Cargo.toml veilid-core
-COPY veilid-server/Cargo.toml veilid-server
-COPY veilid-tools/Cargo.toml veilid-tools
-COPY veilid-flutter/rust/Cargo.lock veilid-flutter/rust/Cargo.toml veilid-flutter/rust
-COPY veilid-wasm/Cargo.toml veilid-wasm
-RUN cat /veilid/scripts/earthly/cargo-linux/config.toml >> .cargo/config.toml
+COPY --keep-ts --dir .cargo scripts Cargo.lock Cargo.toml .
+COPY --keep-ts veilid-cli/Cargo.toml veilid-cli
+COPY --keep-ts veilid-core/Cargo.toml veilid-core
+COPY --keep-ts veilid-server/Cargo.toml veilid-server
+COPY --keep-ts veilid-tools/Cargo.toml veilid-tools
+COPY --keep-ts veilid-flutter/rust/Cargo.toml veilid-flutter/rust
+COPY --keep-ts veilid-wasm/Cargo.toml veilid-wasm/wasm_remap_paths.sh veilid-wasm
 RUN cargo chef prepare --recipe-path recipe.json
-RUN cargo chef cook --recipe-path recipe.json
-RUN echo $PROJECT_PATH
-SAVE ARTIFACT target
+RUN cargo chef cook --profile=test --tests --target $DEFAULT_CARGO_TARGET --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
+RUN cargo chef cook --zigbuild --release --target x86_64-unknown-linux-gnu --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
+RUN cargo chef cook --zigbuild --release --target aarch64-unknown-linux-gnu --recipe-path recipe.json -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
+RUN veilid-wasm/wasm_remap_paths.sh cargo chef cook --zigbuild --release --target wasm32-unknown-unknown --recipe-path recipe.json -p veilid-wasm
 ARG CI_REGISTRY_IMAGE=registry.gitlab.com/veilid/veilid
 SAVE IMAGE --push $CI_REGISTRY_IMAGE/build-cache:latest

+# Import the whole veilid code repository from the earthly host
 code-linux:
 # This target will either use the full earthly cache of local use (+build-linux-cache), or will use a containerized
 # version of the +build-linux-cache from the registry
@@ -126,42 +164,31 @@ code-linux:
 FROM $CI_REGISTRY_IMAGE/build-cache:latest
 # FROM registry.gitlab.com/veilid/build-cache:latest
 END
-COPY --dir .cargo build_docs.sh files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
+COPY --keep-ts --dir .cargo build_docs.sh files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid

 # Code + Linux + Android deps
 code-android:
 FROM +deps-android
-COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
-RUN cat /veilid/scripts/earthly/cargo-linux/config.toml >> /veilid/.cargo/config.toml
-RUN cat /veilid/scripts/earthly/cargo-android/config.toml >> /veilid/.cargo/config.toml
+COPY --keep-ts --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
+COPY --keep-ts scripts/earthly/cargo-android/config.toml /veilid/.cargo/config.toml

 # Clippy only
 clippy:
 FROM +code-linux
-RUN cargo clippy
+RUN cargo clippy --target x86_64-unknown-linux-gnu
 RUN cargo clippy --manifest-path=veilid-wasm/Cargo.toml --target wasm32-unknown-unknown

 # Build
-build-release:
-FROM +code-linux
-RUN cargo build --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
-SAVE ARTIFACT ./target/release AS LOCAL ./target/earthly/release
-
-build:
-FROM +code-linux
-RUN cargo build -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
-SAVE ARTIFACT ./target/debug AS LOCAL ./target/earthly/debug
-
 build-linux-amd64:
 FROM +code-linux
+# Ensure we have enough memory
+IF [ $(free -wmt | grep Total | awk '{print $2}') -lt 7500 ]
+RUN echo "not enough container memory to build. increase build host memory."
+RUN false
+END
 RUN cargo zigbuild --target x86_64-unknown-linux-gnu --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
 SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu

-build-linux-amd64-debug:
-FROM +code-linux
-RUN cargo zigbuild --target x86_64-unknown-linux-gnu -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
-SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu
-
 build-linux-arm64:
 FROM +code-linux
 RUN cargo zigbuild --target aarch64-unknown-linux-gnu --release -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
@@ -184,7 +211,7 @@ build-android:
 # Unit tests
 unit-tests-clippy-linux:
 FROM +code-linux
-RUN cargo clippy
+RUN cargo clippy --target $DEFAULT_CARGO_TARGET

 unit-tests-clippy-wasm-linux:
 FROM +code-linux
@@ -196,13 +223,13 @@ unit-tests-docs-linux:

 unit-tests-native-linux:
 FROM +code-linux
-RUN cargo test -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core
+RUN cargo test --tests --target $DEFAULT_CARGO_TARGET -p veilid-server -p veilid-cli -p veilid-tools -p veilid-core

 unit-tests-wasm-linux:
 FROM +code-linux
 # Just run build now because actual unit tests require network access
 # which should be moved to a separate integration test
-RUN veilid-wasm/wasm_build.sh
+RUN veilid-wasm/wasm_build.sh release

 unit-tests-linux:
 WAIT
@@ -228,7 +255,7 @@ package-linux-amd64-deb:
 #################################
 ### DEBIAN DPKG .DEB FILES
 #################################
-COPY --dir package /veilid
+COPY --keep-ts --dir package /veilid
 # veilid-server
 RUN /veilid/package/debian/earthly_make_veilid_server_deb.sh amd64 x86_64-unknown-linux-gnu "$IS_NIGHTLY"
 SAVE ARTIFACT --keep-ts /dpkg/out/*.deb AS LOCAL ./target/packages/
@@ -239,15 +266,15 @@ package-linux-amd64-deb:

 package-linux-amd64-rpm:
 ARG IS_NIGHTLY="false"
-FROM --platform amd64 rockylinux:9
+FROM --platform linux/amd64 rockylinux:9
 RUN yum install -y createrepo rpm-build rpm-sign yum-utils rpmdevtools
 RUN rpmdev-setuptree
 #################################
 ### RPMBUILD .RPM FILES
 #################################
 RUN mkdir -p /veilid/target
-COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml package /veilid
-COPY +build-linux-amd64/x86_64-unknown-linux-gnu /veilid/target/x86_64-unknown-linux-gnu
+COPY --keep-ts --dir package /veilid
+COPY --keep-ts +build-linux-amd64/x86_64-unknown-linux-gnu /veilid/target/x86_64-unknown-linux-gnu
 RUN mkdir -p /rpm-work-dir/veilid-server
 # veilid-server
 RUN veilid/package/rpm/veilid-server/earthly_make_veilid_server_rpm.sh x86_64 x86_64-unknown-linux-gnu "$IS_NIGHTLY"
@@ -263,7 +290,7 @@ package-linux-arm64-deb:
 #################################
 ### DEBIAN DPKG .DEB FILES
 #################################
-COPY --dir package /veilid
+COPY --keep-ts --dir package /veilid
 # veilid-server
 RUN /veilid/package/debian/earthly_make_veilid_server_deb.sh arm64 aarch64-unknown-linux-gnu "$IS_NIGHTLY"
 SAVE ARTIFACT --keep-ts /dpkg/out/*.deb AS LOCAL ./target/packages/
@@ -274,15 +301,15 @@ package-linux-arm64-deb:

 package-linux-arm64-rpm:
 ARG IS_NIGHTLY="false"
-FROM --platform arm64 rockylinux:8
+FROM --platform linux/arm64 rockylinux:9
 RUN yum install -y createrepo rpm-build rpm-sign yum-utils rpmdevtools
 RUN rpmdev-setuptree
 #################################
 ### RPMBUILD .RPM FILES
 #################################
 RUN mkdir -p /veilid/target
-COPY --dir .cargo files scripts veilid-cli veilid-core veilid-server veilid-tools veilid-flutter veilid-wasm Cargo.lock Cargo.toml package /veilid
-COPY +build-linux-arm64/aarch64-unknown-linux-gnu /veilid/target/aarch64-unknown-linux-gnu
+COPY --keep-ts --dir package /veilid
+COPY --keep-ts +build-linux-arm64/aarch64-unknown-linux-gnu /veilid/target/aarch64-unknown-linux-gnu
 RUN mkdir -p /rpm-work-dir/veilid-server
 # veilid-server
 RUN veilid/package/rpm/veilid-server/earthly_make_veilid_server_rpm.sh aarch64 aarch64-unknown-linux-gnu "$IS_NIGHTLY"
@@ -293,13 +320,25 @@ package-linux-arm64-rpm:
 SAVE ARTIFACT --keep-ts /root/rpmbuild/RPMS/aarch64/*.rpm AS LOCAL ./target/packages/

 package-linux-amd64:
+WAIT
 BUILD +package-linux-amd64-deb
+END
+WAIT
 BUILD +package-linux-amd64-rpm
+END

 package-linux-arm64:
+WAIT
 BUILD +package-linux-arm64-deb
+END
+WAIT
 BUILD +package-linux-arm64-rpm
+END

 package-linux:
+WAIT
 BUILD +package-linux-amd64
+END
+WAIT
 BUILD +package-linux-arm64
+END
@@ -9,7 +9,7 @@ fi
 if [ ! -z "$(command -v apt)" ]; then
 # Install APT dependencies
 sudo apt update -y
-sudo apt install -y openjdk-17-jdk-headless iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip llvm wabt python3-pip
+sudo apt install -y openjdk-17-jdk-headless iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip llvm python3-pip
 elif [ ! -z "$(command -v dnf)" ]; then
 # DNF (formerly yum)
 sudo dnf update -y
@@ -22,7 +22,7 @@ elif [ ! -z "$(command -v dnf)" ]; then
 # them in anyway
 #
 # Also Fedora doesn't come with pip
-sudo dnf install -y java-17-openjdk-headless iproute curl cmake openssl-devel openssl git file pkg-config dbus-devel dbus-glib gobject-introspection-devel cairo-devel unzip llvm wabt python3-pip gcc-c++
+sudo dnf install -y java-17-openjdk-headless iproute curl cmake openssl-devel openssl git file pkg-config dbus-devel dbus-glib gobject-introspection-devel cairo-devel unzip llvm python3-pip gcc-c++
 # build-essentials
 sudo dnf groupinstall -y 'Development Tools'
 fi
@@ -111,7 +111,7 @@ fi
 rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android wasm32-unknown-unknown

 # install cargo packages
-cargo install wasm-bindgen-cli wasm-pack cargo-edit
+cargo install wasm-bindgen-cli wasm-pack cargo-edit wasm-tools

 # install pip packages
 pip3 install --upgrade bumpversion
@@ -140,13 +140,13 @@ else
 BREW_COMMAND="sudo -H -u $BREW_USER brew"
 fi

-$BREW_COMMAND install capnp cmake wabt llvm jq
+$BREW_COMMAND install capnp cmake llvm jq

 # install targets
 rustup target add aarch64-apple-darwin aarch64-apple-ios aarch64-apple-ios-sim x86_64-apple-darwin x86_64-apple-ios wasm32-unknown-unknown aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android

 # install cargo packages
-cargo install wasm-bindgen-cli wasm-pack cargo-edit
+cargo install wasm-bindgen-cli wasm-pack cargo-edit wasm-tools

 # attempt to install pip packages - this may result in an error, which we will try to catch
 pip3 install --upgrade bumpversion || ( \
@@ -1,3 +1,6 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
+
 [target.aarch64-linux-android]
 linker = "/Android/Sdk/ndk/27.0.12077973/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android34-clang"
 [target.armv7-linux-androideabi]
@@ -1,2 +0,0 @@
-[target.aarch64-unknown-linux-gnu]
-linker = "aarch64-linux-gnu-gcc"
@@ -32,7 +32,6 @@ struct AddressFilterInner {
 dial_info_failures: BTreeMap<DialInfo, Timestamp>,
 }

-#[derive(Debug)]
 pub(crate) struct AddressFilter {
 registry: VeilidComponentRegistry,
 inner: Mutex<AddressFilterInner>,
@@ -44,6 +43,33 @@ pub(crate) struct AddressFilter {
 dial_info_failure_duration_min: usize,
 }

+impl fmt::Debug for AddressFilter {
+fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+f.debug_struct("AddressFilter")
+//.field("registry", &self.registry)
+.field("inner", &self.inner)
+.field("max_connections_per_ip4", &self.max_connections_per_ip4)
+.field(
+"max_connections_per_ip6_prefix",
+&self.max_connections_per_ip6_prefix,
+)
+.field(
+"max_connections_per_ip6_prefix_size",
+&self.max_connections_per_ip6_prefix_size,
+)
+.field(
+"max_connection_frequency_per_min",
+&self.max_connection_frequency_per_min,
+)
+.field("punishment_duration_min", &self.punishment_duration_min)
+.field(
+"dial_info_failure_duration_min",
+&self.dial_info_failure_duration_min,
+)
+.finish()
+}
+}
+
 impl_veilid_component_registry_accessor!(AddressFilter);

 impl AddressFilter {
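The hunk above drops the derived Debug on AddressFilter and hand-writes fmt::Debug so the registry field can be left out of the output. A minimal, self-contained sketch of that pattern, using stand-in types rather than the real VeilidComponentRegistry:

use std::fmt;

// Stand-in for a field that does not (or should not) implement Debug.
struct Registry;

struct AddressFilterLike {
    #[allow(dead_code)]
    registry: Registry,
    max_connections_per_ip4: usize,
}

impl fmt::Debug for AddressFilterLike {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // debug_struct lets us pick which fields appear; `registry` is skipped.
        f.debug_struct("AddressFilterLike")
            .field("max_connections_per_ip4", &self.max_connections_per_ip4)
            .finish_non_exhaustive() // marks that some fields were omitted
    }
}

fn main() {
    let af = AddressFilterLike {
        registry: Registry,
        max_connections_per_ip4: 32,
    };
    println!("{af:?}"); // AddressFilterLike { max_connections_per_ip4: 32, .. }
}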
@@ -1116,12 +1116,10 @@ impl NetworkManager {
 }
 };

-// Add the node without its peer info
-let source_noderef = match routing_table.register_node_with_id(
-routing_domain,
-envelope.get_sender_typed_id(),
-ts,
-) {
+// Add the sender's node without its peer info
+// Gets noderef filtered to the routing domain
+let sender_noderef =
+match routing_table.register_node_with_id(routing_domain, sender_id, ts) {
 Ok(v) => v,
 Err(e) => {
 // If the node couldn't be registered just skip this envelope,
@@ -1130,15 +1128,22 @@ impl NetworkManager {
 }
 };

+// Filter the noderef further by its inbound flow
+let sender_noderef = sender_noderef.filtered_clone(
+NodeRefFilter::new()
+.with_address_type(flow.address_type())
+.with_protocol_type(flow.protocol_type()),
+);
+
 // Set the envelope version for the peer
-source_noderef.add_envelope_version(envelope.get_version());
+sender_noderef.add_envelope_version(envelope.get_version());

 // Set the last flow for the peer
-self.set_last_flow(source_noderef.unfiltered(), flow, ts);
+self.set_last_flow(sender_noderef.unfiltered(), flow, ts);

 // Pass message to RPC system
 if let Err(e) =
-rpc.enqueue_direct_message(envelope, source_noderef, flow, routing_domain, body)
+rpc.enqueue_direct_message(envelope, sender_noderef, flow, routing_domain, body)
 {
 // Couldn't enqueue, but not the sender's fault
 log_net!(debug "failed to enqueue direct message: {}", e);
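The second hunk narrows the registered sender noderef to the address type and protocol type of the flow the envelope actually arrived on, before the flow is recorded and the message is handed to the RPC system. A rough sketch of that narrowing step with hypothetical, heavily simplified types (the real FilteredNodeRef/NodeRefFilter in veilid-core carry far more state):

// Hypothetical stand-ins; only the shape of the builder-style filter mirrors the hunk above.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ProtocolType { Udp, Tcp, Ws }

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum AddressType { Ipv4, Ipv6 }

#[derive(Clone, Copy, Debug)]
struct Flow {
    protocol_type: ProtocolType,
    address_type: AddressType,
}

#[derive(Default, Debug)]
struct NodeFilter {
    protocol_types: Vec<ProtocolType>,
    address_types: Vec<AddressType>,
}

impl NodeFilter {
    fn new() -> Self {
        Self::default()
    }
    fn with_protocol_type(mut self, pt: ProtocolType) -> Self {
        self.protocol_types.push(pt);
        self
    }
    fn with_address_type(mut self, at: AddressType) -> Self {
        self.address_types.push(at);
        self
    }
    fn matches(&self, flow: &Flow) -> bool {
        self.protocol_types.contains(&flow.protocol_type)
            && self.address_types.contains(&flow.address_type)
    }
}

fn main() {
    // Mirror of the filtered_clone() idea: restrict further traffic for this
    // peer to the exact protocol/address type the message came in on.
    let inbound = Flow { protocol_type: ProtocolType::Tcp, address_type: AddressType::Ipv6 };
    let filter = NodeFilter::new()
        .with_address_type(inbound.address_type)
        .with_protocol_type(inbound.protocol_type);
    assert!(filter.matches(&inbound));
    println!("{filter:?}");
}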
@@ -114,6 +114,11 @@ impl DiscoveryContext {
 async fn request_public_address(&self, node_ref: FilteredNodeRef) -> Option<SocketAddress> {
 let rpc = self.rpc_processor();

+// Ensure a fresh connection is made so it comes from our public address
+// This will only clear the dialinfo filtered flows, as this is a FilteredNodeRef
+// filtered down to the protocol/address type we are checking the public address for
+node_ref.clear_last_flows();
+
 let res = network_result_value_or_log!(match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
 Ok(v) => v,
 Err(e) => {
@@ -301,6 +306,9 @@ impl DiscoveryContext {
 redirect: bool,
 ) -> bool {
 // ask the node to send us a dial info validation receipt
+// no need to clear_last_flows here, because the dial_info is always returned via the
+// send_out_of_band_receipt mechanism, which will always create a new flow
+// and the outgoing rpc call is safely able to use existing flows
 match self
 .rpc_processor()
 .rpc_call_validate_dial_info(node_ref.clone(), dial_info, redirect)
@@ -198,7 +198,7 @@ impl NetworkManager {
 target_node_ref: FilteredNodeRef,
 data: Vec<u8>,
 ) -> EyreResult<NetworkResult<SendDataMethod>> {
-// Try to send data to the last socket we've seen this peer on
+// Try to send data to the last flow we've seen this peer on
 let Some(flow) = target_node_ref.last_flow() else {
 return Ok(NetworkResult::no_connection_other(format!(
 "Node is not reachable and has no existing connection: {}",
@@ -250,7 +250,7 @@ impl NetworkManager {
 .sequencing_filtered(Sequencing::NoPreference)
 };

-// First try to send data to the last socket we've seen this peer on
+// First try to send data to the last flow we've seen this peer on
 let data = if let Some(flow) = seq_target_node_ref.last_flow() {
 match self.net().send_data_to_existing_flow(flow, data).await? {
 SendDataToExistingFlowResult::Sent(unique_flow) => {
@@ -299,7 +299,7 @@ impl NetworkManager {
 target_node_ref: FilteredNodeRef,
 data: Vec<u8>,
 ) -> EyreResult<NetworkResult<SendDataMethod>> {
-// First try to send data to the last socket we've seen this peer on
+// First try to send data to the last flow we've seen this peer on
 let data = if let Some(flow) = target_node_ref.last_flow() {
 match self.net().send_data_to_existing_flow(flow, data).await? {
 SendDataToExistingFlowResult::Sent(unique_flow) => {
@@ -351,7 +351,7 @@ impl NetworkManager {
 // Since we have the best dial info already, we can find a connection to use by protocol type
 let node_ref = node_ref.filtered_clone(NodeRefFilter::from(dial_info.make_filter()));

-// First try to send data to the last socket we've seen this peer on
+// First try to send data to the last flow we've seen this peer on
 let data = if let Some(flow) = node_ref.last_flow() {
 #[cfg(feature = "verbose-tracing")]
 log_net!(debug
@@ -600,9 +600,17 @@ impl BucketEntryInner {
 }

 // Clears the table of last flows to ensure we create new ones and drop any existing ones
-pub(super) fn clear_last_flows(&mut self) {
+// With a DialInfo::all filter specified, only clear the flows that match the filter
+pub(super) fn clear_last_flows(&mut self, dial_info_filter: DialInfoFilter) {
+if dial_info_filter != DialInfoFilter::all() {
+self.last_flows.retain(|k, _v| {
+!(dial_info_filter.protocol_type_set.contains(k.0)
+&& dial_info_filter.address_type_set.contains(k.1))
+})
+} else {
 self.last_flows.clear();
 }
+}

 // Clears the table of last flows except the most recent one
 pub(super) fn clear_last_flows_except_latest(&mut self) {
@@ -751,7 +759,7 @@ impl BucketEntryInner {
 pub fn set_punished(&mut self, punished: Option<PunishmentReason>) {
 self.punishment = punished;
 if punished.is_some() {
-self.clear_last_flows();
+self.clear_last_flows(DialInfoFilter::all());
 }
 }
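clear_last_flows() now takes a DialInfoFilter: with DialInfoFilter::all() it still clears everything, otherwise it only drops the cached flows whose protocol/address type matches the filter. A standalone sketch of the retain-based selective clear, using simplified stand-in key types rather than the real ones:

use std::collections::BTreeMap;

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum ProtocolType { Udp, Tcp, Ws }

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum AddressType { Ipv4, Ipv6 }

/// Drop only the cached flows whose (protocol, address) key matches the filter,
/// keeping unrelated flows alive; the real method also short-circuits to a full
/// clear() when the filter is DialInfoFilter::all().
fn clear_matching_flows(
    last_flows: &mut BTreeMap<(ProtocolType, AddressType), u64>,
    protocols: &[ProtocolType],
    addresses: &[AddressType],
) {
    last_flows.retain(|key, _timestamp| {
        !(protocols.contains(&key.0) && addresses.contains(&key.1))
    });
}

fn main() {
    let mut flows = BTreeMap::new();
    flows.insert((ProtocolType::Tcp, AddressType::Ipv4), 1_u64);
    flows.insert((ProtocolType::Ws, AddressType::Ipv6), 2_u64);

    // Clear only TCP/IPv4 flows, as a dialinfo-filtered node reference would request.
    clear_matching_flows(&mut flows, &[ProtocolType::Tcp], &[AddressType::Ipv4]);

    assert_eq!(flows.len(), 1); // the WS/IPv6 flow is untouched
    println!("{flows:?}");
}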
@@ -228,7 +228,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
 }

 fn clear_last_flows(&self) {
-self.operate_mut(|_rti, e| e.clear_last_flows())
+self.operate_mut(|_rti, e| e.clear_last_flows(self.dial_info_filter()))
 }

 fn set_last_flow(&self, flow: Flow, ts: Timestamp) {
@@ -341,7 +341,7 @@ impl RoutingTableInner {
 for bucket in &self.buckets[&ck] {
 for entry in bucket.entries() {
 entry.1.with_mut_inner(|e| {
-e.clear_last_flows();
+e.clear_last_flows(DialInfoFilter::all());
 });
 }
 }
@@ -2,10 +2,10 @@ use super::*;

 /// Keepalive pings are done occasionally to ensure holepunched public dialinfo
 /// remains valid, as well as to make sure we remain in any relay node's routing table
-const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 1;
+const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;

 /// Keepalive pings are done for active watch nodes to make sure they are still there
-const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 1;
+const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;

 /// Ping queue processing depth per validator
 const MAX_PARALLEL_PINGS: usize = 8;
@@ -478,35 +478,9 @@ impl RPCProcessor {
 }
 };

-// Reply directly to the request's source
-let sender_node_id = detail.envelope.get_sender_typed_id();
-// This may be a different node's reference than the 'sender' in the case of a relay
-let peer_noderef = detail.peer_noderef.clone();
-
-// If the sender_id is that of the peer, then this is a direct reply
-// else it is a relayed reply through the peer
-if peer_noderef.node_ids().contains(&sender_node_id) {
-NetworkResult::value(Destination::direct(peer_noderef))
-} else {
-// Look up the sender node, we should have added it via senderNodeInfo before getting here.
-let res = match self.routing_table().lookup_node_ref(sender_node_id) {
-Ok(v) => v,
-Err(e) => {
-return NetworkResult::invalid_message(format!(
-"failed to look up node info for respond to: {}",
-e
-))
-}
-};
-if let Some(sender_noderef) = res {
-NetworkResult::value(Destination::relay(peer_noderef, sender_noderef))
-} else {
-NetworkResult::invalid_message(
-"not responding to sender that has no node info",
-)
-}
-}
+// Get the filtered noderef of the sender
+let sender_noderef = detail.sender_noderef.clone();
+NetworkResult::value(Destination::direct(sender_noderef))
 }
 RespondTo::PrivateRoute(pr) => {
 match &request.header.detail {
@@ -4,11 +4,11 @@ use super::*;
 pub(in crate::rpc_processor) struct RPCMessageHeaderDetailDirect {
 /// The decoded header of the envelope
 pub envelope: Envelope,
-/// The noderef of the peer that sent the message (not the original sender).
+/// The noderef of the original peer that sent the message (not the relay if it is relayed)
 /// Ensures node doesn't get evicted from routing table until we're done with it
 /// Should be filtered to the routing domain of the peer that we received from
-pub peer_noderef: FilteredNodeRef,
-/// The flow from the peer sent the message (not the original sender)
+pub sender_noderef: FilteredNodeRef,
+/// The flow from the peer sent the message (possibly a relay)
 pub flow: Flow,
 /// The routing domain of the peer that we received from
 pub routing_domain: RoutingDomain,
@@ -65,13 +65,6 @@ impl MessageHeader {
 RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_crypto_kind(),
 }
 }
-// pub fn direct_peer_noderef(&self) -> NodeRef {
-// match &self.detail {
-// RPCMessageHeaderDetail::Direct(d) => d.peer_noderef.clone(),
-// RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.peer_noderef.clone(),
-// RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.peer_noderef.clone(),
-// }
-// }
 pub fn routing_domain(&self) -> RoutingDomain {
 match &self.detail {
 RPCMessageHeaderDetail::Direct(d) => d.routing_domain,
@@ -1588,7 +1588,7 @@ impl RPCProcessor {
 pub fn enqueue_direct_message(
 &self,
 envelope: Envelope,
-peer_noderef: FilteredNodeRef,
+sender_noderef: FilteredNodeRef,
 flow: Flow,
 routing_domain: RoutingDomain,
 body: Vec<u8>,
@@ -1599,14 +1599,14 @@ impl RPCProcessor {
 .enter()
 .wrap_err("not started up")?;

-if peer_noderef.routing_domain_set() != routing_domain {
+if sender_noderef.routing_domain_set() != routing_domain {
 bail!("routing domain should match peer noderef filter");
 }

 let header = MessageHeader {
 detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
 envelope,
-peer_noderef,
+sender_noderef,
 flow,
 routing_domain,
 }),
@@ -45,7 +45,7 @@ impl RPCProcessor {
 let res = match msg.header.detail {
 RPCMessageHeaderDetail::Direct(detail) => {
 network_manager
-.handle_in_band_receipt(receipt, detail.peer_noderef)
+.handle_in_band_receipt(receipt, detail.sender_noderef)
 .await
 }
 RPCMessageHeaderDetail::SafetyRouted(_) => {
@@ -164,7 +164,7 @@ impl RPCProcessor {
 pr_pubkey: TypedKey,
 ) -> RPCNetworkResult<()> {
 // Get sender id of the peer with the crypto kind of the route
-let Some(sender_id) = detail.peer_noderef.node_ids().get(pr_pubkey.kind) else {
+let Some(sender_id) = detail.sender_noderef.node_ids().get(pr_pubkey.kind) else {
 return Ok(NetworkResult::invalid_message(
 "route node doesnt have a required crypto kind for routed operation",
 ));
veilid-flutter/rust/Cargo.lock (generated, 3129 changed lines): file diff suppressed because it is too large.
@@ -46,6 +46,8 @@ rt-tokio = [
 tracking = ["veilid-core/tracking"]
 debug-json-api = []
 debug-locks = ["veilid-core/debug-locks"]
+perfetto = ["tracing-perfetto"]
+flame = ["tracing-flame"]

 geolocation = ["veilid-core/geolocation"]

@@ -90,7 +92,7 @@ hostname = "^0"
 stop-token = { version = "^0", default-features = false }
 sysinfo = { version = "^0.30.13", default-features = false }
 wg = { version = "^0.9.1", features = ["future"] }
-tracing-flame = "0.2.0"
+tracing-flame = { version = "0.2.0", optional = true }
 time = { version = "0.3.36", features = ["local-offset"] }
 chrono = "0.4.38"

@@ -103,7 +105,7 @@ daemonize = "^0.5.0"
 signal-hook = "^0.3.17"
 signal-hook-async-std = "^0.2.2"
 nix = "^0.29.0"
-tracing-perfetto = "0.1.1"
+tracing-perfetto = { version = "0.1.5", optional = true }

 [target.'cfg(target_os = "linux")'.dependencies]
 tracing-journald = "^0.3.0"
@@ -245,6 +245,7 @@ fn main() -> EyreResult<()> {
 NamedSocketAddrs::from_str(&otlp).wrap_err("failed to parse OTLP address")?;
 settingsrw.logging.otlp.level = LogLevel::Trace;
 }
+#[cfg(feature = "flame")]
 if let Some(flame) = args.flame {
 let flame = if flame.is_empty() {
 Settings::get_default_flame_path(
@@ -260,7 +261,7 @@ fn main() -> EyreResult<()> {
 settingsrw.logging.flame.enabled = true;
 settingsrw.logging.flame.path = flame;
 }
-#[cfg(unix)]
+#[cfg(all(unix, feature = "perfetto"))]
 if let Some(perfetto) = args.perfetto {
 let perfetto = if perfetto.is_empty() {
 Settings::get_default_perfetto_path(
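With these changes the flamegraph and perfetto code paths in veilid-server only compile when their Cargo features are enabled, matching the new optional dependencies in the Cargo.toml hunk further up. A minimal sketch of the pattern, using a made-up feature name "tracing-extras" rather than the real "flame"/"perfetto" features:

// Sketch only; "tracing-extras" is a hypothetical feature that would be
// declared in Cargo.toml, e.g. tracing-extras = ["some-optional-crate"].
fn main() {
    init_logging();

    #[cfg(feature = "tracing-extras")]
    {
        // This block (and the function it calls) exists only when the feature
        // is enabled; otherwise it is compiled out entirely.
        init_extra_tracing();
    }

    println!("server starting");
}

fn init_logging() {
    // always-on logging setup would go here
}

#[cfg(feature = "tracing-extras")]
fn init_extra_tracing() {
    // setup that pulls in the optional dependency would go here
}

Because the gated code does not exist in a default build, CLI flags and settings that feed it (like the flame and perfetto handling above) have to carry the same cfg predicate.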
@@ -204,20 +204,20 @@ core:
 tcp:
 connect: true
 listen: true
-max_connections: 32
+max_connections: 256
 listen_address: ':5150'
 #'public_address: ''
 ws:
 connect: true
 listen: true
-max_connections: 32
+max_connections: 256
 listen_address: ':5150'
 path: 'ws'
 # url: 'ws://localhost:5150/ws'
 wss:
 connect: true
 listen: false
-max_connections: 32
+max_connections: 256
 listen_address: ':5150'
 path: 'ws'
 # url: ''
@@ -501,12 +501,14 @@ pub struct Terminal {
 pub ignore_log_targets: Vec<String>,
 }

+#[cfg(feature = "flame")]
 #[derive(Debug, Deserialize, Serialize)]
 pub struct Flame {
 pub enabled: bool,
 pub path: String,
 }

+#[cfg(all(unix, feature = "perfetto"))]
 #[derive(Debug, Deserialize, Serialize)]
 pub struct Perfetto {
 pub enabled: bool,
@@ -541,6 +543,7 @@ pub struct Api {
 pub ignore_log_targets: Vec<String>,
 }

+#[cfg(feature = "opentelemetry-otlp")]
 #[derive(Debug, Deserialize, Serialize)]
 pub struct Otlp {
 pub enabled: bool,
@@ -563,9 +566,13 @@ pub struct Logging {
 pub terminal: Terminal,
 pub file: File,
 pub api: Api,
+#[cfg(feature = "opentelemetry-otlp")]
 pub otlp: Otlp,
+#[cfg(feature = "flame")]
 pub flame: Flame,
+#[cfg(all(unix, feature = "perfetto"))]
 pub perfetto: Perfetto,
+#[cfg(feature = "rt-tokio")]
 pub console: Console,
 }
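Since the Flame, Perfetto, and Otlp config sections now only exist under their features, the corresponding fields of Logging are gated the same way, and every reader of those fields (the set_config_value! calls and the tests in the hunks below) is wrapped in a matching cfg block. A small sketch of the idea with a simplified stand-in struct; "flame" is the same feature name as in the hunk, but the types here are invented:

// Conditionally compiled config field: it only exists when the feature is on.
#[derive(Debug, Default)]
pub struct LoggingConfig {
    pub terminal_enabled: bool,
    #[cfg(feature = "flame")]
    pub flame_path: String,
}

fn main() {
    let cfg = LoggingConfig::default();

    // Any code touching the gated field must carry the same cfg predicate,
    // otherwise default-feature builds fail to compile.
    #[cfg(feature = "flame")]
    {
        println!("flame output: {}", cfg.flame_path);
    }

    println!("{cfg:?}");
}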
@@ -904,6 +911,7 @@ impl Settings {
 }

 /// Determine default flamegraph output path
+#[cfg(feature = "flame")]
 pub fn get_default_flame_path(subnode_index: u16, subnode_count: u16) -> PathBuf {
 let name = if subnode_count == 1 {
 if subnode_index == 0 {
@@ -922,7 +930,7 @@ impl Settings {
 }

 /// Determine default perfetto output path
-#[cfg(unix)]
+#[cfg(all(unix, feature = "perfetto"))]
 pub fn get_default_perfetto_path(subnode_index: u16, subnode_count: u16) -> PathBuf {
 let name = if subnode_count == 1 {
 if subnode_index == 0 {
@@ -1075,14 +1083,24 @@ impl Settings {
 set_config_value!(inner.logging.api.enabled, value);
 set_config_value!(inner.logging.api.level, value);
 set_config_value!(inner.logging.api.ignore_log_targets, value);
+#[cfg(feature = "opentelemetry-otlp")]
+{
 set_config_value!(inner.logging.otlp.enabled, value);
 set_config_value!(inner.logging.otlp.level, value);
 set_config_value!(inner.logging.otlp.grpc_endpoint, value);
 set_config_value!(inner.logging.otlp.ignore_log_targets, value);
+}
+#[cfg(feature = "flame")]
+{
 set_config_value!(inner.logging.flame.enabled, value);
 set_config_value!(inner.logging.flame.path, value);
+}
+#[cfg(all(unix, feature = "perfetto"))]
+{
 set_config_value!(inner.logging.perfetto.enabled, value);
 set_config_value!(inner.logging.perfetto.path, value);
+}
+#[cfg(feature = "rt-tokio")]
 set_config_value!(inner.logging.console.enabled, value);
 set_config_value!(inner.testing.subnode_index, value);
 #[cfg(feature = "virtual-network")]
@@ -1730,10 +1748,16 @@ mod tests {
 s.logging.otlp.grpc_endpoint,
 NamedSocketAddrs::from_str("localhost:4317").unwrap()
 );
+#[cfg(feature = "flame")]
+{
 assert!(!s.logging.flame.enabled);
 assert_eq!(s.logging.flame.path, "");
+}
+#[cfg(all(unix, feature = "perfetto"))]
+{
 assert!(!s.logging.perfetto.enabled);
 assert_eq!(s.logging.perfetto.path, "");
+}
 assert!(!s.logging.console.enabled);
 assert_eq!(s.testing.subnode_index, 0);
 #[cfg(feature = "virtual-network")]
@@ -1885,7 +1909,7 @@ mod tests {
 //
 assert!(s.core.network.protocol.tcp.connect);
 assert!(s.core.network.protocol.tcp.listen);
-assert_eq!(s.core.network.protocol.tcp.max_connections, 32);
+assert_eq!(s.core.network.protocol.tcp.max_connections, 256);
 assert_eq!(s.core.network.protocol.tcp.listen_address.name, ":5150");
 for addr in &s.core.network.protocol.tcp.listen_address.addrs {
 assert!(valid_socket_addrs.contains(addr));
@@ -1896,7 +1920,7 @@ mod tests {
         //
         assert!(s.core.network.protocol.ws.connect);
         assert!(s.core.network.protocol.ws.listen);
-        assert_eq!(s.core.network.protocol.ws.max_connections, 32);
+        assert_eq!(s.core.network.protocol.ws.max_connections, 256);
         assert_eq!(s.core.network.protocol.ws.listen_address.name, ":5150");
         for addr in &s.core.network.protocol.ws.listen_address.addrs {
             assert!(valid_socket_addrs.contains(addr));
@@ -1910,7 +1934,7 @@ mod tests {
         //
         assert!(s.core.network.protocol.wss.connect);
         assert!(!s.core.network.protocol.wss.listen);
-        assert_eq!(s.core.network.protocol.wss.max_connections, 32);
+        assert_eq!(s.core.network.protocol.wss.max_connections, 256);
         assert_eq!(s.core.network.protocol.wss.listen_address.name, ":5150");
         for addr in &s.core.network.protocol.wss.listen_address.addrs {
             assert!(valid_socket_addrs.contains(addr));

@@ -16,14 +16,16 @@ use std::collections::BTreeMap;
 use std::path::*;
 use std::sync::Arc;
 use tracing_appender::*;
+#[cfg(feature = "flame")]
 use tracing_flame::FlameLayer;
-#[cfg(unix)]
+#[cfg(all(unix, feature = "perfetto"))]
 use tracing_perfetto::PerfettoLayer;
 use tracing_subscriber::prelude::*;
 use tracing_subscriber::*;

 struct VeilidLogsInner {
     _file_guard: Option<non_blocking::WorkerGuard>,
+    #[cfg(feature = "flame")]
     _flame_guard: Option<tracing_flame::FlushGuard<std::io::BufWriter<std::fs::File>>>,
     filters: BTreeMap<&'static str, veilid_core::VeilidLayerFilter>,
 }
@@ -91,7 +93,9 @@ impl VeilidLogs {
         }

         // Flamegraph logger
+        #[cfg(feature = "flame")]
         let mut flame_guard = None;
+        #[cfg(feature = "flame")]
         if settingsr.logging.flame.enabled {
             let filter = veilid_core::VeilidLayerFilter::new_no_default(
                 veilid_core::VeilidConfigLogLevel::Trace,
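For reference, a minimal sketch of how a tracing-flame layer is typically installed when the flame feature is enabled. FlameLayer::with_file hands back the layer together with a FlushGuard that has to be kept alive (which is what the _flame_guard field above is for) so buffered folded-stack samples get flushed. The path and the bare registry here are illustrative, not the Veilid setup:

    use tracing_flame::{FlameLayer, FlushGuard};
    use tracing_subscriber::prelude::*;

    fn init_flame_logging(path: &str) -> FlushGuard<std::io::BufWriter<std::fs::File>> {
        // Create the folded-stack output file and get the layer plus its guard;
        // dropping the guard flushes any remaining samples to disk.
        let (flame_layer, guard) =
            FlameLayer::with_file(path).expect("failed to create flame output file");
        tracing_subscriber::registry().with(flame_layer).init();
        guard
    }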
@@ -114,7 +118,7 @@ impl VeilidLogs {
         }

         // Perfetto logger
-        #[cfg(unix)]
+        #[cfg(all(unix, feature = "perfetto"))]
         if settingsr.logging.perfetto.enabled {
             let filter = veilid_core::VeilidLayerFilter::new_no_default(
                 veilid_core::VeilidConfigLogLevel::Trace,
@@ -259,6 +263,7 @@ impl VeilidLogs {
         Ok(VeilidLogs {
             inner: Arc::new(Mutex::new(VeilidLogsInner {
                 _file_guard: file_guard,
+                #[cfg(feature = "flame")]
                 _flame_guard: flame_guard,
                 filters,
             })),
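Because _flame_guard only exists when the flame feature is compiled in, the struct literal above has to carry the same cfg attribute on that field; Rust accepts cfg attributes on individual fields of a struct expression. A stripped-down sketch with hypothetical names:

    struct LogsInner {
        #[cfg(feature = "flame")]
        flame_guard: Option<()>,
        filters: usize,
    }

    fn build_inner() -> LogsInner {
        LogsInner {
            // Must be gated identically to the field declaration: with the feature
            // off the field does not exist, with it on the field must be filled in.
            #[cfg(feature = "flame")]
            flame_guard: None,
            filters: 0,
        }
    }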
@@ -26,19 +26,11 @@ if [[ "$1" == "release" ]]; then
     OUTPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/release/pkg
     INPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/release

-    # Path to, but not including, the cargo workspace ("veilid")
-    WORKSPACE_PARENT=$(dirname $(dirname $(cargo locate-project --workspace --message-format=plain)))
-    # Do not include said path in wasm blob output
-    RUSTFLAGS="--remap-path-prefix=$WORKSPACE_PARENT=/home/user $RUSTFLAGS"
-    # Do not include user home directory in wasm blob output
-    RUSTFLAGS="--remap-path-prefix=$HOME=/home/user $RUSTFLAGS"
-    # Explicitly mark RUSTFLAGS as an environment variable, so it's passed to cargo
-    export RUSTFLAGS
-
-    cargo build --target wasm32-unknown-unknown --release
+    ./wasm_remap_paths.sh cargo build --target wasm32-unknown-unknown --release
     mkdir -p $OUTPUTDIR
     wasm-bindgen --out-dir $OUTPUTDIR --target web --weak-refs $INPUTDIR/veilid_wasm.wasm
-    wasm-strip $OUTPUTDIR/veilid_wasm_bg.wasm
+    wasm-tools strip $OUTPUTDIR/veilid_wasm_bg.wasm -o $OUTPUTDIR/veilid_wasm_bg.wasm.stripped
+    mv $OUTPUTDIR/veilid_wasm_bg.wasm.stripped $OUTPUTDIR/veilid_wasm_bg.wasm
 else
     OUTPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/debug/pkg
     INPUTDIR=$SCRIPTDIR/../target/wasm32-unknown-unknown/debug
@@ -54,7 +46,7 @@ else
     # wasm-strip $OUTPUTDIR/veilid_wasm_bg.wasm
 fi

 popd &> /dev/null

 # Print for use with scripts
 echo SUCCESS:OUTPUTDIR=$(get_abs_filename $OUTPUTDIR)

veilid-wasm/wasm_remap_paths.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -eo pipefail
+
+# Path to, but not including, the cargo workspace ("veilid")
+WORKSPACE_PARENT=$(dirname $(dirname $(cargo locate-project --workspace --message-format=plain))); \
+# Do not include said path in wasm blob output
+RUSTFLAGS="--remap-path-prefix=$WORKSPACE_PARENT=/home/user $RUSTFLAGS"; \
+# Do not include user home directory in wasm blob output
+RUSTFLAGS="--remap-path-prefix=$HOME=/home/user $RUSTFLAGS"; \
+# Explicitly mark RUSTFLAGS as an environment variable, so it's passed to cargo
+export RUSTFLAGS
+
+# Run the rest of the command line
+$@