Upgrade to Rust 1.86.0

Christien Rioux 2025-05-14 18:21:56 -04:00
parent 452e4d0ab8
commit 13d5ca65d6
8 changed files with 502 additions and 437 deletions

Cargo.lock (generated): 511 lines changed
File diff suppressed because it is too large.


@@ -14,7 +14,7 @@ repository = "https://gitlab.com/veilid/veilid"
 authors = ["Veilid Team <contact@veilid.com>"]
 license = "MPL-2.0"
 edition = "2021"
-rust-version = "1.81.0"
+rust-version = "1.86.0"
 [patch.crates-io]
 cursive = { git = "https://gitlab.com/veilid/cursive.git" }


@@ -18,7 +18,7 @@ ENV ZIG_VERSION=0.13.0
 ENV CMAKE_VERSION_MINOR=3.30
 ENV CMAKE_VERSION_PATCH=3.30.1
 ENV WASM_BINDGEN_CLI_VERSION=0.2.100
-ENV RUST_VERSION=1.81.0
+ENV RUST_VERSION=1.86.0
 ENV RUSTUP_HOME=/usr/local/rustup
 ENV RUSTUP_DIST_SERVER=https://static.rust-lang.org
 ENV CARGO_HOME=/usr/local/cargo


@@ -198,7 +198,7 @@ impl IGDManager {
         // Map any port
         let desc = this.get_description(protocol_type, local_port);
-        let mapped_port = match gw.add_any_port(convert_protocol_type(protocol_type), SocketAddr::new(local_ip, local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) {
+        let mapped_port = match gw.add_any_port(convert_protocol_type(protocol_type), SocketAddr::new(local_ip, local_port), UPNP_MAPPING_LIFETIME_MS.div_ceil(1000), &desc) {
             Ok(mapped_port) => mapped_port,
             Err(e) => {
                 // Failed to map external port
@@ -295,7 +295,7 @@ impl IGDManager {
         match gw.add_any_port(
             convert_protocol_type(k.protocol_type),
             SocketAddr::new(local_ip, k.local_port),
-            (UPNP_MAPPING_LIFETIME_MS + 999) / 1000,
+            UPNP_MAPPING_LIFETIME_MS.div_ceil(1000),
             &desc,
         ) {
             Ok(mapped_port) => {
@@ -343,7 +343,7 @@ impl IGDManager {
             convert_protocol_type(k.protocol_type),
             v.mapped_port,
             SocketAddr::new(local_ip, k.local_port),
-            (UPNP_MAPPING_LIFETIME_MS + 999) / 1000,
+            UPNP_MAPPING_LIFETIME_MS.div_ceil(1000),
             &desc,
         ) {
             Ok(()) => {
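
Context on this change: `(UPNP_MAPPING_LIFETIME_MS + 999) / 1000` and `UPNP_MAPPING_LIFETIME_MS.div_ceil(1000)` compute the same rounded-up number of seconds. `div_ceil` has been stable on the integer types since Rust 1.73, and newer clippy suggests it over the manual idiom. A minimal sketch of the equivalence (the constant's value here is made up for illustration):

// Illustrative only: UPNP_MAPPING_LIFETIME_MS stands in for the real constant
// defined elsewhere in veilid-core; any unsigned millisecond count works.
const UPNP_MAPPING_LIFETIME_MS: u32 = 5 * 60 * 1000;

fn main() {
    // Old idiom: add (divisor - 1) before dividing to round up to whole seconds.
    let old_secs = (UPNP_MAPPING_LIFETIME_MS + 999) / 1000;
    // New idiom: div_ceil expresses the same ceiling division directly.
    let new_secs = UPNP_MAPPING_LIFETIME_MS.div_ceil(1000);
    assert_eq!(old_secs, new_secs);
    println!("UPnP mapping lifetime: {new_secs} seconds");
}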


@@ -138,7 +138,7 @@ pub fn capability_fanout_node_info_filter(caps: Vec<Capability>) -> FanoutNodeIn
 /// * 'node_count' - the number of nodes to keep in the closest_nodes set
 /// * 'fanout' - the number of concurrent calls being processed at the same time
 /// * 'consensus_count' - the number of nodes in the processed queue that need to be in the
 ///   'Accepted' state before we terminate the fanout early.
 ///
 /// The algorithm returns early if 'check_done' returns some value, or if an error is found during the process.
 /// If the algorithm times out, a Timeout result is returned, however operations will still have been performed and a


@@ -830,7 +830,7 @@ impl VeilidConfig {
     /// specified to override this location
     ///
     /// * `program_name` - Pick a program name and do not change it from release to release,
     ///   see `VeilidConfig::program_name` for details.
     /// * `organization_name` - Similar to program_name, but for the organization publishing this app
     /// * `qualifier` - Suffix for the application bundle name
     /// * `storage_directory` - Override for the path where veilid-core stores its content
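
Both doc-comment hunks above read the same before and after; the change appears to be whitespace only, adjusting how far the continuation line of a list item is indented. Newer clippy (around 1.86) warns when such continuations are over-indented (for example `clippy::doc_overindented_list_items`), and the conventional fix is to align the continuation with the item's text. A tiny illustrative example with a made-up function:

/// Illustrative only: the usual indentation for a doc-comment list continuation.
///
/// * `consensus_count` - the number of nodes that must reach the 'Accepted'
///   state before the fanout terminates early; the continuation line lines up
///   with the item text after the `*` marker.
pub fn doc_indent_example(consensus_count: usize) -> usize {
    consensus_count
}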


@@ -1,205 +1,205 @@
 use crate::*;
 use futures_util::stream::FuturesUnordered;
 use futures_util::AsyncRead as FuturesAsyncRead;
 use futures_util::AsyncWrite as FuturesAsyncWrite;
 use futures_util::Stream;
 use std::path::PathBuf;
 use std::{io, path::Path};
 use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
 use tokio::net::windows::named_pipe::{
     ClientOptions, NamedPipeClient, NamedPipeServer, ServerOptions,
 };

 /////////////////////////////////////////////////////////////

 enum IpcStreamInternal {
     Client(NamedPipeClient),
     Server(NamedPipeServer),
 }

 pub struct IpcStream {
     internal: IpcStreamInternal,
 }

 impl IpcStream {
     #[expect(clippy::unused_async)]
     pub async fn connect<P: AsRef<Path>>(path: P) -> io::Result<IpcStream> {
         Ok(IpcStream {
             internal: IpcStreamInternal::Client(
                 ClientOptions::new().open(path.as_ref().to_path_buf().as_os_str())?,
             ),
         })
     }
 }

 impl FuturesAsyncRead for IpcStream {
     fn poll_read(
         mut self: std::pin::Pin<&mut Self>,
         cx: &mut std::task::Context<'_>,
         buf: &mut [u8],
     ) -> std::task::Poll<io::Result<usize>> {
         match &mut self.internal {
             IpcStreamInternal::Client(client) => {
                 let mut rb = ReadBuf::new(buf);
                 match <NamedPipeClient as AsyncRead>::poll_read(
                     std::pin::Pin::new(client),
                     cx,
                     &mut rb,
                 ) {
                     std::task::Poll::Ready(r) => {
                         std::task::Poll::Ready(r.map(|_| rb.filled().len()))
                     }
                     std::task::Poll::Pending => std::task::Poll::Pending,
                 }
             }
             IpcStreamInternal::Server(server) => {
                 let mut rb = ReadBuf::new(buf);
                 match <NamedPipeServer as AsyncRead>::poll_read(
                     std::pin::Pin::new(server),
                     cx,
                     &mut rb,
                 ) {
                     std::task::Poll::Ready(r) => {
                         std::task::Poll::Ready(r.map(|_| rb.filled().len()))
                     }
                     std::task::Poll::Pending => std::task::Poll::Pending,
                 }
             }
         }
     }
 }

 impl FuturesAsyncWrite for IpcStream {
     fn poll_write(
         mut self: std::pin::Pin<&mut Self>,
         cx: &mut std::task::Context<'_>,
         buf: &[u8],
     ) -> std::task::Poll<io::Result<usize>> {
         match &mut self.internal {
             IpcStreamInternal::Client(client) => {
                 <NamedPipeClient as AsyncWrite>::poll_write(std::pin::Pin::new(client), cx, buf)
             }
             IpcStreamInternal::Server(server) => {
                 <NamedPipeServer as AsyncWrite>::poll_write(std::pin::Pin::new(server), cx, buf)
             }
         }
     }

     fn poll_flush(
         mut self: std::pin::Pin<&mut Self>,
         cx: &mut std::task::Context<'_>,
     ) -> std::task::Poll<io::Result<()>> {
         match &mut self.internal {
             IpcStreamInternal::Client(client) => {
                 <NamedPipeClient as AsyncWrite>::poll_flush(std::pin::Pin::new(client), cx)
             }
             IpcStreamInternal::Server(server) => {
                 <NamedPipeServer as AsyncWrite>::poll_flush(std::pin::Pin::new(server), cx)
             }
         }
     }

     fn poll_close(
         mut self: std::pin::Pin<&mut Self>,
         cx: &mut std::task::Context<'_>,
     ) -> std::task::Poll<io::Result<()>> {
         match &mut self.internal {
             IpcStreamInternal::Client(client) => {
                 <NamedPipeClient as AsyncWrite>::poll_shutdown(std::pin::Pin::new(client), cx)
             }
             IpcStreamInternal::Server(server) => {
                 <NamedPipeServer as AsyncWrite>::poll_shutdown(std::pin::Pin::new(server), cx)
             }
         }
     }
 }

 /////////////////////////////////////////////////////////////

 pub struct IpcIncoming<'a> {
     listener: IpcListener,
     unord: FuturesUnordered<PinBoxFutureStatic<io::Result<IpcStream>>>,
     phantom: std::marker::PhantomData<&'a ()>,
 }

 impl Stream for IpcIncoming<'_> {
     type Item = io::Result<IpcStream>;

-    fn poll_next<'a>(
-        mut self: std::pin::Pin<&'a mut Self>,
+    fn poll_next(
+        mut self: std::pin::Pin<&mut Self>,
         cx: &mut std::task::Context<'_>,
     ) -> std::task::Poll<Option<Self::Item>> {
         if self.unord.is_empty() {
             self.unord.push(Box::pin(self.listener.accept()));
         }
         match Pin::new(&mut self.unord).poll_next(cx) {
             task::Poll::Ready(ro) => {
                 self.unord.push(Box::pin(self.listener.accept()));
                 std::task::Poll::Ready(ro)
             }
             task::Poll::Pending => std::task::Poll::Pending,
         }
     }
 }

 /////////////////////////////////////////////////////////////

 pub struct IpcListener {
     path: Option<PathBuf>,
     internal: Option<Mutex<Option<NamedPipeServer>>>,
 }

 impl IpcListener {
     /// Creates a new `IpcListener` bound to the specified path.
     #[expect(clippy::unused_async)]
     pub async fn bind<P: AsRef<Path>>(path: P) -> io::Result<Self> {
         let path = path.as_ref().to_path_buf();
         let server = ServerOptions::new()
             .first_pipe_instance(true)
             .create(&path)?;
         Ok(Self {
             path: Some(path),
             internal: Some(Mutex::new(Some(server))),
         })
     }

     /// Accepts a new incoming connection to this listener.
     #[must_use]
     pub fn accept(&self) -> PinBoxFutureStatic<io::Result<IpcStream>> {
         if self.path.is_none() {
             return Box::pin(std::future::ready(Err(io::Error::from(
                 io::ErrorKind::NotConnected,
             ))));
         }
         let internal = self.internal.as_ref().unwrap();
         let mut opt_server = internal.lock();
         let server = opt_server.take().unwrap();
         let path = self.path.clone().unwrap();
         *opt_server = match ServerOptions::new().create(path) {
             Ok(v) => Some(v),
             Err(e) => return Box::pin(std::future::ready(Err(e))),
         };
         Box::pin(async move {
             server.connect().await?;
             Ok(IpcStream {
                 internal: IpcStreamInternal::Server(server),
             })
         })
     }

     /// Returns a stream of incoming connections.
     pub fn incoming(&mut self) -> io::Result<IpcIncoming<'_>> {
         if self.path.is_none() {
             return Err(io::Error::from(io::ErrorKind::NotConnected));
         }
         Ok(IpcIncoming {
             listener: IpcListener {
                 path: self.path.take(),
                 internal: self.internal.take(),
             },
             unord: FuturesUnordered::new(),
             phantom: std::marker::PhantomData,
         })
     }
 }
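
The only substantive change in this file is the removal of the explicit `'a` lifetime from `poll_next`; the lifetime is used in a single place and can be elided, which the newer toolchain's clippy flags (the `needless_lifetimes` lint covers this pattern). A self-contained sketch of the same elision on a made-up stream type, assuming the futures-util crate:

use futures_util::Stream;
use std::pin::Pin;
use std::task::{Context, Poll};

/// Toy stream used only to illustrate the signature change; it counts down
/// from `remaining` and then ends.
struct Countdown {
    remaining: u32,
}

impl Stream for Countdown {
    type Item = u32;

    // Before: fn poll_next<'a>(mut self: Pin<&'a mut Self>, cx: &mut Context<'_>) -> ...
    // The named lifetime adds nothing here, so it is simply elided.
    fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.remaining == 0 {
            Poll::Ready(None)
        } else {
            self.remaining -= 1;
            Poll::Ready(Some(self.remaining))
        }
    }
}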


@@ -87,7 +87,7 @@ impl PlatformSupportWindows {
         for (n, netmask_elt) in netmask
             .iter_mut()
             .enumerate()
-            .take((prefix.PrefixLength as usize + 7) / 8)
+            .take((prefix.PrefixLength as usize).div_ceil(8))
         {
             let x_byte = ipv4_addr.octets()[n];
             let y_byte = a.octets()[n];
@@ -140,7 +140,7 @@ impl PlatformSupportWindows {
         for (n, netmask_elt) in netmask
             .iter_mut()
             .enumerate()
-            .take((prefix.PrefixLength as usize + 15) / 16)
+            .take((prefix.PrefixLength as usize).div_ceil(16))
         {
             let x_word = ipv6_addr.segments()[n];
             let y_word = a.segments()[n];
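
These two hunks are the same cleanup as in the IGD manager: converting a prefix length in bits into the number of whole bytes (IPv4) or 16-bit segments (IPv6) that the prefix touches is a ceiling division, now spelled `div_ceil`. A standalone sketch of the idea for IPv4, with illustrative names rather than the crate's actual API:

use std::net::Ipv4Addr;

/// Builds an IPv4 netmask from a prefix length (0..=32).
/// Illustrative only; veilid-core derives its masks from the Windows
/// adapter prefix data rather than from a bare prefix length.
fn netmask_from_prefix(prefix_len: usize) -> Ipv4Addr {
    let mut octets = [0u8; 4];
    // Number of octets the prefix touches, rounded up:
    // a /20 prefix covers ceil(20 / 8) = 3 octets.
    let touched = prefix_len.div_ceil(8);
    for (n, octet) in octets.iter_mut().enumerate().take(touched) {
        let bits_in_octet = (prefix_len - n * 8).min(8);
        *octet = (!0u8) << (8 - bits_in_octet);
    }
    Ipv4Addr::from(octets)
}

fn main() {
    assert_eq!(netmask_from_prefix(20), Ipv4Addr::new(255, 255, 240, 0));
    println!("/20 netmask: {}", netmask_from_prefix(20));
}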