From 0a890c8707e31a16818c2d4be03d37b45f51bb6a Mon Sep 17 00:00:00 2001
From: John Smith
Date: Mon, 29 May 2023 19:24:57 +0000
Subject: [PATCH] removing dev branch, many changes

---
 Cargo.lock | 1843 ++++++------
 Cargo.toml | 1 -
 doc/config/sample.config | 41 +-
 doc/config/veilid-server-config.md | 28 +-
 external/keyvaluedb | 2 +-
 package/linux/veilid-server.conf | 2 +-
 scripts/ios_build.sh | 2 +-
 scripts/macos_build.sh | 2 +-
 scripts/new_ios_sim.sh | 3 +-
 setup_macos.sh | 2 +-
 veilid-cli/Cargo.toml | 2 +-
 veilid-cli/src/client_api_connection.rs | 14 +-
 veilid-cli/src/command_processor.rs | 22 +-
 veilid-cli/src/peers_table_view.rs | 8 +-
 veilid-core/Cargo.toml | 17 +-
 veilid-core/proto/veilid.capnp | 70 +-
 veilid-core/run_tests.sh | 2 +-
 veilid-core/src/attachment_manager.rs | 11 +-
 veilid-core/src/core_context.rs | 65 +-
 veilid-core/src/crypto/byte_array_types.rs | 102 +-
 veilid-core/src/crypto/crypto_system.rs | 66 +-
 veilid-core/src/crypto/envelope.rs | 15 +-
 veilid-core/src/crypto/mod.rs | 30 +-
 veilid-core/src/crypto/none/mod.rs | 82 +-
 veilid-core/src/crypto/receipt.rs | 10 +-
 veilid-core/src/crypto/tests/test_crypto.rs | 63 +-
 veilid-core/src/crypto/tests/test_types.rs | 33 +
 veilid-core/src/crypto/types/crypto_typed.rs | 15 +-
 .../src/crypto/types/crypto_typed_set.rs | 26 +-
 veilid-core/src/crypto/types/keypair.rs | 6 +-
 veilid-core/src/crypto/types/mod.rs | 7 +-
 veilid-core/src/crypto/value.rs | 0
 veilid-core/src/crypto/vld0/mod.rs | 109 +-
 veilid-core/src/intf/mod.rs | 4 +-
 veilid-core/src/intf/native/mod.rs | 4 +-
 .../src/intf/native/protected_store.rs | 33 +-
 veilid-core/src/intf/native/system.rs | 2 +-
 veilid-core/src/intf/native/table_store.rs | 147 -
 veilid-core/src/intf/table_db.rs | 276 --
 veilid-core/src/intf/wasm/mod.rs | 4 +-
 veilid-core/src/intf/wasm/protected_store.rs | 32 +-
 veilid-core/src/intf/wasm/system.rs | 2 +-
 veilid-core/src/intf/wasm/table_store.rs | 151 -
 veilid-core/src/lib.rs | 11 +-
 .../src/network_manager/connection_table.rs | 2 +-
 veilid-core/src/network_manager/mod.rs | 25 +-
 veilid-core/src/network_manager/native/mod.rs | 2 +-
 veilid-core/src/network_manager/tasks/mod.rs | 4 +-
 veilid-core/src/network_manager/tests/mod.rs | 2 +
 .../tests/test_signed_node_info.rs | 145 +
 .../src/network_manager/types/address.rs | 130 +
 .../src/network_manager/types/address_type.rs | 22 +
 .../types/connection_descriptor.rs | 80 +
 .../network_manager/types/dial_info/mod.rs | 522 ++++
 .../network_manager/types/dial_info/tcp.rs | 21 +
 .../network_manager/types/dial_info/udp.rs | 21 +
 .../src/network_manager/types/dial_info/ws.rs | 22 +
 .../network_manager/types/dial_info/wss.rs | 22 +
 .../network_manager/types/dial_info_class.rs | 50 +
 .../network_manager/types/dial_info_filter.rs | 86 +
 .../types/low_level_protocol_type.rs | 31 +
 veilid-core/src/network_manager/types/mod.rs | 27 +
 .../network_manager/types/network_class.rs | 37 +
 .../src/network_manager/types/peer_address.rs | 66 +
 .../network_manager/types/protocol_type.rs | 104 +
 .../src/network_manager/types/signal_info.rs | 51 +
 .../network_manager/types/socket_address.rs | 77 +
 veilid-core/src/routing_table/bucket_entry.rs | 59 +-
 veilid-core/src/routing_table/debug.rs | 64 +-
 veilid-core/src/routing_table/find_peers.rs | 103 +
 veilid-core/src/routing_table/mod.rs | 70 +-
 veilid-core/src/routing_table/node_ref.rs | 12 +-
 veilid-core/src/routing_table/privacy.rs | 31 +-
 .../src/routing_table/route_spec_store/mod.rs | 3 -
 .../route_spec_store/route_set_spec_detail.rs | 1 -
 .../route_spec_store/route_spec_store.rs | 6 +-
 .../route_spec_store_content.rs | 55 +-
 .../src/routing_table/routing_domains.rs | 57 +-
 .../src/routing_table/routing_table_inner.rs | 109 +-
 .../src/routing_table/tasks/bootstrap.rs | 17 +-
 veilid-core/src/routing_table/tasks/mod.rs | 4 +-
 .../routing_table/tasks/relay_management.rs | 4 +-
 veilid-core/src/routing_table/tests/mod.rs | 1 +
 .../src/routing_table/tests/test_serialize.rs | 84 +
 .../routing_table/types/dial_info_detail.rs | 43 +
 .../src/routing_table/types/direction.rs | 22 +
 veilid-core/src/routing_table/types/mod.rs | 21 +
 .../src/routing_table/types/node_info.rs | 164 ++
 .../src/routing_table/types/node_status.rs | 66 +
 .../src/routing_table/types/peer_info.rs | 49 +
 .../src/routing_table/types/routing_domain.rs | 32 +
 .../types/signed_direct_node_info.rs | 93 +
 .../routing_table/types/signed_node_info.rs | 96 +
 .../types/signed_relayed_node_info.rs | 138 +
 veilid-core/src/rpc_processor/coders/mod.rs | 19 +-
 .../src/rpc_processor/coders/node_info.rs | 24 +-
 .../rpc_processor/coders/operations/answer.rs | 43 +-
 .../coders/operations/operation.rs | 48 +-
 .../coders/operations/operation_app_call.rs | 70 +-
 .../operations/operation_app_message.rs | 35 +-
 .../operations/operation_cancel_tunnel.rs | 41 +-
 .../operations/operation_complete_tunnel.rs | 61 +-
 .../coders/operations/operation_find_block.rs | 97 +-
 .../coders/operations/operation_find_node.rs | 58 +-
 .../coders/operations/operation_get_value.rs | 255 +-
 .../operations/operation_return_receipt.rs | 39 +-
 .../coders/operations/operation_route.rs | 76 +-
 .../coders/operations/operation_set_value.rs | 238 +-
 .../coders/operations/operation_signal.rs | 24 +-
 .../operations/operation_start_tunnel.rs | 55 +-
 .../coders/operations/operation_status.rs | 53 +-
 .../operations/operation_supply_block.rs | 112 +-
 .../operation_validate_dial_info.rs | 56 +-
 .../operations/operation_value_changed.rs | 52 +-
 .../operations/operation_watch_value.rs | 160 +-
 .../coders/operations/question.rs | 29 +-
 .../coders/operations/respond_to.rs | 14 +-
 .../coders/operations/statement.rs | 31 +-
 .../src/rpc_processor/coders/peer_info.rs | 18 +-
 .../coders/private_safety_route.rs | 13 +-
 .../src/rpc_processor/coders/signal_info.rs | 5 +-
 .../coders/signed_direct_node_info.rs | 17 +-
 .../rpc_processor/coders/signed_node_info.rs | 6 +-
 .../coders/signed_relayed_node_info.rs | 38 +-
 .../rpc_processor/coders/signed_value_data.rs | 31 +
 .../coders/signed_value_descriptor.rs | 28 +
 .../src/rpc_processor/coders/value_data.rs | 32 +-
 veilid-core/src/rpc_processor/fanout_call.rs | 233 ++
 veilid-core/src/rpc_processor/mod.rs | 277 +-
 .../src/rpc_processor/operation_waiter.rs | 70 +-
 veilid-core/src/rpc_processor/rpc_app_call.rs | 47 +-
 .../src/rpc_processor/rpc_app_message.rs | 18 +-
 .../src/rpc_processor/rpc_find_node.rs | 73 +-
 .../src/rpc_processor/rpc_get_value.rs | 129 +-
 .../src/rpc_processor/rpc_return_receipt.rs | 9 +-
 veilid-core/src/rpc_processor/rpc_route.rs | 91 +-
 .../src/rpc_processor/rpc_set_value.rs | 146 +-
 veilid-core/src/rpc_processor/rpc_signal.rs | 10 +-
 veilid-core/src/rpc_processor/rpc_status.rs | 42 +-
 .../rpc_processor/rpc_validate_dial_info.rs | 24 +-
 veilid-core/src/storage_manager/debug.rs | 18 +
 veilid-core/src/storage_manager/get_value.rs | 191 ++
 veilid-core/src/storage_manager/keys.rs | 63 +
 veilid-core/src/storage_manager/mod.rs | 411 +++
 .../src/storage_manager/record_store.rs | 548 ++++
 .../storage_manager/record_store_limits.rs | 16 +
 veilid-core/src/storage_manager/set_value.rs | 225 ++
 .../storage_manager/storage_manager_inner.rs | 439 +++
 .../tasks/flush_record_stores.rs | 21 +
 veilid-core/src/storage_manager/tasks/mod.rs | 43 +
 .../types/local_record_detail.rs | 12 +
 veilid-core/src/storage_manager/types/mod.rs | 17 +
 .../storage_manager/types/opened_record.rs | 31 +
 .../src/storage_manager/types/record.rs | 84 +
 .../src/storage_manager/types/record_data.rs | 31 +
 .../types/remote_record_detail.rs | 7 +
 .../types/signed_value_data.rs | 95 +
 .../types/signed_value_descriptor.rs | 81 +
 veilid-core/src/supplier_table.rs | 0
 veilid-core/src/table_store/mod.rs | 17 +
 veilid-core/src/table_store/native.rs | 53 +
 veilid-core/src/table_store/table_db.rs | 382 +++
 veilid-core/src/table_store/table_store.rs | 557 ++++
 veilid-core/src/table_store/tests/mod.rs | 1 +
 .../src/table_store/tests/test_table_store.rs | 277 ++
 veilid-core/src/table_store/wasm.rs | 40 +
 veilid-core/src/tests/common/mod.rs | 1 -
 .../src/tests/common/test_table_store.rs | 170 --
 .../src/tests/common/test_veilid_config.rs | 81 +-
 .../src/tests/common/test_veilid_core.rs | 162 --
 veilid-core/src/tests/mod.rs | 3 +
 veilid-core/src/tests/native/mod.rs | 38 +-
 veilid-core/src/veilid_api/api.rs | 51 +-
 veilid-core/src/veilid_api/debug.rs | 89 +-
 veilid-core/src/veilid_api/error.rs | 57 +-
 veilid-core/src/veilid_api/mod.rs | 12 +-
 veilid-core/src/veilid_api/routing_context.rs | 145 +-
 .../src/veilid_api/serialize_helpers/mod.rs | 14 +
 .../serialize_helpers/rkyv_enum_set.rs | 53 +
 .../serialize_helpers/rkyv_range_set_blaze.rs | 73 +
 .../serialize_helpers/serialize_arc.rs | 12 +
 .../serialize_json.rs} | 103 +-
 .../serialize_range_set_blaze.rs | 60 +
 .../serialize_helpers/veilid_rkyv.rs | 151 +
 veilid-core/src/veilid_api/tests/mod.rs | 1 +
 .../veilid_api/tests/test_serialize_rkyv.rs | 16 +
 veilid-core/src/veilid_api/types.rs | 2499 -----------------
 .../src/veilid_api/{ => types}/aligned_u64.rs | 14 +
 .../src/veilid_api/types/app_message_call.rs | 65 +
 .../types/dht/dht_record_descriptor.rs | 56 +
 veilid-core/src/veilid_api/types/dht/mod.rs | 18 +
 .../src/veilid_api/types/dht/schema/dflt.rs | 84 +
 .../src/veilid_api/types/dht/schema/mod.rs | 97 +
 .../src/veilid_api/types/dht/schema/smpl.rs | 152 +
 .../src/veilid_api/types/dht/value_data.rs | 54 +
 .../types/dht/value_subkey_range_set.rs | 51 +
 veilid-core/src/veilid_api/types/fourcc.rs | 65 +
 veilid-core/src/veilid_api/types/mod.rs | 21 +
 veilid-core/src/veilid_api/types/safety.rs | 125 +
 veilid-core/src/veilid_api/types/stats.rs | 113 +
 veilid-core/src/veilid_api/types/tunnel.rs | 83 +
 .../src/veilid_api/types/veilid_log.rs | 88 +
 .../src/veilid_api/types/veilid_state.rs | 144 +
 veilid-core/src/veilid_config.rs | 125 +-
 veilid-core/src/watcher_table.rs | 0
 veilid-core/tests/web.rs | 38 +-
 veilid-flutter/example/.gitignore | 1 -
 veilid-flutter/example/lib/app.dart | 7 +-
 veilid-flutter/example/lib/log_terminal.dart | 3 -
 veilid-flutter/example/lib/main.dart | 1 -
 veilid-flutter/example/lib/veilid_init.dart | 4 +-
 .../Flutter/GeneratedPluginRegistrant.swift | 6 +-
 veilid-flutter/example/macos/Podfile | 2 +-
 veilid-flutter/example/macos/Podfile.lock | 31 +-
 .../macos/Runner.xcodeproj/project.pbxproj | 9 +-
 veilid-flutter/example/pubspec.lock | 249 +-
 veilid-flutter/example/pubspec.yaml | 2 +-
 veilid-flutter/lib/base64url_no_pad.dart | 15 -
 veilid-flutter/lib/default_config.dart | 84 +-
 veilid-flutter/lib/routing_context.dart | 272 ++
 veilid-flutter/lib/veilid.dart | 1921 +------------
 veilid-flutter/lib/veilid_api_exception.dart | 286 ++
veilid-flutter/lib/veilid_config.dart | 947 +++++++ veilid-flutter/lib/veilid_crypto.dart | 167 ++ veilid-flutter/lib/veilid_encoding.dart | 122 + veilid-flutter/lib/veilid_ffi.dart | 809 +++++- veilid-flutter/lib/veilid_js.dart | 362 ++- veilid-flutter/lib/veilid_state.dart | 547 ++++ veilid-flutter/lib/veilid_table_db.dart | 73 + veilid-flutter/macos/veilid.podspec | 4 +- veilid-flutter/pubspec.yaml | 5 +- veilid-flutter/rust/Cargo.toml | 8 +- veilid-flutter/rust/src/dart_ffi.rs | 685 ++++- .../rust/src/dart_isolate_wrapper.rs | 21 + veilid-flutter/test/veilid_test.dart | 2 +- veilid-server/Cargo.toml | 11 +- veilid-server/src/client_api.rs | 2 +- veilid-server/src/cmdline.rs | 22 +- veilid-server/src/main.rs | 25 +- veilid-server/src/server.rs | 19 +- veilid-server/src/settings.rs | 222 +- veilid-server/src/unix.rs | 36 +- veilid-tools/Cargo.toml | 7 +- veilid-tools/run_tests.sh | 2 +- veilid-tools/src/log_thru.rs | 50 +- veilid-tools/src/random.rs | 15 +- veilid-tools/src/tools.rs | 35 + veilid-wasm/Cargo.toml | 4 +- veilid-wasm/src/lib.rs | 796 +++++- veilid-wasm/tests/web.rs | 22 +- 250 files changed, 18084 insertions(+), 8040 deletions(-) delete mode 100644 veilid-core/src/crypto/value.rs delete mode 100644 veilid-core/src/intf/native/table_store.rs delete mode 100644 veilid-core/src/intf/table_db.rs delete mode 100644 veilid-core/src/intf/wasm/table_store.rs create mode 100644 veilid-core/src/network_manager/tests/test_signed_node_info.rs create mode 100644 veilid-core/src/network_manager/types/address.rs create mode 100644 veilid-core/src/network_manager/types/address_type.rs create mode 100644 veilid-core/src/network_manager/types/connection_descriptor.rs create mode 100644 veilid-core/src/network_manager/types/dial_info/mod.rs create mode 100644 veilid-core/src/network_manager/types/dial_info/tcp.rs create mode 100644 veilid-core/src/network_manager/types/dial_info/udp.rs create mode 100644 veilid-core/src/network_manager/types/dial_info/ws.rs create mode 100644 veilid-core/src/network_manager/types/dial_info/wss.rs create mode 100644 veilid-core/src/network_manager/types/dial_info_class.rs create mode 100644 veilid-core/src/network_manager/types/dial_info_filter.rs create mode 100644 veilid-core/src/network_manager/types/low_level_protocol_type.rs create mode 100644 veilid-core/src/network_manager/types/mod.rs create mode 100644 veilid-core/src/network_manager/types/network_class.rs create mode 100644 veilid-core/src/network_manager/types/peer_address.rs create mode 100644 veilid-core/src/network_manager/types/protocol_type.rs create mode 100644 veilid-core/src/network_manager/types/signal_info.rs create mode 100644 veilid-core/src/network_manager/types/socket_address.rs create mode 100644 veilid-core/src/routing_table/find_peers.rs create mode 100644 veilid-core/src/routing_table/tests/mod.rs create mode 100644 veilid-core/src/routing_table/tests/test_serialize.rs create mode 100644 veilid-core/src/routing_table/types/dial_info_detail.rs create mode 100644 veilid-core/src/routing_table/types/direction.rs create mode 100644 veilid-core/src/routing_table/types/mod.rs create mode 100644 veilid-core/src/routing_table/types/node_info.rs create mode 100644 veilid-core/src/routing_table/types/node_status.rs create mode 100644 veilid-core/src/routing_table/types/peer_info.rs create mode 100644 veilid-core/src/routing_table/types/routing_domain.rs create mode 100644 veilid-core/src/routing_table/types/signed_direct_node_info.rs create mode 100644 
veilid-core/src/routing_table/types/signed_node_info.rs create mode 100644 veilid-core/src/routing_table/types/signed_relayed_node_info.rs create mode 100644 veilid-core/src/rpc_processor/coders/signed_value_data.rs create mode 100644 veilid-core/src/rpc_processor/coders/signed_value_descriptor.rs create mode 100644 veilid-core/src/rpc_processor/fanout_call.rs create mode 100644 veilid-core/src/storage_manager/debug.rs create mode 100644 veilid-core/src/storage_manager/get_value.rs create mode 100644 veilid-core/src/storage_manager/keys.rs create mode 100644 veilid-core/src/storage_manager/mod.rs create mode 100644 veilid-core/src/storage_manager/record_store.rs create mode 100644 veilid-core/src/storage_manager/record_store_limits.rs create mode 100644 veilid-core/src/storage_manager/set_value.rs create mode 100644 veilid-core/src/storage_manager/storage_manager_inner.rs create mode 100644 veilid-core/src/storage_manager/tasks/flush_record_stores.rs create mode 100644 veilid-core/src/storage_manager/tasks/mod.rs create mode 100644 veilid-core/src/storage_manager/types/local_record_detail.rs create mode 100644 veilid-core/src/storage_manager/types/mod.rs create mode 100644 veilid-core/src/storage_manager/types/opened_record.rs create mode 100644 veilid-core/src/storage_manager/types/record.rs create mode 100644 veilid-core/src/storage_manager/types/record_data.rs create mode 100644 veilid-core/src/storage_manager/types/remote_record_detail.rs create mode 100644 veilid-core/src/storage_manager/types/signed_value_data.rs create mode 100644 veilid-core/src/storage_manager/types/signed_value_descriptor.rs delete mode 100644 veilid-core/src/supplier_table.rs create mode 100644 veilid-core/src/table_store/mod.rs create mode 100644 veilid-core/src/table_store/native.rs create mode 100644 veilid-core/src/table_store/table_db.rs create mode 100644 veilid-core/src/table_store/table_store.rs create mode 100644 veilid-core/src/table_store/tests/mod.rs create mode 100644 veilid-core/src/table_store/tests/test_table_store.rs create mode 100644 veilid-core/src/table_store/wasm.rs delete mode 100644 veilid-core/src/tests/common/test_table_store.rs create mode 100644 veilid-core/src/veilid_api/serialize_helpers/mod.rs create mode 100644 veilid-core/src/veilid_api/serialize_helpers/rkyv_enum_set.rs create mode 100644 veilid-core/src/veilid_api/serialize_helpers/rkyv_range_set_blaze.rs create mode 100644 veilid-core/src/veilid_api/serialize_helpers/serialize_arc.rs rename veilid-core/src/veilid_api/{serialize_helpers.rs => serialize_helpers/serialize_json.rs} (50%) create mode 100644 veilid-core/src/veilid_api/serialize_helpers/serialize_range_set_blaze.rs create mode 100644 veilid-core/src/veilid_api/serialize_helpers/veilid_rkyv.rs create mode 100644 veilid-core/src/veilid_api/tests/mod.rs create mode 100644 veilid-core/src/veilid_api/tests/test_serialize_rkyv.rs delete mode 100644 veilid-core/src/veilid_api/types.rs rename veilid-core/src/veilid_api/{ => types}/aligned_u64.rs (85%) create mode 100644 veilid-core/src/veilid_api/types/app_message_call.rs create mode 100644 veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs create mode 100644 veilid-core/src/veilid_api/types/dht/mod.rs create mode 100644 veilid-core/src/veilid_api/types/dht/schema/dflt.rs create mode 100644 veilid-core/src/veilid_api/types/dht/schema/mod.rs create mode 100644 veilid-core/src/veilid_api/types/dht/schema/smpl.rs create mode 100644 veilid-core/src/veilid_api/types/dht/value_data.rs create mode 100644 
veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs create mode 100644 veilid-core/src/veilid_api/types/fourcc.rs create mode 100644 veilid-core/src/veilid_api/types/mod.rs create mode 100644 veilid-core/src/veilid_api/types/safety.rs create mode 100644 veilid-core/src/veilid_api/types/stats.rs create mode 100644 veilid-core/src/veilid_api/types/tunnel.rs create mode 100644 veilid-core/src/veilid_api/types/veilid_log.rs create mode 100644 veilid-core/src/veilid_api/types/veilid_state.rs delete mode 100644 veilid-core/src/watcher_table.rs delete mode 100644 veilid-flutter/lib/base64url_no_pad.dart create mode 100644 veilid-flutter/lib/routing_context.dart create mode 100644 veilid-flutter/lib/veilid_api_exception.dart create mode 100644 veilid-flutter/lib/veilid_config.dart create mode 100644 veilid-flutter/lib/veilid_crypto.dart create mode 100644 veilid-flutter/lib/veilid_encoding.dart create mode 100644 veilid-flutter/lib/veilid_state.dart create mode 100644 veilid-flutter/lib/veilid_table_db.dart diff --git a/Cargo.lock b/Cargo.lock index 0d769411..32980e79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -44,7 +44,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check 0.9.4", ] @@ -56,16 +56,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if 1.0.0", - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check 0.9.4", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -97,7 +97,7 @@ checksum = "1a52f81f9add01deacdc1fcb05ba09523a8faefdec6c3f69cb752b9fa9c22e5a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -106,12 +106,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53eff4527d2f64c8374a3bbe1d280ce660203e8c83e4a893231037a488639a7b" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "env_logger 0.8.4", "lazy_static", "libc", "log", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", "time 0.2.27", "winapi 0.3.9", @@ -171,9 +171,20 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" + +[[package]] +name = "argon2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95c2fcf79ad1932ac6269a738109997a83c227c09b75842ae564dc8ede6a861c" +dependencies = [ + "base64ct", + "blake2", + "password-hash", +] [[package]] name = "arraydeque" @@ -183,9 +194,9 @@ checksum = "f0ffd3d69bd89910509a5d31d1f1353f38ccffdd116dd0099bbd6627f7bd8ad8" [[package]] 
name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -201,7 +212,7 @@ checksum = "45403b49e3954a4b8428a0ac21a4b7afadccf92bfd96273f1a58cd4812496ae0" dependencies = [ "generic-array 0.12.4", "generic-array 0.13.3", - "generic-array 0.14.6", + "generic-array 0.14.7", "stable_deref_trait", ] @@ -212,7 +223,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -228,9 +239,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ "async-lock", "async-task", @@ -257,39 +268,38 @@ dependencies = [ [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg", + "cfg-if 1.0.0", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-process" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" +checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ "async-io", "async-lock", @@ -298,9 +308,9 @@ dependencies = [ "cfg-if 1.0.0", "event-listener", "futures-lite", - "libc", + "rustix", "signal-hook", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -342,36 +352,37 @@ dependencies = [ "futures-io", "futures-util", "pin-utils", - "socket2", + "socket2 0.4.9", "trust-dns-resolver", ] [[package]] name = "async-stream" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", + "pin-project-lite 0.2.9", ] [[package]] name = "async-stream-impl" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-tls" @@ -381,20 +392,34 @@ checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" dependencies = [ "futures-core", "futures-io", - "rustls", + "rustls 0.19.1", "webpki 0.21.4", "webpki-roots 0.21.1", ] [[package]] -name = "async-trait" -version = "0.1.63" +name = "async-tls" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1" +checksum = "cfeefd0ca297cbbb3bd34fd6b228401c2a5177038257afd751bc29f0a2da4795" +dependencies = [ + "futures-core", + "futures-io", + "rustls 0.20.8", + "rustls-pemfile 1.0.2", + "webpki 0.22.0", + "webpki-roots 0.22.6", +] + +[[package]] +name = "async-trait" +version = "0.1.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -412,16 +437,16 @@ dependencies = [ [[package]] name = "async-tungstenite" -version = "0.19.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6acf7e4a267eecbb127ed696bb2d50572c22ba7f586a646321e1798d8336a1" +checksum = "6a48bf42ab2178374a79853bceef600e279258c75049b20481b022d73c908882" dependencies = [ - "async-tls", + "async-tls 0.12.0", "futures-io", "futures-util", "log", "pin-project-lite 0.2.9", - "tungstenite 0.18.0", + "tungstenite 0.19.0", ] [[package]] @@ -436,9 +461,9 @@ dependencies = [ "futures-task", "futures-timer", "futures-util", - "pin-project 1.0.12", + "pin-project 1.1.0", "rustc_version 0.4.0", - "tokio 1.24.2", + "tokio 1.28.1", "wasm-bindgen-futures", ] @@ -455,18 +480,15 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" -dependencies = [ - "autocfg", -] +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" [[package]] name = "atomic-waker" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "attohttpc" @@ -499,14 +521,14 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.4" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", - "bitflags", - "bytes 1.3.0", + "bitflags 1.3.2", + "bytes 1.4.0", "futures-util", "http", "http-body", @@ -521,19 +543,18 @@ dependencies = [ "serde", "sync_wrapper", "tower", - "tower-http", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" +checksum = 
"759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", - "bytes 1.3.0", + "bytes 1.4.0", "futures-util", "http", "http-body", @@ -553,7 +574,7 @@ dependencies = [ "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide", + "miniz_oxide 0.6.2", "object", "rustc-demangle", ] @@ -576,13 +597,25 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bindgen" version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr 0.4.0", "clang-sys", "lazy_static", @@ -601,7 +634,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr 0.6.0", "clang-sys", "clap 2.34.0", @@ -624,6 +657,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f6b0c9ebae276e207a3e4e989ed9f3be8b7ce8728b80629c98c21d27742e6ba" + [[package]] name = "bitvec" version = "1.0.1" @@ -636,6 +675,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "blake3" version = "1.3.3" @@ -658,7 +706,7 @@ checksum = "7b04ce3d2372d05d1ef4ea3fdf427da6ae3c17ca06d688a107b5344836276bc3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -667,16 +715,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -697,9 +745,9 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "blocking" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", "async-lock", @@ -707,6 +755,7 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", + "log", ] 
[[package]] @@ -718,23 +767,19 @@ dependencies = [ "cmake", ] -[[package]] -name = "boxfnonce" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5988cb1d626264ac94100be357308f29ff7cbdd3b36bda27f450a4ee3f713426" - [[package]] name = "bugsalot" version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc12a55e9bd3840279c248c96ecf541d5ba98d6654e08869fe167121384a582c" +source = "git+https://github.com/crioux/bugsalot.git#336a7053faadf990b9362edf5752ef34fa1f9615" +dependencies = [ + "libc", +] [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "byte-slice-cast" @@ -744,23 +789,24 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytecheck" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" dependencies = [ "bytecheck_derive", "ptr_meta", + "simdutf8", ] [[package]] name = "bytecheck_derive" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -777,21 +823,21 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "capnp" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35400c6acb55f1a91e6843beca189aba6bccd3c11fae5a7c0288fe5a1c3da822" +checksum = "13e2d432d1601d61d1e11140d04e9d239b5cf7316fa1106523c3d86eea19c29d" [[package]] name = "capnp-futures" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cbe2479d667c6d44219a07d3da971379e4321ea7343b319994695be7da97a17" +checksum = "71d520e0af228b92de357f230f4987ee4f9786f2b8aa24b9cfe53f5b11c17198" dependencies = [ "capnp", "futures", @@ -799,9 +845,9 @@ dependencies = [ [[package]] name = "capnp-rpc" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62097700b1da1dd567d77e91ddd7da0fd22c790599ce56c4eea346d60dff0acd" +checksum = "9ab8e869783e491cbcc350427a5e775aa4d8a1deaa5198d74332957cfa430779" dependencies = [ "capnp", "capnp-futures", @@ -810,9 +856,9 @@ dependencies = [ [[package]] name = "capnpc" -version = "0.16.1" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74147d35b0920efb5d676f49c7b4c6f643eb231a3597a1ca82af3b94cc29841c" +checksum = "93a9ad66f56468a890565d818ec4ab1300c1f6c62adbbc3295392f61d8f7dbd7" dependencies = [ "capnp", ] @@ -841,7 +887,7 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ - "nom 5.1.2", + "nom 5.1.3", ] [[package]] @@ -879,12 +925,12 @@ dependencies = [ [[package]] name = "chacha20" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fc89c7c5b9e7a02dfe45cd2367bae382f9ed31c61ca8debe5f827c420a2f08" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if 1.0.0", - "cipher 0.4.3", + "cipher 0.4.4", "cpufeatures", ] @@ -903,9 +949,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -918,9 +964,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -929,15 +975,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -949,14 +995,14 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "cipher" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", @@ -964,9 +1010,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -981,7 +1027,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap 0.11.0", "unicode-width", @@ -990,12 +1036,12 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", - 
"bitflags", + "bitflags 1.3.2", "clap_lex", "indexmap", "strsim 0.10.0", @@ -1014,23 +1060,13 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "color-eyre" version = "0.6.2" @@ -1050,15 +1086,15 @@ version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "memchr", ] [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -1084,21 +1120,21 @@ dependencies = [ [[package]] name = "console-api" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86" +checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e" dependencies = [ "prost", "prost-types", - "tonic", + "tonic 0.9.2", "tracing-core", ] [[package]] name = "console-subscriber" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a3a81dfaf6b66bce5d159eddae701e3a002f194d378cbf7be5f053c281d9be" +checksum = "57ab2224a0311582eb03adba4caaf18644f7b1f10a760803a803b9b605187fc7" dependencies = [ "console-api", "crossbeam-channel", @@ -1110,9 +1146,9 @@ dependencies = [ "serde", "serde_json", "thread_local", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-stream", - "tonic", + "tonic 0.9.2", "tracing", "tracing-core", "tracing-subscriber", @@ -1130,9 +1166,9 @@ dependencies = [ [[package]] name = "console_log" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" +checksum = "e89f72f65e8501878b8a004d5a1afb780987e2ce2b4532c562e367a72c57499f" dependencies = [ "log", "web-sys", @@ -1146,9 +1182,9 @@ checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "constant_time_eq" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279" +checksum = "13418e745008f7349ec7e449155f419a61b92b58a99cc3616942b926825ec76b" [[package]] name = "core-foundation" @@ -1166,7 +1202,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "libc", ] @@ -1178,15 +1214,15 @@ checksum = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" [[package]] name = "core-foundation-sys" -version = "0.8.3" 
+version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -1210,8 +1246,9 @@ dependencies = [ "atty", "cast", "ciborium", - "clap 3.2.23", + "clap 3.2.25", "criterion-plot", + "futures", "itertools", "lazy_static", "num-traits", @@ -1238,9 +1275,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1248,9 +1285,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -1259,22 +1296,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if 1.0.0", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -1285,10 +1322,10 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crossterm_winapi", "libc", - "mio 0.8.5", + "mio 0.8.6", "parking_lot 0.12.1", "signal-hook", "signal-hook-mio", @@ -1316,7 +1353,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "typenum", ] @@ -1326,7 +1363,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1337,25 +1374,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "ctrlc" -version = "3.2.4" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" +checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" dependencies = [ "nix 0.26.2", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] -[[package]] -name = "cty" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" - [[package]] name = "cursive" version = "0.20.0" @@ -1370,7 +1401,7 @@ dependencies = [ "libc", "log", "signal-hook", - "tokio 1.24.2", + "tokio 1.28.1", "unicode-segmentation", "unicode-width", ] @@ -1385,7 +1416,7 @@ dependencies = [ "flexi_logger", "lazy_static", "log", - "time 0.3.17", + "time 0.3.21", "unicode-width", ] @@ -1415,8 +1446,8 @@ dependencies = [ "log", "num", "owning_ref", - "time 0.3.17", - "tokio 1.24.2", + "time 0.3.21", + "tokio 1.28.1", "toml", "unicode-segmentation", "unicode-width", @@ -1458,57 +1489,12 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322296e2f2e5af4270b54df9e85a02ff037e271af20ba3e7fe1575515dc840b8" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017a1385b05d631e7875b1f151c9f012d37b53491e2a87f65bff5c262b2111d8" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c26bbb078acf09bc1ecda02d4223f03bdd28bd4874edcb0379138efc499ce971" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357f40d1f06a24b60ae1fe122542c1fb05d28d32acb2aed064e84bc2ad1e252e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "daemonize" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70c24513e34f53b640819f0ac9f705b673fcf4006d7aab8778bee72ebfc89815" +checksum = "ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" dependencies = [ - "boxfnonce", "libc", ] @@ -1524,12 +1510,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", + "darling_core 0.20.1", + "darling_macro 0.20.1", ] [[package]] @@ -1543,20 +1529,20 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -1567,18 +1553,18 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 
1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ - "darling_core 0.14.2", + "darling_core 0.20.1", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -1588,10 +1574,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" dependencies = [ "cfg-if 1.0.0", - "hashbrown", + "hashbrown 0.12.3", "lock_api", "once_cell", - "parking_lot_core 0.9.6", + "parking_lot_core 0.9.7", ] [[package]] @@ -1608,7 +1594,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1617,7 +1603,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -1626,7 +1612,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1700,14 +1686,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enum-map" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c25992259941eb7e57b936157961b217a4fc8597829ddef0596d6c3cd86e1a" +checksum = "988f0d17a0fa38291e5f41f71ea8d46a5d5497b9054d5a759fae2cbb819f2356" dependencies = [ "enum-map-derive", ] @@ -1720,7 +1706,7 @@ checksum = "2a4da76b3b6116d758c7ba93f7ec6a35d2e2cf24feda76c6e38a375f4d5c59f2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1741,14 +1727,14 @@ checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", "serde", @@ -1756,14 +1742,14 @@ dependencies = [ [[package]] name = "enumset_derive" -version = "0.6.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ - "darling 0.14.2", + "darling 0.20.1", "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -1800,17 +1786,24 @@ dependencies = [ ] [[package]] -name = "err-derive" +name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c34a887c8df3ed90498c1c437ce21f211c8e27672921a8ffa293cb8d6d4caa9e" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn", - "synstructure", + "errno-dragonfly", + "libc", + "windows-sys 
0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", ] [[package]] @@ -1874,7 +1867,7 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -1892,9 +1885,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1929,12 +1922,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -1952,7 +1945,7 @@ dependencies = [ "regex", "rustversion", "thiserror", - "time 0.3.17", + "time 0.3.21", ] [[package]] @@ -1964,8 +1957,8 @@ dependencies = [ "futures-core", "futures-sink", "nanorand", - "pin-project 1.0.12", - "spin 0.9.4", + "pin-project 1.1.0", + "spin 0.9.8", ] [[package]] @@ -1999,7 +1992,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" dependencies = [ - "bitflags", + "bitflags 1.3.2", "fuchsia-zircon-sys", ] @@ -2017,9 +2010,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2032,9 +2025,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2042,15 +2035,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2059,15 +2052,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -2080,26 +2073,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2113,9 +2106,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2129,6 +2122,12 @@ dependencies = [ "slab", ] +[[package]] +name = "gen_ops" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c56cad8ee78109d547e40bf4ad78968a25157e7963d799d79921655629825a" + [[package]] name = "generic-array" version = "0.12.4" @@ -2149,9 +2148,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check 0.9.4", @@ -2170,9 +2169,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2183,9 +2182,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" @@ -2250,11 +2249,11 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "fnv", "futures-core", "futures-sink", @@ -2262,7 +2261,7 @@ dependencies = [ "http", "indexmap", "slab", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-util", "tracing", ] @@ -2292,20 +2291,29 @@ dependencies = [ ] [[package]] -name = "hashlink" -version = "0.8.1" +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "hashbrown", - "serde", + "ahash 0.8.3", ] [[package]] name = "hashlink" version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ - "hashbrown", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "hashlink" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" +dependencies = [ + "hashbrown 0.13.2", ] [[package]] @@ -2335,9 +2343,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -2357,6 +2365,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -2396,11 +2410,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "fnv", "itoa", ] @@ -2411,17 +2425,11 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "http", "pin-project-lite 0.2.9", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "httparse" version = "1.8.0" @@ -2442,11 +2450,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures-channel", "futures-core", "futures-util", @@ -2457,8 +2465,8 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.9", - "socket2", - "tokio 1.24.2", + "socket2 0.4.9", + "tokio 1.28.1", "tower-service", "tracing", "want", @@ 
-2472,32 +2480,31 @@ checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite 0.2.9", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-io-timeout", ] [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi 0.3.9", + "windows 0.48.0", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -2542,7 +2549,7 @@ name = "igd" version = "0.12.0" dependencies = [ "attohttpc", - "bytes 1.3.0", + "bytes 1.4.0", "futures", "http", "hyper", @@ -2551,7 +2558,7 @@ dependencies = [ "simplelog 0.9.0", "tokio 0.2.25", "tokio 0.3.7", - "tokio 1.24.2", + "tokio 1.28.1", "url", "xmltree", ] @@ -2591,7 +2598,7 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2602,12 +2609,12 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", ] [[package]] @@ -2616,7 +2623,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -2640,6 +2647,17 @@ dependencies = [ "web-sys", ] +[[package]] +name = "io-lifetimes" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "iovec" version = "0.1.4" @@ -2655,7 +2673,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring 0.5.1", "winapi 0.3.9", "winreg", @@ -2663,9 +2681,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -2678,9 +2696,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = 
"453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jni" @@ -2696,6 +2714,22 @@ dependencies = [ "walkdir", ] +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if 1.0.0", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", + "windows-sys 0.45.0", +] + [[package]] name = "jni-sys" version = "0.3.0" @@ -2704,9 +2738,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] @@ -2766,10 +2800,10 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "core-foundation 0.9.3", - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "directories", "fs4", - "jni", + "jni 0.20.0", "keychain-services", "lazy_static", "log", @@ -2798,7 +2832,7 @@ dependencies = [ "keyvaluedb", "keyvaluedb-shared-tests", "parking_lot 0.12.1", - "tokio 1.24.2", + "tokio 1.28.1", "wasm-bindgen-futures", "wasm-bindgen-test", ] @@ -2827,15 +2861,16 @@ dependencies = [ "parking_lot 0.12.1", "rand 0.8.5", "rusqlite", - "sysinfo", + "sysinfo 0.29.0", "tempfile", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] name = "keyvaluedb-web" version = "0.1.0" dependencies = [ + "async-lock", "console_log", "flume", "futures", @@ -2875,9 +2910,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libloading" @@ -2891,9 +2926,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -2902,9 +2937,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "libc", @@ -2912,21 +2947,18 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f" + [[package]] name = "lock_api" 
version = "0.4.9" @@ -3013,6 +3045,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + [[package]] name = "memory_units" version = "0.4.0" @@ -3021,9 +3062,9 @@ checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -3040,6 +3081,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.6.23" @@ -3074,14 +3124,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -3128,7 +3178,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -3138,7 +3188,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1bb540dc6ef51cfe1916ec038ce7a620daf3a111e2502d745197cd53d6bca15" dependencies = [ "libc", - "socket2", + "socket2 0.4.9", ] [[package]] @@ -3147,7 +3197,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" dependencies = [ - "bitflags", + "bitflags 1.3.2", "jni-sys", "ndk-sys 0.4.1+23.1.7779620", "num_enum", @@ -3185,10 +3235,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c" dependencies = [ "darling 0.13.4", - "proc-macro-crate 1.3.0", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3235,7 +3285,7 @@ name = "netlink-packet-route" version = "0.10.0" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "netlink-packet-core", @@ -3256,12 +3306,12 @@ dependencies = [ name = "netlink-proto" version = "0.9.1" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures", "log", "netlink-packet-core", "netlink-sys", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] @@ -3269,11 +3319,11 @@ name = "netlink-sys" version = "0.8.1" dependencies = [ "async-io", - "bytes 1.3.0", + "bytes 1.4.0", "futures", "libc", "log", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] @@ -3282,7 +3332,7 @@ version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4916f159ed8e5de0082076562152a76b7a1f64a01fd9d1e0fea002c37624faf" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cc", "cfg-if 1.0.0", "libc", @@ 
-3295,7 +3345,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if 1.0.0", "libc", "memoffset 0.7.1", @@ -3315,9 +3365,9 @@ dependencies = [ [[package]] name = "nom" -version = "5.1.2" +version = "5.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "08959a387a676302eebf4ddbcbc611da04285579f76f88ee0506c63b1a61dd4b" dependencies = [ "memchr", "version_check 0.9.4", @@ -3333,15 +3383,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nom8" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8" -dependencies = [ - "memchr", -] - [[package]] name = "ntapi" version = "0.3.7" @@ -3353,9 +3394,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi 0.3.9", ] @@ -3458,23 +3499,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d829733185c1ca374f17e52b762f24f535ec625d2cc1f070e34c8a9068f341b" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3497,9 +3538,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oorandom" @@ -3539,8 +3580,8 @@ dependencies = [ "prost", "protobuf", "thiserror", - "tokio 1.24.2", - "tonic", + "tokio 1.28.1", + "tonic 0.8.3", ] [[package]] @@ -3555,7 +3596,7 @@ dependencies = [ "opentelemetry", "prost", "protobuf", - "tonic", + "tonic 0.8.3", "tonic-build", ] @@ -3603,7 +3644,7 @@ dependencies = [ "percent-encoding", "rand 0.8.5", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-stream", ] @@ -3614,14 +3655,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" dependencies = [ "dlv-list", - "hashbrown", + "hashbrown 0.12.3", ] [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" [[package]] name = "oslog" @@ -3671,9 +3712,9 @@ dependencies = [ [[package]] name = 
"parity-scale-codec" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3840933452adf7b3b9145e27086a5a3376c619dca1a21b1e5a5af0d54979bed" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" dependencies = [ "arrayvec", "bitvec", @@ -3689,17 +3730,17 @@ version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -3719,7 +3760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.6", + "parking_lot_core 0.9.7", ] [[package]] @@ -3731,29 +3772,40 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "pathdiff" @@ -3775,9 +3827,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.4" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f" +checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70" dependencies = [ "thiserror", "ucd-trie", @@ -3785,9 +3837,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.4" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea" +checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb" dependencies = [ "pest", "pest_generator", @@ -3795,22 +3847,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.4" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f" +checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e" 
dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "pest_meta" -version = "2.5.4" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d" +checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", @@ -3819,9 +3871,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -3848,11 +3900,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ - "pin-project-internal 1.0.12", + "pin-project-internal 1.1.0", ] [[package]] @@ -3863,18 +3915,18 @@ checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] @@ -3897,9 +3949,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "plotters" @@ -3931,16 +3983,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", + "bitflags 1.3.2", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite 0.2.9", + "windows-sys 0.48.0", ] [[package]] @@ -3962,12 +4016,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.1.23" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -3994,38 +4048,14 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" 
dependencies = [ "once_cell", "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check 0.9.4", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check 0.9.4", -] - [[package]] name = "proc-macro-hack" version = "0.5.20+deprecated" @@ -4034,30 +4064,30 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "prost-derive", ] [[package]] name = "prost-build" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "heck", "itertools", "lazy_static", @@ -4068,31 +4098,30 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "prost-types" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes 1.3.0", "prost", ] @@ -4119,7 +4148,7 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4130,9 +4159,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.23" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -4202,7 +4231,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - 
"getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -4215,19 +4244,30 @@ dependencies = [ ] [[package]] -name = "raw-window-handle" -version = "0.5.0" +name = "range-set-blaze" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed7e3d950b66e19e0c372f3fa3fbbcf85b1746b571f74e0c2af6042a5c93420a" +checksum = "ef51566f3ed218c92f4711b54af1c68c4f0c43935d31d216f9cc31b30af6ec64" dependencies = [ - "cty", + "gen_ops", + "itertools", + "num-integer", + "num-traits", + "rand 0.8.5", + "thiserror", ] [[package]] -name = "rayon" -version = "1.6.1" +name = "raw-window-handle" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" + +[[package]] +name = "rayon" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -4235,9 +4275,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -4251,7 +4291,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", ] [[package]] @@ -4260,20 +4309,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -4282,23 +4331,20 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] +checksum = 
"a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "rend" @@ -4336,25 +4382,30 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.39" -source = "git+https://github.com/rkyv/rkyv.git?rev=57e2a8d#57e2a8daff3e6381e170e723ed1beea5c113b232" +version = "0.7.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" dependencies = [ + "bitvec", "bytecheck", - "hashbrown", + "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", "seahash", + "tinyvec", + "uuid", ] [[package]] name = "rkyv_derive" -version = "0.7.39" -source = "git+https://github.com/rkyv/rkyv.git?rev=57e2a8d#57e2a8daff3e6381e170e723ed1beea5c113b232" +version = "0.7.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4363,7 +4414,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "rustc-hex", ] @@ -4374,7 +4425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "serde", ] @@ -4401,19 +4452,19 @@ dependencies = [ "netlink-proto", "nix 0.22.3", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags", + "bitflags 2.3.0", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hashlink 0.8.2", "libsqlite3-sys", "smallvec", ] @@ -4434,7 +4485,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a66b1273014079e4cf2b04aad1f3a2849e26e9a106f0411be2b1c15c23a791a" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4449,9 +4500,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -4480,7 +4531,21 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.37.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4492,10 +4557,22 @@ dependencies = [ "base64 0.13.1", "log", "ring", - "sct", + "sct 0.6.1", "webpki 0.21.4", ] +[[package]] +name = "rustls" +version = "0.20.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", +] + [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -4506,16 +4583,35 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.11" +name = "rustls-pemfile" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "same-file" @@ -4538,12 +4634,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" - [[package]] name = "sct" version = "0.6.1" @@ -4554,6 +4644,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "seahash" version = "4.1.0" @@ -4591,24 +4691,24 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "ca2855b3715770894e67cbfa3df957790aa0c9edc3bf06efa1a84d77fa0839d1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation 0.9.3", - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "libc", ] @@ -4623,9 +4723,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -4639,12 +4739,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" -[[package]] -name = "send_wrapper" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" - [[package]] name = "send_wrapper" version = "0.6.0" @@ -4656,18 +4750,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde-big-array" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3323f09a748af288c3dc2474ea6803ee81f118321775bffa3ac8f7e65c5e90e7" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" dependencies = [ "serde", ] @@ -4684,20 +4778,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -4706,20 +4800,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "serde_yaml" -version = "0.9.17" +version = "0.9.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567" +checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" dependencies = [ "indexmap", "itoa", @@ -4750,7 +4844,7 @@ checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4839,9 +4933,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -4866,15 +4960,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" dependencies = [ "libc", - "mio 0.8.5", + "mio 0.8.6", "signal-hook", ] [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -4885,6 +4979,12 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "simplelog" version = "0.9.0" @@ -4898,20 +4998,20 @@ dependencies = [ [[package]] name = "simplelog" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dfff04aade74dd495b007c831cd6f4e0cee19c344dd9dc0884c0289b70a786" +checksum = "acee08041c5de3d5048c8b3f6f13fafb3026b24ba43c6a695a0c76179b844369" dependencies = [ "log", "termcolor", - "time 0.3.17", + "time 0.3.21", ] [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -4934,14 +5034,24 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -4950,9 +5060,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.4" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] @@ -5002,7 +5112,7 @@ dependencies = [ "quote", "serde", "serde_derive", - "syn", + "syn 1.0.109", ] [[package]] @@ -5018,7 +5128,7 @@ dependencies = [ "serde_derive", "serde_json", "sha1 0.6.1", - "syn", + "syn 1.0.109", ] [[package]] @@ -5065,9 +5175,20 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" dependencies = [ "proc-macro2", "quote", @@ -5076,9 +5197,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synstructure" @@ -5088,20 +5209,34 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] [[package]] name = "sysinfo" -version = "0.27.7" +version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975fe381e0ecba475d4acff52466906d95b153a40324956552e027b2a9eaa89e" +checksum = "b4c2f3ca6693feb29a89724516f016488e9aafc7f37264f898593ee4b942f31b" dependencies = [ "cfg-if 1.0.0", - "core-foundation-sys 0.8.3", + "core-foundation-sys 0.8.4", "libc", - "ntapi 0.4.0", + "ntapi 0.4.1", + "once_cell", + "winapi 0.3.9", +] + +[[package]] +name = "sysinfo" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02f1dc6930a439cc5d154221b5387d153f8183529b07c19aca24ea31e0a167e1" +dependencies = [ + "cfg-if 1.0.0", + "core-foundation-sys 0.8.4", + "libc", + "ntapi 0.4.1", "once_cell", "rayon", "winapi 0.3.9", @@ -5115,16 +5250,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.45.0", ] [[package]] @@ -5153,30 +5287,31 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -5208,23 +5343,23 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "libc", "num_threads", "serde", "time-core", - "time-macros 0.2.6", + "time-macros 0.2.9", ] [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" 
+checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" @@ -5238,9 +5373,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -5255,7 +5390,7 @@ dependencies = [ "proc-macro2", "quote", "standback", - "syn", + "syn 1.0.109", ] [[package]] @@ -5288,9 +5423,9 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" @@ -5322,23 +5457,22 @@ dependencies = [ [[package]] name = "tokio" -version = "1.24.2" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", - "bytes 1.3.0", + "bytes 1.4.0", "libc", - "memchr", - "mio 0.8.5", + "mio 0.8.6", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "tracing", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -5348,43 +5482,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite 0.2.9", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", - "tokio 1.24.2", + "tokio 1.28.1", ] [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures-core", "futures-io", "futures-sink", "pin-project-lite 0.2.9", - "tokio 1.24.2", + "tokio 1.28.1", "tracing", ] @@ -5399,19 +5533,19 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" [[package]] name = "toml_edit" -version = "0.18.1" +version = "0.19.8" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" dependencies = [ "indexmap", - "nom8", "toml_datetime", + "winnow", ] [[package]] @@ -5424,7 +5558,7 @@ dependencies = [ "async-trait", "axum", "base64 0.13.1", - "bytes 1.3.0", + "bytes 1.4.0", "futures-core", "futures-util", "h2", @@ -5433,10 +5567,10 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.0.12", + "pin-project 1.1.0", "prost", "prost-derive", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-stream", "tokio-util", "tower", @@ -5446,6 +5580,34 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-trait", + "axum", + "base64 0.21.0", + "bytes 1.4.0", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project 1.1.0", + "prost", + "tokio 1.28.1", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic-build" version = "0.8.4" @@ -5456,7 +5618,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5468,36 +5630,17 @@ dependencies = [ "futures-core", "futures-util", "indexmap", - "pin-project 1.0.12", + "pin-project 1.1.0", "pin-project-lite 0.2.9", "rand 0.8.5", "slab", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-util", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "tower-http" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" -dependencies = [ - "bitflags", - "bytes 1.3.0", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite 0.2.9", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" @@ -5530,26 +5673,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "time 0.3.17", + "time 0.3.21", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -5571,7 +5714,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.12", + "pin-project 1.1.0", "tracing", ] @@ -5629,9 +5772,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -5676,7 +5819,7 @@ dependencies = [ "smallvec", "thiserror", "tinyvec", - "tokio 1.24.2", + "tokio 1.28.1", "tracing", "url", ] @@ -5696,7 +5839,7 @@ dependencies = [ "resolv-conf", "smallvec", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", "tracing", "trust-dns-proto", ] @@ -5728,13 +5871,13 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" +checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" dependencies = [ - "base64 0.13.1", "byteorder", - "bytes 1.3.0", + "bytes 1.4.0", + "data-encoding", "http", "httparse", "log", @@ -5771,15 +5914,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -5792,9 +5935,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" @@ -5820,15 +5963,15 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] [[package]] name = "unsafe-libyaml" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" +checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" [[package]] name = "untrusted" @@ -5853,6 +5996,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "uuid" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" + [[package]] name = "valuable" version = "0.1.0" @@ -5892,7 +6041,7 @@ dependencies = [ "capnp-rpc", "capnpc", "cfg-if 1.0.0", - "clap 3.2.23", + "clap 3.2.25", "config", "crossbeam-channel", "cursive", @@ -5910,7 +6059,7 @@ dependencies = [ "serde_derive", "serial_test", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-util", "veilid-core", ] @@ -5919,21 +6068,21 @@ dependencies = [ name = "veilid-core" version = "0.1.0" dependencies = [ + "argon2", "async-io", "async-lock", "async-std", "async-std-resolver", - "async-tls", - "async-tungstenite 0.19.0", + "async-tls 0.11.0", + 
"async-tungstenite 0.22.1", "async_executors", "backtrace", "blake3", "bugsalot", - "bytecheck", "capnp", "capnpc", "cfg-if 1.0.0", - "chacha20 0.9.0", + "chacha20 0.9.1", "chacha20poly1305", "chrono", "config", @@ -5947,13 +6096,13 @@ dependencies = [ "eyre", "flume", "futures-util", - "generic-array 0.14.6", - "getrandom 0.2.8", + "generic-array 0.14.7", + "getrandom 0.2.9", "hashlink 0.8.1", "hex", "ifstructs", "igd", - "jni", + "jni 0.21.1", "jni-sys", "js-sys", "json", @@ -5973,23 +6122,24 @@ dependencies = [ "paranoid-android", "parking_lot 0.12.1", "rand 0.7.3", + "range-set-blaze", "rkyv", "rtnetlink", "rusqlite", - "rustls", - "rustls-pemfile", + "rustls 0.19.1", + "rustls-pemfile 0.2.1", "secrecy", "send_wrapper 0.6.0", "serde", "serde-big-array", "serde_json", "serial_test", - "simplelog 0.12.0", - "socket2", + "simplelog 0.12.1", + "socket2 0.5.3", "static_assertions", "stop-token", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-stream", "tokio-util", "tracing", @@ -6006,10 +6156,10 @@ dependencies = [ "weak-table", "web-sys", "webpki 0.22.0", - "webpki-roots 0.22.6", + "webpki-roots 0.23.0", "wee_alloc", "winapi 0.3.9", - "windows", + "windows 0.38.0", "windows-permissions", "ws_stream_wasm", "x25519-dalek-ng", @@ -6027,7 +6177,7 @@ dependencies = [ "ffi-support", "futures-util", "hostname", - "jni", + "jni 0.21.1", "lazy_static", "opentelemetry", "opentelemetry-otlp", @@ -6035,7 +6185,7 @@ dependencies = [ "parking_lot 0.12.1", "serde", "serde_json", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-stream", "tokio-util", "tracing", @@ -6050,14 +6200,14 @@ version = "0.1.0" dependencies = [ "ansi_term", "async-std", - "async-tungstenite 0.19.0", + "async-tungstenite 0.22.1", "backtrace", "bugsalot", "capnp", "capnp-rpc", "capnpc", "cfg-if 1.0.0", - "clap 3.2.23", + "clap 3.2.25", "color-eyre", "config", "console-subscriber", @@ -6082,7 +6232,8 @@ dependencies = [ "signal-hook", "signal-hook-async-std", "stop-token", - "tokio 1.24.2", + "sysinfo 0.28.4", + "tokio 1.28.1", "tokio-stream", "tokio-util", "tracing", @@ -6108,7 +6259,7 @@ dependencies = [ "console_error_panic_hook", "eyre", "futures-util", - "jni", + "jni 0.21.1", "jni-sys", "js-sys", "lazy_static", @@ -6127,11 +6278,11 @@ dependencies = [ "rust-fsm", "send_wrapper 0.6.0", "serial_test", - "simplelog 0.12.0", + "simplelog 0.12.1", "static_assertions", "stop-token", "thiserror", - "tokio 1.24.2", + "tokio 1.28.1", "tokio-util", "tracing", "tracing-oslog", @@ -6188,12 +6339,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi 0.3.9", "winapi-util", ] @@ -6227,9 +6377,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if 1.0.0", "serde", @@ -6239,24 +6389,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.86" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.16", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -6266,9 +6416,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6276,28 +6426,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.16", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-bindgen-test" -version = "0.3.33" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d2fff962180c3fadf677438054b1db62bee4aa32af26a45388af07d1287e1d" +checksum = "c9e636f3a428ff62b3742ebc3c70e254dfe12b8c2b469d688ea59cdd4abcf502" dependencies = [ "console_error_panic_hook", "js-sys", @@ -6309,9 +6459,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.33" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4683da3dfc016f704c9f82cf401520c4f1cb3ee440f7f52b3d6ac29506a49ca7" +checksum = "f18c1fad2f7c4958e7bcce014fa212f59a65d5e3721d0f77e6c0b27ede936ba3" dependencies = [ "proc-macro2", "quote", @@ -6336,9 +6486,9 @@ checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", @@ -6382,6 +6532,15 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "webpki-roots" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125" +dependencies = [ + "rustls-webpki", +] + [[package]] name = "wee_alloc" version = "0.4.5" @@ -6394,15 +6553,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" version = "4.4.0" @@ -6488,67 +6638,95 @@ dependencies = [ "windows_x86_64_msvc 0.38.0", ] +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-permissions" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e2ccdc3c6bf4d4a094e031b63fadd08d8e42abd259940eb8aa5fdc09d4bf9be" dependencies = [ - "bitflags", + "bitflags 1.3.2", "winapi 0.3.9", ] [[package]] name = "windows-service" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917fdb865e7ff03af9dd86609f8767bc88fefba89e8efd569de8e208af8724b3" +checksum = "cd9db37ecb5b13762d95468a2fc6009d4b2c62801243223aabd44fca13ad13c8" dependencies = [ - "bitflags", - "err-derive", + "bitflags 1.3.2", "widestring 1.0.2", - "windows-sys 0.36.1", + "windows-sys 0.45.0", ] [[package]] name = "windows-sys" -version = "0.36.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows-targets 0.42.2", ] [[package]] name = "windows-sys" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.1", + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] -name = "windows_aarch64_msvc" 
-version = "0.36.1" +name = "windows_aarch64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" @@ -6558,15 +6736,15 @@ checksum = "b12add87e2fb192fff3f4f7e4342b3694785d79f3a64e2c20d5ceb5ccbcfc3cd" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] -name = "windows_i686_gnu" -version = "0.36.1" +name = "windows_aarch64_msvc" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" @@ -6576,15 +6754,15 @@ checksum = "4c98f2db372c23965c5e0f43896a8f0316dc0fbe48d1aa65bea9bdd295d43c15" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] -name = "windows_i686_msvc" -version = "0.36.1" +name = "windows_i686_gnu" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" @@ -6594,15 +6772,15 @@ checksum = "cdf0569be0f2863ab6a12a6ba841fcfa7d107cbc7545a3ebd57685330db0a3ff" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" +name = "windows_i686_msvc" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" @@ -6612,21 +6790,27 @@ checksum = "905858262c8380a36f32cb8c1990d7e7c3b7a8170e58ed9a98ca6d940b7ea9f1" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = 
"26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" +name = "windows_x86_64_gnullvm" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" @@ -6636,9 +6820,24 @@ checksum = "890c3c6341d441ffb38f705f47196e3665dc6dd79f6d72fa185d937326730561" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -6661,16 +6860,17 @@ dependencies = [ [[package]] name = "ws_stream_wasm" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" dependencies = [ "async_io_stream", "futures", "js-sys", + "log", "pharos", "rustc_version 0.4.0", - "send_wrapper 0.5.0", + "send_wrapper 0.6.0", "thiserror", "wasm-bindgen", "wasm-bindgen-futures", @@ -6706,9 +6906,9 @@ checksum = "a67300977d3dc3f8034dae89778f502b6ba20b269527b3223ba59c0cf393bb8a" [[package]] name = "xml-rs" -version = "0.8.4" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "1690519550bfa95525229b9ca2350c63043a4857b3b0013811b2ccf4a2420b01" [[package]] name = "xmltree" @@ -6760,7 +6960,7 @@ dependencies = [ "proc-macro-crate 0.1.5", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6774,14 +6974,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.16", ] [[package]] @@ -6804,8 +7003,8 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4ca5e22593eb4212382d60d26350065bf2a02c34b85bc850474a74b589a3de9" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] diff --git a/Cargo.toml b/Cargo.toml index 27fc702e..a70dbee8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,4 @@ [workspace] - members = [ "veilid-tools", "veilid-core", diff --git a/doc/config/sample.config b/doc/config/sample.config index a7dc1033..3fd37b59 100644 --- a/doc/config/sample.config +++ b/doc/config/sample.config @@ -23,13 +23,15 @@ logging: enabled: false level: 'trace' 
grpc_endpoint: 'localhost:4317' + console: + enabled: false testing: subnode_index: 0 core: protected_store: allow_insecure_fallback: true always_use_insecure_storage: true - insecure_fallback_directory: '%INSECURE_FALLBACK_DIRECTORY%' + directory: '%DIRECTORY%' delete: false table_store: directory: '%TABLE_STORE_DIRECTORY%' @@ -47,10 +49,10 @@ core: client_whitelist_timeout_ms: 300000 reverse_connection_receipt_time_ms: 5000 hole_punch_receipt_time_ms: 5000 - node_id: '' - node_id_secret: '' - bootstrap: ['bootstrap.dev.veilid.net'] routing_table: + node_id: null + node_id_secret: null + bootstrap: ['bootstrap.dev.veilid.net'] limit_over_attached: 64 limit_fully_attached: 32 limit_attached_strong: 16 @@ -61,27 +63,31 @@ core: queue_size: 1024 max_timestamp_behind_ms: 10000 max_timestamp_ahead_ms: 10000 - timeout_ms: 10000 + timeout_ms: 5000 max_route_hop_count: 4 default_route_hop_count: 1 - dht: - resolve_node_timeout: - resolve_node_count: 20 - resolve_node_fanout: 3 max_find_node_count: 20 - get_value_timeout: - get_value_count: 20 - get_value_fanout: 3 - set_value_timeout: - set_value_count: 20 - set_value_fanout: 5 + resolve_node_timeout_ms: 10000 + resolve_node_count: 1 + resolve_node_fanout: 4 + get_value_timeout_ms: 10000 + get_value_count: 3 + get_value_fanout: 4 + set_value_timeout_ms: 10000 + set_value_count: 5 + set_value_fanout: 4 min_peer_count: 20 min_peer_refresh_time_ms: 2000 validate_dial_info_receipt_time_ms: 2000 + local_subkey_cache_size: 128 + local_max_subkey_cache_memory_mb: 256 + remote_subkey_cache_size: 1024 + remote_max_records: 65536 + remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB% + remote_max_storage_space_mb: 0 upnp: true detect_address_changes: true - enable_local_peer_scope: false restricted_nat_retries: 0 tls: certificate_path: '%CERTIFICATE_PATH%' @@ -123,5 +129,4 @@ core: max_connections: 16 listen_address: ':5150' path: 'ws' - # url: '' - + # url: '' \ No newline at end of file diff --git a/doc/config/veilid-server-config.md b/doc/config/veilid-server-config.md index 525ce32d..388e3b0f 100644 --- a/doc/config/veilid-server-config.md +++ b/doc/config/veilid-server-config.md @@ -155,7 +155,7 @@ testing: protected_store: allow_insecure_fallback: true always_use_insecure_storage: true - insecure_fallback_directory: '%INSECURE_FALLBACK_DIRECTORY%' + directory: '%DIRECTORY%' delete: false ``` @@ -225,7 +225,7 @@ rpc: queue_size: 1024 max_timestamp_behind_ms: 10000 max_timestamp_ahead_ms: 10000 - timeout_ms: 10000 + timeout_ms: 5000 max_route_hop_count: 4 default_route_hop_count: 1 ``` @@ -234,19 +234,25 @@ rpc: ```yaml dht: - resolve_node_timeout: - resolve_node_count: 20 - resolve_node_fanout: 3 max_find_node_count: 20 - get_value_timeout: - get_value_count: 20 - get_value_fanout: 3 - set_value_timeout: - set_value_count: 20 - set_value_fanout: 5 + resolve_node_timeout_ms: 10000 + resolve_node_count: 1 + resolve_node_fanout: 4 + get_value_timeout_ms: 10000 + get_value_count: 3 + get_value_fanout: 4 + set_value_timeout_ms: 10000 + set_value_count: 5 + set_value_fanout: 4 min_peer_count: 20 min_peer_refresh_time_ms: 2000 validate_dial_info_receipt_time_ms: 2000 + local_subkey_cache_size: 128 + local_max_subkey_cache_memory_mb: 256 + remote_subkey_cache_size: 1024 + remote_max_records: 65536 + remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB% + remote_max_storage_space_mb: 0 ``` #### core:network:tls diff --git a/external/keyvaluedb b/external/keyvaluedb index 3408e0b2..9bb05a54 160000 --- a/external/keyvaluedb +++ 
b/external/keyvaluedb @@ -1 +1 @@ -Subproject commit 3408e0b2ae3df0088e0714bc23fb33c82a58e22c +Subproject commit 9bb05a54b4c0278a289841b2bf7c1749aa0fbd5d diff --git a/package/linux/veilid-server.conf b/package/linux/veilid-server.conf index cd94b5d7..c7126e01 100644 --- a/package/linux/veilid-server.conf +++ b/package/linux/veilid-server.conf @@ -14,7 +14,7 @@ logging: enabled: false core: protected_store: - insecure_fallback_directory: '/var/db/veilid-server/protected_store' + directory: '/var/db/veilid-server/protected_store' table_store: directory: '/var/db/veilid-server/table_store' block_store: diff --git a/scripts/ios_build.sh b/scripts/ios_build.sh index 51307e68..26d50ff4 100755 --- a/scripts/ios_build.sh +++ b/scripts/ios_build.sh @@ -57,7 +57,7 @@ do HOMEBREW_DIR=$(dirname `which brew`) fi - env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH + env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH LIPOS="$LIPOS $TARGET_PATH/$CARGO_TARGET/$BUILD_MODE/lib$PACKAGE_NAME.a" diff --git a/scripts/macos_build.sh b/scripts/macos_build.sh index 88fc470c..61703127 100755 --- a/scripts/macos_build.sh +++ b/scripts/macos_build.sh @@ -47,7 +47,7 @@ do HOMEBREW_DIR=$(dirname `which brew`) fi - env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH + env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH LIPOS="$LIPOS $TARGET_PATH/$CARGO_TARGET/$BUILD_MODE/lib$PACKAGE_NAME.dylib" diff --git a/scripts/new_ios_sim.sh b/scripts/new_ios_sim.sh index 4f3b78d4..db938398 100755 --- a/scripts/new_ios_sim.sh +++ b/scripts/new_ios_sim.sh @@ -1,5 +1,6 @@ #!/bin/bash -ID=$(xcrun simctl create test-iphone com.apple.CoreSimulator.SimDeviceType.iPhone-14-Pro com.apple.CoreSimulator.SimRuntime.iOS-16-1 2>/dev/null) +RUNTIME=$(xcrun simctl runtime list -j | jq '.[].runtimeIdentifier' -r | head -1) +ID=$(xcrun simctl create test-iphone com.apple.CoreSimulator.SimDeviceType.iPhone-14-Pro $RUNTIME 2>/dev/null) xcrun simctl boot $ID xcrun simctl bootstatus $ID echo Simulator ID is $ID diff --git a/setup_macos.sh b/setup_macos.sh index f5045efd..cdbbc342 100755 --- a/setup_macos.sh +++ b/setup_macos.sh @@ -122,5 +122,5 @@ if [ "$BREW_USER" == "" ]; then BREW_USER=`whoami` fi fi -sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf openjdk@11 +sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf openjdk@11 jq sudo gem install cocoapods diff --git a/veilid-cli/Cargo.toml b/veilid-cli/Cargo.toml index 3a9ad7b6..ce11242e 100644 --- a/veilid-cli/Cargo.toml +++ b/veilid-cli/Cargo.toml @@ -38,7 +38,7 @@ cfg-if = "^1" capnp = "^0" capnp-rpc = "^0" config = { version = "^0", features = ["yaml"] } -bugsalot = "^0" +bugsalot = { git = "https://github.com/crioux/bugsalot.git" } flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] } thiserror = "^1" crossbeam-channel = "^0" diff --git a/veilid-cli/src/client_api_connection.rs 
b/veilid-cli/src/client_api_connection.rs index 0bb89122..b2681257 100644 --- a/veilid-cli/src/client_api_connection.rs +++ b/veilid-cli/src/client_api_connection.rs @@ -38,7 +38,7 @@ fn map_to_internal_error(e: T) -> VeilidAPIError { fn decode_api_result( reader: &api_result::Reader, -) -> Result { +) -> VeilidAPIResult { match reader.which().map_err(map_to_internal_error)? { api_result::Which::Ok(v) => { let ok_val = v.map_err(map_to_internal_error)?; @@ -92,7 +92,7 @@ impl veilid_client::Server for VeilidClientImpl { VeilidUpdate::Config(config) => { self.comproc.update_config(config); } - VeilidUpdate::Route(route) => { + VeilidUpdate::RouteChange(route) => { self.comproc.update_route(route); } VeilidUpdate::Shutdown => self.comproc.update_shutdown(), @@ -355,7 +355,7 @@ impl ClientApiConnection { .map_err(map_to_string)? .get_result() .map_err(map_to_string)?; - let res: Result<(), VeilidAPIError> = decode_api_result(&reader); + let res: VeilidAPIResult<()> = decode_api_result(&reader); res.map_err(map_to_string) } @@ -379,7 +379,7 @@ impl ClientApiConnection { .map_err(map_to_string)? .get_result() .map_err(map_to_string)?; - let res: Result<(), VeilidAPIError> = decode_api_result(&reader); + let res: VeilidAPIResult<()> = decode_api_result(&reader); res.map_err(map_to_string) } @@ -422,7 +422,7 @@ impl ClientApiConnection { .map_err(map_to_string)? .get_result() .map_err(map_to_string)?; - let res: Result = decode_api_result(&reader); + let res: VeilidAPIResult = decode_api_result(&reader); res.map_err(map_to_string) } @@ -453,7 +453,7 @@ impl ClientApiConnection { .map_err(map_to_string)? .get_result() .map_err(map_to_string)?; - let res: Result<(), VeilidAPIError> = decode_api_result(&reader); + let res: VeilidAPIResult<()> = decode_api_result(&reader); res.map_err(map_to_string) } @@ -483,7 +483,7 @@ impl ClientApiConnection { .map_err(map_to_string)? 
.get_result() .map_err(map_to_string)?; - let res: Result<(), VeilidAPIError> = decode_api_result(&reader); + let res: VeilidAPIResult<()> = decode_api_result(&reader); res.map_err(map_to_string) } diff --git a/veilid-cli/src/command_processor.rs b/veilid-cli/src/command_processor.rs index 2e32f584..60fe90d7 100644 --- a/veilid-cli/src/command_processor.rs +++ b/veilid-cli/src/command_processor.rs @@ -406,7 +406,7 @@ reply - reply to an AppCall not handled directly by the server pub fn update_config(&mut self, config: veilid_core::VeilidStateConfig) { self.inner_mut().ui.set_config(config.config) } - pub fn update_route(&mut self, route: veilid_core::VeilidStateRoute) { + pub fn update_route(&mut self, route: veilid_core::VeilidRouteChange) { let mut out = String::new(); if !route.dead_routes.is_empty() { out.push_str(&format!("Dead routes: {:?}", route.dead_routes)); @@ -445,46 +445,46 @@ reply - reply to an AppCall not handled directly by the server pub fn update_app_message(&mut self, msg: veilid_core::VeilidAppMessage) { // check is message body is ascii printable let mut printable = true; - for c in &msg.message { + for c in msg.message() { if *c < 32 || *c > 126 { printable = false; } } let strmsg = if printable { - String::from_utf8_lossy(&msg.message).to_string() + String::from_utf8_lossy(msg.message()).to_string() } else { - hex::encode(&msg.message) + hex::encode(msg.message()) }; self.inner() .ui - .add_node_event(format!("AppMessage ({:?}): {}", msg.sender, strmsg)); + .add_node_event(format!("AppMessage ({:?}): {}", msg.sender(), strmsg)); } pub fn update_app_call(&mut self, call: veilid_core::VeilidAppCall) { // check is message body is ascii printable let mut printable = true; - for c in &call.message { + for c in call.message() { if *c < 32 || *c > 126 { printable = false; } } let strmsg = if printable { - String::from_utf8_lossy(&call.message).to_string() + String::from_utf8_lossy(call.message()).to_string() } else { - format!("#{}", hex::encode(&call.message)) + format!("#{}", hex::encode(call.message())) }; self.inner().ui.add_node_event(format!( "AppCall ({:?}) id = {:016x} : {}", - call.sender, - call.id.as_u64(), + call.sender(), + call.id().as_u64(), strmsg )); - self.inner_mut().last_call_id = Some(call.id); + self.inner_mut().last_call_id = Some(call.id()); } pub fn update_shutdown(&mut self) { diff --git a/veilid-cli/src/peers_table_view.rs b/veilid-cli/src/peers_table_view.rs index 41992584..ff328476 100644 --- a/veilid-cli/src/peers_table_view.rs +++ b/veilid-cli/src/peers_table_view.rs @@ -53,13 +53,9 @@ impl TableViewItem for PeerTableData { PeerTableColumn::NodeId => self .node_ids .first() - .cloned() + .map(|n| n.to_string()) .unwrap_or_else(|| "???".to_owned()), - PeerTableColumn::Address => format!( - "{:?}:{}", - self.peer_address.protocol_type(), - self.peer_address.to_socket_addr() - ), + PeerTableColumn::Address => self.peer_address.clone(), PeerTableColumn::LatencyAvg => format!( "{}", self.peer_stats diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index d9c064d8..e145fb2c 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -10,13 +10,14 @@ license = "LGPL-2.0-or-later OR MPL-2.0 OR (MIT AND BSD-3-Clause)" crate-type = ["cdylib", "staticlib", "rlib"] [features] -default = [ "enable-crypto-vld0" ] -crypto-test = [ "enable-crypto-vld0", "enable-crypto-none" ] -crypto-test-none = [ "enable-crypto-none" ] +default = ["enable-crypto-vld0"] +crypto-test = ["enable-crypto-vld0", "enable-crypto-none"] +crypto-test-none = 
["enable-crypto-none"] enable-crypto-vld0 = [] enable-crypto-none = [] rt-async-std = ["async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std"] rt-tokio = ["tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio"] +rt-wasm-bindgen = ["veilid-tools/rt-wasm-bindgen", "async_executors/bindgen"] veilid_core_android_tests = ["dep:paranoid-android"] veilid_core_ios_tests = ["dep:tracing-oslog"] @@ -65,11 +66,11 @@ rtnetlink = { version = "^0", default-features = false, optional = true } async-std-resolver = { version = "^0", optional = true } trust-dns-resolver = { version = "^0", optional = true } keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" } -#rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] } -rkyv = { git = "https://github.com/rkyv/rkyv.git", rev = "57e2a8d", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] } -bytecheck = "^0" +rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] } data-encoding = { version = "^2" } weak-table = "0.3.2" +range-set-blaze = "0.1.5" +argon2 = "0.5.0" # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android @@ -91,8 +92,8 @@ rustls = "^0.19" rustls-pemfile = "^0.2" futures-util = { version = "^0", default-features = false, features = ["async-await", "sink", "std", "io"] } keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" } -socket2 = "^0" -bugsalot = "^0" +socket2 = { version = "^0", features = ["all"] } +bugsalot = { git = "https://github.com/crioux/bugsalot.git" } chrono = "^0" libc = "^0" nix = "^0" diff --git a/veilid-core/proto/veilid.capnp b/veilid-core/proto/veilid.capnp index 5474d11a..2fa813b6 100644 --- a/veilid-core/proto/veilid.capnp +++ b/veilid-core/proto/veilid.capnp @@ -27,13 +27,12 @@ struct Nonce24 @0xb6260db25d8d7dfc { u2 @2 :UInt64; } -using PublicKey = Key256; # Node id / DHT key / Route id, etc +using PublicKey = Key256; # Node id / Hash / DHT key / Route id, etc using Nonce = Nonce24; # One-time encryption nonce using Signature = Signature512; # Signature block using TunnelID = UInt64; # Id for tunnels using CryptoKind = UInt32; # FOURCC code for cryptography type using ValueSeqNum = UInt32; # sequence numbers for values -using ValueSchema = UInt32; # FOURCC code for schema (0 = freeform, SUB0 = subkey control v0) using Subkey = UInt32; # subkey index for dht struct TypedKey @0xe2d567a9f1e61b29 { @@ -312,47 +311,66 @@ struct OperationAppMessage @0x9baf542d81b411f5 { message @0 :Data; # opaque message to application } -struct SubkeyRange { +struct SubkeyRange @0xf592dac0a4d0171c { start @0 :Subkey; # the start of a subkey range end @1 :Subkey; # the end of a subkey range } - -struct ValueData @0xb4b7416f169f2a3d { + +struct SignedValueData @0xb4b7416f169f2a3d { seq @0 :ValueSeqNum; # sequence number of value - schema @1 :ValueSchema; # fourcc code of schema for value - data @2 :Data; # value or subvalue contents + data @1 :Data; # value or subvalue contents + writer @2 :PublicKey; # the public key of the writer + signature @3 :Signature; # signature of data at this subkey, using the writer key (which may be the same as the owner key) + # signature covers: + # * ownerKey + # * subkey + # * sequence number + # * 
data + # signature does not need to cover schema because schema is validated upon every set + # so the data either fits, or it doesn't. } +struct SignedValueDescriptor @0xe7911cd3f9e1b0e7 { + owner @0 :PublicKey; # the public key of the owner + schemaData @1 :Data; # the schema data + # Changing this after key creation is not supported as it would change the dht key + signature @2 :Signature; # Schema data is signed by ownerKey and is verified both by set and get operations +} + + struct OperationGetValueQ @0xf88a5b6da5eda5d0 { - key @0 :TypedKey; # the location of the value - subkey @1 :Subkey; # the index of the subkey (0 for the default subkey) + key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ] + subkey @1 :Subkey; # the index of the subkey + wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key } + struct OperationGetValueA @0xd896bb46f2e0249f { - union { - data @0 :ValueData; # the value if successful - peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful - } + value @0 :SignedValueData; # optional: the value if successful, or if unset, no value returned + peers @1 :List(PeerInfo); # returned 'closer peer' information on either success or failure + descriptor @2 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned } -struct OperationSetValueQ @0xbac06191ff8bdbc5 { - key @0 :TypedKey; # the location of the value - subkey @1 :Subkey; # the index of the subkey (0 for the default subkey) - value @2 :ValueData; # value or subvalue contents (older or equal seq number gets dropped) +struct OperationSetValueQ @0xbac06191ff8bdbc5 { + key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ] + subkey @1 :Subkey; # the index of the subkey + value @2 :SignedValueData; # value or subvalue contents (older or equal seq number gets dropped) + descriptor @3 :SignedValueDescriptor; # optional: the descriptor if needed } struct OperationSetValueA @0x9378d0732dc95be2 { - union { - data @0 :ValueData; # the new value if successful, may be a different value than what was set if the seq number was lower or equal - peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful - } + set @0 :Bool; # true if the set was close enough to be set + value @1 :SignedValueData; # optional: the current value at the key if the set seq number was lower or equal to what was there before + peers @2 :List(PeerInfo); # returned 'closer peer' information on either success or failure } struct OperationWatchValueQ @0xf9a5a6c547b9b228 { key @0 :TypedKey; # key for value to watch - subkeys @1 :List(SubkeyRange); # subkey range to watch, if empty, watch everything + subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges), if empty, watch everything expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (can be return less, 0 for max) count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous) + watcher @4 :PublicKey; # the watcher performing the watch, can be the owner or a schema member + signature @5 :Signature; # signature of the watcher, must be one of the schema members or the key owner. 
signature covers: key, subkeys, expiration, count } struct OperationWatchValueA @0xa726cab7064ba893 { @@ -364,7 +382,7 @@ struct OperationValueChanged @0xd1c59ebdd8cc1bf6 { key @0 :TypedKey; # key for value that changed subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time) count @2 :UInt32; # remaining changes left (0 means watch has expired) - value @3 :ValueData; # first value that changed (the rest can be gotten with getvalue) + value @3 :SignedValueData; # first value that changed (the rest can be gotten with getvalue) } struct OperationSupplyBlockQ @0xadbf4c542d749971 { @@ -372,10 +390,8 @@ struct OperationSupplyBlockQ @0xadbf4c542d749971 { } struct OperationSupplyBlockA @0xf003822e83b5c0d7 { - union { - expiration @0 :UInt64; # when the block supplier entry will need to be refreshed - peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful - } + expiration @0 :UInt64; # when the block supplier entry will need to be refreshed, or 0 if not successful + peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful } struct OperationFindBlockQ @0xaf4353ff004c7156 { diff --git a/veilid-core/run_tests.sh b/veilid-core/run_tests.sh index 0a8ec9af..713de207 100755 --- a/veilid-core/run_tests.sh +++ b/veilid-core/run_tests.sh @@ -3,7 +3,7 @@ SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" pushd $SCRIPTDIR 2>/dev/null if [[ "$1" == "wasm" ]]; then - WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless + WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless --features=rt-wasm-bindgen elif [[ "$1" == "ios" ]]; then SYMROOT=/tmp/testout APPNAME=veilidcore-tests diff --git a/veilid-core/src/attachment_manager.rs b/veilid-core/src/attachment_manager.rs index d5a1323d..fca5650d 100644 --- a/veilid-core/src/attachment_manager.rs +++ b/veilid-core/src/attachment_manager.rs @@ -1,7 +1,8 @@ -use crate::crypto::Crypto; -use crate::network_manager::*; -use crate::routing_table::*; use crate::*; +use crypto::Crypto; +use network_manager::*; +use routing_table::*; +use storage_manager::*; pub struct AttachmentManagerInner { last_attachment_state: AttachmentState, @@ -26,6 +27,7 @@ pub struct AttachmentManager { impl AttachmentManager { fn new_unlocked_inner( config: VeilidConfig, + storage_manager: StorageManager, protected_store: ProtectedStore, table_store: TableStore, block_store: BlockStore, @@ -35,6 +37,7 @@ impl AttachmentManager { config: config.clone(), network_manager: NetworkManager::new( config, + storage_manager, protected_store, table_store, block_store, @@ -54,6 +57,7 @@ impl AttachmentManager { } pub fn new( config: VeilidConfig, + storage_manager: StorageManager, protected_store: ProtectedStore, table_store: TableStore, block_store: BlockStore, @@ -63,6 +67,7 @@ impl AttachmentManager { inner: Arc::new(Mutex::new(Self::new_inner())), unlocked_inner: Arc::new(Self::new_unlocked_inner( config, + storage_manager, protected_store, table_store, block_store, diff --git a/veilid-core/src/core_context.rs b/veilid-core/src/core_context.rs index 49b115a7..555bdcf2 100644 --- a/veilid-core/src/core_context.rs +++ b/veilid-core/src/core_context.rs @@ -1,12 +1,16 @@ use crate::api_tracing_layer::*; use crate::attachment_manager::*; use crate::crypto::Crypto; +use crate::storage_manager::*; use crate::veilid_api::*; use crate::veilid_config::*; use crate::*; pub type UpdateCallback = Arc; +/// Internal services startup mechanism +/// Ensures that everything is started up, 
and shut down in the right order +/// and provides an atomic state for if the system is properly operational struct ServicesContext { pub config: VeilidConfig, pub update_callback: UpdateCallback, @@ -16,6 +20,7 @@ struct ServicesContext { pub block_store: Option, pub crypto: Option, pub attachment_manager: Option, + pub storage_manager: Option, } impl ServicesContext { @@ -28,6 +33,7 @@ impl ServicesContext { block_store: None, crypto: None, attachment_manager: None, + storage_manager: None, } } @@ -39,6 +45,7 @@ impl ServicesContext { block_store: BlockStore, crypto: Crypto, attachment_manager: AttachmentManager, + storage_manager: StorageManager, ) -> Self { Self { config, @@ -48,6 +55,7 @@ impl ServicesContext { block_store: Some(block_store), crypto: Some(crypto), attachment_manager: Some(attachment_manager), + storage_manager: Some(storage_manager), } } @@ -62,15 +70,24 @@ impl ServicesContext { trace!("init protected store"); let protected_store = ProtectedStore::new(self.config.clone()); if let Err(e) = protected_store.init().await { + error!("failed to init protected store: {}", e); self.shutdown().await; return Err(e); } self.protected_store = Some(protected_store.clone()); - // Set up tablestore + // Set up tablestore and crypto system + trace!("create table store and crypto system"); + let table_store = TableStore::new(self.config.clone(), protected_store.clone()); + let crypto = Crypto::new(self.config.clone(), table_store.clone()); + table_store.set_crypto(crypto.clone()); + + // Initialize table store first, so crypto code can load caches + // Tablestore can use crypto during init, just not any cached operations or things + // that require flushing back to the tablestore trace!("init table store"); - let table_store = TableStore::new(self.config.clone()); if let Err(e) = table_store.init().await { + error!("failed to init table store: {}", e); self.shutdown().await; return Err(e); } @@ -78,12 +95,8 @@ impl ServicesContext { // Set up crypto trace!("init crypto"); - let crypto = Crypto::new( - self.config.clone(), - table_store.clone(), - protected_store.clone(), - ); if let Err(e) = crypto.init().await { + error!("failed to init crypto: {}", e); self.shutdown().await; return Err(e); } @@ -93,22 +106,41 @@ impl ServicesContext { trace!("init block store"); let block_store = BlockStore::new(self.config.clone()); if let Err(e) = block_store.init().await { + error!("failed to init block store: {}", e); self.shutdown().await; return Err(e); } self.block_store = Some(block_store.clone()); + // Set up storage manager + trace!("init storage manager"); + let storage_manager = StorageManager::new( + self.config.clone(), + self.crypto.clone().unwrap(), + self.protected_store.clone().unwrap(), + self.table_store.clone().unwrap(), + self.block_store.clone().unwrap(), + ); + if let Err(e) = storage_manager.init().await { + error!("failed to init storage manager: {}", e); + self.shutdown().await; + return Err(e); + } + self.storage_manager = Some(storage_manager.clone()); + // Set up attachment manager trace!("init attachment manager"); let update_callback = self.update_callback.clone(); let attachment_manager = AttachmentManager::new( self.config.clone(), + storage_manager, protected_store, table_store, block_store, crypto, ); if let Err(e) = attachment_manager.init(update_callback).await { + error!("failed to init attachment manager: {}", e); self.shutdown().await; return Err(e); } @@ -126,6 +158,10 @@ impl ServicesContext { trace!("terminate attachment manager"); 
attachment_manager.terminate().await; } + if let Some(storage_manager) = &mut self.storage_manager { + trace!("terminate storage manager"); + storage_manager.terminate().await; + } if let Some(block_store) = &mut self.block_store { trace!("terminate block store"); block_store.terminate().await; @@ -159,6 +195,7 @@ pub struct VeilidCoreContext { pub config: VeilidConfig, pub update_callback: UpdateCallback, // Services + pub storage_manager: StorageManager, pub protected_store: ProtectedStore, pub table_store: TableStore, pub block_store: BlockStore, @@ -171,7 +208,7 @@ impl VeilidCoreContext { async fn new_with_config_callback( update_callback: UpdateCallback, config_callback: ConfigCallback, - ) -> Result { + ) -> VeilidAPIResult { // Set up config from callback trace!("setup config with callback"); let mut config = VeilidConfig::new(); @@ -184,7 +221,7 @@ impl VeilidCoreContext { async fn new_with_config_json( update_callback: UpdateCallback, config_json: String, - ) -> Result { + ) -> VeilidAPIResult { // Set up config from callback trace!("setup config with json"); let mut config = VeilidConfig::new(); @@ -196,7 +233,7 @@ impl VeilidCoreContext { async fn new_common( update_callback: UpdateCallback, config: VeilidConfig, - ) -> Result { + ) -> VeilidAPIResult { cfg_if! { if #[cfg(target_os = "android")] { if !crate::intf::android::is_android_ready() { @@ -209,8 +246,9 @@ impl VeilidCoreContext { sc.startup().await.map_err(VeilidAPIError::generic)?; Ok(VeilidCoreContext { - update_callback: sc.update_callback, config: sc.config, + update_callback: sc.update_callback, + storage_manager: sc.storage_manager.unwrap(), protected_store: sc.protected_store.unwrap(), table_store: sc.table_store.unwrap(), block_store: sc.block_store.unwrap(), @@ -229,6 +267,7 @@ impl VeilidCoreContext { self.block_store, self.crypto, self.attachment_manager, + self.storage_manager, ); sc.shutdown().await; } @@ -244,7 +283,7 @@ lazy_static::lazy_static! 
{ pub async fn api_startup( update_callback: UpdateCallback, config_callback: ConfigCallback, -) -> Result { +) -> VeilidAPIResult { // See if we have an API started up already let mut initialized_lock = INITIALIZED.lock().await; if *initialized_lock { @@ -267,7 +306,7 @@ pub async fn api_startup( pub async fn api_startup_json( update_callback: UpdateCallback, config_json: String, -) -> Result { +) -> VeilidAPIResult { // See if we have an API started up already let mut initialized_lock = INITIALIZED.lock().await; if *initialized_lock { diff --git a/veilid-core/src/crypto/byte_array_types.rs b/veilid-core/src/crypto/byte_array_types.rs index d8c7bc77..8a1f7a01 100644 --- a/veilid-core/src/crypto/byte_array_types.rs +++ b/veilid-core/src/crypto/byte_array_types.rs @@ -7,22 +7,26 @@ use core::hash::Hash; use data_encoding::BASE64URL_NOPAD; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; - ////////////////////////////////////////////////////////////////////// -/// Length of a public key in bytes +/// Length of a crypto key in bytes #[allow(dead_code)] -pub const PUBLIC_KEY_LENGTH: usize = 32; -/// Length of a public key in bytes after encoding to base64url +pub const CRYPTO_KEY_LENGTH: usize = 32; +/// Length of a crypto key in bytes after encoding to base64url #[allow(dead_code)] -pub const PUBLIC_KEY_LENGTH_ENCODED: usize = 43; +pub const CRYPTO_KEY_LENGTH_ENCODED: usize = 43; +/// Length of a crypto key in bytes +#[allow(dead_code)] +pub const PUBLIC_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH; +/// Length of a crypto key in bytes after encoding to base64url +#[allow(dead_code)] +pub const PUBLIC_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED; /// Length of a secret key in bytes #[allow(dead_code)] -pub const SECRET_KEY_LENGTH: usize = 32; +pub const SECRET_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH; /// Length of a secret key in bytes after encoding to base64url #[allow(dead_code)] -pub const SECRET_KEY_LENGTH_ENCODED: usize = 43; +pub const SECRET_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED; /// Length of a signature in bytes #[allow(dead_code)] pub const SIGNATURE_LENGTH: usize = 64; @@ -37,16 +41,22 @@ pub const NONCE_LENGTH: usize = 24; pub const NONCE_LENGTH_ENCODED: usize = 32; /// Length of a shared secret in bytes #[allow(dead_code)] -pub const SHARED_SECRET_LENGTH: usize = 32; +pub const SHARED_SECRET_LENGTH: usize = CRYPTO_KEY_LENGTH; /// Length of a shared secret in bytes after encoding to base64url #[allow(dead_code)] -pub const SHARED_SECRET_LENGTH_ENCODED: usize = 43; +pub const SHARED_SECRET_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED; /// Length of a route id in bytes #[allow(dead_code)] -pub const ROUTE_ID_LENGTH: usize = 32; +pub const ROUTE_ID_LENGTH: usize = CRYPTO_KEY_LENGTH; /// Length of a route id in bytes afer encoding to base64url #[allow(dead_code)] -pub const ROUTE_ID_LENGTH_ENCODED: usize = 43; +pub const ROUTE_ID_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED; +/// Length of a hash digest in bytes +#[allow(dead_code)] +pub const HASH_DIGEST_LENGTH: usize = CRYPTO_KEY_LENGTH; +/// Length of a hash digest in bytes after encoding to base64url +#[allow(dead_code)] +pub const HASH_DIGEST_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED; ////////////////////////////////////////////////////////////////////// @@ -56,11 +66,11 @@ where { fn encode(&self) -> String; fn encoded_len() -> usize; - fn try_decode>(input: S) -> Result { + fn try_decode>(input: S) -> VeilidAPIResult { let b = 
input.as_ref().as_bytes(); Self::try_decode_bytes(b) } - fn try_decode_bytes(b: &[u8]) -> Result; + fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult; } ////////////////////////////////////////////////////////////////////// @@ -120,18 +130,6 @@ macro_rules! byte_array_type { Self { bytes } } - pub fn try_from_vec(v: Vec) -> Result { - let vl = v.len(); - Ok(Self { - bytes: v.try_into().map_err(|_| { - VeilidAPIError::generic(format!( - "Expected a Vec of length {} but it was {}", - $size, vl - )) - })?, - }) - } - pub fn bit(&self, index: usize) -> bool { assert!(index < ($size * 8)); let bi = index / 8; @@ -182,7 +180,7 @@ macro_rules! byte_array_type { fn encoded_len() -> usize { $encoded_size } - fn try_decode_bytes(b: &[u8]) -> Result { + fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult { let mut bytes = [0u8; $size]; let res = BASE64URL_NOPAD.decode_len(b.len()); match res { @@ -244,23 +242,47 @@ macro_rules! byte_array_type { Self::try_decode(value) } } + impl TryFrom<&[u8]> for $name { + type Error = VeilidAPIError; + fn try_from(v: &[u8]) -> Result { + let vl = v.len(); + Ok(Self { + bytes: v.try_into().map_err(|_| { + VeilidAPIError::generic(format!( + "Expected a slice of length {} but it was {}", + $size, vl + )) + })?, + }) + } + } + + impl core::ops::Deref for $name { + type Target = [u8; $size]; + + fn deref(&self) -> &Self::Target { + &self.bytes + } + } + + impl core::ops::DerefMut for $name { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.bytes + } + } }; } ///////////////////////////////////////// -byte_array_type!(PublicKey, PUBLIC_KEY_LENGTH, PUBLIC_KEY_LENGTH_ENCODED); -byte_array_type!(SecretKey, SECRET_KEY_LENGTH, SECRET_KEY_LENGTH_ENCODED); +byte_array_type!(CryptoKey, CRYPTO_KEY_LENGTH, CRYPTO_KEY_LENGTH_ENCODED); + +pub type PublicKey = CryptoKey; +pub type SecretKey = CryptoKey; +pub type HashDigest = CryptoKey; +pub type SharedSecret = CryptoKey; +pub type RouteId = CryptoKey; +pub type CryptoKeyDistance = CryptoKey; + byte_array_type!(Signature, SIGNATURE_LENGTH, SIGNATURE_LENGTH_ENCODED); -byte_array_type!( - PublicKeyDistance, - PUBLIC_KEY_LENGTH, - PUBLIC_KEY_LENGTH_ENCODED -); byte_array_type!(Nonce, NONCE_LENGTH, NONCE_LENGTH_ENCODED); -byte_array_type!( - SharedSecret, - SHARED_SECRET_LENGTH, - SHARED_SECRET_LENGTH_ENCODED -); -byte_array_type!(RouteId, ROUTE_ID_LENGTH, ROUTE_ID_LENGTH_ENCODED); diff --git a/veilid-core/src/crypto/crypto_system.rs b/veilid-core/src/crypto/crypto_system.rs index 5de8fb57..c2a65695 100644 --- a/veilid-core/src/crypto/crypto_system.rs +++ b/veilid-core/src/crypto/crypto_system.rs @@ -6,52 +6,36 @@ pub trait CryptoSystem { fn crypto(&self) -> Crypto; // Cached Operations - fn cached_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result; + fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult; // Generation + fn random_bytes(&self, len: u32) -> Vec; + fn default_salt_length(&self) -> u32; + fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult; + fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult; + fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult; fn random_nonce(&self) -> Nonce; fn random_shared_secret(&self) -> SharedSecret; - fn compute_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result; + fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult; fn generate_keypair(&self) -> KeyPair; - fn generate_hash(&self, data: &[u8]) -> PublicKey; - fn 
generate_hash_reader( - &self, - reader: &mut dyn std::io::Read, - ) -> Result; + fn generate_hash(&self, data: &[u8]) -> HashDigest; + fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult; // Validation - fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool; - fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool; + fn validate_keypair(&self, key: &PublicKey, secret: &SecretKey) -> bool; + fn validate_hash(&self, data: &[u8], hash: &HashDigest) -> bool; fn validate_hash_reader( &self, reader: &mut dyn std::io::Read, - key: &PublicKey, - ) -> Result; + hash: &HashDigest, + ) -> VeilidAPIResult; // Distance Metric - fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance; + fn distance(&self, key1: &CryptoKey, key2: &CryptoKey) -> CryptoKeyDistance; // Authentication - fn sign( - &self, - key: &PublicKey, - secret: &SecretKey, - data: &[u8], - ) -> Result; - fn verify( - &self, - key: &PublicKey, - data: &[u8], - signature: &Signature, - ) -> Result<(), VeilidAPIError>; + fn sign(&self, key: &PublicKey, secret: &SecretKey, data: &[u8]) -> VeilidAPIResult; + fn verify(&self, key: &PublicKey, data: &[u8], signature: &Signature) -> VeilidAPIResult<()>; // AEAD Encrypt/Decrypt fn aead_overhead(&self) -> usize; @@ -61,53 +45,53 @@ pub trait CryptoSystem { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError>; + ) -> VeilidAPIResult<()>; fn decrypt_aead( &self, body: &[u8], nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError>; + ) -> VeilidAPIResult>; fn encrypt_in_place_aead( &self, body: &mut Vec, nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError>; + ) -> VeilidAPIResult<()>; fn encrypt_aead( &self, body: &[u8], nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError>; + ) -> VeilidAPIResult>; // NoAuth Encrypt/Decrypt fn crypt_in_place_no_auth( &self, - body: &mut Vec, - nonce: &Nonce, + body: &mut [u8], + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ); fn crypt_b2b_no_auth( &self, in_buf: &[u8], out_buf: &mut [u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ); fn crypt_no_auth_aligned_8( &self, body: &[u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) -> Vec; fn crypt_no_auth_unaligned( &self, body: &[u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) -> Vec; } diff --git a/veilid-core/src/crypto/envelope.rs b/veilid-core/src/crypto/envelope.rs index fe22f2fc..c123ea52 100644 --- a/veilid-core/src/crypto/envelope.rs +++ b/veilid-core/src/crypto/envelope.rs @@ -66,7 +66,7 @@ impl Envelope { } } - pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result { + pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> VeilidAPIResult { // Ensure we are at least the length of the envelope // Silent drop here, as we use zero length packets as part of the protocol for hole punching if data.len() < MIN_ENVELOPE_SIZE { @@ -175,7 +175,7 @@ impl Envelope { crypto: Crypto, data: &[u8], node_id_secret: &SecretKey, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { // Get DH secret let vcrypto = crypto .get(self.crypto_kind) @@ -183,8 +183,11 @@ impl Envelope { let dh_secret = vcrypto.cached_dh(&self.sender_id, node_id_secret)?; // Decrypt message without authentication - let 
body = - vcrypto.crypt_no_auth_aligned_8(&data[0x6A..data.len() - 64], &self.nonce, &dh_secret); + let body = vcrypto.crypt_no_auth_aligned_8( + &data[0x6A..data.len() - 64], + &self.nonce.bytes, + &dh_secret, + ); Ok(body) } @@ -194,7 +197,7 @@ impl Envelope { crypto: Crypto, body: &[u8], node_id_secret: &SecretKey, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { // Ensure body isn't too long let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE; if envelope_size > MAX_ENVELOPE_SIZE { @@ -227,7 +230,7 @@ impl Envelope { data[0x4A..0x6A].copy_from_slice(&self.recipient_id.bytes); // Encrypt and authenticate message - let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce, &dh_secret); + let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce.bytes, &dh_secret); // Write body if !encrypted_body.is_empty() { diff --git a/veilid-core/src/crypto/mod.rs b/veilid-core/src/crypto/mod.rs index ec32c677..822687eb 100644 --- a/veilid-core/src/crypto/mod.rs +++ b/veilid-core/src/crypto/mod.rs @@ -4,7 +4,6 @@ mod dh_cache; mod envelope; mod receipt; mod types; -mod value; pub mod crypto_system; #[cfg(feature = "enable-crypto-none")] @@ -20,7 +19,6 @@ pub use dh_cache::*; pub use envelope::*; pub use receipt::*; pub use types::*; -pub use value::*; #[cfg(feature = "enable-crypto-none")] pub use none::*; @@ -84,7 +82,6 @@ struct CryptoInner { struct CryptoUnlockedInner { config: VeilidConfig, table_store: TableStore, - protected_store: ProtectedStore, } /// Crypto factory implementation @@ -106,16 +103,11 @@ impl Crypto { } } - pub fn new( - config: VeilidConfig, - table_store: TableStore, - protected_store: ProtectedStore, - ) -> Self { + pub fn new(config: VeilidConfig, table_store: TableStore) -> Self { let out = Self { unlocked_inner: Arc::new(CryptoUnlockedInner { config, table_store, - protected_store, }), inner: Arc::new(Mutex::new(Self::new_inner())), }; @@ -140,12 +132,11 @@ impl Crypto { pub async fn init(&self) -> EyreResult<()> { trace!("Crypto::init"); let table_store = self.unlocked_inner.table_store.clone(); - // Init node id from config if let Err(e) = self .unlocked_inner .config - .init_node_ids(self.clone(), self.unlocked_inner.protected_store.clone()) + .init_node_ids(self.clone(), table_store.clone()) .await { return Err(e).wrap_err("init node id failed"); @@ -171,13 +162,16 @@ impl Crypto { }; // load caches if they are valid for this node id - let mut db = table_store.open("crypto_caches", 1).await?; - let caches_valid = match db.load(0, b"cache_validity_key")? { + let mut db = table_store + .open("crypto_caches", 1) + .await + .wrap_err("failed to open crypto_caches")?; + let caches_valid = match db.load(0, b"cache_validity_key").await? { Some(v) => v == cache_validity_key, None => false, }; if caches_valid { - if let Some(b) = db.load(0, b"dh_cache")? { + if let Some(b) = db.load(0, b"dh_cache").await? 
{ let mut inner = self.inner.lock(); bytes_to_cache(&b, &mut inner.dh_cache); } @@ -263,7 +257,7 @@ impl Crypto { node_ids: &[TypedKey], data: &[u8], typed_signatures: &[TypedSignature], - ) -> Result { + ) -> VeilidAPIResult { let mut out = TypedKeySet::with_capacity(node_ids.len()); for sig in typed_signatures { for nid in node_ids { @@ -286,7 +280,7 @@ impl Crypto { data: &[u8], typed_key_pairs: &[TypedKeyPair], transform: F, - ) -> Result, VeilidAPIError> + ) -> VeilidAPIResult> where F: Fn(&TypedKeyPair, Signature) -> R, { @@ -302,7 +296,7 @@ impl Crypto { /// Generate keypair /// Does not require startup/init - pub fn generate_keypair(crypto_kind: CryptoKind) -> Result { + pub fn generate_keypair(crypto_kind: CryptoKind) -> VeilidAPIResult { #[cfg(feature = "enable-crypto-vld0")] if crypto_kind == CRYPTO_KIND_VLD0 { let kp = vld0_generate_keypair(); @@ -323,7 +317,7 @@ impl Crypto { vcrypto: &T, key: &PublicKey, secret: &SecretKey, - ) -> Result { + ) -> VeilidAPIResult { Ok( match self.inner.lock().dh_cache.entry( DHCacheKey { diff --git a/veilid-core/src/crypto/none/mod.rs b/veilid-core/src/crypto/none/mod.rs index e4109b77..b81056b6 100644 --- a/veilid-core/src/crypto/none/mod.rs +++ b/veilid-core/src/crypto/none/mod.rs @@ -1,7 +1,8 @@ use super::*; +use argon2::password_hash::Salt; +use data_encoding::BASE64URL_NOPAD; use digest::Digest; use rand::RngCore; - const AEAD_OVERHEAD: usize = PUBLIC_KEY_LENGTH; pub const CRYPTO_KIND_NONE: CryptoKind = FourCC([b'N', b'O', b'N', b'E']); @@ -70,16 +71,49 @@ impl CryptoSystem for CryptoSystemNONE { } // Cached Operations - fn cached_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result { + fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { self.crypto .cached_dh_internal::(self, key, secret) } // Generation + fn random_bytes(&self, len: u32) -> Vec { + let mut bytes = unsafe { unaligned_u8_vec_uninit(len as usize) }; + random_bytes(bytes.as_mut()); + bytes + } + fn default_salt_length(&self) -> u32 { + 4 + } + fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult { + if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { + apibail_generic!("invalid salt length"); + } + Ok(format!( + "{}:{}", + BASE64URL_NOPAD.encode(salt), + BASE64URL_NOPAD.encode(password) + )) + } + fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult { + let Some((salt, _)) = password_hash.split_once(":") else { + apibail_generic!("invalid format"); + }; + let Ok(salt) = BASE64URL_NOPAD.decode(salt.as_bytes()) else { + apibail_generic!("invalid salt"); + }; + return Ok(&self.hash_password(password, &salt)? 
== password_hash); + } + + fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult { + if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { + apibail_generic!("invalid salt length"); + } + Ok(SharedSecret::new( + *blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(), + )) + } + fn random_nonce(&self) -> Nonce { let mut nonce = [0u8; NONCE_LENGTH]; random_bytes(&mut nonce).unwrap(); @@ -90,11 +124,7 @@ impl CryptoSystem for CryptoSystemNONE { random_bytes(&mut s).unwrap(); SharedSecret::new(s) } - fn compute_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result { + fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { let s = do_xor_32(&key.bytes, &secret.bytes); Ok(SharedSecret::new(s)) } @@ -104,10 +134,7 @@ impl CryptoSystem for CryptoSystemNONE { fn generate_hash(&self, data: &[u8]) -> PublicKey { PublicKey::new(*blake3::hash(data).as_bytes()) } - fn generate_hash_reader( - &self, - reader: &mut dyn std::io::Read, - ) -> Result { + fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult { let mut hasher = blake3::Hasher::new(); std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; Ok(PublicKey::new(*hasher.finalize().as_bytes())) @@ -132,21 +159,21 @@ impl CryptoSystem for CryptoSystemNONE { &self, reader: &mut dyn std::io::Read, dht_key: &PublicKey, - ) -> Result { + ) -> VeilidAPIResult { let mut hasher = blake3::Hasher::new(); std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; let bytes = *hasher.finalize().as_bytes(); Ok(bytes == dht_key.bytes) } // Distance Metric - fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance { + fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> CryptoKeyDistance { let mut bytes = [0u8; PUBLIC_KEY_LENGTH]; for (n, byte) in bytes.iter_mut().enumerate() { *byte = key1.bytes[n] ^ key2.bytes[n]; } - PublicKeyDistance::new(bytes) + CryptoKeyDistance::new(bytes) } // Authentication @@ -155,7 +182,7 @@ impl CryptoSystem for CryptoSystemNONE { dht_key: &PublicKey, dht_key_secret: &SecretKey, data: &[u8], - ) -> Result { + ) -> VeilidAPIResult { if !is_bytes_eq_32(&do_xor_32(&dht_key.bytes, &dht_key_secret.bytes), 0xFFu8) { return Err(VeilidAPIError::parse_error( "Keypair is invalid", @@ -178,7 +205,7 @@ impl CryptoSystem for CryptoSystemNONE { dht_key: &PublicKey, data: &[u8], signature: &Signature, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let mut dig = Blake3Digest512::new(); dig.update(data); let sig = dig.finalize(); @@ -215,7 +242,7 @@ impl CryptoSystem for CryptoSystemNONE { nonce: &Nonce, shared_secret: &SharedSecret, _associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let mut blob = nonce.bytes.to_vec(); blob.extend_from_slice(&[0u8; 8]); let blob = do_xor_32(&blob, &shared_secret.bytes); @@ -237,7 +264,7 @@ impl CryptoSystem for CryptoSystemNONE { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { let mut out = body.to_vec(); self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) .map_err(map_to_string) @@ -251,7 +278,7 @@ impl CryptoSystem for CryptoSystemNONE { nonce: &Nonce, shared_secret: &SharedSecret, _associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let mut blob = nonce.bytes.to_vec(); blob.extend_from_slice(&[0u8; 8]); let blob = 
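As a standalone illustration (not part of the patch) of the deliberately insecure scheme CryptoSystemNONE uses above: its "password hash" is just base64url(salt) ":" base64url(password), so verification is plain recomputation and the derived shared secret is a blake3 hash of that string.

use data_encoding::BASE64URL_NOPAD;

fn none_style_hash(password: &[u8], salt: &[u8]) -> String {
    format!(
        "{}:{}",
        BASE64URL_NOPAD.encode(salt),
        BASE64URL_NOPAD.encode(password)
    )
}

fn main() {
    let h = none_style_hash(b"abc123", b"qwerasdf");
    assert_eq!(h, "cXdlcmFzZGY:YWJjMTIz");
    // The password is trivially recoverable, which is why NONE exists only for testing
    let (_salt_b64, pw_b64) = h.split_once(':').unwrap();
    assert_eq!(
        BASE64URL_NOPAD.decode(pw_b64.as_bytes()).unwrap(),
        b"abc123".to_vec()
    );
}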
do_xor_32(&blob, &shared_secret.bytes); @@ -266,7 +293,7 @@ impl CryptoSystem for CryptoSystemNONE { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { let mut out = body.to_vec(); self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) .map_err(map_to_string) @@ -275,12 +302,7 @@ impl CryptoSystem for CryptoSystemNONE { } // NoAuth Encrypt/Decrypt - fn crypt_in_place_no_auth( - &self, - body: &mut Vec, - nonce: &Nonce, - shared_secret: &SharedSecret, - ) { + fn crypt_in_place_no_auth(&self, body: &mut [u8], nonce: &Nonce, shared_secret: &SharedSecret) { let mut blob = nonce.bytes.to_vec(); blob.extend_from_slice(&[0u8; 8]); let blob = do_xor_32(&blob, &shared_secret.bytes); diff --git a/veilid-core/src/crypto/receipt.rs b/veilid-core/src/crypto/receipt.rs index 50496d04..4f8d4b15 100644 --- a/veilid-core/src/crypto/receipt.rs +++ b/veilid-core/src/crypto/receipt.rs @@ -49,7 +49,7 @@ impl Receipt { nonce: Nonce, sender_id: PublicKey, extra_data: D, - ) -> Result { + ) -> VeilidAPIResult { assert!(VALID_ENVELOPE_VERSIONS.contains(&version)); assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind)); @@ -68,7 +68,7 @@ impl Receipt { }) } - pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result { + pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> VeilidAPIResult { // Ensure we are at least the length of the envelope if data.len() < MIN_RECEIPT_SIZE { apibail_parse_error!("receipt too small", data.len()); @@ -153,11 +153,7 @@ impl Receipt { }) } - pub fn to_signed_data( - &self, - crypto: Crypto, - secret: &SecretKey, - ) -> Result, VeilidAPIError> { + pub fn to_signed_data(&self, crypto: Crypto, secret: &SecretKey) -> VeilidAPIResult> { // Ensure extra data isn't too long let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE; if receipt_size > MAX_RECEIPT_SIZE { diff --git a/veilid-core/src/crypto/tests/test_crypto.rs b/veilid-core/src/crypto/tests/test_crypto.rs index 3f236b2e..e7e0c73e 100644 --- a/veilid-core/src/crypto/tests/test_crypto.rs +++ b/veilid-core/src/crypto/tests/test_crypto.rs @@ -162,6 +162,66 @@ pub async fn test_dh(vcrypto: CryptoSystemVersion) { trace!("cached_dh: {:?}", r5); } +pub async fn test_generation(vcrypto: CryptoSystemVersion) { + let b1 = vcrypto.random_bytes(32); + let b2 = vcrypto.random_bytes(32); + assert_ne!(b1, b2); + assert_eq!(b1.len(), 32); + assert_eq!(b2.len(), 32); + let b3 = vcrypto.random_bytes(0); + let b4 = vcrypto.random_bytes(0); + assert_eq!(b3, b4); + assert_eq!(b3.len(), 0); + + assert_ne!(vcrypto.default_salt_length(), 0); + + let pstr1 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap(); + let pstr2 = vcrypto.hash_password(b"abc123", b"qwerasdf").unwrap(); + assert_eq!(pstr1, pstr2); + let pstr3 = vcrypto.hash_password(b"abc123", b"qwerasdg").unwrap(); + assert_ne!(pstr1, pstr3); + let pstr4 = vcrypto.hash_password(b"abc124", b"qwerasdf").unwrap(); + assert_ne!(pstr1, pstr4); + let pstr5 = vcrypto.hash_password(b"abc124", b"qwerasdg").unwrap(); + assert_ne!(pstr3, pstr5); + + vcrypto + .hash_password(b"abc123", b"qwe") + .expect_err("should reject short salt"); + vcrypto + .hash_password( + b"abc123", + b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz", + ) + .expect_err("should reject long salt"); + + assert!(vcrypto.verify_password(b"abc123", &pstr1).unwrap()); + assert!(vcrypto.verify_password(b"abc123", &pstr2).unwrap()); + assert!(vcrypto.verify_password(b"abc123", &pstr3).unwrap()); + 
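A minimal usage sketch for the password methods this test exercises, assuming a CryptoSystemVersion handle like the one passed to test_generation; the helper name is illustrative.

fn hash_and_check(vcrypto: &CryptoSystemVersion, password: &[u8]) -> VeilidAPIResult<bool> {
    // Salt of the cryptosystem's preferred length (16 for VLD0, 4 for NONE)
    let salt = vcrypto.random_bytes(vcrypto.default_salt_length());
    // For VLD0 this is a PHC string; for NONE it is the insecure format sketched earlier
    let password_hash = vcrypto.hash_password(password, &salt)?;
    vcrypto.verify_password(password, &password_hash)
}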
assert!(!vcrypto.verify_password(b"abc123", &pstr4).unwrap()); + assert!(!vcrypto.verify_password(b"abc123", &pstr5).unwrap()); + + let ss1 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf"); + let ss2 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdf"); + assert_eq!(ss1, ss2); + let ss3 = vcrypto.derive_shared_secret(b"abc123", b"qwerasdg"); + assert_ne!(ss1, ss3); + let ss4 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdf"); + assert_ne!(ss1, ss4); + let ss5 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdg"); + assert_ne!(ss3, ss5); + + vcrypto + .derive_shared_secret(b"abc123", b"qwe") + .expect_err("should reject short salt"); + vcrypto + .derive_shared_secret( + b"abc123", + b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz", + ) + .expect_err("should reject long salt"); +} + pub async fn test_all() { let api = crypto_tests_startup().await; let crypto = api.crypto().unwrap(); @@ -171,7 +231,8 @@ pub async fn test_all() { let vcrypto = crypto.get(v).unwrap(); test_aead(vcrypto.clone()).await; test_no_auth(vcrypto.clone()).await; - test_dh(vcrypto).await; + test_dh(vcrypto.clone()).await; + test_generation(vcrypto).await; } crypto_tests_shutdown(api.clone()).await; diff --git a/veilid-core/src/crypto/tests/test_types.rs b/veilid-core/src/crypto/tests/test_types.rs index d61b1cff..72813ea0 100644 --- a/veilid-core/src/crypto/tests/test_types.rs +++ b/veilid-core/src/crypto/tests/test_types.rs @@ -225,6 +225,38 @@ pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) { assert!(f2.is_err()); } +pub async fn test_typed_convert(vcrypto: CryptoSystemVersion) { + let tks1 = format!( + "{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ", + vcrypto.kind().to_string() + ); + let tk1 = TypedKey::from_str(&tks1).expect("failed"); + let tks1x = tk1.to_string(); + assert_eq!(tks1, tks1x); + + let tks2 = format!( + "{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd", + vcrypto.kind().to_string() + ); + let _tk2 = TypedKey::from_str(&tks2).expect_err("succeeded when it shouldnt have"); + + let tks3 = format!("XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",); + let tk3 = TypedKey::from_str(&tks3).expect("failed"); + let tks3x = tk3.to_string(); + assert_eq!(tks3, tks3x); + + let tks4 = format!("XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd",); + let _tk4 = TypedKey::from_str(&tks4).expect_err("succeeded when it shouldnt have"); + + let tks5 = format!("XXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",); + let _tk5 = TypedKey::from_str(&tks5).expect_err("succeeded when it shouldnt have"); + + let tks6 = format!("7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",); + let tk6 = TypedKey::from_str(&tks6).expect("failed"); + let tks6x = tk6.to_string(); + assert!(tks6x.ends_with(&tks6)); +} + async fn test_hash(vcrypto: CryptoSystemVersion) { let mut s = BTreeSet::::new(); @@ -333,6 +365,7 @@ pub async fn test_all() { test_sign_and_verify(vcrypto.clone()).await; test_key_conversions(vcrypto.clone()).await; test_encode_decode(vcrypto.clone()).await; + test_typed_convert(vcrypto.clone()).await; test_hash(vcrypto.clone()).await; test_operations(vcrypto).await; } diff --git a/veilid-core/src/crypto/types/crypto_typed.rs b/veilid-core/src/crypto/types/crypto_typed.rs index b6575769..b0d3c61a 100644 --- a/veilid-core/src/crypto/types/crypto_typed.rs +++ b/veilid-core/src/crypto/types/crypto_typed.rs @@ -127,12 +127,17 @@ where type Err = VeilidAPIError; fn from_str(s: &str) -> Result { let b = s.as_bytes(); - if b.len() != (5 + K::encoded_len()) || b[4..5] != b":"[..] 
{ - apibail_parse_error!("invalid typed key", s); + if b.len() == (5 + K::encoded_len()) && b[4..5] == b":"[..] { + let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert"); + let value = K::try_decode_bytes(&b[5..])?; + Ok(Self { kind, value }) + } else if b.len() == K::encoded_len() { + let kind = best_crypto_kind(); + let value = K::try_decode_bytes(b)?; + Ok(Self { kind, value }) + } else { + apibail_generic!("invalid cryptotyped format"); } - let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert"); - let value = K::try_decode_bytes(&b[5..])?; - Ok(Self { kind, value }) } } impl<'de, K> Deserialize<'de> for CryptoTyped diff --git a/veilid-core/src/crypto/types/crypto_typed_set.rs b/veilid-core/src/crypto/types/crypto_typed_set.rs index 19932b67..b8e17bf6 100644 --- a/veilid-core/src/crypto/types/crypto_typed_set.rs +++ b/veilid-core/src/crypto/types/crypto_typed_set.rs @@ -141,9 +141,9 @@ where } false } - pub fn contains_key(&self, key: &K) -> bool { + pub fn contains_value(&self, value: &K) -> bool { for tk in &self.items { - if tk.value == *key { + if tk.value == *value { return true; } } @@ -282,6 +282,28 @@ where tks } } +impl From<&[CryptoTyped]> for CryptoTypedSet +where + K: Clone + + Copy + + fmt::Debug + + fmt::Display + + FromStr + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + RkyvArchive + + Encodable, + ::Archived: Hash + PartialEq + Eq, +{ + fn from(x: &[CryptoTyped]) -> Self { + let mut tks = CryptoTypedSet::::with_capacity(x.len()); + tks.add_all(x); + tks + } +} impl Into>> for CryptoTypedSet where K: Clone diff --git a/veilid-core/src/crypto/types/keypair.rs b/veilid-core/src/crypto/types/keypair.rs index 253f84ea..fc53af44 100644 --- a/veilid-core/src/crypto/types/keypair.rs +++ b/veilid-core/src/crypto/types/keypair.rs @@ -39,7 +39,7 @@ impl Encodable for KeyPair { fn encoded_len() -> usize { PublicKey::encoded_len() + 1 + SecretKey::encoded_len() } - fn try_decode_bytes(b: &[u8]) -> Result { + fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult { if b.len() != Self::encoded_len() { apibail_parse_error!("input has wrong encoded length", format!("len={}", b.len())); } @@ -56,9 +56,7 @@ impl fmt::Display for KeyPair { impl fmt::Debug for KeyPair { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, concat!(stringify!($name), "("))?; - write!(f, "{}", self.encode())?; - write!(f, ")") + write!(f, "KeyPair({})", self.encode()) } } diff --git a/veilid-core/src/crypto/types/mod.rs b/veilid-core/src/crypto/types/mod.rs index 355c34b0..1e22829a 100644 --- a/veilid-core/src/crypto/types/mod.rs +++ b/veilid-core/src/crypto/types/mod.rs @@ -5,8 +5,6 @@ use core::convert::TryInto; use core::fmt; use core::hash::Hash; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; - /// Cryptography version fourcc code pub type CryptoKind = FourCC; @@ -55,5 +53,10 @@ pub type TypedKey = CryptoTyped; pub type TypedSecret = CryptoTyped; pub type TypedKeyPair = CryptoTyped; pub type TypedSignature = CryptoTyped; +pub type TypedSharedSecret = CryptoTyped; + pub type TypedKeySet = CryptoTypedSet; pub type TypedSecretSet = CryptoTypedSet; +pub type TypedKeyPairSet = CryptoTypedSet; +pub type TypedSignatureSet = CryptoTypedSet; +pub type TypedSharedSecretSet = CryptoTypedSet; diff --git a/veilid-core/src/crypto/value.rs b/veilid-core/src/crypto/value.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/veilid-core/src/crypto/vld0/mod.rs 
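Condensed illustration of the two string forms now accepted by the from_str above, assuming the VLD0 cryptosystem feature is enabled; the key literal is the one used by test_typed_convert.

use std::str::FromStr;

let tks = format!("{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ", CRYPTO_KIND_VLD0);
let tk = TypedKey::from_str(&tks).expect("prefixed form parses");
assert_eq!(tk.to_string(), tks);

// Bare form: best_crypto_kind() supplies the kind, so the rendered string gains a prefix
let tk2 = TypedKey::from_str("7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ")
    .expect("bare form parses");
assert!(tk2
    .to_string()
    .ends_with("7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ"));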
b/veilid-core/src/crypto/vld0/mod.rs index c8539381..5d3a30bf 100644 --- a/veilid-core/src/crypto/vld0/mod.rs +++ b/veilid-core/src/crypto/vld0/mod.rs @@ -1,5 +1,9 @@ use super::*; +use argon2::{ + password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, Salt, SaltString}, + Argon2, +}; use chacha20::cipher::{KeyIvInit, StreamCipher}; use chacha20::XChaCha20; use chacha20poly1305 as ch; @@ -13,7 +17,7 @@ use x25519_dalek as xd; const AEAD_OVERHEAD: usize = 16; pub const CRYPTO_KIND_VLD0: CryptoKind = FourCC([b'V', b'L', b'D', b'0']); -fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result { +fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> VeilidAPIResult { let bytes = key.to_bytes(); let compressed = cd::edwards::CompressedEdwardsY(bytes); let point = compressed @@ -22,7 +26,7 @@ fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result Result { +fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> VeilidAPIResult { let exp = ed::ExpandedSecretKey::from(key); let bytes: [u8; ed::EXPANDED_SECRET_KEY_LENGTH] = exp.to_bytes(); let lowbytes: [u8; 32] = bytes[0..32].try_into().map_err(VeilidAPIError::internal)?; @@ -61,31 +65,71 @@ impl CryptoSystem for CryptoSystemVLD0 { } // Cached Operations - fn cached_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result { + fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { self.crypto .cached_dh_internal::(self, key, secret) } // Generation + fn random_bytes(&self, len: u32) -> Vec { + let mut bytes = unsafe { unaligned_u8_vec_uninit(len as usize) }; + random_bytes(bytes.as_mut()); + bytes + } + fn default_salt_length(&self) -> u32 { + 16 + } + fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult { + if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { + apibail_generic!("invalid salt length"); + } + + // Hash password to PHC string ($argon2id$v=19$...) + let salt = SaltString::encode_b64(salt).map_err(VeilidAPIError::generic)?; + + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + let password_hash = argon2 + .hash_password(password, &salt) + .map_err(VeilidAPIError::generic)? 
+ .to_string(); + Ok(password_hash) + } + fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult { + let parsed_hash = PasswordHash::new(password_hash).map_err(VeilidAPIError::generic)?; + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + Ok(argon2.verify_password(password, &parsed_hash).is_ok()) + } + + fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult { + if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { + apibail_generic!("invalid salt length"); + } + + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + let mut output_key_material = [0u8; SHARED_SECRET_LENGTH]; + argon2 + .hash_password_into(password, salt, &mut output_key_material) + .map_err(VeilidAPIError::generic)?; + Ok(SharedSecret::new(output_key_material)) + } + fn random_nonce(&self) -> Nonce { let mut nonce = [0u8; NONCE_LENGTH]; - random_bytes(&mut nonce).unwrap(); + random_bytes(&mut nonce); Nonce::new(nonce) } fn random_shared_secret(&self) -> SharedSecret { let mut s = [0u8; SHARED_SECRET_LENGTH]; - random_bytes(&mut s).unwrap(); + random_bytes(&mut s); SharedSecret::new(s) } - fn compute_dh( - &self, - key: &PublicKey, - secret: &SecretKey, - ) -> Result { + fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult { let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?; let pk_xd = ed25519_to_x25519_pk(&pk_ed)?; let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?; @@ -98,10 +142,7 @@ impl CryptoSystem for CryptoSystemVLD0 { fn generate_hash(&self, data: &[u8]) -> PublicKey { PublicKey::new(*blake3::hash(data).as_bytes()) } - fn generate_hash_reader( - &self, - reader: &mut dyn std::io::Read, - ) -> Result { + fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult { let mut hasher = blake3::Hasher::new(); std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; Ok(PublicKey::new(*hasher.finalize().as_bytes())) @@ -127,21 +168,21 @@ impl CryptoSystem for CryptoSystemVLD0 { &self, reader: &mut dyn std::io::Read, dht_key: &PublicKey, - ) -> Result { + ) -> VeilidAPIResult { let mut hasher = blake3::Hasher::new(); std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; let bytes = *hasher.finalize().as_bytes(); Ok(bytes == dht_key.bytes) } // Distance Metric - fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance { + fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> CryptoKeyDistance { let mut bytes = [0u8; PUBLIC_KEY_LENGTH]; for (n, byte) in bytes.iter_mut().enumerate() { *byte = key1.bytes[n] ^ key2.bytes[n]; } - PublicKeyDistance::new(bytes) + CryptoKeyDistance::new(bytes) } // Authentication @@ -150,7 +191,7 @@ impl CryptoSystem for CryptoSystemVLD0 { dht_key: &PublicKey, dht_key_secret: &SecretKey, data: &[u8], - ) -> Result { + ) -> VeilidAPIResult { let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] = [0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH]; @@ -177,7 +218,7 @@ impl CryptoSystem for CryptoSystemVLD0 { dht_key: &PublicKey, data: &[u8], signature: &Signature, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let pk = ed::PublicKey::from_bytes(&dht_key.bytes) .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; let sig = ed::Signature::from_bytes(&signature.bytes) @@ -201,7 +242,7 @@ impl CryptoSystem for CryptoSystemVLD0 { nonce: &Nonce, shared_secret: &SharedSecret, 
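For context, a self-contained sketch of the same Argon2id flow (argon2 and password-hash crates) that hash_password, verify_password, and derive_shared_secret wrap above; the password and salt values are illustrative.

use argon2::{
    password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};

fn main() {
    let argon2 = Argon2::default(); // Argon2id v19 with default parameters

    // PHC-string path (hash_password / verify_password)
    let salt = SaltString::encode_b64(b"0123456789abcdef").expect("salt encodes");
    let phc = argon2
        .hash_password(b"my password", &salt)
        .expect("hashing succeeds")
        .to_string(); // "$argon2id$v=19$..."
    let parsed = PasswordHash::new(&phc).expect("valid PHC string");
    assert!(argon2.verify_password(b"my password", &parsed).is_ok());

    // Raw key-derivation path (derive_shared_secret): fill a 32-byte output directly
    let mut shared_secret = [0u8; 32];
    argon2
        .hash_password_into(b"my password", b"0123456789abcdef", &mut shared_secret)
        .expect("kdf succeeds");
}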
associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let key = ch::Key::from(shared_secret.bytes); let xnonce = ch::XNonce::from(nonce.bytes); let aead = ch::XChaCha20Poly1305::new(&key); @@ -216,7 +257,7 @@ impl CryptoSystem for CryptoSystemVLD0 { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { let mut out = body.to_vec(); self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) .map_err(map_to_string) @@ -230,7 +271,7 @@ impl CryptoSystem for CryptoSystemVLD0 { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { let key = ch::Key::from(shared_secret.bytes); let xnonce = ch::XNonce::from(nonce.bytes); let aead = ch::XChaCha20Poly1305::new(&key); @@ -246,7 +287,7 @@ impl CryptoSystem for CryptoSystemVLD0 { nonce: &Nonce, shared_secret: &SharedSecret, associated_data: Option<&[u8]>, - ) -> Result, VeilidAPIError> { + ) -> VeilidAPIResult> { let mut out = body.to_vec(); self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) .map_err(map_to_string) @@ -257,11 +298,11 @@ impl CryptoSystem for CryptoSystemVLD0 { // NoAuth Encrypt/Decrypt fn crypt_in_place_no_auth( &self, - body: &mut Vec, - nonce: &Nonce, + body: &mut [u8], + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) { - let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), nonce.into()); cipher.apply_keystream(body); } @@ -269,17 +310,17 @@ impl CryptoSystem for CryptoSystemVLD0 { &self, in_buf: &[u8], out_buf: &mut [u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) { - let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into()); + let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), nonce.into()); cipher.apply_keystream_b2b(in_buf, out_buf).unwrap(); } fn crypt_no_auth_aligned_8( &self, in_buf: &[u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) -> Vec { let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; @@ -290,7 +331,7 @@ impl CryptoSystem for CryptoSystemVLD0 { fn crypt_no_auth_unaligned( &self, in_buf: &[u8], - nonce: &Nonce, + nonce: &[u8; NONCE_LENGTH], shared_secret: &SharedSecret, ) -> Vec { let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) }; diff --git a/veilid-core/src/intf/mod.rs b/veilid-core/src/intf/mod.rs index 842e5351..81756b61 100644 --- a/veilid-core/src/intf/mod.rs +++ b/veilid-core/src/intf/mod.rs @@ -1,4 +1,4 @@ -mod table_db; +use super::*; #[cfg(target_arch = "wasm32")] mod wasm; @@ -8,3 +8,5 @@ pub use wasm::*; mod native; #[cfg(not(target_arch = "wasm32"))] pub use native::*; + +pub static KNOWN_PROTECTED_STORE_KEYS: [&'static str; 2] = ["device_encryption_key", "_test_key"]; diff --git a/veilid-core/src/intf/native/mod.rs b/veilid-core/src/intf/native/mod.rs index 786b2dd1..018cba41 100644 --- a/veilid-core/src/intf/native/mod.rs +++ b/veilid-core/src/intf/native/mod.rs @@ -1,13 +1,13 @@ mod block_store; mod protected_store; mod system; -mod table_store; pub use block_store::*; pub use protected_store::*; pub use system::*; -pub use table_store::*; #[cfg(target_os = "android")] pub mod android; pub mod network_interfaces; + +use super::*; diff --git a/veilid-core/src/intf/native/protected_store.rs 
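A standalone sketch of the unauthenticated XChaCha20 keystream that the crypt_*_no_auth methods apply above, now keyed by a raw 24-byte nonce (NONCE_LENGTH); key and nonce values are illustrative.

use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::XChaCha20;

fn main() {
    let key = [7u8; 32]; // stands in for SharedSecret bytes
    let nonce = [9u8; 24]; // stands in for the &[u8; NONCE_LENGTH] parameter
    let mut body = b"hello veilid".to_vec();

    // Encrypt in place
    let mut cipher = XChaCha20::new(&key.into(), &nonce.into());
    cipher.apply_keystream(&mut body);

    // Applying the same keystream again decrypts
    let mut cipher = XChaCha20::new(&key.into(), &nonce.into());
    cipher.apply_keystream(&mut body);
    assert_eq!(body, b"hello veilid".to_vec());
}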
b/veilid-core/src/intf/native/protected_store.rs index 022a3477..d8e46918 100644 --- a/veilid-core/src/intf/native/protected_store.rs +++ b/veilid-core/src/intf/native/protected_store.rs @@ -1,7 +1,6 @@ -use crate::*; +use super::*; use data_encoding::BASE64URL_NOPAD; use keyring_manager::*; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; use std::path::Path; pub struct ProtectedStoreInner { @@ -30,18 +29,12 @@ impl ProtectedStore { #[instrument(level = "trace", skip(self), err)] pub async fn delete_all(&self) -> EyreResult<()> { - // Delete all known keys - if self.remove_user_secret("node_id").await? { - debug!("deleted protected_store key 'node_id'"); - } - if self.remove_user_secret("node_id_secret").await? { - debug!("deleted protected_store key 'node_id_secret'"); - } - if self.remove_user_secret("_test_key").await? { - debug!("deleted protected_store key '_test_key'"); - } - if self.remove_user_secret("RouteSpecStore").await? { - debug!("deleted protected_store key 'RouteSpecStore'"); + for kpsk in &KNOWN_PROTECTED_STORE_KEYS { + if let Err(e) = self.remove_user_secret(kpsk).await { + error!("failed to delete '{}': {}", kpsk, e); + } else { + debug!("deleted table '{}'", kpsk); + } } Ok(()) } @@ -65,9 +58,8 @@ impl ProtectedStore { || c.protected_store.allow_insecure_fallback) && inner.keyring_manager.is_none() { - let insecure_fallback_directory = - Path::new(&c.protected_store.insecure_fallback_directory); - let insecure_keyring_file = insecure_fallback_directory.to_owned().join(format!( + let directory = Path::new(&c.protected_store.directory); + let insecure_keyring_file = directory.to_owned().join(format!( "insecure_keyring{}", if c.namespace.is_empty() { "".to_owned() @@ -153,7 +145,7 @@ impl ProtectedStore { pub async fn save_user_secret_rkyv(&self, key: K, value: &T) -> EyreResult where K: AsRef + fmt::Debug, - T: RkyvSerialize>, + T: RkyvSerialize, { let v = to_rkyv(value)?; self.save_user_secret(key, &v).await @@ -175,9 +167,8 @@ impl ProtectedStore { K: AsRef + fmt::Debug, T: RkyvArchive, ::Archived: - for<'t> bytecheck::CheckBytes>, - ::Archived: - RkyvDeserialize, + for<'t> CheckBytes>, + ::Archived: RkyvDeserialize, { let out = self.load_user_secret(key).await?; let b = match out { diff --git a/veilid-core/src/intf/native/system.rs b/veilid-core/src/intf/native/system.rs index 5491bff4..9855c256 100644 --- a/veilid-core/src/intf/native/system.rs +++ b/veilid-core/src/intf/native/system.rs @@ -2,7 +2,7 @@ use crate::*; -pub async fn get_outbound_relay_peer() -> Option { +pub async fn get_outbound_relay_peer() -> Option { panic!("Native Veilid should never require an outbound relay"); } diff --git a/veilid-core/src/intf/native/table_store.rs b/veilid-core/src/intf/native/table_store.rs deleted file mode 100644 index a09b8e4d..00000000 --- a/veilid-core/src/intf/native/table_store.rs +++ /dev/null @@ -1,147 +0,0 @@ -use crate::intf::table_db::TableDBInner; -pub use crate::intf::table_db::{TableDB, TableDBTransaction}; -use crate::*; -use keyvaluedb_sqlite::*; -use std::path::PathBuf; - -struct TableStoreInner { - opened: BTreeMap>>, -} - -/// Veilid Table Storage -/// Database for storing key value pairs persistently across runs -#[derive(Clone)] -pub struct TableStore { - config: VeilidConfig, - inner: Arc>, -} - -impl TableStore { - fn new_inner() -> TableStoreInner { - TableStoreInner { - opened: BTreeMap::new(), - } - } - pub(crate) fn new(config: VeilidConfig) -> Self { - Self { - config, - inner: 
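Both the native and wasm delete_all() now walk the single registry defined in intf/mod.rs, so any new persistent key needs to be listed there; the third entry below is purely hypothetical and only illustrates how the registry would grow.

pub static KNOWN_PROTECTED_STORE_KEYS: [&'static str; 3] = [
    "device_encryption_key",
    "_test_key",
    "some_new_key", // hypothetical key shown only to illustrate the pattern
];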
Arc::new(Mutex::new(Self::new_inner())), - } - } - - /// Delete all known tables - pub async fn delete_all(&self) { - if let Err(e) = self.delete("crypto_caches").await { - error!("failed to delete 'crypto_caches': {}", e); - } - if let Err(e) = self.delete("RouteSpecStore").await { - error!("failed to delete 'RouteSpecStore': {}", e); - } - if let Err(e) = self.delete("routing_table").await { - error!("failed to delete 'routing_table': {}", e); - } - } - - pub(crate) async fn init(&self) -> EyreResult<()> { - Ok(()) - } - - pub(crate) async fn terminate(&self) { - assert!( - self.inner.lock().opened.is_empty(), - "all open databases should have been closed" - ); - } - - pub(crate) fn on_table_db_drop(&self, table: String) { - let mut inner = self.inner.lock(); - if inner.opened.remove(&table).is_none() { - unreachable!("should have removed an item"); - } - } - - fn get_dbpath(&self, table: &str) -> EyreResult { - if !table - .chars() - .all(|c| char::is_alphanumeric(c) || c == '_' || c == '-') - { - bail!("table name '{}' is invalid", table); - } - let c = self.config.get(); - let tablestoredir = c.table_store.directory.clone(); - std::fs::create_dir_all(&tablestoredir).wrap_err("failed to create tablestore path")?; - - let dbpath: PathBuf = [tablestoredir, String::from(table)].iter().collect(); - Ok(dbpath) - } - - fn get_table_name(&self, table: &str) -> EyreResult { - if !table - .chars() - .all(|c| char::is_alphanumeric(c) || c == '_' || c == '-') - { - bail!("table name '{}' is invalid", table); - } - let c = self.config.get(); - let namespace = c.namespace.clone(); - Ok(if namespace.is_empty() { - table.to_string() - } else { - format!("_ns_{}_{}", namespace, table) - }) - } - - /// Get or create a TableDB database table. If the column count is greater than an - /// existing TableDB's column count, the database will be upgraded to add the missing columns - pub async fn open(&self, name: &str, column_count: u32) -> EyreResult { - let table_name = self.get_table_name(name)?; - - let mut inner = self.inner.lock(); - if let Some(table_db_weak_inner) = inner.opened.get(&table_name) { - match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) { - Some(tdb) => { - return Ok(tdb); - } - None => { - inner.opened.remove(&table_name); - } - }; - } - - let dbpath = self.get_dbpath(&table_name)?; - - // Ensure permissions are correct - ensure_file_private_owner(&dbpath)?; - - let cfg = DatabaseConfig::with_columns(column_count); - let db = Database::open(&dbpath, cfg).wrap_err("failed to open tabledb")?; - - // Ensure permissions are correct - ensure_file_private_owner(&dbpath)?; - - trace!( - "opened table store '{}' at path '{:?}' with {} columns", - name, - dbpath, - column_count - ); - let table_db = TableDB::new(table_name.clone(), self.clone(), db); - - inner.opened.insert(table_name, table_db.weak_inner()); - - Ok(table_db) - } - - /// Delete a TableDB table by name - pub async fn delete(&self, name: &str) -> EyreResult { - let table_name = self.get_table_name(name)?; - - let inner = self.inner.lock(); - if inner.opened.contains_key(&table_name) { - bail!("Not deleting table that is still opened"); - } - let dbpath = self.get_dbpath(&table_name)?; - let ret = std::fs::remove_file(dbpath).is_ok(); - Ok(ret) - } -} diff --git a/veilid-core/src/intf/table_db.rs b/veilid-core/src/intf/table_db.rs deleted file mode 100644 index 6c93dcbf..00000000 --- a/veilid-core/src/intf/table_db.rs +++ /dev/null @@ -1,276 +0,0 @@ -use crate::*; -use rkyv::{Archive as RkyvArchive, Deserialize as 
RkyvDeserialize, Serialize as RkyvSerialize}; - -cfg_if! { - if #[cfg(target_arch = "wasm32")] { - use keyvaluedb_web::*; - use keyvaluedb::*; - } else { - use keyvaluedb_sqlite::*; - use keyvaluedb::*; - } -} - -pub struct TableDBInner { - table: String, - table_store: TableStore, - database: Database, -} - -impl fmt::Debug for TableDBInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "TableDBInner(table={})", self.table) - } -} - -impl Drop for TableDBInner { - fn drop(&mut self) { - self.table_store.on_table_db_drop(self.table.clone()); - } -} - -#[derive(Debug, Clone)] -pub struct TableDB { - inner: Arc>, -} - -impl TableDB { - pub(super) fn new(table: String, table_store: TableStore, database: Database) -> Self { - Self { - inner: Arc::new(Mutex::new(TableDBInner { - table, - table_store, - database, - })), - } - } - - pub(super) fn try_new_from_weak_inner(weak_inner: Weak>) -> Option { - weak_inner.upgrade().map(|table_db_inner| Self { - inner: table_db_inner, - }) - } - - pub(super) fn weak_inner(&self) -> Weak> { - Arc::downgrade(&self.inner) - } - - /// Get the total number of columns in the TableDB - pub fn get_column_count(&self) -> EyreResult { - let db = &self.inner.lock().database; - db.num_columns().wrap_err("failed to get column count: {}") - } - - /// Get the list of keys in a column of the TableDB - pub fn get_keys(&self, col: u32) -> EyreResult>> { - let db = &self.inner.lock().database; - let mut out: Vec> = Vec::new(); - db.iter(col, None, &mut |kv| { - out.push(kv.0.clone().into_boxed_slice()); - Ok(true) - }) - .wrap_err("failed to get keys for column")?; - Ok(out) - } - - /// Start a TableDB write transaction. The transaction object must be committed or rolled back before dropping. - pub fn transact(&self) -> TableDBTransaction { - let dbt = { - let db = &self.inner.lock().database; - db.transaction() - }; - TableDBTransaction::new(self.clone(), dbt) - } - - /// Store a key with a value in a column in the TableDB. Performs a single transaction immediately. - pub async fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> { - let db = self.inner.lock().database.clone(); - let mut dbt = db.transaction(); - dbt.put(col, key, value); - db.write(dbt).await.wrap_err("failed to store key") - } - - /// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately. - pub async fn store_rkyv(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()> - where - T: RkyvSerialize>, - { - let v = to_rkyv(value)?; - - let db = self.inner.lock().database.clone(); - let mut dbt = db.transaction(); - dbt.put(col, key, v.as_slice()); - db.write(dbt).await.wrap_err("failed to store key") - } - - /// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately. - pub async fn store_json(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()> - where - T: serde::Serialize, - { - let v = serde_json::to_vec(value)?; - - let db = self.inner.lock().database.clone(); - let mut dbt = db.transaction(); - dbt.put(col, key, v.as_slice()); - db.write(dbt).await.wrap_err("failed to store key") - } - - /// Read a key from a column in the TableDB immediately. 
- pub fn load(&self, col: u32, key: &[u8]) -> EyreResult>> { - let db = self.inner.lock().database.clone(); - db.get(col, key).wrap_err("failed to get key") - } - - /// Read an rkyv key from a column in the TableDB immediately - pub fn load_rkyv(&self, col: u32, key: &[u8]) -> EyreResult> - where - T: RkyvArchive, - ::Archived: - for<'t> bytecheck::CheckBytes>, - ::Archived: - RkyvDeserialize, - { - let db = self.inner.lock().database.clone(); - let out = db.get(col, key).wrap_err("failed to get key")?; - let b = match out { - Some(v) => v, - None => { - return Ok(None); - } - }; - let obj = from_rkyv(b)?; - Ok(Some(obj)) - } - - /// Read an serde-json key from a column in the TableDB immediately - pub fn load_json(&self, col: u32, key: &[u8]) -> EyreResult> - where - T: for<'de> serde::Deserialize<'de>, - { - let db = self.inner.lock().database.clone(); - let out = db.get(col, key).wrap_err("failed to get key")?; - let b = match out { - Some(v) => v, - None => { - return Ok(None); - } - }; - let obj = serde_json::from_slice(&b)?; - Ok(Some(obj)) - } - - /// Delete key with from a column in the TableDB - pub async fn delete(&self, col: u32, key: &[u8]) -> EyreResult { - let db = self.inner.lock().database.clone(); - let found = db.get(col, key).wrap_err("failed to get key")?; - match found { - None => Ok(false), - Some(_) => { - let mut dbt = db.transaction(); - dbt.delete(col, key); - db.write(dbt).await.wrap_err("failed to delete key")?; - Ok(true) - } - } - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -struct TableDBTransactionInner { - dbt: Option, -} - -impl fmt::Debug for TableDBTransactionInner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "TableDBTransactionInner({})", - match &self.dbt { - Some(dbt) => format!("len={}", dbt.ops.len()), - None => "".to_owned(), - } - ) - } -} - -/// A TableDB transaction -/// Atomically commits a group of writes or deletes to the TableDB -#[derive(Debug, Clone)] -pub struct TableDBTransaction { - db: TableDB, - inner: Arc>, -} - -impl TableDBTransaction { - fn new(db: TableDB, dbt: DBTransaction) -> Self { - Self { - db, - inner: Arc::new(Mutex::new(TableDBTransactionInner { dbt: Some(dbt) })), - } - } - - /// Commit the transaction. Performs all actions atomically. - pub async fn commit(self) -> EyreResult<()> { - let dbt = { - let mut inner = self.inner.lock(); - inner - .dbt - .take() - .ok_or_else(|| eyre!("transaction already completed"))? - }; - let db = self.db.inner.lock().database.clone(); - db.write(dbt) - .await - .wrap_err("commit failed, transaction lost") - } - - /// Rollback the transaction. Does nothing to the TableDB. 
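A brief usage sketch of the TableDB surface whose implementation is being removed from intf/ here; the calls match the removed code, and the destination (the new table_store module added in lib.rs later in this patch) is an assumption about where the equivalent API lives afterward.

async fn save_example(db: &TableDB) -> EyreResult<()> {
    // Single immediate write (one transaction under the hood)
    db.store(0, b"key1", b"value1").await?;

    // Grouped writes committed atomically
    let tx = db.transact();
    tx.store(0, b"key2", b"value2");
    tx.delete(0, b"key1");
    tx.commit().await
}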
- pub fn rollback(self) { - let mut inner = self.inner.lock(); - inner.dbt = None; - } - - /// Store a key with a value in a column in the TableDB - pub fn store(&self, col: u32, key: &[u8], value: &[u8]) { - let mut inner = self.inner.lock(); - inner.dbt.as_mut().unwrap().put(col, key, value); - } - - /// Store a key in rkyv format with a value in a column in the TableDB - pub fn store_rkyv(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()> - where - T: RkyvSerialize>, - { - let v = to_rkyv(value)?; - let mut inner = self.inner.lock(); - inner.dbt.as_mut().unwrap().put(col, key, v.as_slice()); - Ok(()) - } - - /// Store a key in rkyv format with a value in a column in the TableDB - pub fn store_json(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()> - where - T: serde::Serialize, - { - let v = serde_json::to_vec(value)?; - let mut inner = self.inner.lock(); - inner.dbt.as_mut().unwrap().put(col, key, v.as_slice()); - Ok(()) - } - - /// Delete key with from a column in the TableDB - pub fn delete(&self, col: u32, key: &[u8]) { - let mut inner = self.inner.lock(); - inner.dbt.as_mut().unwrap().delete(col, key); - } -} - -impl Drop for TableDBTransactionInner { - fn drop(&mut self) { - if self.dbt.is_some() { - warn!("Dropped transaction without commit or rollback"); - } - } -} diff --git a/veilid-core/src/intf/wasm/mod.rs b/veilid-core/src/intf/wasm/mod.rs index 53faa230..b69ada7b 100644 --- a/veilid-core/src/intf/wasm/mod.rs +++ b/veilid-core/src/intf/wasm/mod.rs @@ -1,9 +1,9 @@ mod block_store; mod protected_store; mod system; -mod table_store; pub use block_store::*; pub use protected_store::*; pub use system::*; -pub use table_store::*; + +use super::*; diff --git a/veilid-core/src/intf/wasm/protected_store.rs b/veilid-core/src/intf/wasm/protected_store.rs index 23288fbc..39c9bacd 100644 --- a/veilid-core/src/intf/wasm/protected_store.rs +++ b/veilid-core/src/intf/wasm/protected_store.rs @@ -1,6 +1,9 @@ -use crate::*; +use super::*; use data_encoding::BASE64URL_NOPAD; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; +use rkyv::{ + bytecheck::CheckBytes, Archive as RkyvArchive, Deserialize as RkyvDeserialize, + Serialize as RkyvSerialize, +}; use web_sys::*; @@ -16,18 +19,12 @@ impl ProtectedStore { #[instrument(level = "trace", skip(self), err)] pub async fn delete_all(&self) -> EyreResult<()> { - // Delete all known keys - if self.remove_user_secret("node_id").await? { - debug!("deleted protected_store key 'node_id'"); - } - if self.remove_user_secret("node_id_secret").await? { - debug!("deleted protected_store key 'node_id_secret'"); - } - if self.remove_user_secret("_test_key").await? { - debug!("deleted protected_store key '_test_key'"); - } - if self.remove_user_secret("RouteSpecStore").await? 
{ - debug!("deleted protected_store key 'RouteSpecStore'"); + for kpsk in &KNOWN_PROTECTED_STORE_KEYS { + if let Err(e) = self.remove_user_secret(kpsk).await { + error!("failed to delete '{}': {}", kpsk, e); + } else { + debug!("deleted table '{}'", kpsk); + } } Ok(()) } @@ -133,7 +130,7 @@ impl ProtectedStore { pub async fn save_user_secret_rkyv(&self, key: K, value: &T) -> EyreResult where K: AsRef + fmt::Debug, - T: RkyvSerialize>, + T: RkyvSerialize, { let v = to_rkyv(value)?; self.save_user_secret(key, &v).await @@ -155,9 +152,8 @@ impl ProtectedStore { K: AsRef + fmt::Debug, T: RkyvArchive, ::Archived: - for<'t> bytecheck::CheckBytes>, - ::Archived: - RkyvDeserialize, + for<'t> CheckBytes>, + ::Archived: RkyvDeserialize, { let out = self.load_user_secret(key).await?; let b = match out { diff --git a/veilid-core/src/intf/wasm/system.rs b/veilid-core/src/intf/wasm/system.rs index 95e4e544..de8e158a 100644 --- a/veilid-core/src/intf/wasm/system.rs +++ b/veilid-core/src/intf/wasm/system.rs @@ -2,7 +2,7 @@ use crate::*; //use js_sys::*; -pub async fn get_outbound_relay_peer() -> Option { +pub async fn get_outbound_relay_peer() -> Option { // unimplemented! None } diff --git a/veilid-core/src/intf/wasm/table_store.rs b/veilid-core/src/intf/wasm/table_store.rs deleted file mode 100644 index f401220d..00000000 --- a/veilid-core/src/intf/wasm/table_store.rs +++ /dev/null @@ -1,151 +0,0 @@ -use crate::intf::table_db::TableDBInner; -pub use crate::intf::table_db::{TableDB, TableDBTransaction}; -use crate::*; -use keyvaluedb_web::*; - -struct TableStoreInner { - opened: BTreeMap>>, -} - -#[derive(Clone)] -pub struct TableStore { - config: VeilidConfig, - inner: Arc>, - async_lock: Arc>, -} - -impl TableStore { - fn new_inner() -> TableStoreInner { - TableStoreInner { - opened: BTreeMap::new(), - } - } - pub(crate) fn new(config: VeilidConfig) -> Self { - Self { - config, - inner: Arc::new(Mutex::new(Self::new_inner())), - async_lock: Arc::new(AsyncMutex::new(())), - } - } - - /// Delete all known tables - pub async fn delete_all(&self) { - if let Err(e) = self.delete("crypto_caches").await { - error!("failed to delete 'crypto_caches': {}", e); - } - if let Err(e) = self.delete("RouteSpecStore").await { - error!("failed to delete 'RouteSpecStore': {}", e); - } - if let Err(e) = self.delete("routing_table").await { - error!("failed to delete 'routing_table': {}", e); - } - } - - pub(crate) async fn init(&self) -> EyreResult<()> { - let _async_guard = self.async_lock.lock().await; - Ok(()) - } - - pub(crate) async fn terminate(&self) { - let _async_guard = self.async_lock.lock().await; - assert!( - self.inner.lock().opened.len() == 0, - "all open databases should have been closed" - ); - } - - pub(crate) fn on_table_db_drop(&self, table: String) { - let mut inner = self.inner.lock(); - match inner.opened.remove(&table) { - Some(_) => (), - None => { - assert!(false, "should have removed an item"); - } - } - } - - fn get_table_name(&self, table: &str) -> EyreResult { - if !table - .chars() - .all(|c| char::is_alphanumeric(c) || c == '_' || c == '-') - { - bail!("table name '{}' is invalid", table); - } - let c = self.config.get(); - let namespace = c.namespace.clone(); - Ok(if namespace.len() == 0 { - format!("{}", table) - } else { - format!("_ns_{}_{}", namespace, table) - }) - } - - /// Get or create a TableDB database table. 
If the column count is greater than an - /// existing TableDB's column count, the database will be upgraded to add the missing columns - pub async fn open(&self, name: &str, column_count: u32) -> EyreResult { - let _async_guard = self.async_lock.lock().await; - let table_name = self.get_table_name(name)?; - - { - let mut inner = self.inner.lock(); - if let Some(table_db_weak_inner) = inner.opened.get(&table_name) { - match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) { - Some(tdb) => { - return Ok(tdb); - } - None => { - inner.opened.remove(&table_name); - } - }; - } - } - let db = Database::open(table_name.clone(), column_count) - .await - .wrap_err("failed to open tabledb")?; - trace!( - "opened table store '{}' with table name '{:?}' with {} columns", - name, - table_name, - column_count - ); - - let table_db = TableDB::new(table_name.clone(), self.clone(), db); - - { - let mut inner = self.inner.lock(); - inner.opened.insert(table_name, table_db.weak_inner()); - } - - Ok(table_db) - } - - /// Delete a TableDB table by name - pub async fn delete(&self, name: &str) -> EyreResult { - let _async_guard = self.async_lock.lock().await; - trace!("TableStore::delete {}", name); - let table_name = self.get_table_name(name)?; - - { - let inner = self.inner.lock(); - if inner.opened.contains_key(&table_name) { - trace!( - "TableStore::delete {}: Not deleting, still open.", - table_name - ); - bail!("Not deleting table that is still opened"); - } - } - - if is_browser() { - let out = match Database::delete(table_name.clone()).await { - Ok(_) => true, - Err(_) => false, - }; - //.map_err(|e| format!("failed to delete tabledb at: {} ({})", table_name, e))?; - trace!("TableStore::deleted {}", table_name); - Ok(out) - } else { - unimplemented!(); - } - } -} diff --git a/veilid-core/src/lib.rs b/veilid-core/src/lib.rs index f6cc2f76..bca8a6fe 100644 --- a/veilid-core/src/lib.rs +++ b/veilid-core/src/lib.rs @@ -28,8 +28,9 @@ mod network_manager; mod receipt_manager; mod routing_table; mod rpc_processor; +mod storage_manager; +mod table_store; mod veilid_api; -#[macro_use] mod veilid_config; mod veilid_layer_filter; @@ -40,6 +41,14 @@ pub use self::veilid_config::*; pub use self::veilid_layer_filter::*; pub use veilid_tools as tools; +use enumset::*; +use rkyv::{ + bytecheck, bytecheck::CheckBytes, de::deserializers::SharedDeserializeMap, with::Skip, + Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize, +}; +type RkyvDefaultValidator<'t> = rkyv::validation::validators::DefaultValidator<'t>; +use serde::*; + pub mod veilid_capnp { include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs")); } diff --git a/veilid-core/src/network_manager/connection_table.rs b/veilid-core/src/network_manager/connection_table.rs index 0a09eb21..b0879150 100644 --- a/veilid-core/src/network_manager/connection_table.rs +++ b/veilid-core/src/network_manager/connection_table.rs @@ -187,7 +187,7 @@ impl ConnectionTable { pub fn get_last_connection_by_remote(&self, remote: PeerAddress) -> Option { let mut inner = self.inner.lock(); - let id = inner.ids_by_remote.get(&remote).map(|v| v[(v.len() - 1)])?; + let id = inner.ids_by_remote.get(&remote).map(|v| v[v.len() - 1])?; let protocol_index = Self::protocol_to_index(remote.protocol_type()); let out = inner.conn_by_id[protocol_index].get(&id).unwrap(); Some(out.get_handle()) diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index a01b5bdd..3a57e134 100644 --- 
a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -11,6 +11,7 @@ mod connection_manager; mod connection_table; mod network_connection; mod tasks; +mod types; pub mod tests; @@ -18,6 +19,7 @@ pub mod tests; pub use connection_manager::*; pub use network_connection::*; +pub use types::*; //////////////////////////////////////////////////////////////////////////////////////// use connection_handle::*; @@ -31,6 +33,7 @@ use native::*; use receipt_manager::*; use routing_table::*; use rpc_processor::*; +use storage_manager::*; #[cfg(target_arch = "wasm32")] use wasm::*; @@ -144,6 +147,7 @@ struct NetworkManagerInner { struct NetworkManagerUnlockedInner { // Handles config: VeilidConfig, + storage_manager: StorageManager, protected_store: ProtectedStore, table_store: TableStore, block_store: BlockStore, @@ -174,6 +178,7 @@ impl NetworkManager { } fn new_unlocked_inner( config: VeilidConfig, + storage_manager: StorageManager, protected_store: ProtectedStore, table_store: TableStore, block_store: BlockStore, @@ -181,6 +186,7 @@ impl NetworkManager { ) -> NetworkManagerUnlockedInner { NetworkManagerUnlockedInner { config, + storage_manager, protected_store, table_store, block_store, @@ -195,6 +201,7 @@ impl NetworkManager { pub fn new( config: VeilidConfig, + storage_manager: StorageManager, protected_store: ProtectedStore, table_store: TableStore, block_store: BlockStore, @@ -204,6 +211,7 @@ impl NetworkManager { inner: Arc::new(Mutex::new(Self::new_inner())), unlocked_inner: Arc::new(Self::new_unlocked_inner( config, + storage_manager, protected_store, table_store, block_store, @@ -211,7 +219,7 @@ impl NetworkManager { )), }; - this.start_tasks(); + this.setup_tasks(); this } @@ -224,6 +232,9 @@ impl NetworkManager { { f(&*self.unlocked_inner.config.get()) } + pub fn storage_manager(&self) -> StorageManager { + self.unlocked_inner.storage_manager.clone() + } pub fn protected_store(&self) -> ProtectedStore { self.unlocked_inner.protected_store.clone() } @@ -368,7 +379,7 @@ impl NetworkManager { debug!("starting network manager shutdown"); // Cancel all tasks - self.stop_tasks().await; + self.cancel_tasks().await; // Shutdown network components if they started up debug!("shutting down network components"); @@ -461,7 +472,7 @@ impl NetworkManager { will_validate_dial_info: false, }; }; - let own_node_info = own_peer_info.signed_node_info.node_info(); + let own_node_info = own_peer_info.signed_node_info().node_info(); let will_route = own_node_info.can_inbound_relay(); // xxx: eventually this may have more criteria added let will_tunnel = own_node_info.can_inbound_relay(); // xxx: we may want to restrict by battery life and network bandwidth at some point @@ -488,7 +499,7 @@ impl NetworkManager { }; }; - let own_node_info = own_peer_info.signed_node_info.node_info(); + let own_node_info = own_peer_info.signed_node_info().node_info(); let will_relay = own_node_info.can_inbound_relay(); let will_validate_dial_info = own_node_info.can_validate_dial_info(); @@ -1389,7 +1400,7 @@ impl NetworkManager { let some_relay_nr = if self.check_client_whitelist(sender_id) { // Full relay allowed, do a full resolve_node - match rpc.resolve_node(recipient_id.value).await { + match rpc.resolve_node(recipient_id, SafetySelection::Unsafe(Sequencing::default())).await { Ok(v) => v, Err(e) => { log_net!(debug "failed to resolve recipient node for relay, dropping outbound relayed packet: {}" ,e); @@ -1551,8 +1562,8 @@ impl NetworkManager { if let Some(nr) = 
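Sketch of the widened constructor call implied by the changes above; the surrounding handles are assumed to be in scope at the real call site elsewhere in this patch.

let network_manager = NetworkManager::new(
    config.clone(),
    storage_manager.clone(), // new: record storage is now threaded through the network manager
    protected_store.clone(),
    table_store.clone(),
    block_store.clone(),
    crypto.clone(),
);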
routing_table.lookup_node_ref(k) { let peer_stats = nr.peer_stats(); let peer = PeerTableData { - node_ids: nr.node_ids().iter().map(|x| x.to_string()).collect(), - peer_address: v.last_connection.remote(), + node_ids: nr.node_ids().iter().copied().collect(), + peer_address: v.last_connection.remote().to_string(), peer_stats, }; out.push(peer); diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index c38f799b..319a68c5 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -645,7 +645,7 @@ impl Network { log_net!(debug "enable address {:?} as ipv4", addr); inner.enable_ipv4 = true; } else if addr.is_ipv6() { - let address = crate::Address::from_ip_addr(addr); + let address = Address::from_ip_addr(addr); if address.is_global() { log_net!(debug "enable address {:?} as ipv6 global", address); inner.enable_ipv6_global = true; diff --git a/veilid-core/src/network_manager/tasks/mod.rs b/veilid-core/src/network_manager/tasks/mod.rs index 03f76a42..35e3e99c 100644 --- a/veilid-core/src/network_manager/tasks/mod.rs +++ b/veilid-core/src/network_manager/tasks/mod.rs @@ -4,7 +4,7 @@ pub mod rolling_transfers; use super::*; impl NetworkManager { - pub(crate) fn start_tasks(&self) { + pub(crate) fn setup_tasks(&self) { // Set rolling transfers tick task { let this = self.clone(); @@ -67,7 +67,7 @@ impl NetworkManager { Ok(()) } - pub(crate) async fn stop_tasks(&self) { + pub(crate) async fn cancel_tasks(&self) { debug!("stopping rolling transfers task"); if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await { warn!("rolling_transfers_task not stopped: {}", e); diff --git a/veilid-core/src/network_manager/tests/mod.rs b/veilid-core/src/network_manager/tests/mod.rs index f0c7391d..64b2b67f 100644 --- a/veilid-core/src/network_manager/tests/mod.rs +++ b/veilid-core/src/network_manager/tests/mod.rs @@ -1,2 +1,4 @@ pub mod test_connection_table; +pub mod test_signed_node_info; + use super::*; diff --git a/veilid-core/src/network_manager/tests/test_signed_node_info.rs b/veilid-core/src/network_manager/tests/test_signed_node_info.rs new file mode 100644 index 00000000..c6a943d1 --- /dev/null +++ b/veilid-core/src/network_manager/tests/test_signed_node_info.rs @@ -0,0 +1,145 @@ +use super::*; +use crate::tests::common::test_veilid_config::*; + +pub async fn test_signed_node_info() { + info!("--- test_signed_node_info ---"); + + let (update_callback, config_callback) = setup_veilid_core(); + let api = api_startup(update_callback, config_callback) + .await + .expect("startup failed"); + + let crypto = api.crypto().unwrap(); + for ck in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(ck).unwrap(); + + // Test direct + let node_info = NodeInfo::new( + NetworkClass::InboundCapable, + ProtocolTypeSet::all(), + AddressTypeSet::all(), + VALID_ENVELOPE_VERSIONS.to_vec(), + VALID_CRYPTO_KINDS.to_vec(), + vec![DialInfoDetail { + class: DialInfoClass::Mapped, + dial_info: DialInfo::udp(SocketAddress::default()), + }], + ); + + // Test correct validation + let keypair = vcrypto.generate_keypair(); + let sni = SignedDirectNodeInfo::make_signatures( + crypto.clone(), + vec![TypedKeyPair::new(ck, keypair)], + node_info.clone(), + ) + .unwrap(); + let tks: TypedKeySet = TypedKey::new(ck, keypair.key).into(); + let oldtkslen = tks.len(); + let sdni = SignedDirectNodeInfo::new( + node_info.clone(), + sni.timestamp(), + sni.signatures().to_vec(), + ); + let tks_validated = sdni.validate(&tks, 
crypto.clone()).unwrap(); + assert_eq!(tks_validated.len(), oldtkslen); + assert_eq!(tks_validated.len(), sni.signatures().len()); + + // Test incorrect validation + let keypair1 = vcrypto.generate_keypair(); + let tks1: TypedKeySet = TypedKey::new(ck, keypair1.key).into(); + let sdni = SignedDirectNodeInfo::new( + node_info.clone(), + sni.timestamp(), + sni.signatures().to_vec(), + ); + sdni.validate(&tks1, crypto.clone()).unwrap_err(); + + // Test unsupported cryptosystem validation + let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); + let mut tksfake: TypedKeySet = TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); + let mut sigsfake = sni.signatures().to_vec(); + sigsfake.push(TypedSignature::new(fake_crypto_kind, Signature::default())); + tksfake.add(TypedKey::new(ck, keypair.key)); + let sdnifake = + SignedDirectNodeInfo::new(node_info.clone(), sni.timestamp(), sigsfake.clone()); + let tksfake_validated = sdnifake.validate(&tksfake, crypto.clone()).unwrap(); + assert_eq!(tksfake_validated.len(), 1); + assert_eq!(sdnifake.signatures().len(), sigsfake.len()); + + // Test relayed + let node_info2 = NodeInfo::new( + NetworkClass::OutboundOnly, + ProtocolTypeSet::all(), + AddressTypeSet::all(), + VALID_ENVELOPE_VERSIONS.to_vec(), + VALID_CRYPTO_KINDS.to_vec(), + vec![DialInfoDetail { + class: DialInfoClass::Blocked, + dial_info: DialInfo::udp(SocketAddress::default()), + }], + ); + + // Test correct validation + let keypair2 = vcrypto.generate_keypair(); + let tks2: TypedKeySet = TypedKey::new(ck, keypair2.key).into(); + let oldtks2len = tks2.len(); + + let sni2 = SignedRelayedNodeInfo::make_signatures( + crypto.clone(), + vec![TypedKeyPair::new(ck, keypair2)], + node_info2.clone(), + tks.clone(), + sni.clone(), + ) + .unwrap(); + let srni = SignedRelayedNodeInfo::new( + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp(), + sni2.signatures().to_vec(), + ); + let tks2_validated = srni.validate(&tks2, crypto.clone()).unwrap(); + + assert_eq!(tks2_validated.len(), oldtks2len); + assert_eq!(tks2_validated.len(), sni2.signatures().len()); + + // Test incorrect validation + let keypair3 = vcrypto.generate_keypair(); + let tks3: TypedKeySet = TypedKey::new(ck, keypair3.key).into(); + + let srni = SignedRelayedNodeInfo::new( + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp(), + sni2.signatures().to_vec(), + ); + srni.validate(&tks3, crypto.clone()).unwrap_err(); + + // Test unsupported cryptosystem validation + let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); + let mut tksfake3: TypedKeySet = + TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); + let mut sigsfake3 = sni2.signatures().to_vec(); + sigsfake3.push(TypedSignature::new(fake_crypto_kind, Signature::default())); + tksfake3.add(TypedKey::new(ck, keypair2.key)); + let srnifake = SignedRelayedNodeInfo::new( + node_info2.clone(), + tks.clone(), + sni.clone(), + sni2.timestamp(), + sigsfake3.clone(), + ); + let tksfake3_validated = srnifake.validate(&tksfake3, crypto.clone()).unwrap(); + assert_eq!(tksfake3_validated.len(), 1); + assert_eq!(srnifake.signatures().len(), sigsfake3.len()); + } + + api.shutdown().await; +} + +pub async fn test_all() { + test_signed_node_info().await; +} diff --git a/veilid-core/src/network_manager/types/address.rs b/veilid-core/src/network_manager/types/address.rs new file mode 100644 index 00000000..45ec1c98 --- /dev/null +++ b/veilid-core/src/network_manager/types/address.rs @@ -0,0 +1,130 @@ +use super::*; + +#[derive( + 
Copy, + Clone, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum Address { + IPV4(Ipv4Addr), + IPV6(Ipv6Addr), +} + +impl Default for Address { + fn default() -> Self { + Address::IPV4(Ipv4Addr::new(0, 0, 0, 0)) + } +} + +impl Address { + pub fn from_socket_addr(sa: SocketAddr) -> Address { + match sa { + SocketAddr::V4(v4) => Address::IPV4(*v4.ip()), + SocketAddr::V6(v6) => Address::IPV6(*v6.ip()), + } + } + pub fn from_ip_addr(addr: IpAddr) -> Address { + match addr { + IpAddr::V4(v4) => Address::IPV4(v4), + IpAddr::V6(v6) => Address::IPV6(v6), + } + } + pub fn address_type(&self) -> AddressType { + match self { + Address::IPV4(_) => AddressType::IPV4, + Address::IPV6(_) => AddressType::IPV6, + } + } + pub fn address_string(&self) -> String { + match self { + Address::IPV4(v4) => v4.to_string(), + Address::IPV6(v6) => v6.to_string(), + } + } + pub fn address_string_with_port(&self, port: u16) -> String { + match self { + Address::IPV4(v4) => format!("{}:{}", v4, port), + Address::IPV6(v6) => format!("[{}]:{}", v6, port), + } + } + pub fn is_unspecified(&self) -> bool { + match self { + Address::IPV4(v4) => ipv4addr_is_unspecified(v4), + Address::IPV6(v6) => ipv6addr_is_unspecified(v6), + } + } + pub fn is_global(&self) -> bool { + match self { + Address::IPV4(v4) => ipv4addr_is_global(v4) && !ipv4addr_is_multicast(v4), + Address::IPV6(v6) => ipv6addr_is_unicast_global(v6), + } + } + pub fn is_local(&self) -> bool { + match self { + Address::IPV4(v4) => { + ipv4addr_is_private(v4) + || ipv4addr_is_link_local(v4) + || ipv4addr_is_ietf_protocol_assignment(v4) + } + Address::IPV6(v6) => { + ipv6addr_is_unicast_site_local(v6) + || ipv6addr_is_unicast_link_local(v6) + || ipv6addr_is_unique_local(v6) + } + } + } + pub fn to_ip_addr(&self) -> IpAddr { + match self { + Self::IPV4(a) => IpAddr::V4(*a), + Self::IPV6(a) => IpAddr::V6(*a), + } + } + pub fn to_socket_addr(&self, port: u16) -> SocketAddr { + SocketAddr::new(self.to_ip_addr(), port) + } + pub fn to_canonical(&self) -> Address { + match self { + Address::IPV4(v4) => Address::IPV4(*v4), + Address::IPV6(v6) => match v6.to_ipv4() { + Some(v4) => Address::IPV4(v4), + None => Address::IPV6(*v6), + }, + } + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Address::IPV4(v4) => write!(f, "{}", v4), + Address::IPV6(v6) => write!(f, "{}", v6), + } + } +} + +impl FromStr for Address { + type Err = VeilidAPIError; + fn from_str(host: &str) -> VeilidAPIResult
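// A hedged usage sketch for this Address type (the literal addresses and port
// below are placeholders chosen for illustration, not values taken from this patch):
//
//     let v4 = Address::from_str("8.8.8.8").unwrap();
//     assert_eq!(v4.address_type(), AddressType::IPV4);
//     assert_eq!(v4.address_string_with_port(5150), "8.8.8.8:5150");
//
//     let v6 = Address::from_str("2001:db8::1").unwrap();
//     assert_eq!(v6.address_type(), AddressType::IPV6);
//     assert_eq!(v6.address_string_with_port(5150), "[2001:db8::1]:5150");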
{ + if let Ok(addr) = Ipv4Addr::from_str(host) { + Ok(Address::IPV4(addr)) + } else if let Ok(addr) = Ipv6Addr::from_str(host) { + Ok(Address::IPV6(addr)) + } else { + Err(VeilidAPIError::parse_error( + "Address::from_str failed", + host, + )) + } + } +} diff --git a/veilid-core/src/network_manager/types/address_type.rs b/veilid-core/src/network_manager/types/address_type.rs new file mode 100644 index 00000000..5193654e --- /dev/null +++ b/veilid-core/src/network_manager/types/address_type.rs @@ -0,0 +1,22 @@ +use super::*; + +#[allow(clippy::derive_hash_xor_eq)] +#[derive( + Debug, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, + EnumSetType, +)] +#[enumset(repr = "u8")] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum AddressType { + IPV4, + IPV6, +} +pub type AddressTypeSet = EnumSet; diff --git a/veilid-core/src/network_manager/types/connection_descriptor.rs b/veilid-core/src/network_manager/types/connection_descriptor.rs new file mode 100644 index 00000000..1046838f --- /dev/null +++ b/veilid-core/src/network_manager/types/connection_descriptor.rs @@ -0,0 +1,80 @@ +use super::*; + +/// Represents the 5-tuple of an established connection +/// Not used to specify connections to create, that is reserved for DialInfo +/// +/// ConnectionDescriptors should never be from unspecified local addresses for connection oriented protocols +/// If the medium does not allow local addresses, None should have been used or 'new_no_local' +/// If we are specifying only a port, then the socket's 'local_address()' should have been used, since an +/// established connection is always from a real address to another real address. +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct ConnectionDescriptor { + remote: PeerAddress, + local: Option, +} + +impl ConnectionDescriptor { + pub fn new(remote: PeerAddress, local: SocketAddress) -> Self { + assert!( + !remote.protocol_type().is_connection_oriented() || !local.address().is_unspecified() + ); + + Self { + remote, + local: Some(local), + } + } + pub fn new_no_local(remote: PeerAddress) -> Self { + Self { + remote, + local: None, + } + } + pub fn remote(&self) -> PeerAddress { + self.remote + } + pub fn remote_address(&self) -> &SocketAddress { + self.remote.socket_address() + } + pub fn local(&self) -> Option { + self.local + } + pub fn protocol_type(&self) -> ProtocolType { + self.remote.protocol_type() + } + pub fn address_type(&self) -> AddressType { + self.remote.address_type() + } + pub fn make_dial_info_filter(&self) -> DialInfoFilter { + DialInfoFilter::all() + .with_protocol_type(self.protocol_type()) + .with_address_type(self.address_type()) + } +} + +impl MatchesDialInfoFilter for ConnectionDescriptor { + fn matches_filter(&self, filter: &DialInfoFilter) -> bool { + if !filter.protocol_type_set.contains(self.protocol_type()) { + return false; + } + if !filter.address_type_set.contains(self.address_type()) { + return false; + } + true + } +} diff --git a/veilid-core/src/network_manager/types/dial_info/mod.rs b/veilid-core/src/network_manager/types/dial_info/mod.rs new file mode 100644 index 00000000..a6a12c69 --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info/mod.rs @@ -0,0 +1,522 @@ +mod tcp; +mod udp; +mod ws; +mod wss; + +use super::*; + +pub use tcp::*; +pub use udp::*; +pub use ws::*; +pub 
use wss::*; + +// Keep member order appropriate for sorting < preference +// Must match ProtocolType order +#[derive( + Clone, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +#[serde(tag = "kind")] +pub enum DialInfo { + UDP(DialInfoUDP), + TCP(DialInfoTCP), + WS(DialInfoWS), + WSS(DialInfoWSS), +} +impl Default for DialInfo { + fn default() -> Self { + DialInfo::UDP(DialInfoUDP::default()) + } +} + +impl fmt::Display for DialInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + DialInfo::UDP(di) => write!(f, "udp|{}", di.socket_address), + DialInfo::TCP(di) => write!(f, "tcp|{}", di.socket_address), + DialInfo::WS(di) => { + let url = format!("ws://{}", di.request); + let split_url = SplitUrl::from_str(&url).unwrap(); + match split_url.host { + SplitUrlHost::Hostname(_) => { + write!(f, "ws|{}|{}", di.socket_address.to_ip_addr(), di.request) + } + SplitUrlHost::IpAddr(a) => { + if di.socket_address.to_ip_addr() == a { + write!(f, "ws|{}", di.request) + } else { + panic!("resolved address does not match url: {}", di.request); + } + } + } + } + DialInfo::WSS(di) => { + let url = format!("wss://{}", di.request); + let split_url = SplitUrl::from_str(&url).unwrap(); + match split_url.host { + SplitUrlHost::Hostname(_) => { + write!(f, "wss|{}|{}", di.socket_address.to_ip_addr(), di.request) + } + SplitUrlHost::IpAddr(_) => { + panic!( + "secure websockets can not use ip address in request: {}", + di.request + ); + } + } + } + } + } +} + +impl FromStr for DialInfo { + type Err = VeilidAPIError; + fn from_str(s: &str) -> VeilidAPIResult { + let (proto, rest) = s.split_once('|').ok_or_else(|| { + VeilidAPIError::parse_error("DialInfo::from_str missing protocol '|' separator", s) + })?; + match proto { + "udp" => { + let socket_address = SocketAddress::from_str(rest)?; + Ok(DialInfo::udp(socket_address)) + } + "tcp" => { + let socket_address = SocketAddress::from_str(rest)?; + Ok(DialInfo::tcp(socket_address)) + } + "ws" => { + let url = format!("ws://{}", rest); + let split_url = SplitUrl::from_str(&url).map_err(|e| { + VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url) + })?; + if split_url.scheme != "ws" || !url.starts_with("ws://") { + apibail_parse_error!("incorrect scheme for WS dialinfo", url); + } + let url_port = split_url.port.unwrap_or(80u16); + + match rest.split_once('|') { + Some((sa, rest)) => { + let address = Address::from_str(sa)?; + + DialInfo::try_ws( + SocketAddress::new(address, url_port), + format!("ws://{}", rest), + ) + } + None => { + let address = Address::from_str(&split_url.host.to_string())?; + DialInfo::try_ws( + SocketAddress::new(address, url_port), + format!("ws://{}", rest), + ) + } + } + } + "wss" => { + let url = format!("wss://{}", rest); + let split_url = SplitUrl::from_str(&url).map_err(|e| { + VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url) + })?; + if split_url.scheme != "wss" || !url.starts_with("wss://") { + apibail_parse_error!("incorrect scheme for WSS dialinfo", url); + } + let url_port = split_url.port.unwrap_or(443u16); + + let (a, rest) = rest.split_once('|').ok_or_else(|| { + VeilidAPIError::parse_error( + "DialInfo::from_str missing socket address '|' separator", + s, + ) + })?; + + let address = Address::from_str(a)?; + DialInfo::try_wss( + SocketAddress::new(address, url_port), + format!("wss://{}", rest), + ) + 
} + _ => Err(VeilidAPIError::parse_error( + "DialInfo::from_str has invalid scheme", + s, + )), + } + } +} + +impl DialInfo { + pub fn udp_from_socketaddr(socket_addr: SocketAddr) -> Self { + Self::UDP(DialInfoUDP { + socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(), + }) + } + pub fn tcp_from_socketaddr(socket_addr: SocketAddr) -> Self { + Self::TCP(DialInfoTCP { + socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(), + }) + } + pub fn udp(socket_address: SocketAddress) -> Self { + Self::UDP(DialInfoUDP { + socket_address: socket_address.to_canonical(), + }) + } + pub fn tcp(socket_address: SocketAddress) -> Self { + Self::TCP(DialInfoTCP { + socket_address: socket_address.to_canonical(), + }) + } + pub fn try_ws(socket_address: SocketAddress, url: String) -> VeilidAPIResult { + let split_url = SplitUrl::from_str(&url).map_err(|e| { + VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url) + })?; + if split_url.scheme != "ws" || !url.starts_with("ws://") { + apibail_parse_error!("incorrect scheme for WS dialinfo", url); + } + let url_port = split_url.port.unwrap_or(80u16); + if url_port != socket_address.port() { + apibail_parse_error!("socket address port doesn't match url port", url); + } + if let SplitUrlHost::IpAddr(a) = split_url.host { + if socket_address.to_ip_addr() != a { + apibail_parse_error!( + format!("request address does not match socket address: {}", a), + socket_address + ); + } + } + Ok(Self::WS(DialInfoWS { + socket_address: socket_address.to_canonical(), + request: url[5..].to_string(), + })) + } + pub fn try_wss(socket_address: SocketAddress, url: String) -> VeilidAPIResult { + let split_url = SplitUrl::from_str(&url).map_err(|e| { + VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url) + })?; + if split_url.scheme != "wss" || !url.starts_with("wss://") { + apibail_parse_error!("incorrect scheme for WSS dialinfo", url); + } + let url_port = split_url.port.unwrap_or(443u16); + if url_port != socket_address.port() { + apibail_parse_error!("socket address port doesn't match url port", url); + } + if !matches!(split_url.host, SplitUrlHost::Hostname(_)) { + apibail_parse_error!( + "WSS url can not use address format, only hostname format", + url + ); + } + Ok(Self::WSS(DialInfoWSS { + socket_address: socket_address.to_canonical(), + request: url[6..].to_string(), + })) + } + pub fn protocol_type(&self) -> ProtocolType { + match self { + Self::UDP(_) => ProtocolType::UDP, + Self::TCP(_) => ProtocolType::TCP, + Self::WS(_) => ProtocolType::WS, + Self::WSS(_) => ProtocolType::WSS, + } + } + pub fn address_type(&self) -> AddressType { + self.socket_address().address_type() + } + pub fn address(&self) -> Address { + match self { + Self::UDP(di) => di.socket_address.address(), + Self::TCP(di) => di.socket_address.address(), + Self::WS(di) => di.socket_address.address(), + Self::WSS(di) => di.socket_address.address(), + } + } + pub fn set_address(&mut self, address: Address) { + match self { + Self::UDP(di) => di.socket_address.set_address(address), + Self::TCP(di) => di.socket_address.set_address(address), + Self::WS(di) => di.socket_address.set_address(address), + Self::WSS(di) => di.socket_address.set_address(address), + } + } + pub fn socket_address(&self) -> SocketAddress { + match self { + Self::UDP(di) => di.socket_address, + Self::TCP(di) => di.socket_address, + Self::WS(di) => di.socket_address, + Self::WSS(di) => di.socket_address, + } + } + pub fn to_ip_addr(&self) -> IpAddr { 
+ match self { + Self::UDP(di) => di.socket_address.to_ip_addr(), + Self::TCP(di) => di.socket_address.to_ip_addr(), + Self::WS(di) => di.socket_address.to_ip_addr(), + Self::WSS(di) => di.socket_address.to_ip_addr(), + } + } + pub fn port(&self) -> u16 { + match self { + Self::UDP(di) => di.socket_address.port(), + Self::TCP(di) => di.socket_address.port(), + Self::WS(di) => di.socket_address.port(), + Self::WSS(di) => di.socket_address.port(), + } + } + pub fn set_port(&mut self, port: u16) { + match self { + Self::UDP(di) => di.socket_address.set_port(port), + Self::TCP(di) => di.socket_address.set_port(port), + Self::WS(di) => di.socket_address.set_port(port), + Self::WSS(di) => di.socket_address.set_port(port), + } + } + pub fn to_socket_addr(&self) -> SocketAddr { + match self { + Self::UDP(di) => di.socket_address.to_socket_addr(), + Self::TCP(di) => di.socket_address.to_socket_addr(), + Self::WS(di) => di.socket_address.to_socket_addr(), + Self::WSS(di) => di.socket_address.to_socket_addr(), + } + } + pub fn to_peer_address(&self) -> PeerAddress { + match self { + Self::UDP(di) => PeerAddress::new(di.socket_address, ProtocolType::UDP), + Self::TCP(di) => PeerAddress::new(di.socket_address, ProtocolType::TCP), + Self::WS(di) => PeerAddress::new(di.socket_address, ProtocolType::WS), + Self::WSS(di) => PeerAddress::new(di.socket_address, ProtocolType::WSS), + } + } + pub fn request(&self) -> Option { + match self { + Self::UDP(_) => None, + Self::TCP(_) => None, + Self::WS(di) => Some(format!("ws://{}", di.request)), + Self::WSS(di) => Some(format!("wss://{}", di.request)), + } + } + pub fn is_valid(&self) -> bool { + let socket_address = self.socket_address(); + let address = socket_address.address(); + let port = socket_address.port(); + (address.is_global() || address.is_local()) && port > 0 + } + + pub fn make_filter(&self) -> DialInfoFilter { + DialInfoFilter { + protocol_type_set: ProtocolTypeSet::only(self.protocol_type()), + address_type_set: AddressTypeSet::only(self.address_type()), + } + } + + pub fn try_vec_from_short, H: AsRef>( + short: S, + hostname: H, + ) -> VeilidAPIResult> { + let short = short.as_ref(); + let hostname = hostname.as_ref(); + + if short.len() < 2 { + apibail_parse_error!("invalid short url length", short); + } + let url = match &short[0..1] { + "U" => { + format!("udp://{}:{}", hostname, &short[1..]) + } + "T" => { + format!("tcp://{}:{}", hostname, &short[1..]) + } + "W" => { + format!("ws://{}:{}", hostname, &short[1..]) + } + "S" => { + format!("wss://{}:{}", hostname, &short[1..]) + } + _ => { + apibail_parse_error!("invalid short url type", short); + } + }; + Self::try_vec_from_url(url) + } + + pub fn try_vec_from_url>(url: S) -> VeilidAPIResult> { + let url = url.as_ref(); + let split_url = SplitUrl::from_str(url) + .map_err(|e| VeilidAPIError::parse_error(format!("unable to split url: {}", e), url))?; + + let port = match split_url.scheme.as_str() { + "udp" | "tcp" => split_url + .port + .ok_or_else(|| VeilidAPIError::parse_error("Missing port in udp url", url))?, + "ws" => split_url.port.unwrap_or(80u16), + "wss" => split_url.port.unwrap_or(443u16), + _ => { + apibail_parse_error!("Invalid dial info url scheme", split_url.scheme); + } + }; + + let socket_addrs = { + // Resolve if possible, WASM doesn't support resolution and doesn't need it to connect to the dialinfo + // This will not be used on signed dialinfo, only for bootstrapping, so we don't need to worry about + // the '0.0.0.0' address being propagated across the routing table + 
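// As a hedged orientation on the text formats handled here (hostname, addresses and
// ports are placeholder examples): Display/FromStr use the "<proto>|<rest>" form,
// e.g. "udp|8.8.8.8:5150", "tcp|[2001:db8::1]:5150", or "ws|<ip>|<request>", while
// the short form is a single letter ('U', 'T', 'W', 'S') plus the port/path,
// expanded against a separately supplied hostname:
//
//     // "U5150" + "bootstrap.example.com" expands to "udp://bootstrap.example.com:5150"
//     let dial_infos: Vec<DialInfo> =
//         DialInfo::try_vec_from_short("U5150", "bootstrap.example.com").unwrap();
//     // One DialInfo::UDP entry is produced per resolved socket address of the hostname.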
cfg_if::cfg_if! { + if #[cfg(target_arch = "wasm32")] { + vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0,0,0,0)), port)] + } else { + match split_url.host { + SplitUrlHost::Hostname(_) => split_url + .host_port(port) + .to_socket_addrs() + .map_err(|_| VeilidAPIError::parse_error("couldn't resolve hostname in url", url))? + .collect(), + SplitUrlHost::IpAddr(a) => vec![SocketAddr::new(a, port)], + } + } + } + }; + + let mut out = Vec::new(); + for sa in socket_addrs { + out.push(match split_url.scheme.as_str() { + "udp" => Self::udp_from_socketaddr(sa), + "tcp" => Self::tcp_from_socketaddr(sa), + "ws" => Self::try_ws( + SocketAddress::from_socket_addr(sa).to_canonical(), + url.to_string(), + )?, + "wss" => Self::try_wss( + SocketAddress::from_socket_addr(sa).to_canonical(), + url.to_string(), + )?, + _ => { + unreachable!("Invalid dial info url scheme") + } + }); + } + Ok(out) + } + + pub async fn to_short(&self) -> (String, String) { + match self { + DialInfo::UDP(di) => ( + format!("U{}", di.socket_address.port()), + intf::ptr_lookup(di.socket_address.to_ip_addr()) + .await + .unwrap_or_else(|_| di.socket_address.to_string()), + ), + DialInfo::TCP(di) => ( + format!("T{}", di.socket_address.port()), + intf::ptr_lookup(di.socket_address.to_ip_addr()) + .await + .unwrap_or_else(|_| di.socket_address.to_string()), + ), + DialInfo::WS(di) => { + let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap(); + if let SplitUrlHost::IpAddr(a) = split_url.host { + if let Ok(host) = intf::ptr_lookup(a).await { + split_url.host = SplitUrlHost::Hostname(host); + } + } + ( + format!( + "W{}{}", + split_url.port.unwrap_or(80), + split_url + .path + .map(|p| format!("/{}", p)) + .unwrap_or_default() + ), + split_url.host.to_string(), + ) + } + DialInfo::WSS(di) => { + let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap(); + if let SplitUrlHost::IpAddr(a) = split_url.host { + if let Ok(host) = intf::ptr_lookup(a).await { + split_url.host = SplitUrlHost::Hostname(host); + } + } + ( + format!( + "S{}{}", + split_url.port.unwrap_or(443), + split_url + .path + .map(|p| format!("/{}", p)) + .unwrap_or_default() + ), + split_url.host.to_string(), + ) + } + } + } + pub async fn to_url(&self) -> String { + match self { + DialInfo::UDP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr()) + .await + .map(|h| format!("udp://{}:{}", h, di.socket_address.port())) + .unwrap_or_else(|_| format!("udp://{}", di.socket_address)), + DialInfo::TCP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr()) + .await + .map(|h| format!("tcp://{}:{}", h, di.socket_address.port())) + .unwrap_or_else(|_| format!("tcp://{}", di.socket_address)), + DialInfo::WS(di) => { + let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap(); + if let SplitUrlHost::IpAddr(a) = split_url.host { + if let Ok(host) = intf::ptr_lookup(a).await { + split_url.host = SplitUrlHost::Hostname(host); + } + } + split_url.to_string() + } + DialInfo::WSS(di) => { + let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap(); + if let SplitUrlHost::IpAddr(a) = split_url.host { + if let Ok(host) = intf::ptr_lookup(a).await { + split_url.host = SplitUrlHost::Hostname(host); + } + } + split_url.to_string() + } + } + } + + pub fn ordered_sequencing_sort(a: &DialInfo, b: &DialInfo) -> core::cmp::Ordering { + let ca = a.protocol_type().sort_order(Sequencing::EnsureOrdered); + let cb = b.protocol_type().sort_order(Sequencing::EnsureOrdered); + if ca < cb { + return 
core::cmp::Ordering::Less; + } + if ca > cb { + return core::cmp::Ordering::Greater; + } + match (a, b) { + (DialInfo::UDP(a), DialInfo::UDP(b)) => a.cmp(b), + (DialInfo::TCP(a), DialInfo::TCP(b)) => a.cmp(b), + (DialInfo::WS(a), DialInfo::WS(b)) => a.cmp(b), + (DialInfo::WSS(a), DialInfo::WSS(b)) => a.cmp(b), + _ => unreachable!(), + } + } +} + +impl MatchesDialInfoFilter for DialInfo { + fn matches_filter(&self, filter: &DialInfoFilter) -> bool { + if !filter.protocol_type_set.contains(self.protocol_type()) { + return false; + } + if !filter.address_type_set.contains(self.address_type()) { + return false; + } + true + } +} diff --git a/veilid-core/src/network_manager/types/dial_info/tcp.rs b/veilid-core/src/network_manager/types/dial_info/tcp.rs new file mode 100644 index 00000000..0f93273d --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info/tcp.rs @@ -0,0 +1,21 @@ +use super::*; + +#[derive( + Clone, + Default, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoTCP { + pub socket_address: SocketAddress, +} \ No newline at end of file diff --git a/veilid-core/src/network_manager/types/dial_info/udp.rs b/veilid-core/src/network_manager/types/dial_info/udp.rs new file mode 100644 index 00000000..d799e116 --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info/udp.rs @@ -0,0 +1,21 @@ +use super::*; + +#[derive( + Clone, + Default, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoUDP { + pub socket_address: SocketAddress, +} diff --git a/veilid-core/src/network_manager/types/dial_info/ws.rs b/veilid-core/src/network_manager/types/dial_info/ws.rs new file mode 100644 index 00000000..18e2a37c --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info/ws.rs @@ -0,0 +1,22 @@ +use super::*; + +#[derive( + Clone, + Default, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoWS { + pub socket_address: SocketAddress, + pub request: String, +} \ No newline at end of file diff --git a/veilid-core/src/network_manager/types/dial_info/wss.rs b/veilid-core/src/network_manager/types/dial_info/wss.rs new file mode 100644 index 00000000..e999430d --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info/wss.rs @@ -0,0 +1,22 @@ +use super::*; + +#[derive( + Clone, + Default, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoWSS { + pub socket_address: SocketAddress, + pub request: String, +} diff --git a/veilid-core/src/network_manager/types/dial_info_class.rs b/veilid-core/src/network_manager/types/dial_info_class.rs new file mode 100644 index 00000000..f3f91376 --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info_class.rs @@ -0,0 +1,50 @@ +use super::*; + +// Keep member order appropriate for sorting < preference +#[derive( + Copy, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum 
DialInfoClass { + Direct = 0, // D = Directly reachable with public IP and no firewall, with statically configured port + Mapped = 1, // M = Directly reachable with via portmap behind any NAT or firewalled with dynamically negotiated port + FullConeNAT = 2, // F = Directly reachable device without portmap behind full-cone NAT + Blocked = 3, // B = Inbound blocked at firewall but may hole punch with public address + AddressRestrictedNAT = 4, // A = Device without portmap behind address-only restricted NAT + PortRestrictedNAT = 5, // P = Device without portmap behind address-and-port restricted NAT +} + +impl DialInfoClass { + // Is a signal required to do an inbound hole-punch? + pub fn requires_signal(&self) -> bool { + matches!( + self, + Self::Blocked | Self::AddressRestrictedNAT | Self::PortRestrictedNAT + ) + } + + // Does a relay node need to be allocated for this dial info? + // For full cone NAT, the relay itself may not be used but the keepalive sent to it + // is required to keep the NAT mapping valid in the router state table + pub fn requires_relay(&self) -> bool { + matches!( + self, + Self::FullConeNAT + | Self::Blocked + | Self::AddressRestrictedNAT + | Self::PortRestrictedNAT + ) + } +} diff --git a/veilid-core/src/network_manager/types/dial_info_filter.rs b/veilid-core/src/network_manager/types/dial_info_filter.rs new file mode 100644 index 00000000..c3635957 --- /dev/null +++ b/veilid-core/src/network_manager/types/dial_info_filter.rs @@ -0,0 +1,86 @@ +use super::*; + +#[derive( + Copy, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoFilter { + #[with(RkyvEnumSet)] + pub protocol_type_set: ProtocolTypeSet, + #[with(RkyvEnumSet)] + pub address_type_set: AddressTypeSet, +} + +impl Default for DialInfoFilter { + fn default() -> Self { + Self { + protocol_type_set: ProtocolTypeSet::all(), + address_type_set: AddressTypeSet::all(), + } + } +} + +impl DialInfoFilter { + pub fn all() -> Self { + Self { + protocol_type_set: ProtocolTypeSet::all(), + address_type_set: AddressTypeSet::all(), + } + } + pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self { + self.protocol_type_set = ProtocolTypeSet::only(protocol_type); + self + } + pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self { + self.protocol_type_set = protocol_set; + self + } + pub fn with_address_type(mut self, address_type: AddressType) -> Self { + self.address_type_set = AddressTypeSet::only(address_type); + self + } + pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self { + self.address_type_set = address_set; + self + } + pub fn filtered(mut self, other_dif: &DialInfoFilter) -> Self { + self.protocol_type_set &= other_dif.protocol_type_set; + self.address_type_set &= other_dif.address_type_set; + self + } + pub fn is_dead(&self) -> bool { + self.protocol_type_set.is_empty() || self.address_type_set.is_empty() + } +} + +impl fmt::Debug for DialInfoFilter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + let mut out = String::new(); + if self.protocol_type_set != ProtocolTypeSet::all() { + out += &format!("+{:?}", self.protocol_type_set); + } else { + out += "*"; + } + if self.address_type_set != AddressTypeSet::all() { + out += &format!("+{:?}", self.address_type_set); + } else { + out += "*"; + } + write!(f, "[{}]", out) + } +} + +pub trait MatchesDialInfoFilter { + fn 
matches_filter(&self, filter: &DialInfoFilter) -> bool; +} + diff --git a/veilid-core/src/network_manager/types/low_level_protocol_type.rs b/veilid-core/src/network_manager/types/low_level_protocol_type.rs new file mode 100644 index 00000000..69dfeae7 --- /dev/null +++ b/veilid-core/src/network_manager/types/low_level_protocol_type.rs @@ -0,0 +1,31 @@ +use super::*; + +// Keep member order appropriate for sorting < preference +// Must match DialInfo order +#[allow(clippy::derive_hash_xor_eq)] +#[derive( + Debug, + PartialOrd, + Ord, + Hash, + EnumSetType, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[enumset(repr = "u8")] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum LowLevelProtocolType { + UDP, + TCP, +} + +impl LowLevelProtocolType { + pub fn is_connection_oriented(&self) -> bool { + matches!(self, LowLevelProtocolType::TCP) + } +} + +// pub type LowLevelProtocolTypeSet = EnumSet; diff --git a/veilid-core/src/network_manager/types/mod.rs b/veilid-core/src/network_manager/types/mod.rs new file mode 100644 index 00000000..3c1c1e9d --- /dev/null +++ b/veilid-core/src/network_manager/types/mod.rs @@ -0,0 +1,27 @@ +mod address; +mod address_type; +mod connection_descriptor; +mod dial_info; +mod dial_info_class; +mod dial_info_filter; +mod low_level_protocol_type; +mod network_class; +mod peer_address; +mod protocol_type; +mod signal_info; +mod socket_address; + +use super::*; + +pub use address::*; +pub use address_type::*; +pub use connection_descriptor::*; +pub use dial_info::*; +pub use dial_info_class::*; +pub use dial_info_filter::*; +pub use low_level_protocol_type::*; +pub use network_class::*; +pub use peer_address::*; +pub use protocol_type::*; +pub use signal_info::*; +pub use socket_address::*; diff --git a/veilid-core/src/network_manager/types/network_class.rs b/veilid-core/src/network_manager/types/network_class.rs new file mode 100644 index 00000000..828edac7 --- /dev/null +++ b/veilid-core/src/network_manager/types/network_class.rs @@ -0,0 +1,37 @@ +use super::*; + +#[derive( + Copy, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum NetworkClass { + InboundCapable = 0, // I = Inbound capable without relay, may require signal + OutboundOnly = 1, // O = Outbound only, inbound relay required except with reverse connect signal + WebApp = 2, // W = PWA, outbound relay is required in most cases + Invalid = 3, // X = Invalid network class, we don't know how to reach this node +} + +impl Default for NetworkClass { + fn default() -> Self { + Self::Invalid + } +} + +impl NetworkClass { + // Should an outbound relay be kept available? 
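// A hedged sketch tying the reachability classes together (illustrative only):
// DialInfoClass::requires_signal()/requires_relay() answer "how do we get inbound
// to this dial info", while NetworkClass::outbound_wants_relay() answers "does this
// node want an outbound relay regardless", which currently is only the WebApp case.
//
//     assert!(DialInfoClass::FullConeNAT.requires_relay());
//     assert!(!DialInfoClass::FullConeNAT.requires_signal());
//     assert!(DialInfoClass::PortRestrictedNAT.requires_signal());
//     assert!(NetworkClass::WebApp.outbound_wants_relay());
//     assert!(!NetworkClass::InboundCapable.outbound_wants_relay());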
+ pub fn outbound_wants_relay(&self) -> bool { + matches!(self, Self::WebApp) + } +} diff --git a/veilid-core/src/network_manager/types/peer_address.rs b/veilid-core/src/network_manager/types/peer_address.rs new file mode 100644 index 00000000..83df0bea --- /dev/null +++ b/veilid-core/src/network_manager/types/peer_address.rs @@ -0,0 +1,66 @@ +use super::*; + +#[derive( + Copy, + Clone, + Debug, + PartialEq, + PartialOrd, + Eq, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PeerAddress { + protocol_type: ProtocolType, + #[serde(with = "json_as_string")] + socket_address: SocketAddress, +} + +impl PeerAddress { + pub fn new(socket_address: SocketAddress, protocol_type: ProtocolType) -> Self { + Self { + socket_address: socket_address.to_canonical(), + protocol_type, + } + } + + pub fn socket_address(&self) -> &SocketAddress { + &self.socket_address + } + + pub fn protocol_type(&self) -> ProtocolType { + self.protocol_type + } + + pub fn to_socket_addr(&self) -> SocketAddr { + self.socket_address.to_socket_addr() + } + + pub fn address_type(&self) -> AddressType { + self.socket_address.address_type() + } +} + +impl fmt::Display for PeerAddress { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}:{}", self.protocol_type, self.socket_address) + } +} + +impl FromStr for PeerAddress { + type Err = VeilidAPIError; + fn from_str(s: &str) -> VeilidAPIResult { + let Some((first, second)) = s.split_once(':') else { + return Err(VeilidAPIError::parse_error("PeerAddress is missing a colon: {}", s)); + }; + let protocol_type = ProtocolType::from_str(first)?; + let socket_address = SocketAddress::from_str(second)?; + Ok(PeerAddress::new(socket_address, protocol_type)) + } +} diff --git a/veilid-core/src/network_manager/types/protocol_type.rs b/veilid-core/src/network_manager/types/protocol_type.rs new file mode 100644 index 00000000..4ba47000 --- /dev/null +++ b/veilid-core/src/network_manager/types/protocol_type.rs @@ -0,0 +1,104 @@ +use super::*; + +// Keep member order appropriate for sorting < preference +// Must match DialInfo order +#[allow(clippy::derive_hash_xor_eq)] +#[derive( + Debug, + PartialOrd, + Ord, + Hash, + EnumSetType, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[enumset(repr = "u8")] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum ProtocolType { + UDP, + TCP, + WS, + WSS, +} + +impl ProtocolType { + pub fn is_connection_oriented(&self) -> bool { + matches!( + self, + ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS + ) + } + pub fn low_level_protocol_type(&self) -> LowLevelProtocolType { + match self { + ProtocolType::UDP => LowLevelProtocolType::UDP, + ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS => LowLevelProtocolType::TCP, + } + } + pub fn sort_order(&self, sequencing: Sequencing) -> usize { + match self { + ProtocolType::UDP => { + if sequencing != Sequencing::NoPreference { + 3 + } else { + 0 + } + } + ProtocolType::TCP => { + if sequencing != Sequencing::NoPreference { + 0 + } else { + 1 + } + } + ProtocolType::WS => { + if sequencing != Sequencing::NoPreference { + 1 + } else { + 2 + } + } + ProtocolType::WSS => { + if sequencing != Sequencing::NoPreference { + 2 + } else { + 3 + } + } + } + } + pub fn all_ordered_set() -> ProtocolTypeSet { + ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS + } +} + +impl fmt::Display for ProtocolType { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + match self { + ProtocolType::UDP => write!(f, "UDP"), + ProtocolType::TCP => write!(f, "TCP"), + ProtocolType::WS => write!(f, "WS"), + ProtocolType::WSS => write!(f, "WSS"), + } + } +} + +impl FromStr for ProtocolType { + type Err = VeilidAPIError; + fn from_str(s: &str) -> VeilidAPIResult { + match s.to_ascii_uppercase().as_str() { + "UDP" => Ok(ProtocolType::UDP), + "TCP" => Ok(ProtocolType::TCP), + "WS" => Ok(ProtocolType::WS), + "WSS" => Ok(ProtocolType::WSS), + _ => Err(VeilidAPIError::parse_error( + "ProtocolType::from_str failed", + s, + )), + } + } +} + +pub type ProtocolTypeSet = EnumSet; diff --git a/veilid-core/src/network_manager/types/signal_info.rs b/veilid-core/src/network_manager/types/signal_info.rs new file mode 100644 index 00000000..bf2aa861 --- /dev/null +++ b/veilid-core/src/network_manager/types/signal_info.rs @@ -0,0 +1,51 @@ +use super::*; + +/// Parameter for Signal operation +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum SignalInfo { + /// UDP Hole Punch Request + HolePunch { + /// /// Receipt to be returned after the hole punch + receipt: Vec, + /// Sender's peer info + peer_info: PeerInfo, + }, + /// Reverse Connection Request + ReverseConnect { + /// Receipt to be returned by the reverse connection + receipt: Vec, + /// Sender's peer info + peer_info: PeerInfo, + }, + // XXX: WebRTC +} + +impl SignalInfo { + pub fn validate(&self, crypto: Crypto) -> Result<(), RPCError> { + match self { + SignalInfo::HolePunch { receipt, peer_info } => { + if receipt.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol("SignalInfo HolePunch receipt too short")); + } + if receipt.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol("SignalInfo HolePunch receipt too long")); + } + peer_info.validate(crypto).map_err(RPCError::protocol) + } + SignalInfo::ReverseConnect { receipt, peer_info } => { + if receipt.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol( + "SignalInfo ReverseConnect receipt too short", + )); + } + if receipt.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol( + "SignalInfo ReverseConnect receipt too long", + )); + } + peer_info.validate(crypto).map_err(RPCError::protocol) + } + } + } +} diff --git a/veilid-core/src/network_manager/types/socket_address.rs b/veilid-core/src/network_manager/types/socket_address.rs new file mode 100644 index 00000000..35515b90 --- /dev/null +++ b/veilid-core/src/network_manager/types/socket_address.rs @@ -0,0 +1,77 @@ +use super::*; + +#[derive( + Copy, + Default, + Clone, + Debug, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SocketAddress { + address: Address, + port: u16, +} + +impl SocketAddress { + pub fn new(address: Address, port: u16) -> Self { + Self { address, port } + } + pub fn from_socket_addr(sa: SocketAddr) -> SocketAddress { + Self { + address: Address::from_socket_addr(sa), + port: sa.port(), + } + } + pub fn address(&self) -> Address { + self.address + } + pub fn set_address(&mut self, address: Address) { + self.address = address; + } + pub fn address_type(&self) -> AddressType { + self.address.address_type() + } + pub fn port(&self) -> u16 { + self.port + } + pub fn set_port(&mut self, port: u16) { + self.port = port + } + pub fn to_canonical(&self) -> SocketAddress { + SocketAddress { + address: 
self.address.to_canonical(), + port: self.port, + } + } + pub fn to_ip_addr(&self) -> IpAddr { + self.address.to_ip_addr() + } + pub fn to_socket_addr(&self) -> SocketAddr { + self.address.to_socket_addr(self.port) + } +} + +impl fmt::Display for SocketAddress { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.to_socket_addr()) + } +} + +impl FromStr for SocketAddress { + type Err = VeilidAPIError; + fn from_str(s: &str) -> VeilidAPIResult { + let sa = SocketAddr::from_str(s) + .map_err(|e| VeilidAPIError::parse_error("Failed to parse SocketAddress", e))?; + Ok(SocketAddress::from_socket_addr(sa)) + } +} diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index c94577dd..46206d0c 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -1,8 +1,6 @@ use super::*; use core::sync::atomic::{AtomicU32, Ordering}; -use rkyv::{ - with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize, -}; + /// Reliable pings are done with increased spacing between pings @@ -73,7 +71,9 @@ pub struct BucketEntryLocalNetwork { #[archive_attr(repr(C), derive(CheckBytes))] pub struct BucketEntryInner { /// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field - node_ids: TypedKeySet, + validated_node_ids: TypedKeySet, + /// The node ids claimed by the remote node that use cryptography versions we do not support + unsupported_node_ids: TypedKeySet, /// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using envelope_support: Vec, /// If this node has updated it's SignedNodeInfo since our network @@ -122,9 +122,11 @@ impl BucketEntryInner { self.node_ref_tracks.remove(&track_id); } - /// Get node ids + /// Get all node ids pub fn node_ids(&self) -> TypedKeySet { - self.node_ids.clone() + let mut node_ids = self.validated_node_ids.clone(); + node_ids.add_all(&self.unsupported_node_ids); + node_ids } /// Add a node id for a particular crypto kind. 
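// A hedged sketch of how the validated/unsupported split behaves (CRYPTO_KIND_VLD0
// stands in for any kind in VALID_CRYPTO_KINDS; `entry` and the key values are
// placeholders, not code from this patch):
//
//     entry.add_node_id(TypedKey::new(CRYPTO_KIND_VLD0, supported_key))?;       // -> validated_node_ids
//     entry.add_node_id(TypedKey::new(FourCC::from([0, 1, 2, 3]), other_key))?; // -> unsupported_node_ids
//     assert_eq!(entry.node_ids().len(), 2);          // node_ids() reports both sets
//     assert_eq!(entry.crypto_kinds(), vec![CRYPTO_KIND_VLD0]); // kinds come from validated ids only
//     // best_node_id() likewise only ever selects from validated_node_ids.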
@@ -132,33 +134,40 @@ impl BucketEntryInner { /// Returns Ok(None) if no previous existing node id was associated with that crypto kind /// Results Err() if this operation would add more crypto kinds than we support pub fn add_node_id(&mut self, node_id: TypedKey) -> EyreResult> { - if let Some(old_node_id) = self.node_ids.get(node_id.kind) { + let total_node_id_count = self.validated_node_ids.len() + self.unsupported_node_ids.len(); + let node_ids = if VALID_CRYPTO_KINDS.contains(&node_id.kind) { + &mut self.validated_node_ids + } else { + &mut self.unsupported_node_ids + }; + + if let Some(old_node_id) = node_ids.get(node_id.kind) { // If this was already there we do nothing if old_node_id == node_id { return Ok(None); } // Won't change number of crypto kinds - self.node_ids.add(node_id); + node_ids.add(node_id); return Ok(Some(old_node_id)); } // Check to ensure we aren't adding more crypto kinds than we support - if self.node_ids.len() == MAX_CRYPTO_KINDS { + if total_node_id_count == MAX_CRYPTO_KINDS { bail!("too many crypto kinds for this node"); } - self.node_ids.add(node_id); + node_ids.add(node_id); Ok(None) } pub fn best_node_id(&self) -> TypedKey { - self.node_ids.best().unwrap() + self.validated_node_ids.best().unwrap() } /// Get crypto kinds pub fn crypto_kinds(&self) -> Vec { - self.node_ids.kinds() + self.validated_node_ids.kinds() } /// Compare sets of crypto kinds pub fn common_crypto_kinds(&self, other: &[CryptoKind]) -> Vec { - common_crypto_kinds(&self.node_ids.kinds(), other) + common_crypto_kinds(&self.validated_node_ids.kinds(), other) } @@ -270,7 +279,7 @@ impl BucketEntryInner { } // Update the envelope version support we have to use - let envelope_support = signed_node_info.node_info().envelope_support.clone(); + let envelope_support = signed_node_info.node_info().envelope_support().to_vec(); // Update the signed node info *opt_current_sni = Some(Box::new(signed_node_info)); @@ -333,10 +342,12 @@ impl BucketEntryInner { RoutingDomain::LocalNetwork => &self.local_network.signed_node_info, RoutingDomain::PublicInternet => &self.public_internet.signed_node_info, }; - opt_current_sni.as_ref().map(|s| PeerInfo { - node_ids: self.node_ids.clone(), - signed_node_info: *s.clone(), - }) + // Peer info includes all node ids, even unvalidated ones + let node_ids = self.node_ids(); + opt_current_sni.as_ref().map(|s| PeerInfo::new( + node_ids, + *s.clone(), + )) } pub fn best_routing_domain( @@ -527,7 +538,7 @@ impl BucketEntryInner { } } - pub fn set_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: Timestamp) { + pub fn set_seen_our_node_info_ts(&mut self, routing_domain: RoutingDomain, seen_ts: Timestamp) { match routing_domain { RoutingDomain::LocalNetwork => { self.local_network.last_seen_our_node_info_ts = seen_ts; @@ -780,12 +791,14 @@ pub struct BucketEntry { impl BucketEntry { pub(super) fn new(first_node_id: TypedKey) -> Self { - let now = get_aligned_timestamp(); - let mut node_ids = TypedKeySet::new(); - node_ids.add(first_node_id); + // First node id should always be one we support since TypedKeySets are sorted and we must have at least one supported key + assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind)); + + let now = get_aligned_timestamp(); let inner = BucketEntryInner { - node_ids, + validated_node_ids: TypedKeySet::from(first_node_id), + unsupported_node_ids: TypedKeySet::new(), envelope_support: Vec::new(), updated_since_last_network_change: false, last_connections: BTreeMap::new(), diff --git 
a/veilid-core/src/routing_table/debug.rs b/veilid-core/src/routing_table/debug.rs index 6b4772e9..f329a4b4 100644 --- a/veilid-core/src/routing_table/debug.rs +++ b/veilid-core/src/routing_table/debug.rs @@ -2,28 +2,6 @@ use super::*; use routing_table::tasks::bootstrap::BOOTSTRAP_TXT_VERSION_0; impl RoutingTable { - pub(crate) fn debug_info_nodeinfo(&self) -> String { - let mut out = String::new(); - let inner = self.inner.read(); - out += "Routing Table Info:\n"; - - out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids()); - out += &format!( - " Self Latency Stats Accounting: {:#?}\n\n", - inner.self_latency_stats_accounting - ); - out += &format!( - " Self Transfer Stats Accounting: {:#?}\n\n", - inner.self_transfer_stats_accounting - ); - out += &format!( - " Self Transfer Stats: {:#?}\n\n", - inner.self_transfer_stats - ); - - out - } - pub(crate) async fn debug_info_txtrecord(&self) -> String { let mut out = String::new(); @@ -71,14 +49,34 @@ impl RoutingTable { node_ids, some_hostname.unwrap() ); - for short_url in short_urls { - out += &format!(",{}", short_url); - } + out += &short_urls.join(","); out += "\n"; } out } + pub(crate) fn debug_info_nodeinfo(&self) -> String { + let mut out = String::new(); + let inner = self.inner.read(); + out += "Routing Table Info:\n"; + + out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids()); + out += &format!( + " Self Latency Stats Accounting: {:#?}\n\n", + inner.self_latency_stats_accounting + ); + out += &format!( + " Self Transfer Stats Accounting: {:#?}\n\n", + inner.self_transfer_stats_accounting + ); + out += &format!( + " Self Transfer Stats: {:#?}\n\n", + inner.self_transfer_stats + ); + + out + } + pub(crate) fn debug_info_dialinfo(&self) -> String { let ldis = self.dial_info_details(RoutingDomain::LocalNetwork); let gdis = self.dial_info_details(RoutingDomain::PublicInternet); @@ -132,13 +130,25 @@ impl RoutingTable { for e in filtered_entries { let state = e.1.with(inner, |_rti, e| e.state(cur_ts)); out += &format!( - " {} [{}]\n", + " {} [{}] {}\n", e.0.encode(), match state { BucketEntryState::Reliable => "R", BucketEntryState::Unreliable => "U", BucketEntryState::Dead => "D", - } + }, + e.1.with(inner, |_rti, e| { + e.peer_stats() + .latency + .as_ref() + .map(|l| { + format!( + "{:.2}ms", + timestamp_to_secs(l.average.as_u64()) * 1000.0 + ) + }) + .unwrap_or_else(|| "???.??ms".to_string()) + }) ); } } diff --git a/veilid-core/src/routing_table/find_peers.rs b/veilid-core/src/routing_table/find_peers.rs new file mode 100644 index 00000000..54a45796 --- /dev/null +++ b/veilid-core/src/routing_table/find_peers.rs @@ -0,0 +1,103 @@ +use super::*; + +impl RoutingTable { + /// Utility to find all closest nodes to a particular key, including possibly our own node and nodes further away from the key than our own, returning their peer info + pub fn find_all_closest_peers(&self, key: TypedKey) -> NetworkResult> { + let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else { + // Our own node info is not yet available, drop this request. 
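// Hedged summary of the two lookups in this module: find_all_closest_peers() may
// return our own node and entries *further* from the key than us (suitable for
// answering a plain find-node style query), while find_peers_closer_to_key() keeps
// only entries strictly closer to the key than this node (what a DHT value
// operation presumably wants when deciding where to forward). Illustrative call,
// with `routing_table` and `key` as placeholders:
//
//     let reply: NetworkResult<Vec<PeerInfo>> = routing_table.find_all_closest_peers(key);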
+ return NetworkResult::service_unavailable(); + }; + + // find N nodes closest to the target node in our routing table + let filter = Box::new( + move |rti: &RoutingTableInner, opt_entry: Option>| { + // Ensure only things that are valid/signed in the PublicInternet domain are returned + rti.filter_has_valid_signed_node_info( + RoutingDomain::PublicInternet, + true, + opt_entry, + ) + }, + ) as RoutingTableEntryFilter; + let filters = VecDeque::from([filter]); + + let node_count = { + let c = self.config.get(); + c.network.dht.max_find_node_count as usize + }; + + let closest_nodes = self.find_closest_nodes( + node_count, + key, + filters, + // transform + |rti, entry| { + rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, entry) + }, + ); + + NetworkResult::value(closest_nodes) + } + + /// Utility to find nodes that are closer to a key than our own node, returning their peer info + pub fn find_peers_closer_to_key(&self, key: TypedKey) -> NetworkResult> { + // add node information for the requesting node to our routing table + let crypto_kind = key.kind; + let own_node_id = self.node_id(crypto_kind); + + // find N nodes closest to the target node in our routing table + // ensure the nodes returned are only the ones closer to the target node than ourself + let Some(vcrypto) = self.crypto().get(crypto_kind) else { + return NetworkResult::invalid_message("unsupported cryptosystem"); + }; + let own_distance = vcrypto.distance(&own_node_id.value, &key.value); + + let filter = Box::new( + move |rti: &RoutingTableInner, opt_entry: Option>| { + // Exclude our own node + let Some(entry) = opt_entry else { + return false; + }; + // Ensure only things that are valid/signed in the PublicInternet domain are returned + if !rti.filter_has_valid_signed_node_info( + RoutingDomain::PublicInternet, + true, + Some(entry.clone()), + ) { + return false; + } + // Ensure things further from the key than our own node are not included + let Some(entry_node_id) = entry.with(rti, |_rti, e| e.node_ids().get(crypto_kind)) else { + return false; + }; + let entry_distance = vcrypto.distance(&entry_node_id.value, &key.value); + if entry_distance >= own_distance { + return false; + } + + true + }, + ) as RoutingTableEntryFilter; + let filters = VecDeque::from([filter]); + + let node_count = { + let c = self.config.get(); + c.network.dht.max_find_node_count as usize + }; + + // + let closest_nodes = self.find_closest_nodes( + node_count, + key, + filters, + // transform + |rti, entry| { + entry.unwrap().with(rti, |_rti, e| { + e.make_peer_info(RoutingDomain::PublicInternet).unwrap() + }) + }, + ); + + NetworkResult::value(closest_nodes) + } +} diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index 01381d0e..1d63fcb5 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -1,6 +1,7 @@ mod bucket; mod bucket_entry; mod debug; +mod find_peers; mod node_ref; mod node_ref_filter; mod privacy; @@ -10,16 +11,21 @@ mod routing_domains; mod routing_table_inner; mod stats_accounting; mod tasks; +mod types; -use crate::*; +pub mod tests; + +use super::*; use crate::crypto::*; use crate::network_manager::*; use crate::rpc_processor::*; use bucket::*; +use hashlink::LruCache; + pub use bucket_entry::*; pub use debug::*; -use hashlink::LruCache; +pub use find_peers::*; pub use node_ref::*; pub use node_ref_filter::*; pub use privacy::*; @@ -28,6 +34,7 @@ pub use routing_domain_editor::*; pub use routing_domains::*; pub use 
routing_table_inner::*; pub use stats_accounting::*; +pub use types::*; ////////////////////////////////////////////////////////////////////////// @@ -50,6 +57,8 @@ pub struct LowLevelPortInfo { } pub type RoutingTableEntryFilter<'t> = Box>) -> bool + Send + 't>; +pub type SerializedBuckets = Vec>; +pub type SerializedBucketMap = BTreeMap; #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct RoutingTableHealth { @@ -208,7 +217,7 @@ impl RoutingTable { unlocked_inner, }; - this.start_tasks(); + this.setup_tasks(); this } @@ -259,7 +268,7 @@ impl RoutingTable { debug!("starting routing table terminate"); // Stop tasks - self.stop_tasks().await; + self.cancel_tasks().await; // Load bucket entries from table db if possible debug!("saving routing table entries"); @@ -285,14 +294,14 @@ impl RoutingTable { debug!("finished routing table terminate"); } - /// Serialize routing table to table store - async fn save_buckets(&self) -> EyreResult<()> { + /// Serialize the routing table. + fn serialized_buckets(&self) -> EyreResult<(SerializedBucketMap, SerializedBuckets)> { // Since entries are shared by multiple buckets per cryptokind // we need to get the list of all unique entries when serializing let mut all_entries: Vec> = Vec::new(); // Serialize all buckets and get map of entries - let mut serialized_bucket_map: BTreeMap>> = BTreeMap::new(); + let mut serialized_bucket_map: SerializedBucketMap = BTreeMap::new(); { let mut entry_map: HashMap<*const BucketEntry, u32> = HashMap::new(); let inner = &*self.inner.read(); @@ -314,38 +323,55 @@ impl RoutingTable { all_entry_bytes.push(entry_bytes); } + Ok((serialized_bucket_map, all_entry_bytes)) + } + + /// Write the serialized routing table to the table store. + async fn save_buckets(&self) -> EyreResult<()> { + let (serialized_bucket_map, all_entry_bytes) = self.serialized_buckets()?; + let table_store = self.unlocked_inner.network_manager().table_store(); let tdb = table_store.open("routing_table", 1).await?; let dbx = tdb.transact(); if let Err(e) = dbx.store_rkyv(0, b"serialized_bucket_map", &serialized_bucket_map) { dbx.rollback(); - return Err(e); + return Err(e.into()); } if let Err(e) = dbx.store_rkyv(0, b"all_entry_bytes", &all_entry_bytes) { dbx.rollback(); - return Err(e); + return Err(e.into()); } dbx.commit().await?; Ok(()) } - /// Deserialize routing table from table store async fn load_buckets(&self) -> EyreResult<()> { // Deserialize bucket map and all entries from the table store let tstore = self.unlocked_inner.network_manager().table_store(); let tdb = tstore.open("routing_table", 1).await?; - let Some(serialized_bucket_map): Option>>> = tdb.load_rkyv(0, b"serialized_bucket_map")? else { + let Some(serialized_bucket_map): Option = tdb.load_rkyv(0, b"serialized_bucket_map").await? else { log_rtab!(debug "no bucket map in saved routing table"); return Ok(()); }; - let Some(all_entry_bytes): Option>> = tdb.load_rkyv(0, b"all_entry_bytes")? else { + let Some(all_entry_bytes): Option = tdb.load_rkyv(0, b"all_entry_bytes").await? else { log_rtab!(debug "no all_entry_bytes in saved routing table"); return Ok(()); }; // Reconstruct all entries let inner = &mut *self.inner.write(); + self.populate_routing_table(inner, serialized_bucket_map, all_entry_bytes)?; + Ok(()) + } + + /// Write the deserialized table store data to the routing table. 
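// Hedged sketch of the round trip this refactor enables (local names are
// placeholders; serialized_buckets() is the private counterpart used by
// save_buckets() above):
//
//     let (serialized_bucket_map, all_entry_bytes) = routing_table.serialized_buckets()?;
//     // ...persist the two blobs, or hand them straight back to rebuild state:
//     let inner = &mut *routing_table.inner.write();
//     routing_table.populate_routing_table(inner, serialized_bucket_map, all_entry_bytes)?;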
+ pub fn populate_routing_table( + &self, + inner: &mut RoutingTableInner, + serialized_bucket_map: SerializedBucketMap, + all_entry_bytes: SerializedBuckets, + ) -> EyreResult<()> { let mut all_entries: Vec> = Vec::with_capacity(all_entry_bytes.len()); for entry_bytes in all_entry_bytes { let entryinner = @@ -789,8 +815,8 @@ impl RoutingTable { e.with(rti, |_rti, e| { if let Some(ni) = e.node_info(routing_domain) { let dif = DialInfoFilter::all() - .with_protocol_type_set(ni.outbound_protocols) - .with_address_type_set(ni.address_types); + .with_protocol_type_set(ni.outbound_protocols()) + .with_address_type_set(ni.address_types()); if dial_info.matches_filter(&dif) { return true; } @@ -848,7 +874,7 @@ impl RoutingTable { // does it have some dial info we need? let filter = |n: &NodeInfo| { let mut keep = false; - for did in &n.dial_info_detail_list { + for did in n.dial_info_detail_list() { if matches!(did.dial_info.address_type(), AddressType::IPV4) { for (n, protocol_type) in protocol_types.iter().enumerate() { if nodes_proto_v4[n] < max_per_type @@ -961,6 +987,16 @@ impl RoutingTable { .find_closest_nodes(node_count, node_id, filters, transform) } + pub fn sort_and_clean_closest_noderefs( + &self, + node_id: TypedKey, + closest_nodes: &mut Vec, + ) { + self.inner + .read() + .sort_and_clean_closest_noderefs(node_id, closest_nodes) + } + #[instrument(level = "trace", skip(self), ret)] pub fn register_find_node_answer( &self, @@ -971,12 +1007,12 @@ impl RoutingTable { let mut out = Vec::::with_capacity(peers.len()); for p in peers { // Ensure we're getting back nodes we asked for - if !p.node_ids.kinds().contains(&crypto_kind) { + if !p.node_ids().kinds().contains(&crypto_kind) { continue; } // Don't register our own node - if self.matches_own_node_id(&p.node_ids) { + if self.matches_own_node_id(p.node_ids()) { continue; } diff --git a/veilid-core/src/routing_table/node_ref.rs b/veilid-core/src/routing_table/node_ref.rs index bf44ab99..1f637801 100644 --- a/veilid-core/src/routing_table/node_ref.rs +++ b/veilid-core/src/routing_table/node_ref.rs @@ -170,17 +170,17 @@ pub trait NodeRefBase: Sized { ) -> bool { self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts)) } - fn set_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) { - self.operate_mut(|_rti, e| e.set_our_node_info_ts(routing_domain, seen_ts)); + fn set_seen_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) { + self.operate_mut(|_rti, e| e.set_seen_our_node_info_ts(routing_domain, seen_ts)); } fn network_class(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class)) + self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class())) } fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols)) + self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols())) } fn address_types(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types)) + self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types())) } fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter { let mut dif = DialInfoFilter::all(); @@ -199,7 +199,7 @@ pub trait NodeRefBase: Sized { .and_then(|rpi| { // If relay is ourselves, then return None, because we can't relay through ourselves 
// and to contact this node we should have had an existing inbound connection - if rti.unlocked_inner.matches_own_node_id(&rpi.node_ids) { + if rti.unlocked_inner.matches_own_node_id(rpi.node_ids()) { return None; } diff --git a/veilid-core/src/routing_table/privacy.rs b/veilid-core/src/routing_table/privacy.rs index e4aa4c34..fc670375 100644 --- a/veilid-core/src/routing_table/privacy.rs +++ b/veilid-core/src/routing_table/privacy.rs @@ -22,6 +22,13 @@ pub enum RouteNode { } impl RouteNode { + pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + match self { + RouteNode::NodeId(_) => Ok(()), + RouteNode::PeerInfo(pi) => pi.validate(crypto), + } + } + pub fn node_ref( &self, routing_table: RoutingTable, @@ -48,10 +55,10 @@ impl RouteNode { RouteNode::NodeId(id) => { format!("{}", TypedKey::new(crypto_kind, *id)) } - RouteNode::PeerInfo(pi) => match pi.node_ids.get(crypto_kind) { + RouteNode::PeerInfo(pi) => match pi.node_ids().get(crypto_kind) { Some(id) => format!("{}", id), None => { - format!("({})?{}", crypto_kind, pi.node_ids) + format!("({})?{}", crypto_kind, pi.node_ids()) } }, } @@ -66,6 +73,11 @@ pub struct RouteHop { /// The encrypted blob to pass to the next hop as its data (None for stubs) pub next_hop: Option, } +impl RouteHop { + pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + self.node.validate(crypto) + } +} /// The kind of hops a private route can have #[derive(Clone, Debug)] @@ -78,6 +90,15 @@ pub enum PrivateRouteHops { Empty, } +impl PrivateRouteHops { + pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + match self { + PrivateRouteHops::FirstHop(rh) => rh.validate(crypto), + PrivateRouteHops::Data(_) => Ok(()), + PrivateRouteHops::Empty => Ok(()), + } + } +} /// A private route for receiver privacy #[derive(Clone, Debug)] pub struct PrivateRoute { @@ -108,6 +129,10 @@ impl PrivateRoute { } } + pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + self.hops.validate(crypto) + } + /// Check if this is a stub route pub fn is_stub(&self) -> bool { if let PrivateRouteHops::FirstHop(first_hop) = &self.hops { @@ -155,7 +180,7 @@ impl PrivateRoute { // Get the safety route to use from the spec Some(match &pr_first_hop.node { RouteNode::NodeId(n) => TypedKey::new(self.public_key.kind, *n), - RouteNode::PeerInfo(p) => p.node_ids.get(self.public_key.kind).unwrap(), + RouteNode::PeerInfo(p) => p.node_ids().get(self.public_key.kind).unwrap(), }) } } diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 302b2b6f..618da7f8 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -16,9 +16,6 @@ pub use route_spec_store_content::*; pub use route_stats::*; use crate::veilid_api::*; -use rkyv::{ - with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize, -}; /// The size of the remote private route cache const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024; diff --git a/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs b/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs index e7858816..7f5c0334 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_set_spec_detail.rs @@ -6,7 +6,6 @@ pub struct RouteSpecDetail { /// Crypto kind pub crypto_kind: CryptoKind, /// Secret key - #[with(Skip)] pub secret_key: SecretKey, /// 
Route hops (node id keys) pub hops: Vec, diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs index 7f5d79ae..3f29ba0d 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store.rs @@ -115,7 +115,7 @@ impl RouteSpecStore { dr }; - let update = VeilidUpdate::Route(VeilidStateRoute { + let update = VeilidUpdate::RouteChange(VeilidRouteChange { dead_routes, dead_remote_routes, }); @@ -1550,7 +1550,9 @@ impl RouteSpecStore { .get_root::() .map_err(RPCError::internal) .wrap_err("failed to make reader for private_route")?; - let private_route = decode_private_route(&pr_reader, crypto.clone()).wrap_err("failed to decode private route")?; + let private_route = decode_private_route(&pr_reader).wrap_err("failed to decode private route")?; + private_route.validate(crypto.clone()).wrap_err("failed to validate private route")?; + out.push(private_route); } diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs index 4b5b06ec..b193c398 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs @@ -23,7 +23,7 @@ impl RouteSpecStoreContent { let table_store = routing_table.network_manager().table_store(); let rsstdb = table_store.open("RouteSpecStore", 1).await?; let mut content: RouteSpecStoreContent = - rsstdb.load_rkyv(0, b"content")?.unwrap_or_default(); + rsstdb.load_rkyv(0, b"content").await?.unwrap_or_default(); // Look up all route hop noderefs since we can't serialize those let mut dead_ids = Vec::new(); @@ -55,47 +55,6 @@ impl RouteSpecStoreContent { content.remove_detail(&id); } - // Load secrets from pstore - let pstore = routing_table.network_manager().protected_store(); - let secret_key_map: HashMap = pstore - .load_user_secret_rkyv("RouteSpecStore") - .await? 
- .unwrap_or_default(); - - // Ensure we got secret keys for all the public keys - let mut got_secret_key_ids = HashSet::new(); - for (rsid, rssd) in content.details.iter_mut() { - let mut found_all = true; - for (pk, rsd) in rssd.iter_route_set_mut() { - if let Some(sk) = secret_key_map.get(pk) { - rsd.secret_key = *sk; - } else { - found_all = false; - break; - } - } - if found_all { - got_secret_key_ids.insert(rsid.clone()); - } - } - - // If we missed any, nuke those route ids - let dead_ids: Vec = content - .details - .keys() - .filter_map(|id| { - if !got_secret_key_ids.contains(id) { - Some(*id) - } else { - None - } - }) - .collect(); - for id in dead_ids { - log_rtab!(debug "missing secret key, killing off private route: {}", id); - content.remove_detail(&id); - } - Ok(content) } @@ -106,18 +65,6 @@ impl RouteSpecStoreContent { let rsstdb = table_store.open("RouteSpecStore", 1).await?; rsstdb.store_rkyv(0, b"content", self).await?; - // Keep secrets in protected store as well - let pstore = routing_table.network_manager().protected_store(); - - let mut out: HashMap = HashMap::new(); - for (_rsid, rssd) in self.details.iter() { - for (pk, rsd) in rssd.iter_route_set() { - out.insert(*pk, rsd.secret_key); - } - } - - let _ = pstore.save_user_secret_rkyv("RouteSpecStore", &out).await?; // ignore if this previously existed or not - Ok(()) } diff --git a/veilid-core/src/routing_table/routing_domains.rs b/veilid-core/src/routing_table/routing_domains.rs index bfd52d88..096d777d 100644 --- a/veilid-core/src/routing_table/routing_domains.rs +++ b/veilid-core/src/routing_table/routing_domains.rs @@ -102,14 +102,14 @@ impl RoutingDomainDetailCommon { } fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo { - let node_info = NodeInfo { - network_class: self.network_class.unwrap_or(NetworkClass::Invalid), - outbound_protocols: self.outbound_protocols, - address_types: self.address_types, - envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), - crypto_support: VALID_CRYPTO_KINDS.to_vec(), - dial_info_detail_list: self.dial_info_details.clone(), - }; + let node_info = NodeInfo::new( + self.network_class.unwrap_or(NetworkClass::Invalid), + self.outbound_protocols, + self.address_types, + VALID_ENVELOPE_VERSIONS.to_vec(), + VALID_CRYPTO_KINDS.to_vec(), + self.dial_info_details.clone() + ); let relay_info = self .relay_node @@ -117,8 +117,9 @@ impl RoutingDomainDetailCommon { .and_then(|rn| { let opt_relay_pi = rn.locked(rti).make_peer_info(self.routing_domain); if let Some(relay_pi) = opt_relay_pi { - match relay_pi.signed_node_info { - SignedNodeInfo::Direct(d) => Some((relay_pi.node_ids, d)), + let (relay_ids, relay_sni) = relay_pi.destructure(); + match relay_sni { + SignedNodeInfo::Direct(d) => Some((relay_ids, d)), SignedNodeInfo::Relayed(_) => { warn!("relay node should not have a relay itself! 
if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped"); None @@ -230,8 +231,8 @@ fn first_filtered_dial_info_detail( ) -> Option { let dial_info_filter = dial_info_filter.clone().filtered( &DialInfoFilter::all() - .with_address_type_set(from_node.address_types) - .with_protocol_type_set(from_node.outbound_protocols), + .with_address_type_set(from_node.address_types()) + .with_protocol_type_set(from_node.outbound_protocols()), ); // Get first filtered dialinfo @@ -278,18 +279,18 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { sequencing: Sequencing, ) -> ContactMethod { // Get the nodeinfos for convenience - let node_a = peer_a.signed_node_info.node_info(); - let node_b = peer_b.signed_node_info.node_info(); + let node_a = peer_a.signed_node_info().node_info(); + let node_b = peer_b.signed_node_info().node_info(); // Get the node ids that would be used between these peers - let cck = common_crypto_kinds(&peer_a.node_ids.kinds(), &peer_b.node_ids.kinds()); + let cck = common_crypto_kinds(&peer_a.node_ids().kinds(), &peer_b.node_ids().kinds()); let Some(best_ck) = cck.first().copied() else { // No common crypto kinds between these nodes, can't contact return ContactMethod::Unreachable; }; - //let node_a_id = peer_a.node_ids.get(best_ck).unwrap(); - let node_b_id = peer_b.node_ids.get(best_ck).unwrap(); + //let node_a_id = peer_a.node_ids().get(best_ck).unwrap(); + let node_b_id = peer_b.node_ids().get(best_ck).unwrap(); // Get the best match dial info for node B if we have it if let Some(target_did) = @@ -302,17 +303,17 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { } // Get the target's inbound relay, it must have one or it is not reachable - if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() { + if let Some(node_b_relay) = peer_b.signed_node_info().relay_info() { // Note that relay_peer_info could be node_a, in which case a connection already exists // and we only get here if the connection had dropped, in which case node_a is unreachable until // it gets a new relay connection up - if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) { + if peer_b.signed_node_info().relay_ids().contains_any(peer_a.node_ids()) { return ContactMethod::Existing; } // Get best node id to contact relay with - let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else { + let Some(node_b_relay_id) = peer_b.signed_node_info().relay_ids().get(best_ck) else { // No best relay id return ContactMethod::Unreachable; }; @@ -327,7 +328,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { .is_some() { // Can node A receive anything inbound ever? 
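The question above is what drives the rest of this hunk: with no usable direct dial info, the code chooses between a reverse connection, the target's inbound relay, or node A's own outbound relay. A condensed, self-contained sketch of that ladder follows; the variant names and boolean flags are illustrative stand-ins, not the veilid-core ContactMethod API, which also weighs dial-info filters, sequencing, and per-crypto-kind node ids.

// Simplified sketch of the contact-method decision in this routing domain.
#[derive(Debug)]
enum Contact {
    Existing,      // node B is relayed by node A already
    Direct,        // node B has dial info node A can use
    ReverseSignal, // signal B through its relay to connect back to A
    InboundRelay,  // reach B through B's inbound relay
    OutboundRelay, // last resort: go out through A's own relay
    Unreachable,
}

fn choose_contact(
    b_has_usable_dial_info: bool,
    b_has_inbound_relay: bool,
    b_relayed_by_a: bool,
    a_inbound_capable: bool,
    a_has_outbound_relay: bool,
) -> Contact {
    if b_has_usable_dial_info {
        return Contact::Direct;
    }
    if b_has_inbound_relay {
        if b_relayed_by_a {
            // The "relay" is node A itself; an inbound connection should already exist.
            return Contact::Existing;
        }
        if a_inbound_capable {
            // Ask B, via its relay, to connect back to A instead of relaying traffic.
            return Contact::ReverseSignal;
        }
        return Contact::InboundRelay;
    }
    if a_has_outbound_relay {
        return Contact::OutboundRelay;
    }
    Contact::Unreachable
}

fn main() {
    println!("{:?}", choose_contact(false, true, false, true, false)); // ReverseSignal
    println!("{:?}", choose_contact(false, false, false, false, false)); // Unreachable
}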
- if matches!(node_a.network_class, NetworkClass::InboundCapable) { + if matches!(node_a.network_class(), NetworkClass::InboundCapable) { ///////// Reverse connection // Get the best match dial info for an reverse inbound connection from node B to node A @@ -390,17 +391,17 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { } } // If the node B has no direct dial info, it needs to have an inbound relay - else if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() { + else if let Some(node_b_relay) = peer_b.signed_node_info().relay_info() { // Note that relay_peer_info could be node_a, in which case a connection already exists // and we only get here if the connection had dropped, in which case node_a is unreachable until // it gets a new relay connection up - if peer_b.signed_node_info.relay_ids().contains_any(&peer_a.node_ids) { + if peer_b.signed_node_info().relay_ids().contains_any(peer_a.node_ids()) { return ContactMethod::Existing; } // Get best node id to contact relay with - let Some(node_b_relay_id) = peer_b.signed_node_info.relay_ids().get(best_ck) else { + let Some(node_b_relay_id) = peer_b.signed_node_info().relay_ids().get(best_ck) else { // No best relay id return ContactMethod::Unreachable; }; @@ -419,7 +420,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { } // If node A can't reach the node by other means, it may need to use its own relay - if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_ids().get(best_ck) { + if let Some(node_a_relay_id) = peer_a.signed_node_info().relay_ids().get(best_ck) { return ContactMethod::OutboundRelay(node_a_relay_id); } @@ -484,8 +485,8 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { // Scope the filter down to protocols node A can do outbound let dial_info_filter = dial_info_filter.filtered( &DialInfoFilter::all() - .with_address_type_set(peer_a.signed_node_info.node_info().address_types) - .with_protocol_type_set(peer_a.signed_node_info.node_info().outbound_protocols), + .with_address_type_set(peer_a.signed_node_info().node_info().address_types()) + .with_protocol_type_set(peer_a.signed_node_info().node_info().outbound_protocols()), ); // Get first filtered dialinfo @@ -509,7 +510,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter); - let opt_target_did = peer_b.signed_node_info.node_info().first_filtered_dial_info_detail(sort, filter); + let opt_target_did = peer_b.signed_node_info().node_info().first_filtered_dial_info_detail(sort, filter); if let Some(target_did) = opt_target_did { return ContactMethod::Direct(target_did.dial_info); } diff --git a/veilid-core/src/routing_table/routing_table_inner.rs b/veilid-core/src/routing_table/routing_table_inner.rs index de736853..1d4b409c 100644 --- a/veilid-core/src/routing_table/routing_table_inner.rs +++ b/veilid-core/src/routing_table/routing_table_inner.rs @@ -171,11 +171,11 @@ impl RoutingTableInner { node_info: &NodeInfo, ) -> bool { // Should not be passing around nodeinfo with an invalid network class - if matches!(node_info.network_class, NetworkClass::Invalid) { + if matches!(node_info.network_class(), NetworkClass::Invalid) { return false; } // Ensure all of the dial info works in this routing domain - for did in &node_info.dial_info_detail_list { + for did in node_info.dial_info_detail_list() { if !self.ensure_dial_info_is_valid(routing_domain, &did.dial_info) { return false; } @@ -258,7 +258,7 @@ impl RoutingTableInner { } 
else { Some( rdd.common() - .with_peer_info(self, |pi| pi.signed_node_info.timestamp()), + .with_peer_info(self, |pi| pi.signed_node_info().timestamp()), ) } }) @@ -557,11 +557,18 @@ impl RoutingTableInner { .map(|nr| nr.same_bucket_entry(&entry)) .unwrap_or(false); if e.needs_ping(cur_ts, is_our_relay) { + debug!("needs_ping: {}", e.best_node_id()); return true; } // If we need a ping because this node hasn't seen our latest node info, then do it if let Some(own_node_info_ts) = own_node_info_ts { if !e.has_seen_our_node_info_ts(routing_domain, own_node_info_ts) { + //xxx remove this when we fix #208 + debug!( + "!has_seen_our_node_info_ts: {} own_node_info_ts={}", + e.best_node_id(), + own_node_info_ts + ); return true; } } @@ -803,37 +810,42 @@ impl RoutingTableInner { peer_info: PeerInfo, allow_invalid: bool, ) -> Option { - // if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table - if self.unlocked_inner.matches_own_node_id(&peer_info.node_ids) { + // if our own node is in the list, then ignore it as we don't add ourselves to our own routing table + if self + .unlocked_inner + .matches_own_node_id(peer_info.node_ids()) + { log_rtab!(debug "can't register own node id in routing table"); return None; } // node can not be its own relay - let rids = peer_info.signed_node_info.relay_ids(); - if self.unlocked_inner.matches_own_node_id(&rids) { + let rids = peer_info.signed_node_info().relay_ids(); + let nids = peer_info.node_ids(); + if nids.contains_any(&rids) { log_rtab!(debug "node can not be its own relay"); return None; } if !allow_invalid { // verify signature - if !peer_info.signed_node_info.has_any_signature() { - log_rtab!(debug "signed node info for {:?} has invalid signature", &peer_info.node_ids); + if !peer_info.signed_node_info().has_any_signature() { + log_rtab!(debug "signed node info for {:?} has no valid signature", peer_info.node_ids()); return None; } // verify signed node info is valid in this routing domain if !self.signed_node_info_is_valid_in_routing_domain( routing_domain, - &peer_info.signed_node_info, + peer_info.signed_node_info(), ) { - log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids, routing_domain); + log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids(), routing_domain); return None; } } - self.create_node_ref(outer_self, &peer_info.node_ids, |_rti, e| { - e.update_signed_node_info(routing_domain, peer_info.signed_node_info); + let (node_ids, signed_node_info) = peer_info.destructure(); + self.create_node_ref(outer_self, &node_ids, |_rti, e| { + e.update_signed_node_info(routing_domain, signed_node_info); }) .map(|mut nr| { nr.set_filter(Some( @@ -1149,7 +1161,6 @@ impl RoutingTableInner { let vcrypto = self.unlocked_inner.crypto().get(crypto_kind).unwrap(); // Filter to ensure entries support the crypto kind in use - let filter = Box::new( move |_rti: &RoutingTableInner, opt_entry: Option>| { if let Some(entry) = opt_entry { @@ -1205,9 +1216,6 @@ impl RoutingTableInner { }; // distance is the next metric, closer nodes first - // since multiple cryptosystems are in use, the distance for a key is the shortest - // distance to that key over all supported cryptosystems - let da = vcrypto.distance(&a_key.value, &node_id.value); let db = vcrypto.distance(&b_key.value, &node_id.value); da.cmp(&db) @@ -1218,4 +1226,71 @@ impl RoutingTableInner { log_rtab!(">> find_closest_nodes: node count = {}", out.len()); out } + + 
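The sort_and_clean_closest_noderefs method added below reuses the same ordering as find_closest_nodes above: reliable entries first, then ascending XOR distance to the target key under the target's crypto kind. A stand-alone sketch of that comparator over plain 32-byte keys; the Candidate type here is a simplified stand-in for NodeRefLocked, and the real comparator also treats identical entries as Equal and looks keys up per crypto kind.

use std::cmp::Ordering;

struct Candidate {
    key: [u8; 32],
    reliable: bool,
}

/// XOR distance, compared lexicographically (closer sorts first).
fn xor_distance(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    d
}

fn make_closest_sort(target: [u8; 32]) -> impl Fn(&Candidate, &Candidate) -> Ordering {
    move |a, b| {
        // Reliable nodes come first...
        match (a.reliable, b.reliable) {
            (true, false) => return Ordering::Less,
            (false, true) => return Ordering::Greater,
            _ => {}
        }
        // ...then closer nodes (smaller XOR distance to the target) first.
        xor_distance(&a.key, &target).cmp(&xor_distance(&b.key, &target))
    }
}

fn main() {
    let target = [0u8; 32];
    let mut candidates = vec![
        Candidate { key: [0x40; 32], reliable: false },
        Candidate { key: [0x01; 32], reliable: false },
        Candidate { key: [0x7f; 32], reliable: true },
    ];
    candidates.sort_by(make_closest_sort(target));
    // The reliable entry sorts first, then the remaining two by distance.
    assert!(candidates[0].reliable);
    assert_eq!(candidates[1].key, [0x01; 32]);
}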
pub fn sort_and_clean_closest_noderefs( + &self, + node_id: TypedKey, + closest_nodes: &mut Vec, + ) { + // Lock all noderefs + let kind = node_id.kind; + let mut closest_nodes_locked: Vec = closest_nodes + .iter() + .filter_map(|x| { + if x.node_ids().kinds().contains(&kind) { + Some(x.locked(self)) + } else { + None + } + }) + .collect(); + + // Sort closest + let sort = make_closest_noderef_sort(self.unlocked_inner.crypto(), node_id); + closest_nodes_locked.sort_by(sort); + + // Unlock noderefs + *closest_nodes = closest_nodes_locked.iter().map(|x| x.unlocked()).collect(); + } +} + +fn make_closest_noderef_sort( + crypto: Crypto, + node_id: TypedKey, +) -> impl Fn(&NodeRefLocked, &NodeRefLocked) -> core::cmp::Ordering { + let cur_ts = get_aligned_timestamp(); + let kind = node_id.kind; + // Get cryptoversion to check distance with + let vcrypto = crypto.get(kind).unwrap(); + + move |a: &NodeRefLocked, b: &NodeRefLocked| -> core::cmp::Ordering { + // same nodes are always the same + if a.same_entry(b) { + return core::cmp::Ordering::Equal; + } + + // reliable nodes come first, pessimistically treating our own node as unreliable + a.operate(|_rti, a_entry| { + b.operate(|_rti, b_entry| { + let ra = a_entry.check_reliable(cur_ts); + let rb = b_entry.check_reliable(cur_ts); + if ra != rb { + if ra { + return core::cmp::Ordering::Less; + } else { + return core::cmp::Ordering::Greater; + } + } + + // get keys + let a_key = a_entry.node_ids().get(kind).unwrap(); + let b_key = b_entry.node_ids().get(kind).unwrap(); + + // distance is the next metric, closer nodes first + let da = vcrypto.distance(&a_key.value, &node_id.value); + let db = vcrypto.distance(&b_key.value, &node_id.value); + da.cmp(&db) + }) + }) + } } diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index f467c2ba..25a51416 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -329,14 +329,15 @@ impl RoutingTable { let crypto_support = bsrec.node_ids.kinds(); // Make unsigned SignedNodeInfo - let sni = SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo { - network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable - outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled - address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable - envelope_support: bsrec.envelope_support, // Envelope support is as specified in the bootstrap list - crypto_support, // Crypto support is derived from list of node ids - dial_info_detail_list: bsrec.dial_info_details, // Dial info is as specified in the bootstrap list - })); + let sni = + SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo::new( + NetworkClass::InboundCapable, // Bootstraps are always inbound capable + ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled + AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable + bsrec.envelope_support, // Envelope support is as specified in the bootstrap list + crypto_support, // Crypto support is derived from list of node ids + bsrec.dial_info_details, // Dial info is as specified in the bootstrap list + ))); let pi = PeerInfo::new(bsrec.node_ids, sni); diff --git a/veilid-core/src/routing_table/tasks/mod.rs 
b/veilid-core/src/routing_table/tasks/mod.rs index b7841b78..cf7f7cdc 100644 --- a/veilid-core/src/routing_table/tasks/mod.rs +++ b/veilid-core/src/routing_table/tasks/mod.rs @@ -9,7 +9,7 @@ pub mod rolling_transfers; use super::*; impl RoutingTable { - pub(crate) fn start_tasks(&self) { + pub(crate) fn setup_tasks(&self) { // Set rolling transfers tick task { let this = self.clone(); @@ -176,7 +176,7 @@ impl RoutingTable { Ok(()) } - pub(crate) async fn stop_tasks(&self) { + pub(crate) async fn cancel_tasks(&self) { // Cancel all tasks being ticked debug!("stopping rolling transfers task"); if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await { diff --git a/veilid-core/src/routing_table/tasks/relay_management.rs b/veilid-core/src/routing_table/tasks/relay_management.rs index ca80b81f..3bc93145 100644 --- a/veilid-core/src/routing_table/tasks/relay_management.rs +++ b/veilid-core/src/routing_table/tasks/relay_management.rs @@ -13,8 +13,8 @@ impl RoutingTable { let Some(own_peer_info) = self.get_own_peer_info(RoutingDomain::PublicInternet) else { return Ok(()); }; - let own_node_info = own_peer_info.signed_node_info.node_info(); - let network_class = own_node_info.network_class; + let own_node_info = own_peer_info.signed_node_info().node_info(); + let network_class = own_node_info.network_class(); // Get routing domain editor let mut editor = self.edit_routing_domain(RoutingDomain::PublicInternet); diff --git a/veilid-core/src/routing_table/tests/mod.rs b/veilid-core/src/routing_table/tests/mod.rs new file mode 100644 index 00000000..209cc9d3 --- /dev/null +++ b/veilid-core/src/routing_table/tests/mod.rs @@ -0,0 +1 @@ +pub mod test_serialize; diff --git a/veilid-core/src/routing_table/tests/test_serialize.rs b/veilid-core/src/routing_table/tests/test_serialize.rs new file mode 100644 index 00000000..14e07930 --- /dev/null +++ b/veilid-core/src/routing_table/tests/test_serialize.rs @@ -0,0 +1,84 @@ +use crate::*; + +fn fake_routing_table() -> routing_table::RoutingTable { + let veilid_config = VeilidConfig::new(); + let block_store = BlockStore::new(veilid_config.clone()); + let protected_store = ProtectedStore::new(veilid_config.clone()); + let table_store = TableStore::new(veilid_config.clone(), protected_store.clone()); + let crypto = Crypto::new(veilid_config.clone(), table_store.clone()); + let storage_manager = storage_manager::StorageManager::new( + veilid_config.clone(), + crypto.clone(), + protected_store.clone(), + table_store.clone(), + block_store.clone(), + ); + let network_manager = network_manager::NetworkManager::new( + veilid_config.clone(), + storage_manager, + protected_store.clone(), + table_store.clone(), + block_store.clone(), + crypto.clone(), + ); + routing_table::RoutingTable::new(network_manager) +} + +pub async fn test_routingtable_buckets_round_trip() { + let original = fake_routing_table(); + let copy = fake_routing_table(); + original.init().await.unwrap(); + copy.init().await.unwrap(); + + // Add lots of routes to `original` here to exercise all various types. 
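The round-trip check that follows compares reconstructed entries by their Debug rendering rather than by Eq. A minimal illustration of that idiom with a toy Entry type (not the real bucket entry):

// Sketch of the comparison idiom used in the test below: format both sides
// with `{:?}` and compare the strings instead of requiring PartialEq.
#[derive(Debug)]
struct Entry {
    node_id: [u8; 4],
    seen: bool,
}

fn main() {
    let original = Entry { node_id: *b"abcd", seen: true };
    // Pretend `copy` came back from serialize -> populate on a fresh table.
    let copy = Entry { node_id: *b"abcd", seen: true };
    assert_eq!(format!("{:?}", original), format!("{:?}", copy));
}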
+ + let (serialized_bucket_map, all_entry_bytes) = original.serialized_buckets().unwrap(); + + copy.populate_routing_table( + &mut copy.inner.write(), + serialized_bucket_map, + all_entry_bytes, + ) + .unwrap(); + + // Wrap to close lifetime of 'inner' which is borrowed here so terminate() can succeed + // (it also .write() locks routing table inner) + { + let original_inner = &*original.inner.read(); + let copy_inner = &*copy.inner.read(); + + let routing_table_keys: Vec<_> = original_inner.buckets.keys().clone().collect(); + let copy_keys: Vec<_> = copy_inner.buckets.keys().clone().collect(); + + assert_eq!(routing_table_keys.len(), copy_keys.len()); + + for crypto in routing_table_keys { + // The same keys are present in the original and copy RoutingTables. + let original_buckets = original_inner.buckets.get(&crypto).unwrap(); + let copy_buckets = copy_inner.buckets.get(&crypto).unwrap(); + + // Recurse into RoutingTable.inner.buckets + for (left_buckets, right_buckets) in original_buckets.iter().zip(copy_buckets.iter()) { + // Recurse into RoutingTable.inner.buckets.entries + for ((left_crypto, left_entries), (right_crypto, right_entries)) in + left_buckets.entries().zip(right_buckets.entries()) + { + assert_eq!(left_crypto, right_crypto); + + assert_eq!( + format!("{:?}", left_entries), + format!("{:?}", right_entries) + ); + } + } + } + } + + // Even if these are mocks, we should still practice good hygiene. + original.terminate().await; + copy.terminate().await; +} + +pub async fn test_all() { + test_routingtable_buckets_round_trip().await; +} diff --git a/veilid-core/src/routing_table/types/dial_info_detail.rs b/veilid-core/src/routing_table/types/dial_info_detail.rs new file mode 100644 index 00000000..22adf233 --- /dev/null +++ b/veilid-core/src/routing_table/types/dial_info_detail.rs @@ -0,0 +1,43 @@ +use super::*; + +// Keep member order appropriate for sorting < preference +#[derive( + Debug, + Clone, + PartialEq, + PartialOrd, + Ord, + Eq, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DialInfoDetail { + pub class: DialInfoClass, + pub dial_info: DialInfo, +} + +impl MatchesDialInfoFilter for DialInfoDetail { + fn matches_filter(&self, filter: &DialInfoFilter) -> bool { + self.dial_info.matches_filter(filter) + } +} + +impl DialInfoDetail { + pub fn ordered_sequencing_sort(a: &DialInfoDetail, b: &DialInfoDetail) -> core::cmp::Ordering { + if a.class < b.class { + return core::cmp::Ordering::Less; + } + if a.class > b.class { + return core::cmp::Ordering::Greater; + } + DialInfo::ordered_sequencing_sort(&a.dial_info, &b.dial_info) + } + pub const NO_SORT: std::option::Option< + for<'r, 's> fn(&'r DialInfoDetail, &'s DialInfoDetail) -> std::cmp::Ordering, + > = None:: core::cmp::Ordering>; +} diff --git a/veilid-core/src/routing_table/types/direction.rs b/veilid-core/src/routing_table/types/direction.rs new file mode 100644 index 00000000..98f50182 --- /dev/null +++ b/veilid-core/src/routing_table/types/direction.rs @@ -0,0 +1,22 @@ +use super::*; + +#[allow(clippy::derive_hash_xor_eq)] +#[derive( + Debug, + PartialOrd, + Ord, + Hash, + EnumSetType, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[enumset(repr = "u8")] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum Direction { + Inbound, + Outbound, +} +pub type DirectionSet = EnumSet; diff --git a/veilid-core/src/routing_table/types/mod.rs 
b/veilid-core/src/routing_table/types/mod.rs new file mode 100644 index 00000000..217c5d48 --- /dev/null +++ b/veilid-core/src/routing_table/types/mod.rs @@ -0,0 +1,21 @@ +mod dial_info_detail; +mod direction; +mod node_info; +mod node_status; +mod peer_info; +mod routing_domain; +mod signed_direct_node_info; +mod signed_node_info; +mod signed_relayed_node_info; + +use super::*; + +pub use dial_info_detail::*; +pub use direction::*; +pub use node_info::*; +pub use node_status::*; +pub use peer_info::*; +pub use routing_domain::*; +pub use signed_direct_node_info::*; +pub use signed_node_info::*; +pub use signed_relayed_node_info::*; diff --git a/veilid-core/src/routing_table/types/node_info.rs b/veilid-core/src/routing_table/types/node_info.rs new file mode 100644 index 00000000..ca5cbde8 --- /dev/null +++ b/veilid-core/src/routing_table/types/node_info.rs @@ -0,0 +1,164 @@ +use super::*; + +#[derive( + Clone, Default, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct NodeInfo { + network_class: NetworkClass, + #[with(RkyvEnumSet)] + outbound_protocols: ProtocolTypeSet, + #[with(RkyvEnumSet)] + address_types: AddressTypeSet, + envelope_support: Vec, + crypto_support: Vec, + dial_info_detail_list: Vec, +} + +impl NodeInfo { + pub fn new( + network_class: NetworkClass, + outbound_protocols: ProtocolTypeSet, + address_types: AddressTypeSet, + envelope_support: Vec, + crypto_support: Vec, + dial_info_detail_list: Vec, + ) -> Self { + Self { + network_class, + outbound_protocols, + address_types, + envelope_support, + crypto_support, + dial_info_detail_list, + } + } + + pub fn network_class(&self) -> NetworkClass { + self.network_class + } + pub fn outbound_protocols(&self) -> ProtocolTypeSet { + self.outbound_protocols + } + pub fn address_types(&self) -> AddressTypeSet { + self.address_types + } + pub fn envelope_support(&self) -> &[u8] { + &self.envelope_support + } + pub fn crypto_support(&self) -> &[CryptoKind] { + &self.crypto_support + } + pub fn dial_info_detail_list(&self) -> &[DialInfoDetail] { + &self.dial_info_detail_list + } + + pub fn first_filtered_dial_info_detail( + &self, + sort: Option, + filter: F, + ) -> Option + where + S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering, + F: Fn(&DialInfoDetail) -> bool, + { + if let Some(sort) = sort { + let mut dids = self.dial_info_detail_list.clone(); + dids.sort_by(sort); + for did in dids { + if filter(&did) { + return Some(did); + } + } + } else { + for did in &self.dial_info_detail_list { + if filter(did) { + return Some(did.clone()); + } + } + }; + None + } + + pub fn all_filtered_dial_info_details( + &self, + sort: Option, + filter: F, + ) -> Vec + where + S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering, + F: Fn(&DialInfoDetail) -> bool, + { + let mut dial_info_detail_list = Vec::new(); + + if let Some(sort) = sort { + let mut dids = self.dial_info_detail_list.clone(); + dids.sort_by(sort); + for did in dids { + if filter(&did) { + dial_info_detail_list.push(did); + } + } + } else { + for did in &self.dial_info_detail_list { + if filter(did) { + dial_info_detail_list.push(did.clone()); + } + } + }; + dial_info_detail_list + } + + /// Does this node has some dial info + pub fn has_dial_info(&self) -> bool { + !self.dial_info_detail_list.is_empty() + } + + /// Is some relay required either for signal or inbound relay or outbound relay? 
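A compact sketch of the relay and signalling rules the next few methods implement, using a simplified NetworkClass and boolean stand-ins for DialInfoClass::requires_relay() and requires_signal():

#[derive(Clone, Copy)]
enum NetworkClass {
    InboundCapable,
    OutboundOnly,
    WebApp,
    Invalid,
}

struct DialInfoEntry {
    class_requires_relay: bool,
    class_requires_signal: bool,
}

/// A relay is needed when the node can't be reached inbound at all, or when
/// some of its advertised dial info still needs help.
fn requires_relay(class: NetworkClass, dial_info: &[DialInfoEntry]) -> bool {
    match class {
        NetworkClass::InboundCapable => dial_info.iter().any(|d| d.class_requires_relay),
        NetworkClass::OutboundOnly | NetworkClass::WebApp => true,
        NetworkClass::Invalid => false,
    }
}

/// A node can offer signalling only if it is inbound capable and none of its
/// own dial info requires signalling.
fn can_signal(class: NetworkClass, dial_info: &[DialInfoEntry]) -> bool {
    matches!(class, NetworkClass::InboundCapable)
        && !dial_info.iter().any(|d| d.class_requires_signal)
}

fn main() {
    let direct = [DialInfoEntry { class_requires_relay: false, class_requires_signal: false }];
    assert!(!requires_relay(NetworkClass::InboundCapable, &direct));
    assert!(can_signal(NetworkClass::InboundCapable, &direct));
    assert!(requires_relay(NetworkClass::WebApp, &direct));
}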
+ pub fn requires_relay(&self) -> bool { + match self.network_class { + NetworkClass::InboundCapable => { + for did in &self.dial_info_detail_list { + if did.class.requires_relay() { + return true; + } + } + } + NetworkClass::OutboundOnly => { + return true; + } + NetworkClass::WebApp => { + return true; + } + NetworkClass::Invalid => {} + } + false + } + + /// Can this node assist with signalling? Yes but only if it doesn't require signalling, itself. + pub fn can_signal(&self) -> bool { + // Must be inbound capable + if !matches!(self.network_class, NetworkClass::InboundCapable) { + return false; + } + // Do any of our dial info require signalling? if so, we can't offer signalling + for did in &self.dial_info_detail_list { + if did.class.requires_signal() { + return false; + } + } + true + } + + /// Can this node relay be an inbound relay? + pub fn can_inbound_relay(&self) -> bool { + // For now this is the same + self.can_signal() + } + + /// Is this node capable of validating dial info + pub fn can_validate_dial_info(&self) -> bool { + // For now this is the same + self.can_signal() + } +} diff --git a/veilid-core/src/routing_table/types/node_status.rs b/veilid-core/src/routing_table/types/node_status.rs new file mode 100644 index 00000000..11c388d0 --- /dev/null +++ b/veilid-core/src/routing_table/types/node_status.rs @@ -0,0 +1,66 @@ +use super::*; + +/// RoutingDomain-specific status for each node +/// is returned by the StatusA call + +/// PublicInternet RoutingDomain Status +#[derive( + Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PublicInternetNodeStatus { + pub will_route: bool, + pub will_tunnel: bool, + pub will_signal: bool, + pub will_relay: bool, + pub will_validate_dial_info: bool, +} + +#[derive( + Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct LocalNetworkNodeStatus { + pub will_relay: bool, + pub will_validate_dial_info: bool, +} + +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum NodeStatus { + PublicInternet(PublicInternetNodeStatus), + LocalNetwork(LocalNetworkNodeStatus), +} + +impl NodeStatus { + pub fn will_route(&self) -> bool { + match self { + NodeStatus::PublicInternet(pi) => pi.will_route, + NodeStatus::LocalNetwork(_) => false, + } + } + pub fn will_tunnel(&self) -> bool { + match self { + NodeStatus::PublicInternet(pi) => pi.will_tunnel, + NodeStatus::LocalNetwork(_) => false, + } + } + pub fn will_signal(&self) -> bool { + match self { + NodeStatus::PublicInternet(pi) => pi.will_signal, + NodeStatus::LocalNetwork(_) => false, + } + } + pub fn will_relay(&self) -> bool { + match self { + NodeStatus::PublicInternet(pi) => pi.will_relay, + NodeStatus::LocalNetwork(ln) => ln.will_relay, + } + } + pub fn will_validate_dial_info(&self) -> bool { + match self { + NodeStatus::PublicInternet(pi) => pi.will_validate_dial_info, + NodeStatus::LocalNetwork(ln) => ln.will_validate_dial_info, + } + } +} diff --git a/veilid-core/src/routing_table/types/peer_info.rs b/veilid-core/src/routing_table/types/peer_info.rs new file mode 100644 index 00000000..b7037646 --- /dev/null +++ b/veilid-core/src/routing_table/types/peer_info.rs @@ -0,0 +1,49 @@ +use super::*; + +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] 
+#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PeerInfo { + node_ids: TypedKeySet, + signed_node_info: SignedNodeInfo, +} + +impl PeerInfo { + pub fn new(node_ids: TypedKeySet, signed_node_info: SignedNodeInfo) -> Self { + assert!(node_ids.len() > 0 && node_ids.len() <= MAX_CRYPTO_KINDS); + Self { + node_ids, + signed_node_info, + } + } + + pub fn validate(&self, crypto: Crypto) -> VeilidAPIResult<()> { + let validated_node_ids = self.signed_node_info.validate(&self.node_ids, crypto)?; + if validated_node_ids.is_empty() { + // Shouldn't get here because signed node info validation also checks this + apibail_generic!("no validated node ids"); + } + Ok(()) + } + + pub fn node_ids(&self) -> &TypedKeySet { + &self.node_ids + } + pub fn signed_node_info(&self) -> &SignedNodeInfo { + &self.signed_node_info + } + pub fn destructure(self) -> (TypedKeySet, SignedNodeInfo) { + (self.node_ids, self.signed_node_info) + } + + pub fn validate_vec(peer_info_vec: &mut Vec, crypto: Crypto) { + let mut n = 0usize; + while n < peer_info_vec.len() { + let pi = peer_info_vec.get(n).unwrap(); + if pi.validate(crypto.clone()).is_err() { + peer_info_vec.remove(n); + } else { + n += 1; + } + } + } +} diff --git a/veilid-core/src/routing_table/types/routing_domain.rs b/veilid-core/src/routing_table/types/routing_domain.rs new file mode 100644 index 00000000..e1982a08 --- /dev/null +++ b/veilid-core/src/routing_table/types/routing_domain.rs @@ -0,0 +1,32 @@ +use super::*; + +// Routing domain here is listed in order of preference, keep in order +#[allow(clippy::derive_hash_xor_eq)] +#[derive( + Debug, + Ord, + PartialOrd, + Hash, + EnumSetType, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[enumset(repr = "u8")] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum RoutingDomain { + LocalNetwork = 0, + PublicInternet = 1, +} +impl RoutingDomain { + pub const fn count() -> usize { + 2 + } + pub const fn all() -> [RoutingDomain; RoutingDomain::count()] { + // Routing domain here is listed in order of preference, keep in order + [RoutingDomain::LocalNetwork, RoutingDomain::PublicInternet] + } +} +pub type RoutingDomainSet = EnumSet; diff --git a/veilid-core/src/routing_table/types/signed_direct_node_info.rs b/veilid-core/src/routing_table/types/signed_direct_node_info.rs new file mode 100644 index 00000000..e6fee40d --- /dev/null +++ b/veilid-core/src/routing_table/types/signed_direct_node_info.rs @@ -0,0 +1,93 @@ +use super::*; + +/// Signed NodeInfo that can be passed around amongst peers and verifiable +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SignedDirectNodeInfo { + node_info: NodeInfo, + timestamp: Timestamp, + signatures: Vec, +} +impl SignedDirectNodeInfo { + /// Returns a new SignedDirectNodeInfo that has its signatures validated. + /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. + /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. 
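The signatures carried here are made over a fixed byte layout assembled by make_signature_bytes further below: a canonical encoding of the NodeInfo followed by the timestamp as a little-endian u64. A sketch of that assembly, where the encoded_node_info argument stands in for the capnp-encoded NodeInfo bytes:

// Sketch of the signed byte layout: encoded node info, then timestamp (LE).
fn signature_bytes(encoded_node_info: &[u8], timestamp_us: u64) -> Vec<u8> {
    let mut out = Vec::with_capacity(encoded_node_info.len() + 8);
    out.extend_from_slice(encoded_node_info);
    out.extend_from_slice(&timestamp_us.to_le_bytes());
    out
}

fn main() {
    let encoded = b"capnp-encoded-node-info".to_vec();
    let ts: u64 = 1_685_000_000_000_000;
    let msg = signature_bytes(&encoded, ts);
    // Every node id's signature is made over (and verified against) `msg`;
    // only ids whose signatures verify are kept as validated node ids.
    assert_eq!(&msg[msg.len() - 8..], &ts.to_le_bytes()[..]);
}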
+ pub fn new(node_info: NodeInfo, timestamp: Timestamp, signatures: Vec) -> Self { + Self { + node_info, + timestamp, + signatures, + } + } + + pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult { + let node_info_bytes = Self::make_signature_bytes(&self.node_info, self.timestamp)?; + + // Verify the signatures that we can + let validated_node_ids = + crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?; + if validated_node_ids.len() == 0 { + apibail_generic!("no valid node ids in direct node info"); + } + + Ok(validated_node_ids) + } + + pub fn make_signatures( + crypto: Crypto, + typed_key_pairs: Vec, + node_info: NodeInfo, + ) -> VeilidAPIResult { + let timestamp = get_aligned_timestamp(); + let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?; + let typed_signatures = + crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { + TypedSignature::new(kp.kind, s) + })?; + Ok(Self { + node_info, + timestamp, + signatures: typed_signatures, + }) + } + + fn make_signature_bytes( + node_info: &NodeInfo, + timestamp: Timestamp, + ) -> VeilidAPIResult> { + let mut node_info_bytes = Vec::new(); + + // Add nodeinfo to signature + let mut ni_msg = ::capnp::message::Builder::new_default(); + let mut ni_builder = ni_msg.init_root::(); + encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?; + node_info_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?); + + // Add timestamp to signature + node_info_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec()); + + Ok(node_info_bytes) + } + + pub fn with_no_signature(node_info: NodeInfo) -> Self { + Self { + node_info, + timestamp: get_aligned_timestamp(), + signatures: Vec::new(), + } + } + + pub fn has_any_signature(&self) -> bool { + !self.signatures.is_empty() + } + + pub fn node_info(&self) -> &NodeInfo { + &self.node_info + } + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + pub fn signatures(&self) -> &[TypedSignature] { + &self.signatures + } +} diff --git a/veilid-core/src/routing_table/types/signed_node_info.rs b/veilid-core/src/routing_table/types/signed_node_info.rs new file mode 100644 index 00000000..5557a76a --- /dev/null +++ b/veilid-core/src/routing_table/types/signed_node_info.rs @@ -0,0 +1,96 @@ +use super::*; + +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum SignedNodeInfo { + Direct(SignedDirectNodeInfo), + Relayed(SignedRelayedNodeInfo), +} + +impl SignedNodeInfo { + pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult { + match self { + SignedNodeInfo::Direct(d) => d.validate(node_ids, crypto), + SignedNodeInfo::Relayed(r) => r.validate(node_ids, crypto), + } + } + + pub fn has_any_signature(&self) -> bool { + match self { + SignedNodeInfo::Direct(d) => d.has_any_signature(), + SignedNodeInfo::Relayed(r) => r.has_any_signature(), + } + } + + pub fn timestamp(&self) -> Timestamp { + match self { + SignedNodeInfo::Direct(d) => d.timestamp(), + SignedNodeInfo::Relayed(r) => r.timestamp(), + } + } + pub fn node_info(&self) -> &NodeInfo { + match self { + SignedNodeInfo::Direct(d) => &d.node_info(), + SignedNodeInfo::Relayed(r) => &r.node_info(), + } + } + pub fn relay_ids(&self) -> TypedKeySet { + match self { + SignedNodeInfo::Direct(_) => TypedKeySet::new(), + SignedNodeInfo::Relayed(r) => r.relay_ids().clone(), + } + } + pub fn relay_info(&self) -> 
Option<&NodeInfo> { + match self { + SignedNodeInfo::Direct(_) => None, + SignedNodeInfo::Relayed(r) => Some(r.relay_info().node_info()), + } + } + pub fn relay_peer_info(&self) -> Option { + match self { + SignedNodeInfo::Direct(_) => None, + SignedNodeInfo::Relayed(r) => Some(PeerInfo::new( + r.relay_ids().clone(), + SignedNodeInfo::Direct(r.relay_info().clone()), + )), + } + } + pub fn has_any_dial_info(&self) -> bool { + self.node_info().has_dial_info() + || self + .relay_info() + .map(|relay_ni| relay_ni.has_dial_info()) + .unwrap_or_default() + } + + pub fn has_sequencing_matched_dial_info(&self, sequencing: Sequencing) -> bool { + // Check our dial info + for did in self.node_info().dial_info_detail_list() { + match sequencing { + Sequencing::NoPreference | Sequencing::PreferOrdered => return true, + Sequencing::EnsureOrdered => { + if did.dial_info.protocol_type().is_connection_oriented() { + return true; + } + } + } + } + // Check our relay if we have one + return self + .relay_info() + .map(|relay_ni| { + for did in relay_ni.dial_info_detail_list() { + match sequencing { + Sequencing::NoPreference | Sequencing::PreferOrdered => return true, + Sequencing::EnsureOrdered => { + if did.dial_info.protocol_type().is_connection_oriented() { + return true; + } + } + } + } + false + }) + .unwrap_or_default(); + } +} diff --git a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs new file mode 100644 index 00000000..8a429e4c --- /dev/null +++ b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs @@ -0,0 +1,138 @@ +use super::*; + +/// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SignedRelayedNodeInfo { + node_info: NodeInfo, + relay_ids: TypedKeySet, + relay_info: SignedDirectNodeInfo, + timestamp: Timestamp, + signatures: Vec, +} + +impl SignedRelayedNodeInfo { + /// Returns a new SignedRelayedNodeInfo that has its signatures validated. + /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. + /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. 
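Relayed node info adds one structural rule, enforced in validate() below: the relay's crypto_support must include every crypto kind the relayed node supports. A small sketch of that superset check over four-byte kind tags, used here as a stand-in for CryptoKind:

type Kind = [u8; 4];

/// True if the relay supports at least every kind the relayed node supports.
fn relay_supports_node(node_kinds: &[Kind], relay_kinds: &[Kind]) -> bool {
    node_kinds.iter().all(|k| relay_kinds.contains(k))
}

fn main() {
    let node = [*b"VLD0"];
    let relay_ok = [*b"VLD0", *b"NONE"];
    let relay_bad = [*b"NONE"];
    assert!(relay_supports_node(&node, &relay_ok));
    assert!(!relay_supports_node(&node, &relay_bad));
}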
+ pub fn new( + node_info: NodeInfo, + relay_ids: TypedKeySet, + relay_info: SignedDirectNodeInfo, + timestamp: Timestamp, + signatures: Vec, + ) -> Self { + Self { + node_info, + relay_ids, + relay_info, + timestamp, + signatures, + } + } + + pub fn validate(&self, node_ids: &TypedKeySet, crypto: Crypto) -> VeilidAPIResult { + // Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying + if common_crypto_kinds( + self.node_info.crypto_support(), + self.relay_info.node_info().crypto_support(), + ) + .len() + != self.node_info.crypto_support().len() + { + apibail_generic!("relay should have superset of node crypto kinds"); + } + + // Verify signatures + let node_info_bytes = Self::make_signature_bytes( + &self.node_info, + &self.relay_ids, + &self.relay_info, + self.timestamp, + )?; + let validated_node_ids = + crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?; + if validated_node_ids.len() == 0 { + apibail_generic!("no valid node ids in relayed node info"); + } + Ok(validated_node_ids) + } + + pub fn make_signatures( + crypto: Crypto, + typed_key_pairs: Vec, + node_info: NodeInfo, + relay_ids: TypedKeySet, + relay_info: SignedDirectNodeInfo, + ) -> VeilidAPIResult { + let timestamp = get_aligned_timestamp(); + let node_info_bytes = + Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?; + let typed_signatures = + crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { + TypedSignature::new(kp.kind, s) + })?; + Ok(Self { + node_info, + relay_ids, + relay_info, + timestamp, + signatures: typed_signatures, + }) + } + + fn make_signature_bytes( + node_info: &NodeInfo, + relay_ids: &[TypedKey], + relay_info: &SignedDirectNodeInfo, + timestamp: Timestamp, + ) -> VeilidAPIResult> { + let mut sig_bytes = Vec::new(); + + // Add nodeinfo to signature + let mut ni_msg = ::capnp::message::Builder::new_default(); + let mut ni_builder = ni_msg.init_root::(); + encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?; + sig_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?); + + // Add relay ids to signature + for relay_id in relay_ids { + let mut rid_msg = ::capnp::message::Builder::new_default(); + let mut rid_builder = rid_msg.init_root::(); + encode_typed_key(relay_id, &mut rid_builder); + sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?); + } + + // Add relay info to signature + let mut ri_msg = ::capnp::message::Builder::new_default(); + let mut ri_builder = ri_msg.init_root::(); + encode_signed_direct_node_info(relay_info, &mut ri_builder) + .map_err(VeilidAPIError::internal)?; + sig_bytes.append(&mut builder_to_vec(ri_msg).map_err(VeilidAPIError::internal)?); + + // Add timestamp to signature + sig_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec()); + + Ok(sig_bytes) + } + + pub fn has_any_signature(&self) -> bool { + !self.signatures.is_empty() + } + + pub fn node_info(&self) -> &NodeInfo { + &self.node_info + } + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + pub fn relay_ids(&self) -> &TypedKeySet { + &self.relay_ids + } + pub fn relay_info(&self) -> &SignedDirectNodeInfo { + &self.relay_info + } + pub fn signatures(&self) -> &[TypedSignature] { + &self.signatures + } +} diff --git a/veilid-core/src/rpc_processor/coders/mod.rs b/veilid-core/src/rpc_processor/coders/mod.rs index 8c8ce160..34bc7913 100644 --- a/veilid-core/src/rpc_processor/coders/mod.rs +++ 
b/veilid-core/src/rpc_processor/coders/mod.rs @@ -19,11 +19,12 @@ mod signature512; mod signed_direct_node_info; mod signed_node_info; mod signed_relayed_node_info; +mod signed_value_data; +mod signed_value_descriptor; mod socket_address; mod tunnel; mod typed_key; mod typed_signature; -mod value_data; pub use address::*; pub use address_type_set::*; @@ -46,10 +47,24 @@ pub use signature512::*; pub use signed_direct_node_info::*; pub use signed_node_info::*; pub use signed_relayed_node_info::*; +pub use signed_value_data::*; +pub use signed_value_descriptor::*; pub use socket_address::*; pub use tunnel::*; pub use typed_key::*; pub use typed_signature::*; -pub use value_data::*; use super::*; + +#[derive(Debug, Clone)] +pub enum QuestionContext { + GetValue(ValidateGetValueContext), + SetValue(ValidateSetValueContext), +} + +#[derive(Clone)] +pub struct RPCValidateContext { + pub crypto: Crypto, + pub rpc_processor: RPCProcessor, + pub question_context: Option, +} diff --git a/veilid-core/src/rpc_processor/coders/node_info.rs b/veilid-core/src/rpc_processor/coders/node_info.rs index 6d2d87c6..874a7e6b 100644 --- a/veilid-core/src/rpc_processor/coders/node_info.rs +++ b/veilid-core/src/rpc_processor/coders/node_info.rs @@ -4,27 +4,27 @@ pub fn encode_node_info( node_info: &NodeInfo, builder: &mut veilid_capnp::node_info::Builder, ) -> Result<(), RPCError> { - builder.set_network_class(encode_network_class(node_info.network_class)); + builder.set_network_class(encode_network_class(node_info.network_class())); let mut ps_builder = builder.reborrow().init_outbound_protocols(); - encode_protocol_type_set(&node_info.outbound_protocols, &mut ps_builder)?; + encode_protocol_type_set(&node_info.outbound_protocols(), &mut ps_builder)?; let mut ats_builder = builder.reborrow().init_address_types(); - encode_address_type_set(&node_info.address_types, &mut ats_builder)?; + encode_address_type_set(&node_info.address_types(), &mut ats_builder)?; let mut es_builder = builder .reborrow() - .init_envelope_support(node_info.envelope_support.len() as u32); + .init_envelope_support(node_info.envelope_support().len() as u32); if let Some(s) = es_builder.as_slice() { - s.clone_from_slice(&node_info.envelope_support); + s.clone_from_slice(&node_info.envelope_support()); } let mut cs_builder = builder .reborrow() - .init_crypto_support(node_info.crypto_support.len() as u32); + .init_crypto_support(node_info.crypto_support().len() as u32); if let Some(s) = cs_builder.as_slice() { let csvec: Vec = node_info - .crypto_support + .crypto_support() .iter() .map(|x| u32::from_be_bytes(x.0)) .collect(); @@ -33,7 +33,7 @@ pub fn encode_node_info( let mut didl_builder = builder.reborrow().init_dial_info_detail_list( node_info - .dial_info_detail_list + .dial_info_detail_list() .len() .try_into() .map_err(RPCError::map_protocol( @@ -41,9 +41,9 @@ pub fn encode_node_info( ))?, ); - for idx in 0..node_info.dial_info_detail_list.len() { + for idx in 0..node_info.dial_info_detail_list().len() { let mut did_builder = didl_builder.reborrow().get(idx as u32); - encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?; + encode_dial_info_detail(&node_info.dial_info_detail_list()[idx], &mut did_builder)?; } Ok(()) @@ -131,12 +131,12 @@ pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result Self { Self { detail } } - pub fn into_detail(self) -> RPCAnswerDetail { - self.detail + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + 
self.detail.validate(validate_context) } pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode( - reader: &veilid_capnp::answer::Reader, - crypto: Crypto, - ) -> Result { + pub fn destructure(self) -> RPCAnswerDetail { + self.detail + } + pub fn decode(reader: &veilid_capnp::answer::Reader) -> Result { let d_reader = reader.get_detail(); - let detail = RPCAnswerDetail::decode(&d_reader, crypto)?; + let detail = RPCAnswerDetail::decode(&d_reader)?; Ok(RPCAnswer { detail }) } pub fn encode(&self, builder: &mut veilid_capnp::answer::Builder) -> Result<(), RPCError> { @@ -60,10 +60,23 @@ impl RPCAnswerDetail { RPCAnswerDetail::CancelTunnelA(_) => "CancelTunnelA", } } - + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + match self { + RPCAnswerDetail::StatusA(r) => r.validate(validate_context), + RPCAnswerDetail::FindNodeA(r) => r.validate(validate_context), + RPCAnswerDetail::AppCallA(r) => r.validate(validate_context), + RPCAnswerDetail::GetValueA(r) => r.validate(validate_context), + RPCAnswerDetail::SetValueA(r) => r.validate(validate_context), + RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context), + RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context), + RPCAnswerDetail::FindBlockA(r) => r.validate(validate_context), + RPCAnswerDetail::StartTunnelA(r) => r.validate(validate_context), + RPCAnswerDetail::CompleteTunnelA(r) => r.validate(validate_context), + RPCAnswerDetail::CancelTunnelA(r) => r.validate(validate_context), + } + } pub fn decode( reader: &veilid_capnp::answer::detail::Reader, - crypto: Crypto, ) -> Result { let which_reader = reader.which().map_err(RPCError::protocol)?; let out = match which_reader { @@ -74,7 +87,7 @@ impl RPCAnswerDetail { } veilid_capnp::answer::detail::FindNodeA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationFindNodeA::decode(&op_reader, crypto)?; + let out = RPCOperationFindNodeA::decode(&op_reader)?; RPCAnswerDetail::FindNodeA(out) } veilid_capnp::answer::detail::AppCallA(r) => { @@ -84,27 +97,27 @@ impl RPCAnswerDetail { } veilid_capnp::answer::detail::GetValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationGetValueA::decode(&op_reader, crypto)?; + let out = RPCOperationGetValueA::decode(&op_reader)?; RPCAnswerDetail::GetValueA(out) } veilid_capnp::answer::detail::SetValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSetValueA::decode(&op_reader, crypto)?; + let out = RPCOperationSetValueA::decode(&op_reader)?; RPCAnswerDetail::SetValueA(out) } veilid_capnp::answer::detail::WatchValueA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationWatchValueA::decode(&op_reader, crypto)?; + let out = RPCOperationWatchValueA::decode(&op_reader)?; RPCAnswerDetail::WatchValueA(out) } veilid_capnp::answer::detail::SupplyBlockA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSupplyBlockA::decode(&op_reader, crypto)?; + let out = RPCOperationSupplyBlockA::decode(&op_reader)?; RPCAnswerDetail::SupplyBlockA(out) } veilid_capnp::answer::detail::FindBlockA(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationFindBlockA::decode(&op_reader, crypto)?; + let out = RPCOperationFindBlockA::decode(&op_reader)?; RPCAnswerDetail::FindBlockA(out) } veilid_capnp::answer::detail::StartTunnelA(r) => { diff --git a/veilid-core/src/rpc_processor/coders/operations/operation.rs 
b/veilid-core/src/rpc_processor/coders/operations/operation.rs index d27b4d88..9118c2cb 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation.rs @@ -16,25 +16,30 @@ impl RPCOperationKind { } } - pub fn decode( - kind_reader: &veilid_capnp::operation::kind::Reader, - crypto: Crypto, - ) -> Result { + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + match self { + RPCOperationKind::Question(r) => r.validate(validate_context), + RPCOperationKind::Statement(r) => r.validate(validate_context), + RPCOperationKind::Answer(r) => r.validate(validate_context), + } + } + + pub fn decode(kind_reader: &veilid_capnp::operation::kind::Reader) -> Result { let which_reader = kind_reader.which().map_err(RPCError::protocol)?; let out = match which_reader { veilid_capnp::operation::kind::Which::Question(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCQuestion::decode(&q_reader, crypto)?; + let out = RPCQuestion::decode(&q_reader)?; RPCOperationKind::Question(out) } veilid_capnp::operation::kind::Which::Statement(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCStatement::decode(&q_reader, crypto)?; + let out = RPCStatement::decode(&q_reader)?; RPCOperationKind::Statement(out) } veilid_capnp::operation::kind::Which::Answer(r) => { let q_reader = r.map_err(RPCError::protocol)?; - let out = RPCAnswer::decode(&q_reader, crypto)?; + let out = RPCAnswer::decode(&q_reader)?; RPCOperationKind::Answer(out) } }; @@ -93,6 +98,17 @@ impl RPCOperation { } } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + // Validate sender peer info + if let Some(sender_peer_info) = &self.opt_sender_peer_info { + sender_peer_info + .validate(validate_context.crypto.clone()) + .map_err(RPCError::protocol)?; + } + // Validate operation kind + self.kind.validate(validate_context) + } + pub fn op_id(&self) -> OperationId { self.op_id } @@ -108,21 +124,23 @@ impl RPCOperation { &self.kind } - pub fn into_kind(self) -> RPCOperationKind { - self.kind + pub fn destructure(self) -> (OperationId, Option, Timestamp, RPCOperationKind) { + ( + self.op_id, + self.opt_sender_peer_info, + self.target_node_info_ts, + self.kind, + ) } - pub fn decode( - operation_reader: &veilid_capnp::operation::Reader, - crypto: Crypto, - ) -> Result { + pub fn decode(operation_reader: &veilid_capnp::operation::Reader) -> Result { let op_id = OperationId::new(operation_reader.get_op_id()); let sender_peer_info = if operation_reader.has_sender_peer_info() { let pi_reader = operation_reader .get_sender_peer_info() .map_err(RPCError::protocol)?; - let pi = decode_peer_info(&pi_reader, crypto.clone())?; + let pi = decode_peer_info(&pi_reader)?; Some(pi) } else { None @@ -131,7 +149,7 @@ impl RPCOperation { let target_node_info_ts = Timestamp::new(operation_reader.get_target_node_info_ts()); let kind_reader = operation_reader.get_kind(); - let kind = RPCOperationKind::decode(&kind_reader, crypto)?; + let kind = RPCOperationKind::decode(&kind_reader)?; Ok(RPCOperation { op_id, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_app_call.rs b/veilid-core/src/rpc_processor/coders/operations/operation_app_call.rs index b1360b9a..ef996b95 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_app_call.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_app_call.rs @@ -1,16 +1,40 @@ use super::*; +const 
MAX_APP_CALL_Q_MESSAGE_LEN: usize = 32768; +const MAX_APP_CALL_A_MESSAGE_LEN: usize = 32768; + #[derive(Debug, Clone)] pub struct RPCOperationAppCallQ { - pub message: Vec, + message: Vec, } impl RPCOperationAppCallQ { - pub fn decode( - reader: &veilid_capnp::operation_app_call_q::Reader, - ) -> Result { - let message = reader.get_message().map_err(RPCError::protocol)?.to_vec(); - Ok(RPCOperationAppCallQ { message }) + pub fn new(message: Vec) -> Result { + if message.len() > MAX_APP_CALL_Q_MESSAGE_LEN { + return Err(RPCError::protocol("AppCallQ message too long to set")); + } + Ok(Self { message }) + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn message(&self) -> &[u8] { + // &self.message + // } + + pub fn destructure(self) -> Vec { + self.message + } + + pub fn decode(reader: &veilid_capnp::operation_app_call_q::Reader) -> Result { + let mr = reader.get_message().map_err(RPCError::protocol)?; + if mr.len() > MAX_APP_CALL_Q_MESSAGE_LEN { + return Err(RPCError::protocol("AppCallQ message too long to set")); + } + Ok(Self { + message: mr.to_vec(), + }) } pub fn encode( &self, @@ -23,15 +47,37 @@ impl RPCOperationAppCallQ { #[derive(Debug, Clone)] pub struct RPCOperationAppCallA { - pub message: Vec, + message: Vec, } impl RPCOperationAppCallA { - pub fn decode( - reader: &veilid_capnp::operation_app_call_a::Reader, - ) -> Result { - let message = reader.get_message().map_err(RPCError::protocol)?.to_vec(); - Ok(RPCOperationAppCallA { message }) + pub fn new(message: Vec) -> Result { + if message.len() > MAX_APP_CALL_A_MESSAGE_LEN { + return Err(RPCError::protocol("AppCallA message too long to set")); + } + Ok(Self { message }) + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn message(&self) -> &[u8] { + // &self.message + // } + + pub fn destructure(self) -> Vec { + self.message + } + + pub fn decode(reader: &veilid_capnp::operation_app_call_a::Reader) -> Result { + let mr = reader.get_message().map_err(RPCError::protocol)?; + if mr.len() > MAX_APP_CALL_A_MESSAGE_LEN { + return Err(RPCError::protocol("AppCallA message too long to set")); + } + Ok(Self { + message: mr.to_vec(), + }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_app_message.rs b/veilid-core/src/rpc_processor/coders/operations/operation_app_message.rs index 5c969be7..b25ef5f6 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_app_message.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_app_message.rs @@ -1,16 +1,39 @@ use super::*; +const MAX_APP_MESSAGE_MESSAGE_LEN: usize = 32768; + #[derive(Debug, Clone)] pub struct RPCOperationAppMessage { - pub message: Vec, + message: Vec, } impl RPCOperationAppMessage { - pub fn decode( - reader: &veilid_capnp::operation_app_message::Reader, - ) -> Result { - let message = reader.get_message().map_err(RPCError::protocol)?.to_vec(); - Ok(RPCOperationAppMessage { message }) + pub fn new(message: Vec) -> Result { + if message.len() > MAX_APP_MESSAGE_MESSAGE_LEN { + return Err(RPCError::protocol("AppMessage message too long to set")); + } + Ok(Self { message }) + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn message(&self) -> &[u8] { + // &self.message + // } + pub fn destructure(self) -> Vec { + self.message + } + + pub fn decode(reader: 
&veilid_capnp::operation_app_message::Reader) -> Result { + let mr = reader.get_message().map_err(RPCError::protocol)?; + if mr.len() > MAX_APP_MESSAGE_MESSAGE_LEN { + return Err(RPCError::protocol("AppMessage message too long to set")); + } + Ok(Self { + message: mr.to_vec(), + }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_cancel_tunnel.rs b/veilid-core/src/rpc_processor/coders/operations/operation_cancel_tunnel.rs index 3484ab0c..4bb6ac06 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_cancel_tunnel.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_cancel_tunnel.rs @@ -2,16 +2,30 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationCancelTunnelQ { - pub id: TunnelId, + id: TunnelId, } impl RPCOperationCancelTunnelQ { + pub fn new(id: TunnelId) -> Self { + Self { id } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + pub fn id(&self) -> TunnelId { + self.id + } + + pub fn destructure(self) -> TunnelId { + self.id + } + pub fn decode( reader: &veilid_capnp::operation_cancel_tunnel_q::Reader, - ) -> Result { + ) -> Result { let id = TunnelId::new(reader.get_id()); - - Ok(RPCOperationCancelTunnelQ { id }) + Ok(Self { id }) } pub fn encode( &self, @@ -30,16 +44,25 @@ pub enum RPCOperationCancelTunnelA { } impl RPCOperationCancelTunnelA { + pub fn new_tunnel(id: TunnelId) -> Self { + Self::Tunnel(id) + } + pub fn new_error(error: TunnelError) -> Self { + Self::Error(error) + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } pub fn decode( reader: &veilid_capnp::operation_cancel_tunnel_a::Reader, - ) -> Result { + ) -> Result { match reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::operation_cancel_tunnel_a::Which::Tunnel(r) => { - Ok(RPCOperationCancelTunnelA::Tunnel(TunnelId::new(r))) + Ok(Self::Tunnel(TunnelId::new(r))) } veilid_capnp::operation_cancel_tunnel_a::Which::Error(r) => { let tunnel_error = decode_tunnel_error(r.map_err(RPCError::protocol)?); - Ok(RPCOperationCancelTunnelA::Error(tunnel_error)) + Ok(Self::Error(tunnel_error)) } } } @@ -48,10 +71,10 @@ impl RPCOperationCancelTunnelA { builder: &mut veilid_capnp::operation_cancel_tunnel_a::Builder, ) -> Result<(), RPCError> { match self { - RPCOperationCancelTunnelA::Tunnel(p) => { + Self::Tunnel(p) => { builder.set_tunnel(p.as_u64()); } - RPCOperationCancelTunnelA::Error(e) => { + Self::Error(e) => { builder.set_error(encode_tunnel_error(*e)); } } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_complete_tunnel.rs b/veilid-core/src/rpc_processor/coders/operations/operation_complete_tunnel.rs index e4737a2d..46b0258a 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_complete_tunnel.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_complete_tunnel.rs @@ -2,16 +2,45 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationCompleteTunnelQ { - pub id: TunnelId, - pub local_mode: TunnelMode, - pub depth: u8, - pub endpoint: TunnelEndpoint, + id: TunnelId, + local_mode: TunnelMode, + depth: u8, + endpoint: TunnelEndpoint, } impl RPCOperationCompleteTunnelQ { + pub fn new(id: TunnelId, local_mode: TunnelMode, depth: u8, endpoint: TunnelEndpoint) -> Self { + Self { + id, + local_mode, + depth, + endpoint, + } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + pub fn id(&self) -> TunnelId { + self.id + } + + pub fn local_mode(&self) -> TunnelMode { + self.local_mode + } + pub fn depth(&self) -> u8 { + self.depth + } + pub fn endpoint(&self) -> &TunnelEndpoint { + &self.endpoint + } + pub fn destructure(self) -> (TunnelId, TunnelMode, u8, TunnelEndpoint) { + (self.id, self.local_mode, self.depth, self.endpoint) + } + pub fn decode( reader: &veilid_capnp::operation_complete_tunnel_q::Reader, - ) -> Result { + ) -> Result { let id = TunnelId::new(reader.get_id()); let local_mode = match reader.get_local_mode().map_err(RPCError::protocol)? { veilid_capnp::TunnelEndpointMode::Raw => TunnelMode::Raw, @@ -21,7 +50,7 @@ impl RPCOperationCompleteTunnelQ { let te_reader = reader.get_endpoint().map_err(RPCError::protocol)?; let endpoint = decode_tunnel_endpoint(&te_reader)?; - Ok(RPCOperationCompleteTunnelQ { + Ok(Self { id, local_mode, depth, @@ -52,18 +81,28 @@ pub enum RPCOperationCompleteTunnelA { } impl RPCOperationCompleteTunnelA { + pub fn new_tunnel(tunnel: FullTunnel) -> Self { + Self::Tunnel(tunnel) + } + pub fn new_error(error: TunnelError) -> Self { + Self::Error(error) + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + pub fn decode( reader: &veilid_capnp::operation_complete_tunnel_a::Reader, - ) -> Result { + ) -> Result { match reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::operation_complete_tunnel_a::Which::Tunnel(r) => { let ft_reader = r.map_err(RPCError::protocol)?; let full_tunnel = decode_full_tunnel(&ft_reader)?; - Ok(RPCOperationCompleteTunnelA::Tunnel(full_tunnel)) + Ok(Self::Tunnel(full_tunnel)) } veilid_capnp::operation_complete_tunnel_a::Which::Error(r) => { let tunnel_error = decode_tunnel_error(r.map_err(RPCError::protocol)?); - Ok(RPCOperationCompleteTunnelA::Error(tunnel_error)) + Ok(Self::Error(tunnel_error)) } } } @@ -72,10 +111,10 @@ impl RPCOperationCompleteTunnelA { builder: &mut veilid_capnp::operation_complete_tunnel_a::Builder, ) -> Result<(), RPCError> { match self { - RPCOperationCompleteTunnelA::Tunnel(p) => { + Self::Tunnel(p) => { encode_full_tunnel(p, &mut builder.reborrow().init_tunnel())?; } - RPCOperationCompleteTunnelA::Error(e) => { + Self::Error(e) => { builder.set_error(encode_tunnel_error(*e)); } } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs b/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs index b5ecab45..b6897558 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_find_block.rs @@ -1,18 +1,37 @@ use super::*; +const MAX_FIND_BLOCK_A_DATA_LEN: usize = 32768; +const MAX_FIND_BLOCK_A_SUPPLIERS_LEN: usize = 10; +const MAX_FIND_BLOCK_A_PEERS_LEN: usize = 10; + #[derive(Debug, Clone)] pub struct RPCOperationFindBlockQ { - pub block_id: TypedKey, + block_id: TypedKey, } impl RPCOperationFindBlockQ { + pub fn new(block_id: TypedKey) -> Self { + Self { block_id } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + pub fn block_id(&self) -> TypedKey { + self.block_id + } + + pub fn destructure(self) -> TypedKey { + self.block_id + } + pub fn decode( reader: &veilid_capnp::operation_find_block_q::Reader, ) -> Result { let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?; let block_id = decode_typed_key(&bi_reader)?; - Ok(RPCOperationFindBlockQ { block_id }) + Ok(Self { block_id }) } pub fn encode( &self, @@ -27,19 +46,68 @@ impl RPCOperationFindBlockQ { #[derive(Debug, Clone)] pub struct RPCOperationFindBlockA { - pub data: Vec, - pub suppliers: Vec, - pub peers: Vec, + data: Vec, + suppliers: Vec, + peers: Vec, } impl RPCOperationFindBlockA { - pub fn decode( - reader: &veilid_capnp::operation_find_block_a::Reader, - crypto: Crypto, - ) -> Result { - let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); + pub fn new( + data: Vec, + suppliers: Vec, + peers: Vec, + ) -> Result { + if data.len() > MAX_FIND_BLOCK_A_DATA_LEN { + return Err(RPCError::protocol("find block data length too long")); + } + if suppliers.len() > MAX_FIND_BLOCK_A_SUPPLIERS_LEN { + return Err(RPCError::protocol("find block suppliers length too long")); + } + if peers.len() > MAX_FIND_BLOCK_A_PEERS_LEN { + return Err(RPCError::protocol("find block peers length too long")); + } + + Ok(Self { + data, + suppliers, + peers, + }) + } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + PeerInfo::validate_vec(&mut self.suppliers, validate_context.crypto.clone()); + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + + pub fn data(&self) -> &[u8] { + &self.data + } + pub fn suppliers(&self) -> &[PeerInfo] { + &self.suppliers + } + pub fn peers(&self) -> &[PeerInfo] { + &self.peers + } + + pub fn destructure(self) -> (Vec, 
Vec, Vec) { + (self.data, self.suppliers, self.peers) + } + pub fn decode(reader: &veilid_capnp::operation_find_block_a::Reader) -> Result { + let data = reader.get_data().map_err(RPCError::protocol)?; + if data.len() > MAX_FIND_BLOCK_A_DATA_LEN { + return Err(RPCError::protocol("find block data length too long")); + } let suppliers_reader = reader.get_suppliers().map_err(RPCError::protocol)?; + if suppliers_reader.len() as usize > MAX_FIND_BLOCK_A_SUPPLIERS_LEN { + return Err(RPCError::protocol("find block suppliers length too long")); + } + + let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + if peers_reader.len() as usize > MAX_FIND_BLOCK_A_PEERS_LEN { + return Err(RPCError::protocol("find block peers length too long")); + } + let mut suppliers = Vec::::with_capacity( suppliers_reader .len() @@ -47,11 +115,10 @@ impl RPCOperationFindBlockA { .map_err(RPCError::map_internal("too many suppliers"))?, ); for s in suppliers_reader.iter() { - let peer_info = decode_peer_info(&s, crypto.clone())?; + let peer_info = decode_peer_info(&s)?; suppliers.push(peer_info); } - let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; let mut peers = Vec::::with_capacity( peers_reader .len() @@ -59,12 +126,12 @@ impl RPCOperationFindBlockA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; + let peer_info = decode_peer_info(&p)?; peers.push(peer_info); } - Ok(RPCOperationFindBlockA { - data, + Ok(Self { + data: data.to_vec(), suppliers, peers, }) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs index c3511efa..607dacbc 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_find_node.rs @@ -1,17 +1,32 @@ use super::*; +const MAX_FIND_NODE_A_PEERS_LEN: usize = 20; + #[derive(Debug, Clone)] pub struct RPCOperationFindNodeQ { - pub node_id: TypedKey, + node_id: TypedKey, } impl RPCOperationFindNodeQ { - pub fn decode( - reader: &veilid_capnp::operation_find_node_q::Reader, - ) -> Result { + pub fn new(node_id: TypedKey) -> Self { + Self { node_id } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn node_id(&self) -> &TypedKey { + // &self.node_id + // } + + pub fn destructure(self) -> TypedKey { + self.node_id + } + + pub fn decode(reader: &veilid_capnp::operation_find_node_q::Reader) -> Result { let ni_reader = reader.get_node_id().map_err(RPCError::protocol)?; let node_id = decode_typed_key(&ni_reader)?; - Ok(RPCOperationFindNodeQ { node_id }) + Ok(Self { node_id }) } pub fn encode( &self, @@ -25,15 +40,40 @@ impl RPCOperationFindNodeQ { #[derive(Debug, Clone)] pub struct RPCOperationFindNodeA { - pub peers: Vec, + peers: Vec, } impl RPCOperationFindNodeA { + pub fn new(peers: Vec) -> Result { + if peers.len() > MAX_FIND_NODE_A_PEERS_LEN { + return Err(RPCError::protocol("find node peers length too long")); + } + + Ok(Self { peers }) + } + + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + + // pub fn peers(&self) -> &[PeerInfo] { + // &self.peers + // } + + pub fn destructure(self) -> Vec { + self.peers + } + pub fn decode( reader: &veilid_capnp::operation_find_node_a::Reader, - 
crypto: Crypto, ) -> Result { let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + + if peers_reader.len() as usize > MAX_FIND_NODE_A_PEERS_LEN { + return Err(RPCError::protocol("find node peers length too long")); + } + let mut peers = Vec::::with_capacity( peers_reader .len() @@ -41,11 +81,11 @@ impl RPCOperationFindNodeA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; + let peer_info = decode_peer_info(&p)?; peers.push(peer_info); } - Ok(RPCOperationFindNodeA { peers }) + Ok(Self { peers }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs index 5db1f993..4261e461 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs @@ -1,19 +1,67 @@ use super::*; +use crate::storage_manager::{SignedValueData, SignedValueDescriptor}; + +const MAX_GET_VALUE_A_PEERS_LEN: usize = 20; + +#[derive(Clone)] +pub struct ValidateGetValueContext { + pub last_descriptor: Option, + pub subkey: ValueSubkey, + pub vcrypto: CryptoSystemVersion, +} + +impl fmt::Debug for ValidateGetValueContext { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ValidateGetValueContext") + .field("last_descriptor", &self.last_descriptor) + .field("subkey", &self.subkey) + .field("vcrypto", &self.vcrypto.kind().to_string()) + .finish() + } +} #[derive(Debug, Clone)] pub struct RPCOperationGetValueQ { - pub key: TypedKey, - pub subkey: ValueSubkey, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, } impl RPCOperationGetValueQ { - pub fn decode( - reader: &veilid_capnp::operation_get_value_q::Reader, - ) -> Result { - let k_reader = reader.get_key().map_err(RPCError::protocol)?; + pub fn new(key: TypedKey, subkey: ValueSubkey, want_descriptor: bool) -> Self { + Self { + key, + subkey, + want_descriptor, + } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn key(&self) -> &TypedKey { + // &self.key + // } + // pub fn subkey(&self) -> ValueSubkey { + // self.subkey + // } + // pub fn want_descriptor(&self) -> bool { + // self.want_descriptor + // } + pub fn destructure(self) -> (TypedKey, ValueSubkey, bool) { + (self.key, self.subkey, self.want_descriptor) + } + + pub fn decode(reader: &veilid_capnp::operation_get_value_q::Reader) -> Result { + let k_reader = reader.reborrow().get_key().map_err(RPCError::protocol)?; let key = decode_typed_key(&k_reader)?; - let subkey = reader.get_subkey(); - Ok(RPCOperationGetValueQ { key, subkey }) + let subkey = reader.reborrow().get_subkey(); + let want_descriptor = reader.reborrow().get_want_descriptor(); + Ok(Self { + key, + subkey, + want_descriptor, + }) } pub fn encode( &self, @@ -22,64 +70,171 @@ impl RPCOperationGetValueQ { let mut k_builder = builder.reborrow().init_key(); encode_typed_key(&self.key, &mut k_builder); builder.set_subkey(self.subkey); + builder.set_want_descriptor(self.want_descriptor); Ok(()) } } #[derive(Debug, Clone)] -pub enum RPCOperationGetValueA { - Data(ValueData), - Peers(Vec), +pub struct RPCOperationGetValueA { + value: Option, + peers: Vec, + descriptor: Option, } impl RPCOperationGetValueA { - pub fn decode( - reader: &veilid_capnp::operation_get_value_a::Reader, - crypto: Crypto, - ) -> Result { - match 
reader.which().map_err(RPCError::protocol)? { - veilid_capnp::operation_get_value_a::Which::Data(r) => { - let data = decode_value_data(&r.map_err(RPCError::protocol)?)?; - Ok(RPCOperationGetValueA::Data(data)) - } - veilid_capnp::operation_get_value_a::Which::Peers(r) => { - let peers_reader = r.map_err(RPCError::protocol)?; - let mut peers = Vec::::with_capacity( - peers_reader - .len() - .try_into() - .map_err(RPCError::map_internal("too many peers"))?, - ); - for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; - peers.push(peer_info); - } + pub fn new( + value: Option, + peers: Vec, + descriptor: Option, + ) -> Result { + if peers.len() > MAX_GET_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("GetValueA peers length too long")); + } + if descriptor.is_some() && !value.is_some() { + return Err(RPCError::protocol( + "GetValueA should not return descriptor without value", + )); + } + Ok(Self { + value, + peers, + descriptor, + }) + } - Ok(RPCOperationGetValueA::Peers(peers)) + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + let question_context = validate_context + .question_context + .as_ref() + .expect("GetValueA requires question context"); + let QuestionContext::GetValue(get_value_context) = question_context else { + panic!("Wrong context type for GetValueA"); + }; + + if let Some(value) = &self.value { + // Get descriptor to validate with + let descriptor = if let Some(descriptor) = &self.descriptor { + if let Some(last_descriptor) = &get_value_context.last_descriptor { + if descriptor.cmp_no_sig(last_descriptor) != cmp::Ordering::Equal { + return Err(RPCError::protocol( + "getvalue descriptor does not match last descriptor", + )); + } + } + descriptor + } else { + let Some(descriptor) = &get_value_context.last_descriptor else { + return Err(RPCError::protocol( + "no last descriptor, requires a descriptor", + )); + }; + descriptor + }; + // Ensure the descriptor itself validates + descriptor + .validate(get_value_context.vcrypto.clone()) + .map_err(RPCError::protocol)?; + + // And the signed value data + value + .validate( + descriptor.owner(), + get_value_context.subkey, + get_value_context.vcrypto.clone(), + ) + .map_err(RPCError::protocol)?; + } else { + // No value, should not have descriptor + if self.descriptor.is_some() { + return Err(RPCError::protocol("descriptor returned without a value")); } } + + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + + // pub fn value(&self) -> Option<&SignedValueData> { + // self.value.as_ref() + // } + // pub fn peers(&self) -> &[PeerInfo] { + // &self.peers + // } + // pub fn descriptor(&self) -> Option<&SignedValueDescriptor> { + // self.descriptor.as_ref() + // } + pub fn destructure( + self, + ) -> ( + Option, + Vec, + Option, + ) { + (self.value, self.peers, self.descriptor) + } + + pub fn decode(reader: &veilid_capnp::operation_get_value_a::Reader) -> Result { + let value = if reader.has_value() { + let value_reader = reader.get_value().map_err(RPCError::protocol)?; + let value = decode_signed_value_data(&value_reader)?; + Some(value) + } else { + None + }; + + let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + if peers_reader.len() as usize > MAX_GET_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("GetValueA peers length too long")); + } + let mut peers = Vec::::with_capacity( + peers_reader + .len() + .try_into() + .map_err(RPCError::map_internal("too many peers"))?, + ); + for p in 
peers_reader.iter() { + let peer_info = decode_peer_info(&p)?; + peers.push(peer_info); + } + + let descriptor = if reader.has_descriptor() { + let d_reader = reader.get_descriptor().map_err(RPCError::protocol)?; + let descriptor = decode_signed_value_descriptor(&d_reader)?; + Some(descriptor) + } else { + None + }; + + Ok(Self { + value, + peers, + descriptor, + }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_get_value_a::Builder, ) -> Result<(), RPCError> { - match self { - RPCOperationGetValueA::Data(data) => { - let mut d_builder = builder.reborrow().init_data(); - encode_value_data(&data, &mut d_builder)?; - } - RPCOperationGetValueA::Peers(peers) => { - let mut peers_builder = builder.reborrow().init_peers( - peers - .len() - .try_into() - .map_err(RPCError::map_internal("invalid peers list length"))?, - ); - for (i, peer) in peers.iter().enumerate() { - let mut pi_builder = peers_builder.reborrow().get(i as u32); - encode_peer_info(peer, &mut pi_builder)?; - } - } + if let Some(value) = &self.value { + let mut v_builder = builder.reborrow().init_value(); + encode_signed_value_data(value, &mut v_builder)?; + } + + let mut peers_builder = builder.reborrow().init_peers( + self.peers + .len() + .try_into() + .map_err(RPCError::map_internal("invalid peers list length"))?, + ); + for (i, peer) in self.peers.iter().enumerate() { + let mut pi_builder = peers_builder.reborrow().get(i as u32); + encode_peer_info(peer, &mut pi_builder)?; + } + + if let Some(descriptor) = &self.descriptor { + let mut d_builder = builder.reborrow().init_descriptor(); + encode_signed_value_descriptor(descriptor, &mut d_builder)?; } Ok(()) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_return_receipt.rs b/veilid-core/src/rpc_processor/coders/operations/operation_return_receipt.rs index bd7517a7..f049ab1a 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_return_receipt.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_return_receipt.rs @@ -2,17 +2,46 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationReturnReceipt { - pub receipt: Vec, + receipt: Vec, } impl RPCOperationReturnReceipt { + pub fn new(receipt: Vec) -> Result { + if receipt.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol("ReturnReceipt receipt too short to set")); + } + if receipt.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol("ReturnReceipt receipt too long to set")); + } + + Ok(Self { receipt }) + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn receipt(&self) -> &[u8] { + // &self.receipt + // } + + pub fn destructure(self) -> Vec { + self.receipt + } + pub fn decode( reader: &veilid_capnp::operation_return_receipt::Reader, - ) -> Result { - let rcpt_reader = reader.get_receipt().map_err(RPCError::protocol)?; - let receipt = rcpt_reader.to_vec(); + ) -> Result { + let rr = reader.get_receipt().map_err(RPCError::protocol)?; + if rr.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol("ReturnReceipt receipt too short to set")); + } + if rr.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol("ReturnReceipt receipt too long to set")); + } - Ok(RPCOperationReturnReceipt { receipt }) + Ok(Self { + receipt: rr.to_vec(), + }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_route.rs b/veilid-core/src/rpc_processor/coders/operations/operation_route.rs index 67333819..7d72bd4c 100644 --- 
a/veilid-core/src/rpc_processor/coders/operations/operation_route.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_route.rs @@ -2,10 +2,10 @@ use super::*; #[derive(Debug, Clone)] pub struct RoutedOperation { - pub sequencing: Sequencing, - pub signatures: Vec, - pub nonce: Nonce, - pub data: Vec, + sequencing: Sequencing, + signatures: Vec, + nonce: Nonce, + data: Vec, } impl RoutedOperation { @@ -17,10 +17,33 @@ impl RoutedOperation { data, } } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + //xxx + Ok(()) + } + pub fn sequencing(&self) -> Sequencing { + self.sequencing + } + pub fn signatures(&self) -> &[Signature] { + &self.signatures + } - pub fn decode( - reader: &veilid_capnp::routed_operation::Reader, - ) -> Result { + pub fn add_signature(&mut self, signature: Signature) { + self.signatures.push(signature); + } + + pub fn nonce(&self) -> &Nonce { + &self.nonce + } + pub fn data(&self) -> &[u8] { + &self.data + } + + pub fn destructure(self) -> (Sequencing, Vec, Nonce, Vec) { + (self.sequencing, self.signatures, self.nonce, self.data) + } + + pub fn decode(reader: &veilid_capnp::routed_operation::Reader) -> Result { let sigs_reader = reader.get_signatures().map_err(RPCError::protocol)?; let mut signatures = Vec::::with_capacity( sigs_reader @@ -36,13 +59,13 @@ impl RoutedOperation { let sequencing = decode_sequencing(reader.get_sequencing().map_err(RPCError::protocol)?); let n_reader = reader.get_nonce().map_err(RPCError::protocol)?; let nonce = decode_nonce(&n_reader); - let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); + let data = reader.get_data().map_err(RPCError::protocol)?; - Ok(RoutedOperation { + Ok(Self { sequencing, signatures, nonce, - data, + data: data.to_vec(), }) } @@ -73,22 +96,39 @@ impl RoutedOperation { #[derive(Debug, Clone)] pub struct RPCOperationRoute { - pub safety_route: SafetyRoute, - pub operation: RoutedOperation, + safety_route: SafetyRoute, + operation: RoutedOperation, } impl RPCOperationRoute { - pub fn decode( - reader: &veilid_capnp::operation_route::Reader, - crypto: Crypto, - ) -> Result { + pub fn new(safety_route: SafetyRoute, operation: RoutedOperation) -> Self { + Self { + safety_route, + operation, + } + } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + self.operation.validate(validate_context) + } + + pub fn safety_route(&self) -> &SafetyRoute { + &self.safety_route + } + pub fn operation(&self) -> &RoutedOperation { + &self.operation + } + pub fn destructure(self) -> (SafetyRoute, RoutedOperation) { + (self.safety_route, self.operation) + } + + pub fn decode(reader: &veilid_capnp::operation_route::Reader) -> Result { let sr_reader = reader.get_safety_route().map_err(RPCError::protocol)?; - let safety_route = decode_safety_route(&sr_reader, crypto)?; + let safety_route = decode_safety_route(&sr_reader)?; let o_reader = reader.get_operation().map_err(RPCError::protocol)?; let operation = RoutedOperation::decode(&o_reader)?; - Ok(RPCOperationRoute { + Ok(Self { safety_route, operation, }) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs index 4f1c5763..c7fa4cf2 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs @@ -1,22 +1,96 @@ use super::*; +use crate::storage_manager::{SignedValueData, 
SignedValueDescriptor}; + +const MAX_SET_VALUE_A_PEERS_LEN: usize = 20; + +#[derive(Clone)] +pub struct ValidateSetValueContext { + pub descriptor: SignedValueDescriptor, + pub subkey: ValueSubkey, + pub vcrypto: CryptoSystemVersion, +} + +impl fmt::Debug for ValidateSetValueContext { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ValidateSetValueContext") + .field("descriptor", &self.descriptor) + .field("subkey", &self.subkey) + .field("vcrypto", &self.vcrypto.kind().to_string()) + .finish() + } +} #[derive(Debug, Clone)] pub struct RPCOperationSetValueQ { - pub key: TypedKey, - pub subkey: ValueSubkey, - pub value: ValueData, + key: TypedKey, + subkey: ValueSubkey, + value: SignedValueData, + descriptor: Option, } impl RPCOperationSetValueQ { - pub fn decode( - reader: &veilid_capnp::operation_set_value_q::Reader, - ) -> Result { + pub fn new( + key: TypedKey, + subkey: ValueSubkey, + value: SignedValueData, + descriptor: Option, + ) -> Self { + Self { + key, + subkey, + value, + descriptor, + } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn key(&self) -> &TypedKey { + // &self.key + // } + + // pub fn subkey(&self) -> ValueSubkey { + // self.subkey + // } + + // pub fn value(&self) -> &SignedValueData { + // &self.value + // } + + // pub fn descriptor(&self) -> Option<&SignedValueDescriptor> { + // self.descriptor.as_ref() + // } + pub fn destructure( + self, + ) -> ( + TypedKey, + ValueSubkey, + SignedValueData, + Option, + ) { + (self.key, self.subkey, self.value, self.descriptor) + } + + pub fn decode(reader: &veilid_capnp::operation_set_value_q::Reader) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; let key = decode_typed_key(&k_reader)?; let subkey = reader.get_subkey(); let v_reader = reader.get_value().map_err(RPCError::protocol)?; - let value = decode_value_data(&v_reader)?; - Ok(RPCOperationSetValueQ { key, subkey, value }) + let value = decode_signed_value_data(&v_reader)?; + let descriptor = if reader.has_descriptor() { + let d_reader = reader.get_descriptor().map_err(RPCError::protocol)?; + let descriptor = decode_signed_value_descriptor(&d_reader)?; + Some(descriptor) + } else { + None + }; + Ok(Self { + key, + subkey, + value, + descriptor, + }) } pub fn encode( &self, @@ -26,65 +100,123 @@ impl RPCOperationSetValueQ { encode_typed_key(&self.key, &mut k_builder); builder.set_subkey(self.subkey); let mut v_builder = builder.reborrow().init_value(); - encode_value_data(&self.value, &mut v_builder)?; + encode_signed_value_data(&self.value, &mut v_builder)?; + if let Some(descriptor) = &self.descriptor { + let mut d_builder = builder.reborrow().init_descriptor(); + encode_signed_value_descriptor(descriptor, &mut d_builder)?; + } Ok(()) } } #[derive(Debug, Clone)] -pub enum RPCOperationSetValueA { - Data(ValueData), - Peers(Vec), +pub struct RPCOperationSetValueA { + set: bool, + value: Option, + peers: Vec, } impl RPCOperationSetValueA { - pub fn decode( - reader: &veilid_capnp::operation_set_value_a::Reader, - crypto: Crypto, - ) -> Result { - match reader.which().map_err(RPCError::protocol)? 
{ - veilid_capnp::operation_set_value_a::Which::Data(r) => { - let data = decode_value_data(&r.map_err(RPCError::protocol)?)?; - Ok(RPCOperationSetValueA::Data(data)) - } - veilid_capnp::operation_set_value_a::Which::Peers(r) => { - let peers_reader = r.map_err(RPCError::protocol)?; - let mut peers = Vec::::with_capacity( - peers_reader - .len() - .try_into() - .map_err(RPCError::map_internal("too many peers"))?, - ); - for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; - peers.push(peer_info); - } - - Ok(RPCOperationSetValueA::Peers(peers)) - } + pub fn new( + set: bool, + value: Option, + peers: Vec, + ) -> Result { + if peers.len() as usize > MAX_SET_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("SetValueA peers length too long")); } + Ok(Self { set, value, peers }) + } + + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + let question_context = validate_context + .question_context + .as_ref() + .expect("SetValueA requires question context"); + let QuestionContext::SetValue(set_value_context) = question_context else { + panic!("Wrong context type for SetValueA"); + }; + + if let Some(value) = &self.value { + // Ensure the descriptor itself validates + set_value_context + .descriptor + .validate(set_value_context.vcrypto.clone()) + .map_err(RPCError::protocol)?; + + // And the signed value data + value + .validate( + set_value_context.descriptor.owner(), + set_value_context.subkey, + set_value_context.vcrypto.clone(), + ) + .map_err(RPCError::protocol)?; + } + + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + + // pub fn set(&self) -> bool { + // self.set + // } + // pub fn value(&self) -> Option<&SignedValueData> { + // self.value.as_ref() + // } + // pub fn peers(&self) -> &[PeerInfo] { + // &self.peers + // } + pub fn destructure(self) -> (bool, Option, Vec) { + (self.set, self.value, self.peers) + } + + pub fn decode(reader: &veilid_capnp::operation_set_value_a::Reader) -> Result { + let set = reader.get_set(); + let value = if reader.has_value() { + let v_reader = reader.get_value().map_err(RPCError::protocol)?; + let value = decode_signed_value_data(&v_reader)?; + Some(value) + } else { + None + }; + let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + if peers_reader.len() as usize > MAX_SET_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("SetValueA peers length too long")); + } + let mut peers = Vec::::with_capacity( + peers_reader + .len() + .try_into() + .map_err(RPCError::map_internal("too many peers"))?, + ); + for p in peers_reader.iter() { + let peer_info = decode_peer_info(&p)?; + peers.push(peer_info); + } + + Ok(Self { set, value, peers }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_set_value_a::Builder, ) -> Result<(), RPCError> { - match self { - RPCOperationSetValueA::Data(data) => { - let mut d_builder = builder.reborrow().init_data(); - encode_value_data(&data, &mut d_builder)?; - } - RPCOperationSetValueA::Peers(peers) => { - let mut peers_builder = builder.reborrow().init_peers( - peers - .len() - .try_into() - .map_err(RPCError::map_internal("invalid peers list length"))?, - ); - for (i, peer) in peers.iter().enumerate() { - let mut pi_builder = peers_builder.reborrow().get(i as u32); - encode_peer_info(peer, &mut pi_builder)?; - } - } + builder.set_set(self.set); + + if let Some(value) = &self.value { + let mut v_builder = builder.reborrow().init_value(); + encode_signed_value_data(value, &mut 
v_builder)?; + } + + let mut peers_builder = builder.reborrow().init_peers( + self.peers + .len() + .try_into() + .map_err(RPCError::map_internal("invalid peers list length"))?, + ); + for (i, peer) in self.peers.iter().enumerate() { + let mut pi_builder = peers_builder.reborrow().get(i as u32); + encode_peer_info(peer, &mut pi_builder)?; } Ok(()) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs index a414e300..0b5ec38c 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_signal.rs @@ -2,16 +2,26 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationSignal { - pub signal_info: SignalInfo, + signal_info: SignalInfo, } impl RPCOperationSignal { - pub fn decode( - reader: &veilid_capnp::operation_signal::Reader, - crypto: Crypto, - ) -> Result { - let signal_info = decode_signal_info(reader, crypto)?; - Ok(RPCOperationSignal { signal_info }) + pub fn new(signal_info: SignalInfo) -> Self { + Self { signal_info } + } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + self.signal_info.validate(validate_context.crypto.clone()) + } + // pub fn signal_info(&self) -> &SignalInfo { + // &self.signal_info + // } + pub fn destructure(self) -> SignalInfo { + self.signal_info + } + + pub fn decode(reader: &veilid_capnp::operation_signal::Reader) -> Result { + let signal_info = decode_signal_info(reader)?; + Ok(Self { signal_info }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_start_tunnel.rs b/veilid-core/src/rpc_processor/coders/operations/operation_start_tunnel.rs index 274b0af8..b3741462 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_start_tunnel.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_start_tunnel.rs @@ -2,15 +2,40 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationStartTunnelQ { - pub id: TunnelId, - pub local_mode: TunnelMode, - pub depth: u8, + id: TunnelId, + local_mode: TunnelMode, + depth: u8, } impl RPCOperationStartTunnelQ { + pub fn new(id: TunnelId, local_mode: TunnelMode, depth: u8) -> Self { + Self { + id, + local_mode, + depth, + } + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + pub fn id(&self) -> TunnelId { + self.id + } + pub fn local_mode(&self) -> TunnelMode { + self.local_mode + } + pub fn depth(&self) -> u8 { + self.depth + } + pub fn destructure(self) -> (TunnelId, TunnelMode, u8) { + (self.id, self.local_mode, self.depth) + } + pub fn decode( reader: &veilid_capnp::operation_start_tunnel_q::Reader, - ) -> Result { + ) -> Result { let id = TunnelId::new(reader.get_id()); let local_mode = match reader.get_local_mode().map_err(RPCError::protocol)? 
{ veilid_capnp::TunnelEndpointMode::Raw => TunnelMode::Raw, @@ -18,7 +43,7 @@ impl RPCOperationStartTunnelQ { }; let depth = reader.get_depth(); - Ok(RPCOperationStartTunnelQ { + Ok(Self { id, local_mode, depth, @@ -46,18 +71,28 @@ pub enum RPCOperationStartTunnelA { } impl RPCOperationStartTunnelA { + pub fn new_partial(partial_tunnel: PartialTunnel) -> Self { + Self::Partial(partial_tunnel) + } + pub fn new_error(tunnel_error: TunnelError) -> Self { + Self::Error(tunnel_error) + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + pub fn decode( reader: &veilid_capnp::operation_start_tunnel_a::Reader, - ) -> Result { + ) -> Result { match reader.which().map_err(RPCError::protocol)? { veilid_capnp::operation_start_tunnel_a::Which::Partial(r) => { let pt_reader = r.map_err(RPCError::protocol)?; let partial_tunnel = decode_partial_tunnel(&pt_reader)?; - Ok(RPCOperationStartTunnelA::Partial(partial_tunnel)) + Ok(Self::Partial(partial_tunnel)) } veilid_capnp::operation_start_tunnel_a::Which::Error(r) => { let tunnel_error = decode_tunnel_error(r.map_err(RPCError::protocol)?); - Ok(RPCOperationStartTunnelA::Error(tunnel_error)) + Ok(Self::Error(tunnel_error)) } } } @@ -66,10 +101,10 @@ impl RPCOperationStartTunnelA { builder: &mut veilid_capnp::operation_start_tunnel_a::Builder, ) -> Result<(), RPCError> { match self { - RPCOperationStartTunnelA::Partial(p) => { + Self::Partial(p) => { encode_partial_tunnel(p, &mut builder.reborrow().init_partial())?; } - RPCOperationStartTunnelA::Error(e) => { + Self::Error(e) => { builder.set_error(encode_tunnel_error(*e)); } } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_status.rs b/veilid-core/src/rpc_processor/coders/operations/operation_status.rs index 9ab480a8..99cb5985 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_status.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_status.rs @@ -2,13 +2,25 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationStatusQ { - pub node_status: Option, + node_status: Option, } impl RPCOperationStatusQ { - pub fn decode( - reader: &veilid_capnp::operation_status_q::Reader, - ) -> Result { + pub fn new(node_status: Option) -> Self { + Self { node_status } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn node_status(&self) -> Option<&NodeStatus> { + // self.node_status.as_ref() + // } + pub fn destructure(self) -> Option { + self.node_status + } + + pub fn decode(reader: &veilid_capnp::operation_status_q::Reader) -> Result { let node_status = if reader.has_node_status() { let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?; let node_status = decode_node_status(&ns_reader)?; @@ -16,7 +28,7 @@ impl RPCOperationStatusQ { } else { None }; - Ok(RPCOperationStatusQ { node_status }) + Ok(Self { node_status }) } pub fn encode( &self, @@ -32,14 +44,33 @@ impl RPCOperationStatusQ { #[derive(Debug, Clone)] pub struct RPCOperationStatusA { - pub node_status: Option, - pub sender_info: Option, + node_status: Option, + sender_info: Option, } impl RPCOperationStatusA { - pub fn decode( - reader: &veilid_capnp::operation_status_a::Reader, - ) -> Result { + pub fn new(node_status: Option, sender_info: Option) -> Self { + Self { + node_status, + sender_info, + } + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + // pub fn node_status(&self) -> 
Option<&NodeStatus> { + // self.node_status.as_ref() + // } + // pub fn sender_info(&self) -> Option<&SenderInfo> { + // self.sender_info.as_ref() + // } + pub fn destructure(self) -> (Option, Option) { + (self.node_status, self.sender_info) + } + + pub fn decode(reader: &veilid_capnp::operation_status_a::Reader) -> Result { let node_status = if reader.has_node_status() { let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?; let node_status = decode_node_status(&ns_reader)?; @@ -56,7 +87,7 @@ impl RPCOperationStatusA { None }; - Ok(RPCOperationStatusA { + Ok(Self { node_status, sender_info, }) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs b/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs index f68de596..886b5bd4 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_supply_block.rs @@ -1,18 +1,35 @@ use super::*; +const MAX_SUPPLY_BLOCK_A_PEERS_LEN: usize = 20; + #[derive(Debug, Clone)] pub struct RPCOperationSupplyBlockQ { - pub block_id: TypedKey, + block_id: TypedKey, } impl RPCOperationSupplyBlockQ { + pub fn new(block_id: TypedKey) -> Self { + Self { block_id } + } + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + + pub fn block_id(&self) -> &TypedKey { + &self.block_id + } + + pub fn destructure(self) -> TypedKey { + self.block_id + } + pub fn decode( reader: &veilid_capnp::operation_supply_block_q::Reader, - ) -> Result { + ) -> Result { let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?; let block_id = decode_typed_key(&bi_reader)?; - Ok(RPCOperationSupplyBlockQ { block_id }) + Ok(Self { block_id }) } pub fn encode( &self, @@ -26,57 +43,68 @@ impl RPCOperationSupplyBlockQ { } #[derive(Debug, Clone)] -pub enum RPCOperationSupplyBlockA { - Expiration(u64), - Peers(Vec), +pub struct RPCOperationSupplyBlockA { + expiration: u64, + peers: Vec, } impl RPCOperationSupplyBlockA { + pub fn new(expiration: u64, peers: Vec) -> Result { + if peers.len() > MAX_SUPPLY_BLOCK_A_PEERS_LEN { + return Err(RPCError::protocol("SupplyBlockA peers length too long")); + } + Ok(Self { expiration, peers }) + } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + pub fn expiration(&self) -> u64 { + self.expiration + } + pub fn peers(&self) -> &[PeerInfo] { + &self.peers + } + pub fn destructure(self) -> (u64, Vec) { + (self.expiration, self.peers) + } + pub fn decode( reader: &veilid_capnp::operation_supply_block_a::Reader, - crypto: Crypto, - ) -> Result { - match reader.which().map_err(RPCError::protocol)? 
{ - veilid_capnp::operation_supply_block_a::Which::Expiration(r) => { - Ok(RPCOperationSupplyBlockA::Expiration(r)) - } - veilid_capnp::operation_supply_block_a::Which::Peers(r) => { - let peers_reader = r.map_err(RPCError::protocol)?; - let mut peers = Vec::::with_capacity( - peers_reader - .len() - .try_into() - .map_err(RPCError::map_internal("too many peers"))?, - ); - for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; - peers.push(peer_info); - } + ) -> Result { + let expiration = reader.get_expiration(); - Ok(RPCOperationSupplyBlockA::Peers(peers)) - } + let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + if peers_reader.len() as usize > MAX_SUPPLY_BLOCK_A_PEERS_LEN { + return Err(RPCError::protocol("SupplyBlockA peers length too long")); } + let mut peers = Vec::::with_capacity( + peers_reader + .len() + .try_into() + .map_err(RPCError::map_internal("too many peers"))?, + ); + for p in peers_reader.iter() { + let peer_info = decode_peer_info(&p)?; + peers.push(peer_info); + } + + Ok(Self { expiration, peers }) } pub fn encode( &self, builder: &mut veilid_capnp::operation_supply_block_a::Builder, ) -> Result<(), RPCError> { - match self { - RPCOperationSupplyBlockA::Expiration(e) => { - builder.set_expiration(*e); - } - RPCOperationSupplyBlockA::Peers(peers) => { - let mut peers_builder = builder.reborrow().init_peers( - peers - .len() - .try_into() - .map_err(RPCError::map_internal("invalid peers list length"))?, - ); - for (i, peer) in peers.iter().enumerate() { - let mut pi_builder = peers_builder.reborrow().get(i as u32); - encode_peer_info(peer, &mut pi_builder)?; - } - } + builder.set_expiration(self.expiration); + let mut peers_builder = builder.reborrow().init_peers( + self.peers + .len() + .try_into() + .map_err(RPCError::map_internal("invalid peers list length"))?, + ); + for (i, peer) in self.peers.iter().enumerate() { + let mut pi_builder = peers_builder.reborrow().get(i as u32); + encode_peer_info(peer, &mut pi_builder)?; } Ok(()) diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_validate_dial_info.rs b/veilid-core/src/rpc_processor/coders/operations/operation_validate_dial_info.rs index 63a8bd40..fa76e1bd 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_validate_dial_info.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_validate_dial_info.rs @@ -2,22 +2,68 @@ use super::*; #[derive(Debug, Clone)] pub struct RPCOperationValidateDialInfo { - pub dial_info: DialInfo, - pub receipt: Vec, - pub redirect: bool, + dial_info: DialInfo, + receipt: Vec, + redirect: bool, } impl RPCOperationValidateDialInfo { + pub fn new(dial_info: DialInfo, receipt: Vec, redirect: bool) -> Result { + if receipt.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol( + "ValidateDialInfo receipt too short to set", + )); + } + if receipt.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol( + "ValidateDialInfo receipt too long to set", + )); + } + + Ok(Self { + dial_info, + receipt, + redirect, + }) + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + Ok(()) + } + // pub fn dial_info(&self) -> &DialInfo { + // &self.dial_info + // } + // pub fn receipt(&self) -> &[u8] { + // &self.receipt + // } + // pub fn redirect(&self) -> bool { + // self.redirect + // } + pub fn destructure(self) -> (DialInfo, Vec, bool) { + (self.dial_info, self.receipt, self.redirect) + } + pub fn decode( reader: 
&veilid_capnp::operation_validate_dial_info::Reader, - ) -> Result { + ) -> Result { let di_reader = reader.get_dial_info().map_err(RPCError::protocol)?; let dial_info = decode_dial_info(&di_reader)?; let rcpt_reader = reader.get_receipt().map_err(RPCError::protocol)?; + if rcpt_reader.len() < MIN_RECEIPT_SIZE { + return Err(RPCError::protocol( + "ValidateDialInfo receipt too short to set", + )); + } + if rcpt_reader.len() > MAX_RECEIPT_SIZE { + return Err(RPCError::protocol( + "ValidateDialInfo receipt too long to set", + )); + } + let receipt = rcpt_reader.to_vec(); let redirect = reader.get_redirect(); - Ok(RPCOperationValidateDialInfo { + Ok(Self { dial_info, receipt, redirect, diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs b/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs index 86bc9c69..d7a815a2 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_value_changed.rs @@ -1,17 +1,53 @@ use super::*; +use crate::storage_manager::SignedValueData; #[derive(Debug, Clone)] pub struct RPCOperationValueChanged { - pub key: TypedKey, - pub subkeys: Vec, - pub count: u32, - pub value: ValueData, + key: TypedKey, + subkeys: Vec, + count: u32, + value: SignedValueData, } impl RPCOperationValueChanged { + pub fn new( + key: TypedKey, + subkeys: Vec, + count: u32, + value: SignedValueData, + ) -> Self { + Self { + key, + subkeys, + count, + value, + } + } + + pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> { + // validation must be done by storage manager as this is more complicated + Ok(()) + } + + pub fn key(&self) -> &TypedKey { + &self.key + } + pub fn subkeys(&self) -> &[ValueSubkeyRange] { + &self.subkeys + } + pub fn count(&self) -> u32 { + self.count + } + pub fn value(&self) -> &SignedValueData { + &self.value + } + pub fn destructure(self) -> (TypedKey, Vec, u32, SignedValueData) { + (self.key, self.subkeys, self.count, self.value) + } + pub fn decode( reader: &veilid_capnp::operation_value_changed::Reader, - ) -> Result { + ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; let key = decode_typed_key(&k_reader)?; @@ -38,8 +74,8 @@ impl RPCOperationValueChanged { } let count = reader.get_count(); let v_reader = reader.get_value().map_err(RPCError::protocol)?; - let value = decode_value_data(&v_reader)?; - Ok(RPCOperationValueChanged { + let value = decode_signed_value_data(&v_reader)?; + Ok(Self { key, subkeys, count, @@ -68,7 +104,7 @@ impl RPCOperationValueChanged { builder.set_count(self.count); let mut v_builder = builder.reborrow().init_value(); - encode_value_data(&self.value, &mut v_builder)?; + encode_signed_value_data(&self.value, &mut v_builder)?; Ok(()) } } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs index 7812b98b..9da63df7 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs @@ -1,21 +1,117 @@ use super::*; +const MAX_WATCH_VALUE_Q_SUBKEYS_LEN: usize = 512; +const MAX_WATCH_VALUE_A_PEERS_LEN: usize = 20; + #[derive(Debug, Clone)] pub struct RPCOperationWatchValueQ { - pub key: TypedKey, - pub subkeys: Vec, - pub expiration: u64, - pub count: u32, + key: TypedKey, + subkeys: Vec, + expiration: u64, + count: u32, + watcher: 
PublicKey, + signature: Signature, } impl RPCOperationWatchValueQ { + pub fn new( + key: TypedKey, + subkeys: Vec, + expiration: u64, + count: u32, + watcher: PublicKey, + signature: Signature, + ) -> Result { + if subkeys.len() > MAX_WATCH_VALUE_Q_SUBKEYS_LEN { + return Err(RPCError::protocol("WatchValueQ subkeys length too long")); + } + Ok(Self { + key, + subkeys, + expiration, + count, + watcher, + signature, + }) + } + + // signature covers: key, subkeys, expiration, count, using watcher key + fn make_signature_data(&self) -> Vec { + let mut sig_data = + Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (self.subkeys.len() * 8) + 8 + 4); + sig_data.extend_from_slice(&self.key.kind.0); + sig_data.extend_from_slice(&self.key.value.bytes); + for sk in &self.subkeys { + sig_data.extend_from_slice(&sk.0.to_le_bytes()); + sig_data.extend_from_slice(&sk.1.to_le_bytes()); + } + sig_data.extend_from_slice(&self.expiration.to_le_bytes()); + sig_data.extend_from_slice(&self.count.to_le_bytes()); + sig_data + } + + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + let Some(vcrypto) = validate_context.crypto.get(self.key.kind) else { + return Err(RPCError::protocol("unsupported cryptosystem")); + }; + + let sig_data = self.make_signature_data(); + vcrypto + .verify(&self.watcher, &sig_data, &self.signature) + .map_err(RPCError::protocol)?; + + Ok(()) + } + + pub fn key(&self) -> &TypedKey { + &self.key + } + pub fn subkeys(&self) -> &[ValueSubkeyRange] { + &self.subkeys + } + pub fn expiration(&self) -> u64 { + self.expiration + } + pub fn count(&self) -> u32 { + self.count + } + pub fn watcher(&self) -> &PublicKey { + &self.watcher + } + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn destructure( + self, + ) -> ( + TypedKey, + Vec, + u64, + u32, + PublicKey, + Signature, + ) { + ( + self.key, + self.subkeys, + self.expiration, + self.count, + self.watcher, + self.signature, + ) + } + pub fn decode( reader: &veilid_capnp::operation_watch_value_q::Reader, - ) -> Result { + ) -> Result { let k_reader = reader.get_key().map_err(RPCError::protocol)?; let key = decode_typed_key(&k_reader)?; let sk_reader = reader.get_subkeys().map_err(RPCError::protocol)?; + if sk_reader.len() as usize > MAX_WATCH_VALUE_Q_SUBKEYS_LEN { + return Err(RPCError::protocol("WatchValueQ subkeys length too long")); + } let mut subkeys = Vec::::with_capacity( sk_reader .len() @@ -40,13 +136,22 @@ impl RPCOperationWatchValueQ { let expiration = reader.get_expiration(); let count = reader.get_count(); - Ok(RPCOperationWatchValueQ { + let w_reader = reader.get_watcher().map_err(RPCError::protocol)?; + let watcher = decode_key256(&w_reader); + + let s_reader = reader.get_signature().map_err(RPCError::protocol)?; + let signature = decode_signature512(&s_reader); + + Ok(Self { key, subkeys, expiration, count, + watcher, + signature, }) } + pub fn encode( &self, builder: &mut veilid_capnp::operation_watch_value_q::Builder, @@ -67,23 +172,54 @@ impl RPCOperationWatchValueQ { } builder.set_expiration(self.expiration); builder.set_count(self.count); + + let mut w_builder = builder.reborrow().init_watcher(); + encode_key256(&self.watcher, &mut w_builder); + + let mut s_builder = builder.reborrow().init_signature(); + encode_signature512(&self.signature, &mut s_builder); + Ok(()) } } #[derive(Debug, Clone)] pub struct RPCOperationWatchValueA { - pub expiration: u64, - pub peers: Vec, + expiration: u64, + peers: Vec, } impl RPCOperationWatchValueA { + pub fn new(expiration: u64, 
peers: Vec) -> Result { + if peers.len() > MAX_WATCH_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("WatchValueA peers length too long")); + } + Ok(Self { expiration, peers }) + } + + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); + Ok(()) + } + + pub fn expiration(&self) -> u64 { + self.expiration + } + pub fn peers(&self) -> &[PeerInfo] { + &self.peers + } + pub fn destructure(self) -> (u64, Vec) { + (self.expiration, self.peers) + } + pub fn decode( reader: &veilid_capnp::operation_watch_value_a::Reader, - crypto: Crypto, - ) -> Result { + ) -> Result { let expiration = reader.get_expiration(); let peers_reader = reader.get_peers().map_err(RPCError::protocol)?; + if peers_reader.len() as usize > MAX_WATCH_VALUE_A_PEERS_LEN { + return Err(RPCError::protocol("WatchValueA peers length too long")); + } let mut peers = Vec::::with_capacity( peers_reader .len() @@ -91,11 +227,11 @@ impl RPCOperationWatchValueA { .map_err(RPCError::map_internal("too many peers"))?, ); for p in peers_reader.iter() { - let peer_info = decode_peer_info(&p, crypto.clone())?; + let peer_info = decode_peer_info(&p)?; peers.push(peer_info); } - Ok(RPCOperationWatchValueA { expiration, peers }) + Ok(Self { expiration, peers }) } pub fn encode( &self, diff --git a/veilid-core/src/rpc_processor/coders/operations/question.rs b/veilid-core/src/rpc_processor/coders/operations/question.rs index e3f50776..f0f2c056 100644 --- a/veilid-core/src/rpc_processor/coders/operations/question.rs +++ b/veilid-core/src/rpc_processor/coders/operations/question.rs @@ -10,6 +10,10 @@ impl RPCQuestion { pub fn new(respond_to: RespondTo, detail: RPCQuestionDetail) -> Self { Self { respond_to, detail } } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + self.respond_to.validate(validate_context.crypto.clone())?; + self.detail.validate(validate_context) + } pub fn respond_to(&self) -> &RespondTo { &self.respond_to } @@ -19,12 +23,12 @@ impl RPCQuestion { pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode( - reader: &veilid_capnp::question::Reader, - crypto: Crypto, - ) -> Result { + pub fn destructure(self) -> (RespondTo, RPCQuestionDetail) { + (self.respond_to, self.detail) + } + pub fn decode(reader: &veilid_capnp::question::Reader) -> Result { let rt_reader = reader.get_respond_to(); - let respond_to = RespondTo::decode(&rt_reader, crypto)?; + let respond_to = RespondTo::decode(&rt_reader)?; let d_reader = reader.get_detail(); let detail = RPCQuestionDetail::decode(&d_reader)?; Ok(RPCQuestion { respond_to, detail }) @@ -68,6 +72,21 @@ impl RPCQuestionDetail { RPCQuestionDetail::CancelTunnelQ(_) => "CancelTunnelQ", } } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + match self { + RPCQuestionDetail::StatusQ(r) => r.validate(validate_context), + RPCQuestionDetail::FindNodeQ(r) => r.validate(validate_context), + RPCQuestionDetail::AppCallQ(r) => r.validate(validate_context), + RPCQuestionDetail::GetValueQ(r) => r.validate(validate_context), + RPCQuestionDetail::SetValueQ(r) => r.validate(validate_context), + RPCQuestionDetail::WatchValueQ(r) => r.validate(validate_context), + RPCQuestionDetail::SupplyBlockQ(r) => r.validate(validate_context), + RPCQuestionDetail::FindBlockQ(r) => r.validate(validate_context), + RPCQuestionDetail::StartTunnelQ(r) => r.validate(validate_context), + 
RPCQuestionDetail::CompleteTunnelQ(r) => r.validate(validate_context), + RPCQuestionDetail::CancelTunnelQ(r) => r.validate(validate_context), + } + } pub fn decode( reader: &veilid_capnp::question::detail::Reader, diff --git a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs index 43d6f871..bff19c4f 100644 --- a/veilid-core/src/rpc_processor/coders/operations/respond_to.rs +++ b/veilid-core/src/rpc_processor/coders/operations/respond_to.rs @@ -7,6 +7,13 @@ pub enum RespondTo { } impl RespondTo { + pub fn validate(&mut self, crypto: Crypto) -> Result<(), RPCError> { + match self { + RespondTo::Sender => Ok(()), + RespondTo::PrivateRoute(pr) => pr.validate(crypto).map_err(RPCError::protocol), + } + } + pub fn encode( &self, builder: &mut veilid_capnp::question::respond_to::Builder, @@ -23,15 +30,12 @@ impl RespondTo { Ok(()) } - pub fn decode( - reader: &veilid_capnp::question::respond_to::Reader, - crypto: Crypto, - ) -> Result { + pub fn decode(reader: &veilid_capnp::question::respond_to::Reader) -> Result { let respond_to = match reader.which().map_err(RPCError::protocol)? { veilid_capnp::question::respond_to::Sender(()) => RespondTo::Sender, veilid_capnp::question::respond_to::PrivateRoute(pr_reader) => { let pr_reader = pr_reader.map_err(RPCError::protocol)?; - let pr = decode_private_route(&pr_reader, crypto)?; + let pr = decode_private_route(&pr_reader)?; RespondTo::PrivateRoute(pr) } }; diff --git a/veilid-core/src/rpc_processor/coders/operations/statement.rs b/veilid-core/src/rpc_processor/coders/operations/statement.rs index 2108b373..08241e24 100644 --- a/veilid-core/src/rpc_processor/coders/operations/statement.rs +++ b/veilid-core/src/rpc_processor/coders/operations/statement.rs @@ -9,21 +9,21 @@ impl RPCStatement { pub fn new(detail: RPCStatementDetail) -> Self { Self { detail } } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + self.detail.validate(validate_context) + } pub fn detail(&self) -> &RPCStatementDetail { &self.detail } - pub fn into_detail(self) -> RPCStatementDetail { - self.detail - } pub fn desc(&self) -> &'static str { self.detail.desc() } - pub fn decode( - reader: &veilid_capnp::statement::Reader, - crypto: Crypto, - ) -> Result { + pub fn destructure(self) -> RPCStatementDetail { + self.detail + } + pub fn decode(reader: &veilid_capnp::statement::Reader) -> Result { let d_reader = reader.get_detail(); - let detail = RPCStatementDetail::decode(&d_reader, crypto)?; + let detail = RPCStatementDetail::decode(&d_reader)?; Ok(RPCStatement { detail }) } pub fn encode(&self, builder: &mut veilid_capnp::statement::Builder) -> Result<(), RPCError> { @@ -53,9 +53,18 @@ impl RPCStatementDetail { RPCStatementDetail::AppMessage(_) => "AppMessage", } } + pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> { + match self { + RPCStatementDetail::ValidateDialInfo(r) => r.validate(validate_context), + RPCStatementDetail::Route(r) => r.validate(validate_context), + RPCStatementDetail::ValueChanged(r) => r.validate(validate_context), + RPCStatementDetail::Signal(r) => r.validate(validate_context), + RPCStatementDetail::ReturnReceipt(r) => r.validate(validate_context), + RPCStatementDetail::AppMessage(r) => r.validate(validate_context), + } + } pub fn decode( reader: &veilid_capnp::statement::detail::Reader, - crypto: Crypto, ) -> Result { let which_reader = reader.which().map_err(RPCError::protocol)?; let out = match 
which_reader { @@ -66,7 +75,7 @@ impl RPCStatementDetail { } veilid_capnp::statement::detail::Route(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationRoute::decode(&op_reader, crypto)?; + let out = RPCOperationRoute::decode(&op_reader)?; RPCStatementDetail::Route(out) } veilid_capnp::statement::detail::ValueChanged(r) => { @@ -76,7 +85,7 @@ impl RPCStatementDetail { } veilid_capnp::statement::detail::Signal(r) => { let op_reader = r.map_err(RPCError::protocol)?; - let out = RPCOperationSignal::decode(&op_reader, crypto)?; + let out = RPCOperationSignal::decode(&op_reader)?; RPCStatementDetail::Signal(out) } veilid_capnp::statement::detail::ReturnReceipt(r) => { diff --git a/veilid-core/src/rpc_processor/coders/peer_info.rs b/veilid-core/src/rpc_processor/coders/peer_info.rs index 14a1cdd1..7e5b5091 100644 --- a/veilid-core/src/rpc_processor/coders/peer_info.rs +++ b/veilid-core/src/rpc_processor/coders/peer_info.rs @@ -7,12 +7,12 @@ pub fn encode_peer_info( // let mut nids_builder = builder.reborrow().init_node_ids( peer_info - .node_ids + .node_ids() .len() .try_into() .map_err(RPCError::map_invalid_format("out of bound error"))?, ); - for (i, nid) in peer_info.node_ids.iter().enumerate() { + for (i, nid) in peer_info.node_ids().iter().enumerate() { encode_typed_key( nid, &mut nids_builder.reborrow().get( @@ -22,15 +22,12 @@ pub fn encode_peer_info( ); } let mut sni_builder = builder.reborrow().init_signed_node_info(); - encode_signed_node_info(&peer_info.signed_node_info, &mut sni_builder)?; + encode_signed_node_info(peer_info.signed_node_info(), &mut sni_builder)?; Ok(()) } -pub fn decode_peer_info( - reader: &veilid_capnp::peer_info::Reader, - crypto: Crypto, -) -> Result { +pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result { let nids_reader = reader .reborrow() .get_node_ids() @@ -43,12 +40,9 @@ pub fn decode_peer_info( for nid_reader in nids_reader.iter() { node_ids.add(decode_typed_key(&nid_reader)?); } - let signed_node_info = decode_signed_node_info(&sni_reader, crypto, &mut node_ids)?; + let signed_node_info = decode_signed_node_info(&sni_reader)?; if node_ids.len() == 0 { return Err(RPCError::protocol("no verified node ids")); } - Ok(PeerInfo { - node_ids, - signed_node_info, - }) + Ok(PeerInfo::new(node_ids, signed_node_info)) } diff --git a/veilid-core/src/rpc_processor/coders/private_safety_route.rs b/veilid-core/src/rpc_processor/coders/private_safety_route.rs index 86d63d03..504b308d 100644 --- a/veilid-core/src/rpc_processor/coders/private_safety_route.rs +++ b/veilid-core/src/rpc_processor/coders/private_safety_route.rs @@ -67,10 +67,7 @@ pub fn encode_route_hop( Ok(()) } -pub fn decode_route_hop( - reader: &veilid_capnp::route_hop::Reader, - crypto: Crypto, -) -> Result { +pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result { let n_reader = reader.reborrow().get_node(); let node = match n_reader.which().map_err(RPCError::protocol)? 
{ veilid_capnp::route_hop::node::Which::NodeId(ni) => { @@ -80,7 +77,7 @@ pub fn decode_route_hop( veilid_capnp::route_hop::node::Which::PeerInfo(pi) => { let pi_reader = pi.map_err(RPCError::protocol)?; RouteNode::PeerInfo( - decode_peer_info(&pi_reader, crypto) + decode_peer_info(&pi_reader) .map_err(RPCError::map_protocol("invalid peer info in route hop"))?, ) } @@ -128,7 +125,6 @@ pub fn encode_private_route( pub fn decode_private_route( reader: &veilid_capnp::private_route::Reader, - crypto: Crypto, ) -> Result { let public_key = decode_typed_key(&reader.get_public_key().map_err( RPCError::map_protocol("invalid public key in private route"), @@ -138,7 +134,7 @@ pub fn decode_private_route( let hops = match reader.get_hops().which().map_err(RPCError::protocol)? { veilid_capnp::private_route::hops::Which::FirstHop(rh_reader) => { let rh_reader = rh_reader.map_err(RPCError::protocol)?; - PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader, crypto)?) + PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader)?) } veilid_capnp::private_route::hops::Which::Data(rhd_reader) => { let rhd_reader = rhd_reader.map_err(RPCError::protocol)?; @@ -182,7 +178,6 @@ pub fn encode_safety_route( pub fn decode_safety_route( reader: &veilid_capnp::safety_route::Reader, - crypto: Crypto, ) -> Result { let public_key = decode_typed_key( &reader @@ -197,7 +192,7 @@ pub fn decode_safety_route( } veilid_capnp::safety_route::hops::Which::Private(pr_reader) => { let pr_reader = pr_reader.map_err(RPCError::protocol)?; - SafetyRouteHops::Private(decode_private_route(&pr_reader, crypto)?) + SafetyRouteHops::Private(decode_private_route(&pr_reader)?) } }; diff --git a/veilid-core/src/rpc_processor/coders/signal_info.rs b/veilid-core/src/rpc_processor/coders/signal_info.rs index 0f51257e..5e9edc84 100644 --- a/veilid-core/src/rpc_processor/coders/signal_info.rs +++ b/veilid-core/src/rpc_processor/coders/signal_info.rs @@ -34,7 +34,6 @@ pub fn encode_signal_info( pub fn decode_signal_info( reader: &veilid_capnp::operation_signal::Reader, - crypto: Crypto, ) -> Result { Ok( match reader @@ -53,7 +52,7 @@ pub fn decode_signal_info( let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol( "invalid peer info in hole punch signal info", ))?; - let peer_info = decode_peer_info(&pi_reader, crypto)?; + let peer_info = decode_peer_info(&pi_reader)?; SignalInfo::HolePunch { receipt, peer_info } } @@ -69,7 +68,7 @@ pub fn decode_signal_info( let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol( "invalid peer info in reverse connect signal info", ))?; - let peer_info = decode_peer_info(&pi_reader, crypto)?; + let peer_info = decode_peer_info(&pi_reader)?; SignalInfo::ReverseConnect { receipt, peer_info } } diff --git a/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs index c15ddfb8..4740ea59 100644 --- a/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_direct_node_info.rs @@ -6,20 +6,20 @@ pub fn encode_signed_direct_node_info( ) -> Result<(), RPCError> { // let mut ni_builder = builder.reborrow().init_node_info(); - encode_node_info(&signed_direct_node_info.node_info, &mut ni_builder)?; + encode_node_info(signed_direct_node_info.node_info(), &mut ni_builder)?; builder .reborrow() - .set_timestamp(signed_direct_node_info.timestamp.into()); + .set_timestamp(signed_direct_node_info.timestamp().into()); let mut sigs_builder = builder.reborrow().init_signatures( 
signed_direct_node_info - .signatures + .signatures() .len() .try_into() .map_err(RPCError::map_invalid_format("out of bound error"))?, ); - for (i, typed_signature) in signed_direct_node_info.signatures.iter().enumerate() { + for (i, typed_signature) in signed_direct_node_info.signatures().iter().enumerate() { encode_typed_signature( typed_signature, &mut sigs_builder.reborrow().get( @@ -34,8 +34,6 @@ pub fn encode_signed_direct_node_info( pub fn decode_signed_direct_node_info( reader: &veilid_capnp::signed_direct_node_info::Reader, - crypto: Crypto, - node_ids: &mut TypedKeySet, ) -> Result { let ni_reader = reader .reborrow() @@ -61,6 +59,9 @@ pub fn decode_signed_direct_node_info( typed_signatures.push(typed_signature); } - SignedDirectNodeInfo::new(crypto, node_ids, node_info, timestamp, typed_signatures) - .map_err(RPCError::protocol) + Ok(SignedDirectNodeInfo::new( + node_info, + timestamp, + typed_signatures, + )) } diff --git a/veilid-core/src/rpc_processor/coders/signed_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_node_info.rs index aeede197..e3ced6ad 100644 --- a/veilid-core/src/rpc_processor/coders/signed_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_node_info.rs @@ -20,8 +20,6 @@ pub fn encode_signed_node_info( pub fn decode_signed_node_info( reader: &veilid_capnp::signed_node_info::Reader, - crypto: Crypto, - node_ids: &mut TypedKeySet, ) -> Result { match reader .which() @@ -29,12 +27,12 @@ pub fn decode_signed_node_info( { veilid_capnp::signed_node_info::Direct(d) => { let d_reader = d.map_err(RPCError::protocol)?; - let sdni = decode_signed_direct_node_info(&d_reader, crypto, node_ids)?; + let sdni = decode_signed_direct_node_info(&d_reader)?; Ok(SignedNodeInfo::Direct(sdni)) } veilid_capnp::signed_node_info::Relayed(r) => { let r_reader = r.map_err(RPCError::protocol)?; - let srni = decode_signed_relayed_node_info(&r_reader, crypto, node_ids)?; + let srni = decode_signed_relayed_node_info(&r_reader)?; Ok(SignedNodeInfo::Relayed(srni)) } } diff --git a/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs b/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs index 4264e853..e4273e8e 100644 --- a/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs +++ b/veilid-core/src/rpc_processor/coders/signed_relayed_node_info.rs @@ -6,16 +6,16 @@ pub fn encode_signed_relayed_node_info( ) -> Result<(), RPCError> { // let mut ni_builder = builder.reborrow().init_node_info(); - encode_node_info(&signed_relayed_node_info.node_info, &mut ni_builder)?; + encode_node_info(signed_relayed_node_info.node_info(), &mut ni_builder)?; let mut rids_builder = builder.reborrow().init_relay_ids( signed_relayed_node_info - .relay_ids + .relay_ids() .len() .try_into() .map_err(RPCError::map_invalid_format("out of bound error"))?, ); - for (i, typed_key) in signed_relayed_node_info.relay_ids.iter().enumerate() { + for (i, typed_key) in signed_relayed_node_info.relay_ids().iter().enumerate() { encode_typed_key( typed_key, &mut rids_builder.reborrow().get( @@ -26,20 +26,20 @@ pub fn encode_signed_relayed_node_info( } let mut ri_builder = builder.reborrow().init_relay_info(); - encode_signed_direct_node_info(&signed_relayed_node_info.relay_info, &mut ri_builder)?; + encode_signed_direct_node_info(signed_relayed_node_info.relay_info(), &mut ri_builder)?; builder .reborrow() - .set_timestamp(signed_relayed_node_info.timestamp.into()); + .set_timestamp(signed_relayed_node_info.timestamp().into()); let mut sigs_builder = 
builder.reborrow().init_signatures( signed_relayed_node_info - .signatures + .signatures() .len() .try_into() .map_err(RPCError::map_invalid_format("out of bound error"))?, ); - for (i, typed_signature) in signed_relayed_node_info.signatures.iter().enumerate() { + for (i, typed_signature) in signed_relayed_node_info.signatures().iter().enumerate() { encode_typed_signature( typed_signature, &mut sigs_builder.reborrow().get( @@ -54,8 +54,6 @@ pub fn encode_signed_relayed_node_info( pub fn decode_signed_relayed_node_info( reader: &veilid_capnp::signed_relayed_node_info::Reader, - crypto: Crypto, - node_ids: &mut TypedKeySet, ) -> Result { let ni_reader = reader .reborrow() @@ -81,20 +79,7 @@ pub fn decode_signed_relayed_node_info( .reborrow() .get_relay_info() .map_err(RPCError::protocol)?; - let relay_info = decode_signed_direct_node_info(&ri_reader, crypto.clone(), &mut relay_ids)?; - - // Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying - if common_crypto_kinds( - &node_info.crypto_support, - &relay_info.node_info.crypto_support, - ) - .len() - != node_info.crypto_support.len() - { - return Err(RPCError::protocol( - "relay should have superset of node crypto kinds", - )); - } + let relay_info = decode_signed_direct_node_info(&ri_reader)?; let timestamp = reader.reborrow().get_timestamp().into(); @@ -113,14 +98,11 @@ pub fn decode_signed_relayed_node_info( let typed_signature = decode_typed_signature(&sig_reader)?; typed_signatures.push(typed_signature); } - SignedRelayedNodeInfo::new( - crypto, - node_ids, + Ok(SignedRelayedNodeInfo::new( node_info, relay_ids, relay_info, timestamp, typed_signatures, - ) - .map_err(RPCError::protocol) + )) } diff --git a/veilid-core/src/rpc_processor/coders/signed_value_data.rs b/veilid-core/src/rpc_processor/coders/signed_value_data.rs new file mode 100644 index 00000000..a3d29932 --- /dev/null +++ b/veilid-core/src/rpc_processor/coders/signed_value_data.rs @@ -0,0 +1,31 @@ +use super::*; +use crate::storage_manager::*; + +pub fn encode_signed_value_data( + signed_value_data: &SignedValueData, + builder: &mut veilid_capnp::signed_value_data::Builder, +) -> Result<(), RPCError> { + builder.set_seq(signed_value_data.value_data().seq()); + builder.set_data(signed_value_data.value_data().data()); + let mut wb = builder.reborrow().init_writer(); + encode_key256(signed_value_data.value_data().writer(), &mut wb); + let mut sb = builder.reborrow().init_signature(); + encode_signature512(signed_value_data.signature(), &mut sb); + Ok(()) +} + +pub fn decode_signed_value_data( + reader: &veilid_capnp::signed_value_data::Reader, +) -> Result { + let seq = reader.get_seq(); + let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); + let wr = reader.get_writer().map_err(RPCError::protocol)?; + let writer = decode_key256(&wr); + let sr = reader.get_signature().map_err(RPCError::protocol)?; + let signature = decode_signature512(&sr); + + Ok(SignedValueData::new( + ValueData::new_with_seq(seq, data, writer), + signature, + )) +} diff --git a/veilid-core/src/rpc_processor/coders/signed_value_descriptor.rs b/veilid-core/src/rpc_processor/coders/signed_value_descriptor.rs new file mode 100644 index 00000000..c1c93f69 --- /dev/null +++ b/veilid-core/src/rpc_processor/coders/signed_value_descriptor.rs @@ -0,0 +1,28 @@ +use super::*; +use crate::storage_manager::SignedValueDescriptor; + +pub fn encode_signed_value_descriptor( + signed_value_descriptor: &SignedValueDescriptor, + builder: &mut 
veilid_capnp::signed_value_descriptor::Builder, +) -> Result<(), RPCError> { + let mut ob = builder.reborrow().init_owner(); + encode_key256(signed_value_descriptor.owner(), &mut ob); + builder.set_schema_data(signed_value_descriptor.schema_data()); + let mut sb = builder.reborrow().init_signature(); + encode_signature512(signed_value_descriptor.signature(), &mut sb); + Ok(()) +} + +pub fn decode_signed_value_descriptor( + reader: &veilid_capnp::signed_value_descriptor::Reader, +) -> Result { + let or = reader.get_owner().map_err(RPCError::protocol)?; + let owner = decode_key256(&or); + let schema_data = reader + .get_schema_data() + .map_err(RPCError::protocol)? + .to_vec(); + let sr = reader.get_signature().map_err(RPCError::protocol)?; + let signature = decode_signature512(&sr); + Ok(SignedValueDescriptor::new(owner, schema_data, signature)) +} diff --git a/veilid-core/src/rpc_processor/coders/value_data.rs b/veilid-core/src/rpc_processor/coders/value_data.rs index 70cbf0a4..c5985b6c 100644 --- a/veilid-core/src/rpc_processor/coders/value_data.rs +++ b/veilid-core/src/rpc_processor/coders/value_data.rs @@ -1,18 +1,30 @@ use super::*; -pub fn encode_value_data( - value_data: &ValueData, - builder: &mut veilid_capnp::value_data::Builder, +pub fn encode_signed_value_data( + signed_value_data: &SignedValueData, + builder: &mut veilid_capnp::signed_value_data::Builder, ) -> Result<(), RPCError> { - builder.set_data(&value_data.data); - builder.set_schema(u32::from_be_bytes(value_data.schema.0)); - builder.set_seq(value_data.seq); + builder.set_seq(signed_value_data.value_data().seq()); + builder.set_data(signed_value_data.value_data().data()); + let mut wb = builder.reborrow().init_writer(); + encode_key256(signed_value_data.value_data().writer(), &mut wb); + let mut sb = builder.reborrow().init_signature(); + encode_signature512(signed_value_data.signature(), &mut sb); Ok(()) } -pub fn decode_value_data(reader: &veilid_capnp::value_data::Reader) -> Result { - let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); +pub fn decode_signed_value_data( + reader: &veilid_capnp::signed_value_data::Reader, +) -> Result { let seq = reader.get_seq(); - let schema = FourCC::from(reader.get_schema().to_be_bytes()); - Ok(ValueData { data, schema, seq }) + let data = reader.get_data().map_err(RPCError::protocol)?.to_vec(); + let wr = reader.get_writer().map_err(RPCError::protocol)?; + let writer = decode_key256(&wr); + let sr = reader.get_signature().map_err(RPCError::protocol)?; + let signature = decode_signature512(&sr); + + Ok(SignedValueData { + value_data: ValueData { seq, data, writer }, + signature, + }) } diff --git a/veilid-core/src/rpc_processor/fanout_call.rs b/veilid-core/src/rpc_processor/fanout_call.rs new file mode 100644 index 00000000..e55606f5 --- /dev/null +++ b/veilid-core/src/rpc_processor/fanout_call.rs @@ -0,0 +1,233 @@ +use super::*; + +struct FanoutContext +where + R: Unpin, +{ + closest_nodes: Vec, + called_nodes: TypedKeySet, + result: Option>, +} + +pub type FanoutCallReturnType = Result>, RPCError>; + +pub struct FanoutCall +where + R: Unpin, + F: Future, + C: Fn(NodeRef) -> F, + D: Fn(&[NodeRef]) -> Option, +{ + routing_table: RoutingTable, + crypto_kind: CryptoKind, + node_id: TypedKey, + context: Mutex>, + node_count: usize, + fanout: usize, + timeout_us: TimestampDuration, + call_routine: C, + check_done: D, +} + +impl FanoutCall +where + R: Unpin, + F: Future, + C: Fn(NodeRef) -> F, + D: Fn(&[NodeRef]) -> Option, +{ + pub fn new( + routing_table: 
RoutingTable, + node_id: TypedKey, + node_count: usize, + fanout: usize, + timeout_us: TimestampDuration, + call_routine: C, + check_done: D, + ) -> Arc { + let context = Mutex::new(FanoutContext { + closest_nodes: Vec::with_capacity(node_count), + called_nodes: TypedKeySet::new(), + result: None, + }); + + Arc::new(Self { + routing_table, + node_id, + crypto_kind: node_id.kind, + context, + node_count, + fanout, + timeout_us, + call_routine, + check_done, + }) + } + + fn add_new_nodes(self: Arc, new_nodes: Vec) { + let mut ctx = self.context.lock(); + + for nn in new_nodes { + let mut dup = false; + for cn in &ctx.closest_nodes { + if cn.same_entry(&nn) { + dup = true; + } + } + if !dup { + ctx.closest_nodes.push(nn.clone()); + } + } + + self.routing_table + .sort_and_clean_closest_noderefs(self.node_id, &mut ctx.closest_nodes); + ctx.closest_nodes.truncate(self.node_count); + } + + fn remove_node(self: Arc, dead_node: NodeRef) { + let mut ctx = self.context.lock(); + for n in 0..ctx.closest_nodes.len() { + let cn = &ctx.closest_nodes[n]; + if cn.same_entry(&dead_node) { + ctx.closest_nodes.remove(n); + break; + } + } + } + + fn get_next_node(self: Arc) -> Option { + let mut next_node = None; + let mut ctx = self.context.lock(); + for cn in ctx.closest_nodes.clone() { + if let Some(key) = cn.node_ids().get(self.crypto_kind) { + if !ctx.called_nodes.contains(&key) { + // New fanout call candidate found + next_node = Some(cn.clone()); + ctx.called_nodes.add(key); + } + } + } + next_node + } + + fn evaluate_done(self: Arc) -> bool { + let mut ctx = self.context.lock(); + + // If we have a result, then we're done + if ctx.result.is_some() { + return true; + } + + // Check for a new done result + ctx.result = (self.check_done)(&ctx.closest_nodes).map(|o| Ok(o)); + ctx.result.is_some() + } + + async fn fanout_processor(self: Arc) { + // Check to see if we have a result or are done + while !self.clone().evaluate_done() { + // Get the closest node we haven't processed yet + let next_node = self.clone().get_next_node(); + + // If we don't have a node to process, stop fanning out + let Some(next_node) = next_node else { + return; + }; + + // Do the call for this node + match (self.call_routine)(next_node.clone()).await { + Ok(Some(v)) => { + // Call succeeded + // Register the returned nodes and add them to the closest nodes list in sorted order + let new_nodes = self + .routing_table + .register_find_node_answer(self.crypto_kind, v); + self.clone().add_new_nodes(new_nodes); + } + Ok(None) => { + // Call failed, remove the node so it isn't included in the output + self.clone().remove_node(next_node); + } + Err(e) => { + // Error happened, abort everything and return the error + let mut ctx = self.context.lock(); + ctx.result = Some(Err(e)); + return; + } + }; + } + } + + fn init_closest_nodes(self: Arc) { + // Get the 'node_count' closest nodes to the key out of our routing table + let closest_nodes = { + let routing_table = self.routing_table.clone(); + + let filter = Box::new( + move |rti: &RoutingTableInner, opt_entry: Option>| { + // Exclude our own node + if opt_entry.is_none() { + return false; + } + + // Ensure only things that are valid/signed in the PublicInternet domain are returned + rti.filter_has_valid_signed_node_info( + RoutingDomain::PublicInternet, + true, + opt_entry, + ) + }, + ) as RoutingTableEntryFilter; + let filters = VecDeque::from([filter]); + + let transform = |_rti: &RoutingTableInner, v: Option>| { + NodeRef::new(routing_table.clone(), v.unwrap().clone(), None) + }; + + 
routing_table.find_closest_nodes(self.node_count, self.node_id, filters, transform) + }; + + let mut ctx = self.context.lock(); + ctx.closest_nodes = closest_nodes; + } + + pub async fn run(self: Arc) -> TimeoutOr, RPCError>> { + // Get timeout in milliseconds + let timeout_ms = match us_to_ms(self.timeout_us.as_u64()).map_err(RPCError::internal) { + Ok(v) => v, + Err(e) => { + return TimeoutOr::value(Err(e)); + } + }; + + // Initialize closest nodes list + self.clone().init_closest_nodes(); + + // Do a quick check to see if we're already done + if self.clone().evaluate_done() { + let mut ctx = self.context.lock(); + return TimeoutOr::value(ctx.result.take().transpose()); + } + + // If not, do the fanout + let mut unord = FuturesUnordered::new(); + { + // Spin up 'fanout' tasks to process the fanout + for _ in 0..self.fanout { + let h = self.clone().fanout_processor(); + unord.push(h); + } + } + // Wait for them to complete + timeout(timeout_ms, async { + while let Some(_) = unord.next().await {} + }) + .await + .into_timeout_or() + .map(|_| { + // Finished, return whatever value we came up with + let mut ctx = self.context.lock(); + ctx.result.take().transpose() + }) + } +} diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index 45966de0..c526c77f 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -1,5 +1,6 @@ mod coders; mod destination; +mod fanout_call; mod operation_waiter; mod rpc_app_call; mod rpc_app_message; @@ -22,18 +23,20 @@ mod rpc_watch_value; pub use coders::*; pub use destination::*; +pub use fanout_call::*; pub use operation_waiter::*; pub use rpc_error::*; pub use rpc_status::*; use super::*; -use crate::crypto::*; +use crypto::*; use futures_util::StreamExt; use network_manager::*; use receipt_manager::*; use routing_table::*; use stop_token::future::FutureExt; +use storage_manager::*; ///////////////////////////////////////////////////////////////////// @@ -149,7 +152,7 @@ where #[derive(Debug)] struct WaitableReply { - handle: OperationWaitHandle, + handle: OperationWaitHandle>, timeout_us: TimestampDuration, node_ref: NodeRef, send_ts: Timestamp, @@ -235,8 +238,8 @@ pub struct RPCProcessorUnlockedInner { max_route_hop_count: usize, validate_dial_info_receipt_time_ms: u32, update_callback: UpdateCallback, - waiting_rpc_table: OperationWaiter, - waiting_app_call_table: OperationWaiter>, + waiting_rpc_table: OperationWaiter>, + waiting_app_call_table: OperationWaiter, ()>, } #[derive(Clone)] @@ -244,6 +247,7 @@ pub struct RPCProcessor { crypto: Crypto, config: VeilidConfig, network_manager: NetworkManager, + storage_manager: StorageManager, routing_table: RoutingTable, inner: Arc>, unlocked_inner: Arc, @@ -295,6 +299,7 @@ impl RPCProcessor { config: config.clone(), network_manager: network_manager.clone(), routing_table: network_manager.routing_table(), + storage_manager: network_manager.storage_manager(), inner: Arc::new(Mutex::new(Self::new_inner())), unlocked_inner: Arc::new(Self::new_unlocked_inner(config, update_callback)), } @@ -308,33 +313,44 @@ impl RPCProcessor { self.routing_table.clone() } + pub fn storage_manager(&self) -> StorageManager { + self.storage_manager.clone() + } + ////////////////////////////////////////////////////////////////////// #[instrument(level = "debug", skip_all, err)] pub async fn startup(&self) -> EyreResult<()> { - trace!("startup rpc processor"); - let mut inner = self.inner.lock(); + debug!("startup rpc processor"); + { + let mut inner = 
self.inner.lock(); - let channel = flume::bounded(self.unlocked_inner.queue_size as usize); - inner.send_channel = Some(channel.0.clone()); - inner.stop_source = Some(StopSource::new()); + let channel = flume::bounded(self.unlocked_inner.queue_size as usize); + inner.send_channel = Some(channel.0.clone()); + inner.stop_source = Some(StopSource::new()); - // spin up N workers - trace!( - "Spinning up {} RPC workers", - self.unlocked_inner.concurrency - ); - for _ in 0..self.unlocked_inner.concurrency { - let this = self.clone(); - let receiver = channel.1.clone(); - let jh = spawn(Self::rpc_worker( - this, - inner.stop_source.as_ref().unwrap().token(), - receiver, - )); - inner.worker_join_handles.push(jh); + // spin up N workers + trace!( + "Spinning up {} RPC workers", + self.unlocked_inner.concurrency + ); + for _ in 0..self.unlocked_inner.concurrency { + let this = self.clone(); + let receiver = channel.1.clone(); + let jh = spawn(Self::rpc_worker( + this, + inner.stop_source.as_ref().unwrap().token(), + receiver, + )); + inner.worker_join_handles.push(jh); + } } + // Inform storage manager we are up + self.storage_manager + .set_rpc_processor(Some(self.clone())) + .await; + Ok(()) } @@ -342,6 +358,9 @@ impl RPCProcessor { pub async fn shutdown(&self) { debug!("starting rpc processor shutdown"); + // Stop storage manager from using us + self.storage_manager.set_rpc_processor(None).await; + // Stop the rpc workers let mut unord = FuturesUnordered::new(); { @@ -382,44 +401,79 @@ impl RPCProcessor { /// Search the DHT for a single node closest to a key and add it to the routing table and return the node reference /// If no node was found in the timeout, this returns None - pub async fn search_dht_single_key( + async fn search_dht_single_key( &self, - _node_id: PublicKey, - _count: u32, - _fanout: u32, - _timeout: Option, - ) -> Result, RPCError> { - //let routing_table = self.routing_table(); + node_id: TypedKey, + count: usize, + fanout: usize, + timeout_us: TimestampDuration, + safety_selection: SafetySelection, + ) -> TimeoutOr, RPCError>> { + let routing_table = self.routing_table(); - // xxx find node but stop if we find the exact node we want - // xxx return whatever node is closest after the timeout - Err(RPCError::unimplemented("search_dht_single_key")).map_err(logthru_rpc!(error)) - } + // Routine to call to generate fanout + let call_routine = |next_node: NodeRef| { + let this = self.clone(); + async move { + match this + .clone() + .rpc_call_find_node( + Destination::direct(next_node).with_safety(safety_selection), + node_id, + ) + .await + { + Ok(v) => { + let v = network_result_value_or_log!(v => { + // Any other failures, just try the next node + return Ok(None); + }); + Ok(Some(v.answer)) + } + Err(e) => Err(e), + } + } + }; - /// Search the DHT for the 'count' closest nodes to a key, adding them all to the routing table if they are not there and returning their node references - pub async fn search_dht_multi_key( - &self, - _node_id: PublicKey, - _count: u32, - _fanout: u32, - _timeout: Option, - ) -> Result, RPCError> { - // xxx return closest nodes after the timeout - Err(RPCError::unimplemented("search_dht_multi_key")).map_err(logthru_rpc!(error)) + // Routine to call to check if we're done at each step + let check_done = |closest_nodes: &[NodeRef]| { + // If the node we want to locate is one of the closest nodes, return it immediately + if let Some(out) = closest_nodes + .iter() + .find(|x| x.node_ids().contains(&node_id)) + { + return Some(out.clone()); + } + None + }; 
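// Illustrative sketch, not part of this patch: `FanoutCall` is generic over the
// result type returned by `check_done`, so other callers can end the fanout on
// their own criteria. A hypothetical "gather enough close peers" caller might use
// a closure like the one below, where `consensus_count` is an assumed
// configuration value rather than anything defined in this change:
//
//     let check_done = |closest_nodes: &[NodeRef]| {
//         if closest_nodes.len() >= consensus_count {
//             return Some(closest_nodes.to_vec());
//         }
//         None
//     };
//
// The node-locating `check_done` just above is the variant this routine actually uses.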
+ + // Call the fanout + let fanout_call = FanoutCall::new( + routing_table.clone(), + node_id, + count, + fanout, + timeout_us, + call_routine, + check_done, + ); + + fanout_call.run().await } /// Search the DHT for a specific node corresponding to a key unless we have that node in our routing table already, and return the node reference /// Note: This routine can possible be recursive, hence the SendPinBoxFuture async form pub fn resolve_node( &self, - node_id: PublicKey, + node_id: TypedKey, + safety_selection: SafetySelection, ) -> SendPinBoxFuture, RPCError>> { let this = self.clone(); Box::pin(async move { let routing_table = this.routing_table(); // First see if we have the node in our routing table already - if let Some(nr) = routing_table.lookup_any_node_ref(node_id) { + if let Some(nr) = routing_table.lookup_node_ref(node_id) { // ensure we have some dial info for the entry already, // if not, we should do the find_node anyway if nr.has_any_dial_info() { @@ -428,21 +482,30 @@ impl RPCProcessor { } // If nobody knows where this node is, ask the DHT for it - let (count, fanout, timeout) = { + let (node_count, _consensus_count, fanout, timeout) = { let c = this.config.get(); ( - c.network.dht.resolve_node_count, - c.network.dht.resolve_node_fanout, - c.network.dht.resolve_node_timeout_ms.map(ms_to_us), + c.network.dht.max_find_node_count as usize, + c.network.dht.resolve_node_count as usize, + c.network.dht.resolve_node_fanout as usize, + TimestampDuration::from(ms_to_us(c.network.dht.resolve_node_timeout_ms)), ) }; - let nr = this - .search_dht_single_key(node_id, count, fanout, timeout) - .await?; + // Search in preferred cryptosystem order + let nr = match this + .search_dht_single_key(node_id, node_count, fanout, timeout, safety_selection) + .await + { + TimeoutOr::Timeout => None, + TimeoutOr::Value(Ok(v)) => v, + TimeoutOr::Value(Err(e)) => { + return Err(e); + } + }; if let Some(nr) = &nr { - if nr.node_ids().contains_key(&node_id) { + if nr.node_ids().contains(&node_id) { // found a close node, but not exact within our configured resolve_node timeout return Ok(None); } @@ -542,10 +605,7 @@ impl RPCProcessor { // Prepare route operation let sr_hop_count = compiled_route.safety_route.hop_count; - let route_operation = RPCOperationRoute { - safety_route: compiled_route.safety_route, - operation, - }; + let route_operation = RPCOperationRoute::new(compiled_route.safety_route, operation); let ssni_route = self.get_sender_peer_info(&Destination::direct(compiled_route.first_hop.clone())); let operation = RPCOperation::new_statement( @@ -753,7 +813,7 @@ impl RPCProcessor { }; // Get our node info timestamp - let our_node_info_ts = own_peer_info.signed_node_info.timestamp(); + let our_node_info_ts = own_peer_info.signed_node_info().timestamp(); // If the target has seen our node info already don't send it again if target.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) { @@ -998,11 +1058,13 @@ impl RPCProcessor { } /// Issue a question over the network, possibly using an anonymized route + /// Optionally keeps a context to be passed to the answer processor when an answer is received #[instrument(level = "debug", skip(self, question), err)] async fn question( &self, dest: Destination, question: RPCQuestion, + context: Option, ) -> Result, RPCError> { // Get sender peer info if we should send that let spi = self.get_sender_peer_info(&dest); @@ -1030,7 +1092,10 @@ impl RPCProcessor { let timeout_us = self.unlocked_inner.timeout_us * (hop_count as u64); // Set up op id 
eventual - let handle = self.unlocked_inner.waiting_rpc_table.add_op_waiter(op_id); + let handle = self + .unlocked_inner + .waiting_rpc_table + .add_op_waiter(op_id, context); // Send question let bytes: ByteCount = (message.len() as u64).into(); @@ -1072,7 +1137,7 @@ impl RPCProcessor { })) } - // Issue a statement over the network, possibly using an anonymized route + /// Issue a statement over the network, possibly using an anonymized route #[instrument(level = "debug", skip(self, statement), err)] async fn statement( &self, @@ -1128,9 +1193,8 @@ impl RPCProcessor { Ok(NetworkResult::value(())) } - - // Issue a reply over the network, possibly using an anonymized route - // The request must want a response, or this routine fails + /// Issue a reply over the network, possibly using an anonymized route + /// The request must want a response, or this routine fails #[instrument(level = "debug", skip(self, request, answer), err)] async fn answer( &self, @@ -1189,6 +1253,55 @@ impl RPCProcessor { Ok(NetworkResult::value(())) } + /// Decoding RPC from the wire + /// This performs a capnp decode on the data, and if it passes the capnp schema + /// it performs the cryptographic validation required to pass the operation up for processing + fn decode_rpc_operation( + &self, + encoded_msg: &RPCMessageEncoded, + ) -> Result { + let reader = encoded_msg.data.get_reader()?; + let op_reader = reader + .get_root::() + .map_err(RPCError::protocol) + .map_err(logthru_rpc!())?; + let mut operation = RPCOperation::decode(&op_reader)?; + + // Validate the RPC message + self.validate_rpc_operation(&mut operation)?; + + Ok(operation) + } + + /// Cryptographic RPC validation + /// We do this as part of the RPC network layer to ensure that any RPC operations that are + /// processed have already been validated cryptographically and it is not the job of the + /// caller or receiver. This does not mean the operation is 'semantically correct'. For + /// complex operations that require stateful validation and a more robust context than + /// 'signatures', the caller must still perform whatever validation is necessary + fn validate_rpc_operation(&self, operation: &mut RPCOperation) -> Result<(), RPCError> { + // If this is an answer, get the question context for this answer + // If we received an answer for a question we did not ask, this will return an error + let question_context = if let RPCOperationKind::Answer(_) = operation.kind() { + let op_id = operation.op_id(); + self.unlocked_inner + .waiting_rpc_table + .get_op_context(op_id)? 
+ } else { + None + }; + + // Validate the RPC operation + let validate_context = RPCValidateContext { + crypto: self.crypto.clone(), + rpc_processor: self.clone(), + question_context, + }; + operation.validate(&validate_context)?; + + Ok(()) + } + ////////////////////////////////////////////////////////////////////// #[instrument(level = "trace", skip(self, encoded_msg), err)] async fn process_rpc_message( @@ -1198,32 +1311,26 @@ impl RPCProcessor { // Decode operation appropriately based on header detail let msg = match &encoded_msg.header.detail { RPCMessageHeaderDetail::Direct(detail) => { + // Decode and validate the RPC operation + let operation = match self.decode_rpc_operation(&encoded_msg) { + Ok(v) => v, + Err(e) => return Ok(NetworkResult::invalid_message(e)), + }; + // Get the routing domain this message came over let routing_domain = detail.routing_domain; - // Decode the operation + // Get the sender noderef, incorporating sender's peer info let sender_node_id = TypedKey::new( detail.envelope.get_crypto_kind(), detail.envelope.get_sender_id(), ); - - // Decode the RPC message - let operation = { - let reader = encoded_msg.data.get_reader()?; - let op_reader = reader - .get_root::() - .map_err(RPCError::protocol) - .map_err(logthru_rpc!())?; - RPCOperation::decode(&op_reader, self.crypto.clone())? - }; - - // Get the sender noderef, incorporating sender's peer info let mut opt_sender_nr: Option = None; if let Some(sender_peer_info) = operation.sender_peer_info() { // Ensure the sender peer info is for the actual sender specified in the envelope // Sender PeerInfo was specified, update our routing table with it - if !self.filter_node_info(routing_domain, &sender_peer_info.signed_node_info) { + if !self.filter_node_info(routing_domain, sender_peer_info.signed_node_info()) { return Err(RPCError::invalid_format( "sender peerinfo has invalid peer scope", )); @@ -1243,7 +1350,8 @@ impl RPCProcessor { // Update the 'seen our node info' timestamp to determine if this node needs a // 'node info update' ping if let Some(sender_nr) = &opt_sender_nr { - sender_nr.set_our_node_info_ts(routing_domain, operation.target_node_info_ts()); + sender_nr + .set_seen_our_node_info_ts(routing_domain, operation.target_node_info_ts()); } // Make the RPC message @@ -1254,15 +1362,8 @@ impl RPCProcessor { } } RPCMessageHeaderDetail::SafetyRouted(_) | RPCMessageHeaderDetail::PrivateRouted(_) => { - // Decode the RPC message - let operation = { - let reader = encoded_msg.data.get_reader()?; - let op_reader = reader - .get_root::() - .map_err(RPCError::protocol) - .map_err(logthru_rpc!())?; - RPCOperation::decode(&op_reader, self.crypto.clone())? 
- }; + // Decode and validate the RPC operation + let operation = self.decode_rpc_operation(&encoded_msg)?; // Make the RPC message RPCMessage { diff --git a/veilid-core/src/rpc_processor/operation_waiter.rs b/veilid-core/src/rpc_processor/operation_waiter.rs index 0dfdd926..2f39e200 100644 --- a/veilid-core/src/rpc_processor/operation_waiter.rs +++ b/veilid-core/src/rpc_processor/operation_waiter.rs @@ -1,18 +1,20 @@ use super::*; #[derive(Debug)] -pub struct OperationWaitHandle +pub struct OperationWaitHandle where T: Unpin, + C: Unpin + Clone, { - waiter: OperationWaiter, + waiter: OperationWaiter, op_id: OperationId, eventual_instance: Option, T)>>, } -impl Drop for OperationWaitHandle +impl Drop for OperationWaitHandle where T: Unpin, + C: Unpin + Clone, { fn drop(&mut self) { if self.eventual_instance.is_some() { @@ -22,24 +24,37 @@ where } #[derive(Debug)] -pub struct OperationWaiterInner +pub struct OperationWaitingOp where T: Unpin, + C: Unpin + Clone, { - waiting_op_table: HashMap, T)>>, + context: C, + eventual: EventualValue<(Option, T)>, } #[derive(Debug)] -pub struct OperationWaiter +pub struct OperationWaiterInner where T: Unpin, + C: Unpin + Clone, { - inner: Arc>>, + waiting_op_table: HashMap>, } -impl Clone for OperationWaiter +#[derive(Debug)] +pub struct OperationWaiter where T: Unpin, + C: Unpin + Clone, +{ + inner: Arc>>, +} + +impl Clone for OperationWaiter +where + T: Unpin, + C: Unpin + Clone, { fn clone(&self) -> Self { Self { @@ -48,9 +63,10 @@ where } } -impl OperationWaiter +impl OperationWaiter where T: Unpin, + C: Unpin + Clone, { pub fn new() -> Self { Self { @@ -60,11 +76,15 @@ where } } - // set up wait for op - pub fn add_op_waiter(&self, op_id: OperationId) -> OperationWaitHandle { + /// Set up wait for operation to complete + pub fn add_op_waiter(&self, op_id: OperationId, context: C) -> OperationWaitHandle { let mut inner = self.inner.lock(); let e = EventualValue::new(); - if inner.waiting_op_table.insert(op_id, e.clone()).is_some() { + let waiting_op = OperationWaitingOp { + context, + eventual: e.clone(), + }; + if inner.waiting_op_table.insert(op_id, waiting_op).is_some() { error!( "add_op_waiter collision should not happen for op_id {}", op_id @@ -78,16 +98,25 @@ where } } - // remove wait for op + /// Get operation context + pub fn get_op_context(&self, op_id: OperationId) -> Result { + let inner = self.inner.lock(); + let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else { + return Err(RPCError::internal("Missing operation id getting op context")); + }; + Ok(waiting_op.context.clone()) + } + + /// Remove wait for op fn cancel_op_waiter(&self, op_id: OperationId) { let mut inner = self.inner.lock(); inner.waiting_op_table.remove(&op_id); } - // complete the app call + /// Complete the app call #[instrument(level = "trace", skip(self, message), err)] pub async fn complete_op_waiter(&self, op_id: OperationId, message: T) -> Result<(), RPCError> { - let eventual = { + let waiting_op = { let mut inner = self.inner.lock(); inner .waiting_op_table @@ -97,17 +126,20 @@ where op_id )))? 
}; - eventual.resolve((Span::current().id(), message)).await; + waiting_op + .eventual + .resolve((Span::current().id(), message)) + .await; Ok(()) } + /// Wait for opeation to complete pub async fn wait_for_op( &self, - mut handle: OperationWaitHandle, + mut handle: OperationWaitHandle, timeout_us: TimestampDuration, ) -> Result, RPCError> { - let timeout_ms = u32::try_from(timeout_us.as_u64() / 1000u64) - .map_err(|e| RPCError::map_internal("invalid timeout")(e))?; + let timeout_ms = us_to_ms(timeout_us.as_u64()).map_err(RPCError::internal)?; // Take the instance // After this, we must manually cancel since the cancel on handle drop is disabled diff --git a/veilid-core/src/rpc_processor/rpc_app_call.rs b/veilid-core/src/rpc_processor/rpc_app_call.rs index 4d6b78c4..80919d4c 100644 --- a/veilid-core/src/rpc_processor/rpc_app_call.rs +++ b/veilid-core/src/rpc_processor/rpc_app_call.rs @@ -9,14 +9,14 @@ impl RPCProcessor { dest: Destination, message: Vec, ) -> Result>>, RPCError> { - let app_call_q = RPCOperationAppCallQ { message }; + let app_call_q = RPCOperationAppCallQ::new(message)?; let question = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), RPCQuestionDetail::AppCallQ(app_call_q), ); // Send the app call question - let waitable_reply = network_result_try!(self.question(dest, question).await?); + let waitable_reply = network_result_try!(self.question(dest, question, None).await?); // Wait for reply let (msg, latency) = match self.wait_for_reply(waitable_reply).await? { @@ -25,18 +25,18 @@ impl RPCProcessor { }; // Get the right answer type - let app_call_a = match msg.operation.into_kind() { - RPCOperationKind::Answer(a) => match a.into_detail() { + let (_, _, _, kind) = msg.operation.destructure(); + let app_call_a = match kind { + RPCOperationKind::Answer(a) => match a.destructure() { RPCAnswerDetail::AppCallA(a) => a, _ => return Err(RPCError::invalid_format("not an appcall answer")), }, _ => return Err(RPCError::invalid_format("not an answer")), }; - Ok(NetworkResult::value(Answer::new( - latency, - app_call_a.message, - ))) + let a_message = app_call_a.destructure(); + + Ok(NetworkResult::value(Answer::new(latency, a_message))) } #[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err)] @@ -45,9 +45,10 @@ impl RPCProcessor { msg: RPCMessage, ) -> Result, RPCError> { // Get the question - let app_call_q = match msg.operation.kind() { - RPCOperationKind::Question(q) => match q.detail() { - RPCQuestionDetail::AppCallQ(q) => q, + let (op_id, _, _, kind) = msg.operation.clone().destructure(); + let app_call_q = match kind { + RPCOperationKind::Question(q) => match q.destructure() { + (_, RPCQuestionDetail::AppCallQ(q)) => q, _ => panic!("not an appcall question"), }, _ => panic!("not a question"), @@ -60,19 +61,19 @@ impl RPCProcessor { let sender = msg .opt_sender_nr .as_ref() - .map(|nr| nr.node_ids().get(crypto_kind).unwrap().value); + .map(|nr| nr.node_ids().get(crypto_kind).unwrap()); // Register a waiter for this app call - let id = msg.operation.op_id(); - let handle = self.unlocked_inner.waiting_app_call_table.add_op_waiter(id); + let handle = self + .unlocked_inner + .waiting_app_call_table + .add_op_waiter(op_id, ()); // Pass the call up through the update callback - let message = app_call_q.message.clone(); - (self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(VeilidAppCall { - sender, - message, - id, - })); + let message_q = app_call_q.destructure(); + 
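// Illustrative sketch, not part of this patch: the app-call round trip is keyed by
// `op_id` through `waiting_app_call_table`:
//
//     // 1. A waiter was registered above, before handing the question to the app:
//     //        let handle = waiting_app_call_table.add_op_waiter(op_id, ());
//     // 2. The question is delivered via the update callback (VeilidUpdate::AppCall).
//     // 3. When the application answers, the matching waiter is resolved with the
//     //    answer bytes via `complete_op_waiter(op_id, message)`; which API surface
//     //    the application uses to reach that call is outside the hunks shown here.
//     // 4. `wait_for_op(handle, timeout_us)` below yields those bytes, or times out
//     //    and this handler returns `NetworkResult::timeout()`.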
(self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(VeilidAppCall::new( + sender, message_q, op_id, + ))); // Wait for an app call answer to come back from the app let res = self @@ -80,17 +81,17 @@ impl RPCProcessor { .waiting_app_call_table .wait_for_op(handle, self.unlocked_inner.timeout_us) .await?; - let (message, _latency) = match res { + let (message_a, _latency) = match res { TimeoutOr::Timeout => { // No message sent on timeout, but this isn't an error - log_rpc!(debug "App call timed out for id {}", id); + log_rpc!(debug "App call timed out for id {}", op_id); return Ok(NetworkResult::timeout()); } TimeoutOr::Value(v) => v, }; // Return the appcall answer - let app_call_a = RPCOperationAppCallA { message }; + let app_call_a = RPCOperationAppCallA::new(message_a)?; // Send status answer self.answer(msg, RPCAnswer::new(RPCAnswerDetail::AppCallA(app_call_a))) diff --git a/veilid-core/src/rpc_processor/rpc_app_message.rs b/veilid-core/src/rpc_processor/rpc_app_message.rs index 0150a205..726515a4 100644 --- a/veilid-core/src/rpc_processor/rpc_app_message.rs +++ b/veilid-core/src/rpc_processor/rpc_app_message.rs @@ -9,7 +9,7 @@ impl RPCProcessor { dest: Destination, message: Vec, ) -> Result, RPCError> { - let app_message = RPCOperationAppMessage { message }; + let app_message = RPCOperationAppMessage::new(message)?; let statement = RPCStatement::new(RPCStatementDetail::AppMessage(app_message)); // Send the app message request @@ -22,8 +22,9 @@ impl RPCProcessor { msg: RPCMessage, ) -> Result, RPCError> { // Get the statement - let app_message = match msg.operation.into_kind() { - RPCOperationKind::Statement(s) => match s.into_detail() { + let (_, _, _, kind) = msg.operation.destructure(); + let app_message = match kind { + RPCOperationKind::Statement(s) => match s.destructure() { RPCStatementDetail::AppMessage(s) => s, _ => panic!("not an app message"), }, @@ -37,14 +38,13 @@ impl RPCProcessor { let sender = msg .opt_sender_nr .as_ref() - .map(|nr| nr.node_ids().get(crypto_kind).unwrap().value); + .map(|nr| nr.node_ids().get(crypto_kind).unwrap()); // Pass the message up through the update callback - let message = app_message.message; - (self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(VeilidAppMessage { - sender, - message, - })); + let message = app_message.destructure(); + (self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(VeilidAppMessage::new( + sender, message, + ))); Ok(NetworkResult::value(())) } diff --git a/veilid-core/src/rpc_processor/rpc_find_node.rs b/veilid-core/src/rpc_processor/rpc_find_node.rs index a3a5af6e..fe2b416f 100644 --- a/veilid-core/src/rpc_processor/rpc_find_node.rs +++ b/veilid-core/src/rpc_processor/rpc_find_node.rs @@ -26,14 +26,14 @@ impl RPCProcessor { )); } - let find_node_q_detail = RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ { node_id }); + let find_node_q_detail = RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ::new(node_id)); let find_node_q = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), find_node_q_detail, ); // Send the find_node request - let waitable_reply = network_result_try!(self.question(dest, find_node_q).await?); + let waitable_reply = network_result_try!(self.question(dest, find_node_q, None).await?); // Wait for reply let (msg, latency) = match self.wait_for_reply(waitable_reply).await? 
{ @@ -42,8 +42,9 @@ impl RPCProcessor { }; // Get the right answer type - let find_node_a = match msg.operation.into_kind() { - RPCOperationKind::Answer(a) => match a.into_detail() { + let (_, _, _, kind) = msg.operation.destructure(); + let find_node_a = match kind { + RPCOperationKind::Answer(a) => match a.destructure() { RPCAnswerDetail::FindNodeA(a) => a, _ => return Err(RPCError::invalid_format("not a find_node answer")), }, @@ -51,18 +52,17 @@ impl RPCProcessor { }; // Verify peers are in the correct peer scope - for peer_info in &find_node_a.peers { - if !self.filter_node_info(RoutingDomain::PublicInternet, &peer_info.signed_node_info) { + let peers = find_node_a.destructure(); + + for peer_info in &peers { + if !self.filter_node_info(RoutingDomain::PublicInternet, peer_info.signed_node_info()) { return Err(RPCError::invalid_format( "find_node response has invalid peer scope", )); } } - Ok(NetworkResult::value(Answer::new( - latency, - find_node_a.peers, - ))) + Ok(NetworkResult::value(Answer::new(latency, peers))) } #[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err)] @@ -81,55 +81,24 @@ impl RPCProcessor { } // Get the question - let find_node_q = match msg.operation.kind() { - RPCOperationKind::Question(q) => match q.detail() { - RPCQuestionDetail::FindNodeQ(q) => q, - _ => panic!("not a status question"), + let kind = msg.operation.kind().clone(); + let find_node_q = match kind { + RPCOperationKind::Question(q) => match q.destructure() { + (_, RPCQuestionDetail::FindNodeQ(q)) => q, + _ => panic!("not a findnode question"), }, _ => panic!("not a question"), }; + let node_id = find_node_q.destructure(); - // add node information for the requesting node to our routing table + // Get a chunk of the routing table near the requested node id let routing_table = self.routing_table(); - let Some(own_peer_info) = routing_table.get_own_peer_info(RoutingDomain::PublicInternet) else { - // Our own node info is not yet available, drop this request. 
- return Ok(NetworkResult::service_unavailable()); - }; + let closest_nodes = network_result_try!(routing_table.find_all_closest_peers(node_id)); - // find N nodes closest to the target node in our routing table - let filter = Box::new( - move |rti: &RoutingTableInner, opt_entry: Option>| { - // Ensure only things that are valid/signed in the PublicInternet domain are returned - rti.filter_has_valid_signed_node_info( - RoutingDomain::PublicInternet, - true, - opt_entry, - ) - }, - ) as RoutingTableEntryFilter; - let filters = VecDeque::from([filter]); + // Make FindNode answer + let find_node_a = RPCOperationFindNodeA::new(closest_nodes)?; - let node_count = { - let c = self.config.get(); - c.network.dht.max_find_node_count as usize - }; - - let closest_nodes = routing_table.find_closest_nodes( - node_count, - find_node_q.node_id, - filters, - // transform - |rti, entry| { - rti.transform_to_peer_info(RoutingDomain::PublicInternet, &own_peer_info, entry) - }, - ); - - // Make status answer - let find_node_a = RPCOperationFindNodeA { - peers: closest_nodes, - }; - - // Send status answer + // Send FindNode answer self.answer(msg, RPCAnswer::new(RPCAnswerDetail::FindNodeA(find_node_a))) .await } diff --git a/veilid-core/src/rpc_processor/rpc_get_value.rs b/veilid-core/src/rpc_processor/rpc_get_value.rs index 3a697bb0..7e68b172 100644 --- a/veilid-core/src/rpc_processor/rpc_get_value.rs +++ b/veilid-core/src/rpc_processor/rpc_get_value.rs @@ -1,11 +1,138 @@ use super::*; +use crate::storage_manager::{SignedValueData, SignedValueDescriptor}; + +#[derive(Clone, Debug)] +pub struct GetValueAnswer { + pub value: Option, + pub peers: Vec, + pub descriptor: Option, +} impl RPCProcessor { + /// Sends a get value request and wait for response + /// Can be sent via all methods including relays + /// Safety routes may be used, but never private routes. + /// Because this leaks information about the identity of the node itself, + /// replying to this request received over a private route will leak + /// the identity of the node and defeat the private route. + #[instrument(level = "trace", skip(self), ret, err)] + pub async fn rpc_call_get_value( + self, + dest: Destination, + key: TypedKey, + subkey: ValueSubkey, + last_descriptor: Option, + ) -> Result>, RPCError> { + // Ensure destination never has a private route + if matches!( + dest, + Destination::PrivateRoute { + private_route: _, + safety_selection: _ + } + ) { + return Err(RPCError::internal( + "Never send get value requests over private routes", + )); + } + + let get_value_q = RPCOperationGetValueQ::new(key, subkey, last_descriptor.is_none()); + let question = RPCQuestion::new( + network_result_try!(self.get_destination_respond_to(&dest)?), + RPCQuestionDetail::GetValueQ(get_value_q), + ); + let Some(vcrypto) = self.crypto.get(key.kind) else { + return Err(RPCError::internal("unsupported cryptosystem")); + }; + + // Send the getvalue question + let question_context = QuestionContext::GetValue(ValidateGetValueContext { + last_descriptor, + subkey, + vcrypto, + }); + + let waitable_reply = network_result_try!( + self.question(dest, question, Some(question_context)) + .await? + ); + + // Wait for reply + let (msg, latency) = match self.wait_for_reply(waitable_reply).await? 
{ + TimeoutOr::Timeout => return Ok(NetworkResult::Timeout), + TimeoutOr::Value(v) => v, + }; + + // Get the right answer type + let (_, _, _, kind) = msg.operation.destructure(); + let get_value_a = match kind { + RPCOperationKind::Answer(a) => match a.destructure() { + RPCAnswerDetail::GetValueA(a) => a, + _ => return Err(RPCError::invalid_format("not a getvalue answer")), + }, + _ => return Err(RPCError::invalid_format("not an answer")), + }; + + let (value, peers, descriptor) = get_value_a.destructure(); + + Ok(NetworkResult::value(Answer::new( + latency, + GetValueAnswer { + value, + peers, + descriptor, + }, + ))) + } + #[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err)] pub(crate) async fn process_get_value_q( &self, msg: RPCMessage, ) -> Result, RPCError> { - Err(RPCError::unimplemented("process_get_value_q")) + // Ensure this never came over a private route, safety route is okay though + match &msg.header.detail { + RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {} + RPCMessageHeaderDetail::PrivateRouted(_) => { + return Ok(NetworkResult::invalid_message( + "not processing get value request over private route", + )) + } + } + + // Get the question + let kind = msg.operation.kind().clone(); + let get_value_q = match kind { + RPCOperationKind::Question(q) => match q.destructure() { + (_, RPCQuestionDetail::GetValueQ(q)) => q, + _ => panic!("not a getvalue question"), + }, + _ => panic!("not a question"), + }; + + // Destructure + let (key, subkey, want_descriptor) = get_value_q.destructure(); + + // Get the nodes that we know about that are closer to the the key than our own node + let routing_table = self.routing_table(); + let closer_to_key_peers = network_result_try!(routing_table.find_peers_closer_to_key(key)); + + // See if we have this record ourselves + let storage_manager = self.storage_manager(); + let subkey_result = network_result_try!(storage_manager + .inbound_get_value(key, subkey, want_descriptor) + .await + .map_err(RPCError::internal)?); + + // Make GetValue answer + let get_value_a = RPCOperationGetValueA::new( + subkey_result.value, + closer_to_key_peers, + subkey_result.descriptor, + )?; + + // Send GetValue answer + self.answer(msg, RPCAnswer::new(RPCAnswerDetail::GetValueA(get_value_a))) + .await } } diff --git a/veilid-core/src/rpc_processor/rpc_return_receipt.rs b/veilid-core/src/rpc_processor/rpc_return_receipt.rs index a5537369..383dcc6d 100644 --- a/veilid-core/src/rpc_processor/rpc_return_receipt.rs +++ b/veilid-core/src/rpc_processor/rpc_return_receipt.rs @@ -11,7 +11,7 @@ impl RPCProcessor { ) -> Result, RPCError> { let receipt = receipt.as_ref().to_vec(); - let return_receipt = RPCOperationReturnReceipt { receipt }; + let return_receipt = RPCOperationReturnReceipt::new(receipt)?; let statement = RPCStatement::new(RPCStatementDetail::ReturnReceipt(return_receipt)); // Send the return_receipt request @@ -26,9 +26,10 @@ impl RPCProcessor { msg: RPCMessage, ) -> Result, RPCError> { // Get the statement - let RPCOperationReturnReceipt { receipt } = match msg.operation.into_kind() { - RPCOperationKind::Statement(s) => match s.into_detail() { - RPCStatementDetail::ReturnReceipt(s) => s, + let (_, _, _, kind) = msg.operation.destructure(); + let receipt = match kind { + RPCOperationKind::Statement(s) => match s.destructure() { + RPCStatementDetail::ReturnReceipt(s) => s.destructure(), _ => panic!("not a return receipt"), }, _ => panic!("not a statement"), diff --git 
a/veilid-core/src/rpc_processor/rpc_route.rs b/veilid-core/src/rpc_processor/rpc_route.rs index 14ddccc8..64bc884e 100644 --- a/veilid-core/src/rpc_processor/rpc_route.rs +++ b/veilid-core/src/rpc_processor/rpc_route.rs @@ -34,17 +34,17 @@ impl RPCProcessor { }; // Apply sequencing preference - next_hop_nr.set_sequencing(routed_operation.sequencing); + next_hop_nr.set_sequencing(routed_operation.sequencing()); // Pass along the route - let next_hop_route = RPCOperationRoute { - safety_route: SafetyRoute { + let next_hop_route = RPCOperationRoute::new( + SafetyRoute { public_key: safety_route.public_key, hop_count: safety_route.hop_count - 1, hops: SafetyRouteHops::Data(route_hop.next_hop.unwrap()), }, - operation: routed_operation, - }; + routed_operation, + ); let next_hop_route_stmt = RPCStatement::new(RPCStatementDetail::Route(next_hop_route)); // Send the next route statement @@ -76,17 +76,17 @@ impl RPCProcessor { }; // Apply sequencing preference - next_hop_nr.set_sequencing(routed_operation.sequencing); + next_hop_nr.set_sequencing(routed_operation.sequencing()); // Pass along the route - let next_hop_route = RPCOperationRoute { - safety_route: SafetyRoute { + let next_hop_route = RPCOperationRoute::new( + SafetyRoute { public_key: safety_route_public_key, hop_count: 0, hops: SafetyRouteHops::Private(next_private_route), }, - operation: routed_operation, - }; + routed_operation, + ); let next_hop_route_stmt = RPCStatement::new(RPCStatementDetail::Route(next_hop_route)); // Send the next route statement @@ -114,8 +114,8 @@ impl RPCProcessor { .cached_dh(&remote_sr_pubkey.value, &node_id_secret) .map_err(RPCError::protocol)?; let body = match vcrypto.decrypt_aead( - &routed_operation.data, - &routed_operation.nonce, + routed_operation.data(), + routed_operation.nonce(), &dh_secret, None, ) { @@ -132,7 +132,7 @@ impl RPCProcessor { self.enqueue_safety_routed_message( detail, remote_sr_pubkey.value, - routed_operation.sequencing, + routed_operation.sequencing(), body, ) .map_err(RPCError::internal)?; @@ -162,8 +162,8 @@ impl RPCProcessor { let Some((secret_key, safety_spec)) = rss .with_signature_validated_route( &pr_pubkey, - &routed_operation.signatures, - &routed_operation.data, + routed_operation.signatures(), + routed_operation.data(), sender_id.value, |rssd, rsd| { ( @@ -172,7 +172,7 @@ impl RPCProcessor { preferred_route, hop_count: rssd.hop_count(), stability: rssd.get_stability(), - sequencing: routed_operation.sequencing, + sequencing: routed_operation.sequencing(), }, ) } @@ -188,8 +188,8 @@ impl RPCProcessor { .map_err(RPCError::protocol)?; let body = vcrypto .decrypt_aead( - &routed_operation.data, - &routed_operation.nonce, + routed_operation.data(), + routed_operation.nonce(), &dh_secret, None, ) @@ -341,8 +341,11 @@ impl RPCProcessor { let rh_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_route_hop(&rh_reader, self.crypto.clone())? + decode_route_hop(&rh_reader)? 
}; + + // Validate the RouteHop + route_hop.validate(self.crypto.clone()).map_err(RPCError::protocol)?; // Sign the operation if this is not our last hop // as the last hop is already signed by the envelope @@ -350,9 +353,9 @@ impl RPCProcessor { let node_id = self.routing_table.node_id(crypto_kind); let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind); let sig = vcrypto - .sign(&node_id.value, &node_id_secret, &route_operation.data) + .sign(&node_id.value, &node_id_secret, route_operation.data()) .map_err(RPCError::internal)?; - route_operation.signatures.push(sig); + route_operation.add_signature(sig); } Ok(NetworkResult::value(route_hop)) @@ -374,8 +377,9 @@ impl RPCProcessor { }; // Get the statement - let mut route = match msg.operation.into_kind() { - RPCOperationKind::Statement(s) => match s.into_detail() { + let (_,_,_,kind) = msg.operation.destructure(); + let route = match kind { + RPCOperationKind::Statement(s) => match s.destructure() { RPCStatementDetail::Route(s) => s, _ => panic!("not a route statement"), }, @@ -383,7 +387,7 @@ impl RPCProcessor { }; // Get crypto kind - let crypto_kind = route.safety_route.crypto_kind(); + let crypto_kind = route.safety_route().crypto_kind(); let Some(vcrypto) = self.crypto.get(crypto_kind) else { return Ok(NetworkResult::invalid_message( "routed operation crypto is not supported", @@ -391,13 +395,14 @@ impl RPCProcessor { }; // See what kind of safety route we have going on here - match route.safety_route.hops { + let (safety_route, mut routed_operation) = route.destructure(); + match safety_route.hops { // There is a safety route hop SafetyRouteHops::Data(ref route_hop_data) => { // Decrypt the blob with DEC(nonce, DH(the SR's public key, this hop's secret) let node_id_secret = self.routing_table.node_id_secret_key(crypto_kind); let dh_secret = vcrypto - .cached_dh(&route.safety_route.public_key.value, &node_id_secret) + .cached_dh(&safety_route.public_key.value, &node_id_secret) .map_err(RPCError::protocol)?; let mut dec_blob_data = vcrypto .decrypt_aead( @@ -422,14 +427,17 @@ impl RPCProcessor { let pr_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_private_route(&pr_reader, self.crypto.clone())? + decode_private_route(&pr_reader)? }; + + // Validate the private route + private_route.validate(self.crypto.clone()).map_err(RPCError::protocol)?; // Switching from full safety route to private route first hop network_result_try!( self.process_private_route_first_hop( - route.operation, - route.safety_route.public_key, + routed_operation, + safety_route.public_key, private_route, ) .await? @@ -440,15 +448,18 @@ impl RPCProcessor { let rh_reader = dec_blob_reader .get_root::() .map_err(RPCError::protocol)?; - decode_route_hop(&rh_reader, self.crypto.clone())? + decode_route_hop(&rh_reader)? }; + // Validate the route hop + route_hop.validate(self.crypto.clone()).map_err(RPCError::protocol)?; + // Continue the full safety route with another hop network_result_try!( self.process_route_safety_route_hop( - route.operation, + routed_operation, route_hop, - route.safety_route + safety_route ) .await? ); @@ -464,8 +475,8 @@ impl RPCProcessor { // Safety route was a stub, start with the beginning of the private route network_result_try!( self.process_private_route_first_hop( - route.operation, - route.safety_route.public_key, + routed_operation, + safety_route.public_key, private_route, ) .await? 
@@ -476,7 +487,7 @@ impl RPCProcessor { let route_hop = network_result_try!(self.decrypt_private_route_hop_data( &route_hop_data, &private_route.public_key, - &mut route.operation + &mut routed_operation )?); // Ensure hop count > 0 @@ -489,9 +500,9 @@ impl RPCProcessor { // Make next PrivateRoute and pass it on network_result_try!( self.process_route_private_route_hop( - route.operation, + routed_operation, route_hop.node, - route.safety_route.public_key, + safety_route.public_key, PrivateRoute { public_key: private_route.public_key, hop_count: private_route.hop_count - 1, @@ -511,7 +522,7 @@ impl RPCProcessor { "route should be at the end", )); } - if route.safety_route.hop_count != 0 { + if safety_route.hop_count != 0 { return Ok(NetworkResult::invalid_message( "Safety hop count should be zero if switched to private route", )); @@ -521,8 +532,8 @@ impl RPCProcessor { network_result_try!(self.process_routed_operation( detail, vcrypto, - route.operation, - route.safety_route.public_key, + routed_operation, + safety_route.public_key, private_route.public_key, )?); } diff --git a/veilid-core/src/rpc_processor/rpc_set_value.rs b/veilid-core/src/rpc_processor/rpc_set_value.rs index 2f195412..cf4a61be 100644 --- a/veilid-core/src/rpc_processor/rpc_set_value.rs +++ b/veilid-core/src/rpc_processor/rpc_set_value.rs @@ -1,12 +1,154 @@ use super::*; +#[derive(Clone, Debug)] +pub struct SetValueAnswer { + pub set: bool, + pub value: Option, + pub peers: Vec, +} + impl RPCProcessor { + /// Sends a set value request and wait for response + /// Can be sent via all methods including relays + /// Safety routes may be used, but never private routes. + /// Because this leaks information about the identity of the node itself, + /// replying to this request received over a private route will leak + /// the identity of the node and defeat the private route. + #[instrument(level = "trace", skip(self), ret, err)] + pub async fn rpc_call_set_value( + self, + dest: Destination, + key: TypedKey, + subkey: ValueSubkey, + value: SignedValueData, + descriptor: SignedValueDescriptor, + send_descriptor: bool, + ) -> Result>, RPCError> { + // Ensure destination never has a private route + if matches!( + dest, + Destination::PrivateRoute { + private_route: _, + safety_selection: _ + } + ) { + return Err(RPCError::internal( + "Never send set value requests over private routes", + )); + } + + let set_value_q = RPCOperationSetValueQ::new( + key, + subkey, + value, + if send_descriptor { + Some(descriptor.clone()) + } else { + None + }, + ); + let question = RPCQuestion::new( + network_result_try!(self.get_destination_respond_to(&dest)?), + RPCQuestionDetail::SetValueQ(set_value_q), + ); + let Some(vcrypto) = self.crypto.get(key.kind) else { + return Err(RPCError::internal("unsupported cryptosystem")); + }; + + // Send the setvalue question + let question_context = QuestionContext::SetValue(ValidateSetValueContext { + descriptor, + subkey, + vcrypto, + }); + + let waitable_reply = network_result_try!( + self.question(dest, question, Some(question_context)) + .await? + ); + + // Wait for reply + let (msg, latency) = match self.wait_for_reply(waitable_reply).await? 
{ + TimeoutOr::Timeout => return Ok(NetworkResult::Timeout), + TimeoutOr::Value(v) => v, + }; + + // Get the right answer type + let (_, _, _, kind) = msg.operation.destructure(); + let set_value_a = match kind { + RPCOperationKind::Answer(a) => match a.destructure() { + RPCAnswerDetail::SetValueA(a) => a, + _ => return Err(RPCError::invalid_format("not a setvalue answer")), + }, + _ => return Err(RPCError::invalid_format("not an answer")), + }; + + let (set, value, peers) = set_value_a.destructure(); + + Ok(NetworkResult::value(Answer::new( + latency, + SetValueAnswer { set, value, peers }, + ))) + } + #[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), ret, err)] pub(crate) async fn process_set_value_q( &self, msg: RPCMessage, ) -> Result, RPCError> { - // tracing::Span::current().record("res", &tracing::field::display(res)); - Err(RPCError::unimplemented("process_set_value_q")) + // Ensure this never came over a private route, safety route is okay though + match &msg.header.detail { + RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {} + RPCMessageHeaderDetail::PrivateRouted(_) => { + return Ok(NetworkResult::invalid_message( + "not processing set value request over private route", + )) + } + } + + // Get the question + let kind = msg.operation.kind().clone(); + let set_value_q = match kind { + RPCOperationKind::Question(q) => match q.destructure() { + (_, RPCQuestionDetail::SetValueQ(q)) => q, + _ => panic!("not a setvalue question"), + }, + _ => panic!("not a question"), + }; + + // Destructure + let (key, subkey, value, descriptor) = set_value_q.destructure(); + + // Get the nodes that we know about that are closer to the the key than our own node + let routing_table = self.routing_table(); + let closer_to_key_peers = network_result_try!(routing_table.find_peers_closer_to_key(key)); + + // If there are less than 'set_value_count' peers that are closer, then store here too + let set_value_count = { + let c = self.config.get(); + c.network.dht.set_value_fanout as usize + }; + let (set, new_value) = if closer_to_key_peers.len() >= set_value_count { + // Not close enough + (false, None) + } else { + // Close enough, lets set it + + // Save the subkey, creating a new record if necessary + let storage_manager = self.storage_manager(); + let new_value = network_result_try!(storage_manager + .inbound_set_value(key, subkey, value, descriptor) + .await + .map_err(RPCError::internal)?); + + (true, new_value) + }; + + // Make SetValue answer + let set_value_a = RPCOperationSetValueA::new(set, new_value, closer_to_key_peers)?; + + // Send SetValue answer + self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(set_value_a))) + .await } } diff --git a/veilid-core/src/rpc_processor/rpc_signal.rs b/veilid-core/src/rpc_processor/rpc_signal.rs index d717b395..57f81513 100644 --- a/veilid-core/src/rpc_processor/rpc_signal.rs +++ b/veilid-core/src/rpc_processor/rpc_signal.rs @@ -22,7 +22,7 @@ impl RPCProcessor { )); } - let signal = RPCOperationSignal { signal_info }; + let signal = RPCOperationSignal::new(signal_info); let statement = RPCStatement::new(RPCStatementDetail::Signal(signal)); // Send the signal request @@ -44,8 +44,9 @@ impl RPCProcessor { }; // Get the statement - let signal = match msg.operation.into_kind() { - RPCOperationKind::Statement(s) => match s.into_detail() { + let (_, _, _, kind) = msg.operation.destructure(); + let signal = match kind { + RPCOperationKind::Statement(s) => match s.destructure() { 
RPCStatementDetail::Signal(s) => s, _ => panic!("not a signal"), }, @@ -54,8 +55,9 @@ impl RPCProcessor { // Handle it let network_manager = self.network_manager(); + let signal_info = signal.destructure(); network_manager - .handle_signal(signal.signal_info) + .handle_signal(signal_info) .await .map_err(RPCError::network) } diff --git a/veilid-core/src/rpc_processor/rpc_status.rs b/veilid-core/src/rpc_processor/rpc_status.rs index 2335100a..f45ea125 100644 --- a/veilid-core/src/rpc_processor/rpc_status.rs +++ b/veilid-core/src/rpc_processor/rpc_status.rs @@ -68,14 +68,15 @@ impl RPCProcessor { } }; - let status_q = RPCOperationStatusQ { node_status }; + let status_q = RPCOperationStatusQ::new(node_status); let question = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), RPCQuestionDetail::StatusQ(status_q), ); // Send the info request - let waitable_reply = network_result_try!(self.question(dest.clone(), question).await?); + let waitable_reply = + network_result_try!(self.question(dest.clone(), question, None).await?); // Note what kind of ping this was and to what peer scope let send_data_kind = waitable_reply.send_data_kind; @@ -87,27 +88,29 @@ impl RPCProcessor { }; // Get the right answer type - let status_a = match msg.operation.into_kind() { - RPCOperationKind::Answer(a) => match a.into_detail() { + let (_, _, _, kind) = msg.operation.destructure(); + let status_a = match kind { + RPCOperationKind::Answer(a) => match a.destructure() { RPCAnswerDetail::StatusA(a) => a, _ => return Err(RPCError::invalid_format("not a status answer")), }, _ => return Err(RPCError::invalid_format("not an answer")), }; + let (a_node_status, sender_info) = status_a.destructure(); // Ensure the returned node status is the kind for the routing domain we asked for if let Some(target_nr) = opt_target_nr { - if let Some(node_status) = status_a.node_status { + if let Some(a_node_status) = a_node_status { match routing_domain { RoutingDomain::PublicInternet => { - if !matches!(node_status, NodeStatus::PublicInternet(_)) { + if !matches!(a_node_status, NodeStatus::PublicInternet(_)) { return Ok(NetworkResult::invalid_message( "node status doesn't match PublicInternet routing domain", )); } } RoutingDomain::LocalNetwork => { - if !matches!(node_status, NodeStatus::LocalNetwork(_)) { + if !matches!(a_node_status, NodeStatus::LocalNetwork(_)) { return Ok(NetworkResult::invalid_message( "node status doesn't match LocalNetwork routing domain", )); @@ -116,7 +119,7 @@ impl RPCProcessor { } // Update latest node status in routing table - target_nr.update_node_status(node_status); + target_nr.update_node_status(a_node_status.clone()); } } @@ -130,7 +133,7 @@ impl RPCProcessor { safety_selection, } => { if matches!(safety_selection, SafetySelection::Unsafe(_)) { - if let Some(sender_info) = status_a.sender_info { + if let Some(sender_info) = sender_info { match send_data_kind { SendDataKind::Direct(connection_descriptor) => { // Directly requested status that actually gets sent directly and not over a relay will tell us what our IP address appears as @@ -184,13 +187,15 @@ impl RPCProcessor { msg: RPCMessage, ) -> Result, RPCError> { // Get the question - let status_q = match msg.operation.kind() { - RPCOperationKind::Question(q) => match q.detail() { - RPCQuestionDetail::StatusQ(q) => q, + let kind = msg.operation.kind().clone(); + let status_q = match kind { + RPCOperationKind::Question(q) => match q.destructure() { + (_, RPCQuestionDetail::StatusQ(q)) => q, _ => panic!("not a status 
question"), }, _ => panic!("not a question"), }; + let q_node_status = status_q.destructure(); let (node_status, sender_info) = match &msg.header.detail { RPCMessageHeaderDetail::Direct(detail) => { @@ -198,17 +203,17 @@ impl RPCProcessor { let routing_domain = detail.routing_domain; // Ensure the node status from the question is the kind for the routing domain we received the request in - if let Some(node_status) = &status_q.node_status { + if let Some(q_node_status) = q_node_status { match routing_domain { RoutingDomain::PublicInternet => { - if !matches!(node_status, NodeStatus::PublicInternet(_)) { + if !matches!(q_node_status, NodeStatus::PublicInternet(_)) { return Ok(NetworkResult::invalid_message( "node status doesn't match PublicInternet routing domain", )); } } RoutingDomain::LocalNetwork => { - if !matches!(node_status, NodeStatus::LocalNetwork(_)) { + if !matches!(q_node_status, NodeStatus::LocalNetwork(_)) { return Ok(NetworkResult::invalid_message( "node status doesn't match LocalNetwork routing domain", )); @@ -219,7 +224,7 @@ impl RPCProcessor { // update node status for the requesting node to our routing table if let Some(sender_nr) = msg.opt_sender_nr.clone() { // Update latest node status in routing table for the statusq sender - sender_nr.update_node_status(node_status.clone()); + sender_nr.update_node_status(q_node_status.clone()); } } @@ -243,10 +248,7 @@ impl RPCProcessor { }; // Make status answer - let status_a = RPCOperationStatusA { - node_status, - sender_info, - }; + let status_a = RPCOperationStatusA::new(node_status, sender_info); // Send status answer self.answer(msg, RPCAnswer::new(RPCAnswerDetail::StatusA(status_a))) diff --git a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs index 7535779e..e1530359 100644 --- a/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs +++ b/veilid-core/src/rpc_processor/rpc_validate_dial_info.rs @@ -17,11 +17,7 @@ impl RPCProcessor { .generate_single_shot_receipt(receipt_time, []) .map_err(RPCError::internal)?; - let validate_dial_info = RPCOperationValidateDialInfo { - dial_info, - receipt, - redirect, - }; + let validate_dial_info = RPCOperationValidateDialInfo::new(dial_info, receipt, redirect)?; let statement = RPCStatement::new(RPCStatementDetail::ValidateDialInfo(validate_dial_info)); // Send the validate_dial_info request @@ -69,13 +65,10 @@ impl RPCProcessor { }; // Get the statement - let RPCOperationValidateDialInfo { - dial_info, - receipt, - redirect, - } = match msg.operation.into_kind() { - RPCOperationKind::Statement(s) => match s.into_detail() { - RPCStatementDetail::ValidateDialInfo(s) => s, + let (_, _, _, kind) = msg.operation.destructure(); + let (dial_info, receipt, redirect) = match kind { + RPCOperationKind::Statement(s) => match s.destructure() { + RPCStatementDetail::ValidateDialInfo(s) => s.destructure(), _ => panic!("not a validate dial info"), }, _ => panic!("not a statement"), @@ -137,11 +130,8 @@ impl RPCProcessor { } // Make a copy of the request, without the redirect flag - let validate_dial_info = RPCOperationValidateDialInfo { - dial_info: dial_info.clone(), - receipt: receipt.clone(), - redirect: false, - }; + let validate_dial_info = + RPCOperationValidateDialInfo::new(dial_info.clone(), receipt.clone(), false)?; let statement = RPCStatement::new(RPCStatementDetail::ValidateDialInfo(validate_dial_info)); diff --git a/veilid-core/src/storage_manager/debug.rs b/veilid-core/src/storage_manager/debug.rs new file mode 100644 
index 00000000..0c47b56a --- /dev/null +++ b/veilid-core/src/storage_manager/debug.rs @@ -0,0 +1,18 @@ +use super::*; + +impl StorageManager { + pub(crate) async fn debug_local_records(&self) -> String { + let inner = self.inner.lock().await; + let Some(local_record_store) = &inner.local_record_store else { + return "not initialized".to_owned(); + }; + local_record_store.debug_records() + } + pub(crate) async fn debug_remote_records(&self) -> String { + let inner = self.inner.lock().await; + let Some(remote_record_store) = &inner.remote_record_store else { + return "not initialized".to_owned(); + }; + remote_record_store.debug_records() + } +} diff --git a/veilid-core/src/storage_manager/get_value.rs b/veilid-core/src/storage_manager/get_value.rs new file mode 100644 index 00000000..3d5584e7 --- /dev/null +++ b/veilid-core/src/storage_manager/get_value.rs @@ -0,0 +1,191 @@ +use super::*; + +/// The context of the do_get_value operation +struct DoGetValueContext { + /// The latest value of the subkey, may be the value passed in + pub value: Option, + /// The consensus count for the value we have received + pub value_count: usize, + /// The descriptor if we got a fresh one or empty if no descriptor was needed + pub descriptor: Option, + /// The parsed schema from the descriptor if we have one + pub schema: Option, +} + +impl StorageManager { + + /// Perform a 'get value' query on the network + pub async fn outbound_get_value( + &self, + rpc_processor: RPCProcessor, + key: TypedKey, + subkey: ValueSubkey, + safety_selection: SafetySelection, + last_subkey_result: SubkeyResult, + ) -> VeilidAPIResult { + let routing_table = rpc_processor.routing_table(); + + // Get the DHT parameters for 'GetValue' + let (key_count, consensus_count, fanout, timeout_us) = { + let c = self.unlocked_inner.config.get(); + ( + c.network.dht.max_find_node_count as usize, + c.network.dht.get_value_count as usize, + c.network.dht.get_value_fanout as usize, + TimestampDuration::from(ms_to_us(c.network.dht.get_value_timeout_ms)), + ) + }; + + // Make do-get-value answer context + let schema = if let Some(d) = &last_subkey_result.descriptor { + Some(d.schema()?) + } else { + None + }; + let context = Arc::new(Mutex::new(DoGetValueContext { + value: last_subkey_result.value, + value_count: 0, + descriptor: last_subkey_result.descriptor.clone(), + schema, + })); + + // Routine to call to generate fanout + let call_routine = |next_node: NodeRef| { + let rpc_processor = rpc_processor.clone(); + let context = context.clone(); + let last_descriptor = last_subkey_result.descriptor.clone(); + async move { + let vres = rpc_processor + .clone() + .rpc_call_get_value( + Destination::direct(next_node).with_safety(safety_selection), + key, + subkey, + last_descriptor, + ) + .await?; + let gva = network_result_value_or_log!(vres => { + // Any other failures, just try the next node + return Ok(None); + }); + + // Keep the descriptor if we got one. 
If we had a last_descriptor it will + // already be validated by rpc_call_get_value + if let Some(descriptor) = gva.answer.descriptor { + let mut ctx = context.lock(); + if ctx.descriptor.is_none() && ctx.schema.is_none() { + ctx.schema = + Some(descriptor.schema().map_err(RPCError::invalid_format)?); + ctx.descriptor = Some(descriptor); + } + } + + // Keep the value if we got one and it is newer and it passes schema validation + if let Some(value) = gva.answer.value { + let mut ctx = context.lock(); + + // Ensure we have a schema and descriptor + let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema) else { + // Got a value but no descriptor for it + // Move to the next node + return Ok(None); + }; + + // Validate with schema + if !schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value + // Move to the next node + return Ok(None); + } + + // If we have a prior value, see if this is a newer sequence number + if let Some(prior_value) = &ctx.value { + let prior_seq = prior_value.value_data().seq(); + let new_seq = value.value_data().seq(); + + if new_seq == prior_seq { + // If sequence number is the same, the data should be the same + if prior_value.value_data() != value.value_data() { + // Move to the next node + return Ok(None); + } + // Increase the consensus count for the existing value + ctx.value_count += 1; + } else if new_seq > prior_seq { + // If the sequence number is greater, start over with the new value + ctx.value = Some(value); + // One node has shown us this value so far + ctx.value_count = 1; + } else { + // If the sequence number is older, ignore it + } + } + } + + // Return peers if we have some + Ok(Some(gva.answer.peers)) + } + }; + + // Routine to call to check if we're done at each step + let check_done = |_closest_nodes: &[NodeRef]| { + // If we have reached sufficient consensus, return done + let ctx = context.lock(); + if ctx.value.is_some() && ctx.descriptor.is_some() && ctx.value_count >= consensus_count { + return Some(()); + } + None + }; + + // Call the fanout + let fanout_call = FanoutCall::new( + routing_table.clone(), + key, + key_count, + fanout, + timeout_us, + call_routine, + check_done, + ); + + match fanout_call.run().await { + // If we don't finish in the timeout (too much time passed checking for consensus) + TimeoutOr::Timeout | + // If we finished with consensus (enough nodes returning the same value) + TimeoutOr::Value(Ok(Some(()))) | + // If we finished without consensus (ran out of nodes before getting consensus) + TimeoutOr::Value(Ok(None)) => { + // Return the best answer we've got + let ctx = context.lock(); + Ok(SubkeyResult{ + value: ctx.value.clone(), + descriptor: ctx.descriptor.clone(), + }) + } + // Failed + TimeoutOr::Value(Err(e)) => { + // If we finished with an error, return that + Err(e.into()) + } + } + } + + /// Handle a received 'Get Value' query + pub async fn inbound_get_value(&self, key: TypedKey, subkey: ValueSubkey, want_descriptor: bool) -> VeilidAPIResult> { + let mut inner = self.lock().await?; + let res = match inner.handle_get_remote_value(key, subkey, want_descriptor).await { + Ok(res) => res, + Err(VeilidAPIError::Internal { message }) => { + apibail_internal!(message); + }, + Err(e) => { + return Ok(NetworkResult::invalid_message(e)); + }, + }; + Ok(NetworkResult::value(res)) + } +} diff --git a/veilid-core/src/storage_manager/keys.rs b/veilid-core/src/storage_manager/keys.rs new file mode 100644 index 00000000..547e4aa9 --- 
/dev/null +++ b/veilid-core/src/storage_manager/keys.rs @@ -0,0 +1,63 @@ +use super::*; + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct RecordTableKey { + pub key: TypedKey, +} +impl RecordTableKey { + pub fn bytes(&self) -> [u8; PUBLIC_KEY_LENGTH + 4] { + let mut bytes = [0u8; PUBLIC_KEY_LENGTH + 4]; + bytes[0..4].copy_from_slice(&self.key.kind.0); + bytes[4..PUBLIC_KEY_LENGTH + 4].copy_from_slice(&self.key.value.bytes); + bytes + } +} + +impl TryFrom<&[u8]> for RecordTableKey { + type Error = EyreReport; + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() != PUBLIC_KEY_LENGTH + 4 { + bail!("invalid bytes length"); + } + let kind = FourCC::try_from(&bytes[0..4]).wrap_err("invalid kind")?; + let value = + PublicKey::try_from(&bytes[4..PUBLIC_KEY_LENGTH + 4]).wrap_err("invalid value")?; + let key = TypedKey::new(kind, value); + Ok(RecordTableKey { key }) + } +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct SubkeyTableKey { + pub key: TypedKey, + pub subkey: ValueSubkey, +} +impl SubkeyTableKey { + pub fn bytes(&self) -> [u8; PUBLIC_KEY_LENGTH + 4 + 4] { + let mut bytes = [0u8; PUBLIC_KEY_LENGTH + 4 + 4]; + bytes[0..4].copy_from_slice(&self.key.kind.0); + bytes[4..PUBLIC_KEY_LENGTH + 4].copy_from_slice(&self.key.value.bytes); + bytes[PUBLIC_KEY_LENGTH + 4..PUBLIC_KEY_LENGTH + 4 + 4] + .copy_from_slice(&self.subkey.to_le_bytes()); + bytes + } +} +impl TryFrom<&[u8]> for SubkeyTableKey { + type Error = EyreReport; + fn try_from(bytes: &[u8]) -> Result { + if bytes.len() != PUBLIC_KEY_LENGTH + 4 + 4 { + bail!("invalid bytes length"); + } + let kind = FourCC::try_from(&bytes[0..4]).wrap_err("invalid kind")?; + let value = + PublicKey::try_from(&bytes[4..PUBLIC_KEY_LENGTH + 4]).wrap_err("invalid value")?; + let subkey = ValueSubkey::from_le_bytes( + bytes[PUBLIC_KEY_LENGTH + 4..PUBLIC_KEY_LENGTH + 4 + 4] + .try_into() + .wrap_err("invalid subkey")?, + ); + + let key = TypedKey::new(kind, value); + Ok(SubkeyTableKey { key, subkey }) + } +} diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs new file mode 100644 index 00000000..96f772d2 --- /dev/null +++ b/veilid-core/src/storage_manager/mod.rs @@ -0,0 +1,411 @@ +mod debug; +mod get_value; +mod keys; +mod record_store; +mod record_store_limits; +mod set_value; +mod storage_manager_inner; +mod tasks; +mod types; + +use keys::*; +use record_store::*; +use record_store_limits::*; +use storage_manager_inner::*; + +pub use types::*; + +use super::*; +use crate::rpc_processor::*; + +/// The maximum size of a single subkey +const MAX_SUBKEY_SIZE: usize = ValueData::MAX_LEN; +/// The maximum total size of all subkeys of a record +const MAX_RECORD_DATA_SIZE: usize = 1_048_576; +/// Frequency to flush record stores to disk +const FLUSH_RECORD_STORES_INTERVAL_SECS: u32 = 1; + +struct StorageManagerUnlockedInner { + config: VeilidConfig, + crypto: Crypto, + protected_store: ProtectedStore, + table_store: TableStore, + block_store: BlockStore, + + // Background processes + flush_record_stores_task: TickTask, +} + +#[derive(Clone)] +pub struct StorageManager { + unlocked_inner: Arc, + inner: Arc>, +} + +impl StorageManager { + fn new_unlocked_inner( + config: VeilidConfig, + crypto: Crypto, + protected_store: ProtectedStore, + table_store: TableStore, + block_store: BlockStore, + ) -> StorageManagerUnlockedInner { + StorageManagerUnlockedInner { + config, + crypto, + protected_store, + table_store, + block_store, + flush_record_stores_task: 
TickTask::new(FLUSH_RECORD_STORES_INTERVAL_SECS), + } + } + fn new_inner(unlocked_inner: Arc) -> StorageManagerInner { + StorageManagerInner::new(unlocked_inner) + } + + pub fn new( + config: VeilidConfig, + crypto: Crypto, + protected_store: ProtectedStore, + table_store: TableStore, + block_store: BlockStore, + ) -> StorageManager { + let unlocked_inner = Arc::new(Self::new_unlocked_inner( + config, + crypto, + protected_store, + table_store, + block_store, + )); + let this = StorageManager { + unlocked_inner: unlocked_inner.clone(), + inner: Arc::new(AsyncMutex::new(Self::new_inner(unlocked_inner))), + }; + + this.setup_tasks(); + + this + } + + #[instrument(level = "debug", skip_all, err)] + pub async fn init(&self) -> EyreResult<()> { + debug!("startup storage manager"); + + let mut inner = self.inner.lock().await; + inner.init(self.clone()).await?; + + Ok(()) + } + + pub async fn terminate(&self) { + debug!("starting storage manager shutdown"); + + let mut inner = self.inner.lock().await; + inner.terminate().await; + + // Cancel all tasks + self.cancel_tasks().await; + + // Release the storage manager + *inner = Self::new_inner(self.unlocked_inner.clone()); + + debug!("finished storage manager shutdown"); + } + + pub async fn set_rpc_processor(&self, opt_rpc_processor: Option) { + let mut inner = self.inner.lock().await; + inner.rpc_processor = opt_rpc_processor + } + + async fn lock(&self) -> VeilidAPIResult> { + let inner = asyncmutex_lock_arc!(&self.inner); + if !inner.initialized { + apibail_not_initialized!(); + } + Ok(inner) + } + + /// Create a local record from scratch with a new owner key, open it, and return the opened descriptor + pub async fn create_record( + &self, + kind: CryptoKind, + schema: DHTSchema, + safety_selection: SafetySelection, + ) -> VeilidAPIResult { + let mut inner = self.lock().await?; + + // Create a new owned local record from scratch + let (key, owner) = inner + .create_new_owned_local_record(kind, schema, safety_selection) + .await?; + + // Now that the record is made we should always succeed to open the existing record + // The initial writer is the owner of the record + inner + .open_existing_record(key, Some(owner), safety_selection) + .map(|r| r.unwrap()) + } + + /// Open an existing local record if it exists, + /// and if it doesnt exist locally, try to pull it from the network and + /// open it and return the opened descriptor + pub async fn open_record( + &self, + key: TypedKey, + writer: Option, + safety_selection: SafetySelection, + ) -> VeilidAPIResult { + let mut inner = self.lock().await?; + + // See if we have a local record already or not + if let Some(res) = inner.open_existing_record(key, writer, safety_selection)? 
{ + return Ok(res); + } + + // No record yet, try to get it from the network + + // Get rpc processor and drop mutex so we don't block while getting the value from the network + let Some(rpc_processor) = inner.rpc_processor.clone() else { + // Offline, try again later + apibail_try_again!(); + }; + + // Drop the mutex so we dont block during network access + drop(inner); + + // No last descriptor, no last value + // Use the safety selection we opened the record with + let subkey: ValueSubkey = 0; + let subkey_result = self + .outbound_get_value( + rpc_processor, + key, + subkey, + safety_selection, + SubkeyResult::default(), + ) + .await?; + + // If we got nothing back, the key wasn't found + if subkey_result.value.is_none() && subkey_result.descriptor.is_none() { + // No result + apibail_key_not_found!(key); + }; + + // Reopen inner to store value we just got + let mut inner = self.lock().await?; + + // Open the new record + inner + .open_new_record(key, writer, subkey, subkey_result, safety_selection) + .await + } + + /// Close an opened local record + pub async fn close_record(&self, key: TypedKey) -> VeilidAPIResult<()> { + let mut inner = self.lock().await?; + inner.close_record(key) + } + + /// Delete a local record + pub async fn delete_record(&self, key: TypedKey) -> VeilidAPIResult<()> { + let mut inner = self.lock().await?; + + // Ensure the record is closed + if inner.opened_records.contains_key(&key) { + inner.close_record(key)?; + } + + let Some(local_record_store) = inner.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Remove the record from the local store + local_record_store.delete_record(key).await + } + + /// Get the value of a subkey from an opened local record + /// may refresh the record, and will if it is forced to or the subkey is not available locally yet + /// Returns Ok(None) if no value was found + /// Returns Ok(Some(value)) is a value was found online or locally + pub async fn get_value( + &self, + key: TypedKey, + subkey: ValueSubkey, + force_refresh: bool, + ) -> VeilidAPIResult> { + let mut inner = self.lock().await?; + let Some(opened_record) = inner.opened_records.remove(&key) else { + apibail_generic!("record not open"); + }; + + // See if the requested subkey is our local record store + let last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?; + + // Return the existing value if we have one unless we are forcing a refresh + if !force_refresh { + if let Some(last_subkey_result_value) = last_subkey_result.value { + return Ok(Some(last_subkey_result_value.into_value_data())); + } + } + + // Refresh if we can + + // Get rpc processor and drop mutex so we don't block while getting the value from the network + let Some(rpc_processor) = inner.rpc_processor.clone() else { + // Offline, try again later + apibail_try_again!(); + }; + + // Drop the lock for network access + drop(inner); + + // May have last descriptor / value + // Use the safety selection we opened the record with + let opt_last_seq = last_subkey_result + .value + .as_ref() + .map(|v| v.value_data().seq()); + let subkey_result = self + .outbound_get_value( + rpc_processor, + key, + subkey, + opened_record.safety_selection(), + last_subkey_result, + ) + .await?; + + // See if we got a value back + let Some(subkey_result_value) = subkey_result.value else { + // If we got nothing back then we also had nothing beforehand, return nothing + return Ok(None); + }; + + // If we got a new value back then write it to the opened record + if 
Some(subkey_result_value.value_data().seq()) != opt_last_seq { + let mut inner = self.lock().await?; + inner + .handle_set_local_value(key, subkey, subkey_result_value.clone()) + .await?; + } + Ok(Some(subkey_result_value.into_value_data())) + } + + /// Set the value of a subkey on an opened local record + /// Puts changes to the network immediately and may refresh the record if the there is a newer subkey available online + /// Returns Ok(None) if the value was set + /// Returns Ok(Some(newer value)) if a newer value was found online + pub async fn set_value( + &self, + key: TypedKey, + subkey: ValueSubkey, + data: Vec, + ) -> VeilidAPIResult> { + let mut inner = self.lock().await?; + + // Get cryptosystem + let Some(vcrypto) = self.unlocked_inner.crypto.get(key.kind) else { + apibail_generic!("unsupported cryptosystem"); + }; + + let Some(opened_record) = inner.opened_records.remove(&key) else { + apibail_generic!("record not open"); + }; + + // If we don't have a writer then we can't write + let Some(writer) = opened_record.writer().cloned() else { + apibail_generic!("value is not writable"); + }; + + // See if the subkey we are modifying has a last known local value + let last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?; + + // Get the descriptor and schema for the key + let Some(descriptor) = last_subkey_result.descriptor else { + apibail_generic!("must have a descriptor"); + }; + let schema = descriptor.schema()?; + + // Make new subkey data + let value_data = if let Some(signed_value_data) = last_subkey_result.value { + let seq = signed_value_data.value_data().seq(); + ValueData::new_with_seq(seq + 1, data, writer.key) + } else { + ValueData::new(data, writer.key) + }; + let seq = value_data.seq(); + + // Validate with schema + if !schema.check_subkey_value_data(descriptor.owner(), subkey, &value_data) { + // Validation failed, ignore this value + apibail_generic!("failed schema validation"); + } + + // Sign the new value data with the writer + let signed_value_data = SignedValueData::make_signature( + value_data, + descriptor.owner(), + subkey, + vcrypto, + writer.secret, + )?; + + // Get rpc processor and drop mutex so we don't block while getting the value from the network + let Some(rpc_processor) = inner.rpc_processor.clone() else { + // Offline, just write it locally and return immediately + inner + .handle_set_local_value(key, subkey, signed_value_data.clone()) + .await?; + + // Add to offline writes to flush + inner.offline_subkey_writes.entry(key).and_modify(|x| { x.insert(subkey); } ).or_insert(ValueSubkeyRangeSet::single(subkey)); + return Ok(Some(signed_value_data.into_value_data())) + }; + + // Drop the lock for network access + drop(inner); + + // Use the safety selection we opened the record with + + let final_signed_value_data = self + .outbound_set_value( + rpc_processor, + key, + subkey, + opened_record.safety_selection(), + signed_value_data, + descriptor, + ) + .await?; + + // If we got a new value back then write it to the opened record + if final_signed_value_data.value_data().seq() != seq { + let mut inner = self.lock().await?; + inner + .handle_set_local_value(key, subkey, final_signed_value_data.clone()) + .await?; + } + Ok(Some(final_signed_value_data.into_value_data())) + } + + pub async fn watch_values( + &self, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + expiration: Timestamp, + count: u32, + ) -> VeilidAPIResult { + let inner = self.lock().await?; + unimplemented!(); + } + + pub async fn cancel_watch_values( + &self, + 
key: TypedKey, + subkeys: ValueSubkeyRangeSet, + ) -> VeilidAPIResult { + let inner = self.lock().await?; + unimplemented!(); + } +} diff --git a/veilid-core/src/storage_manager/record_store.rs b/veilid-core/src/storage_manager/record_store.rs new file mode 100644 index 00000000..27119fe7 --- /dev/null +++ b/veilid-core/src/storage_manager/record_store.rs @@ -0,0 +1,548 @@ +/// RecordStore +/// Keeps an LRU cache of dht keys and their associated subkey valuedata. +/// Instances of this store are used for 'local' (persistent) and 'remote' (ephemeral) dht key storage. +/// This store does not perform any validation on the schema, and all ValueRecordData passed in must have been previously validated. +/// Uses an in-memory store for the records, backed by the TableStore. Subkey data is LRU cached and rotated out by a limits policy, +/// and backed to the TableStore for persistence. +use super::*; +use hashlink::LruCache; + +pub struct RecordStore +where + D: Clone + RkyvArchive + RkyvSerialize, + for<'t> ::Archived: CheckBytes>, + ::Archived: RkyvDeserialize, +{ + table_store: TableStore, + name: String, + limits: RecordStoreLimits, + + record_table: Option, + subkey_table: Option, + record_index: LruCache>, + subkey_cache: LruCache, + subkey_cache_total_size: usize, + total_storage_space: usize, + + dead_records: Vec<(RecordTableKey, Record)>, + changed_records: HashSet, + + purge_dead_records_mutex: Arc>, +} + +/// The result of the do_get_value_operation +#[derive(Default, Debug)] +pub struct SubkeyResult { + /// The subkey value if we got one + pub value: Option, + /// The descriptor if we got a fresh one or empty if no descriptor was needed + pub descriptor: Option, +} + +impl RecordStore +where + D: Clone + RkyvArchive + RkyvSerialize, + for<'t> ::Archived: CheckBytes>, + ::Archived: RkyvDeserialize, +{ + pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self { + let subkey_cache_size = limits.subkey_cache_size as usize; + Self { + table_store, + name: name.to_owned(), + limits, + record_table: None, + subkey_table: None, + record_index: LruCache::new(limits.max_records.unwrap_or(usize::MAX)), + subkey_cache: LruCache::new(subkey_cache_size), + subkey_cache_total_size: 0, + total_storage_space: 0, + dead_records: Vec::new(), + changed_records: HashSet::new(), + purge_dead_records_mutex: Arc::new(AsyncMutex::new(())), + } + } + + pub async fn init(&mut self) -> EyreResult<()> { + let record_table = self + .table_store + .open(&format!("{}_records", self.name), 1) + .await?; + let subkey_table = self + .table_store + .open(&&format!("{}_subkeys", self.name), 1) + .await?; + + // Pull record index from table into a vector to ensure we sort them + let record_table_keys = record_table.get_keys(0).await?; + let mut record_index_saved: Vec<(RecordTableKey, Record)> = + Vec::with_capacity(record_table_keys.len()); + for rtk in record_table_keys { + if let Some(vr) = record_table.load_rkyv::>(0, &rtk).await? 
{ + let rik = RecordTableKey::try_from(rtk.as_ref())?; + record_index_saved.push((rik, vr)); + } + } + + // Sort the record index by last touched time and insert in sorted order + record_index_saved.sort_by(|a, b| a.1.last_touched().cmp(&b.1.last_touched())); + let mut dead_records = Vec::new(); + for ri in record_index_saved { + // total the storage space + self.total_storage_space += mem::size_of::(); + self.total_storage_space += ri.1.total_size(); + + // add to index and ensure we deduplicate in the case of an error + if let Some(v) = self.record_index.insert(ri.0, ri.1, |k, v| { + // If the configuration change, we only want to keep the 'limits.max_records' records + dead_records.push((k, v)); + }) { + // This shouldn't happen, but deduplicate anyway + log_stor!(warn "duplicate record in table: {:?}", ri.0); + dead_records.push((ri.0, v)); + } + } + for (k, v) in dead_records { + self.add_dead_record(k, v); + } + + self.record_table = Some(record_table); + self.subkey_table = Some(subkey_table); + Ok(()) + } + + fn add_dead_record(&mut self, key: RecordTableKey, record: Record) { + self.dead_records.push((key, record)); + } + + fn mark_record_changed(&mut self, key: RecordTableKey) { + self.changed_records.insert(key); + } + + fn add_to_subkey_cache(&mut self, key: SubkeyTableKey, record_data: RecordData) { + let record_data_total_size = record_data.total_size(); + // Write to subkey cache + let mut dead_size = 0usize; + if let Some(old_record_data) = self.subkey_cache.insert(key, record_data, |_, v| { + // LRU out + dead_size += v.total_size(); + }) { + // Old data + dead_size += old_record_data.total_size(); + } + self.subkey_cache_total_size -= dead_size; + self.subkey_cache_total_size += record_data_total_size; + + // Purge over size limit + if let Some(max_subkey_cache_memory_mb) = self.limits.max_subkey_cache_memory_mb { + while self.subkey_cache_total_size > (max_subkey_cache_memory_mb * 1_048_576usize) { + if let Some((_, v)) = self.subkey_cache.remove_lru() { + self.subkey_cache_total_size -= v.total_size(); + } else { + break; + } + } + } + } + + fn remove_from_subkey_cache(&mut self, key: SubkeyTableKey) { + if let Some(dead_record_data) = self.subkey_cache.remove(&key) { + self.subkey_cache_total_size -= dead_record_data.total_size(); + } + } + + async fn purge_dead_records(&mut self, lazy: bool) { + let purge_dead_records_mutex = self.purge_dead_records_mutex.clone(); + let _lock = if lazy { + match asyncmutex_try_lock!(purge_dead_records_mutex) { + Some(v) => v, + None => { + // If not ready now, just skip it if we're lazy + return; + } + } + } else { + // Not lazy, must wait + purge_dead_records_mutex.lock().await + }; + + // Delete dead keys + if self.dead_records.is_empty() { + return; + } + + let record_table = self.record_table.clone().unwrap(); + let subkey_table = self.subkey_table.clone().unwrap(); + + let rt_xact = record_table.transact(); + let st_xact = subkey_table.transact(); + let dead_records = mem::take(&mut self.dead_records); + for (k, v) in dead_records { + // Record should already be gone from index + if self.record_index.contains_key(&k) { + log_stor!(error "dead record found in index: {:?}", k); + } + + // Delete record + rt_xact.delete(0, &k.bytes()); + + // Delete subkeys + let subkey_count = v.subkey_count() as u32; + for sk in 0..subkey_count { + // From table + let stk = SubkeyTableKey { + key: k.key, + subkey: sk, + }; + st_xact.delete(0, &stk.bytes()); + + // From cache + self.remove_from_subkey_cache(stk); + } + + // Remove from total size + 
self.total_storage_space -= mem::size_of::(); + self.total_storage_space -= v.total_size(); + } + if let Err(e) = rt_xact.commit().await { + log_stor!(error "failed to commit record table transaction: {}", e); + } + if let Err(e) = st_xact.commit().await { + log_stor!(error "failed to commit subkey table transaction: {}", e); + } + } + + async fn flush_changed_records(&mut self) { + // touch records + if self.changed_records.is_empty() { + return; + } + + let record_table = self.record_table.clone().unwrap(); + + let rt_xact = record_table.transact(); + let changed_records = mem::take(&mut self.changed_records); + for rtk in changed_records { + // Get the changed record and save it to the table + if let Some(r) = self.record_index.peek(&rtk) { + if let Err(e) = rt_xact.store_rkyv(0, &rtk.bytes(), r) { + log_stor!(error "failed to save record: {}", e); + } + } + } + if let Err(e) = rt_xact.commit().await { + log_stor!(error "failed to commit record table transaction: {}", e); + } + } + + pub async fn tick(&mut self) -> EyreResult<()> { + self.flush_changed_records().await; + self.purge_dead_records(true).await; + Ok(()) + } + + pub async fn new_record(&mut self, key: TypedKey, record: Record) -> VeilidAPIResult<()> { + let rtk = RecordTableKey { key }; + if self.record_index.contains_key(&rtk) { + apibail_internal!("record already exists"); + } + + // Get record table + let Some(record_table) = self.record_table.clone() else { + apibail_internal!("record store not initialized"); + }; + + // If over size limit, dont create record + let new_total_storage_space = + self.total_storage_space + mem::size_of::() + record.total_size(); + if let Some(max_storage_space_mb) = &self.limits.max_storage_space_mb { + if new_total_storage_space > (max_storage_space_mb * 1_048_576usize) { + apibail_try_again!(); + } + } + + // Save to record table + record_table + .store_rkyv(0, &rtk.bytes(), &record) + .await + .map_err(VeilidAPIError::internal)?; + + // Save to record index + let mut dead_records = Vec::new(); + if let Some(v) = self.record_index.insert(rtk, record, |k, v| { + dead_records.push((k, v)); + }) { + // Shouldn't happen but log it + log_stor!(warn "new duplicate record in table: {:?}", rtk); + self.add_dead_record(rtk, v); + } + for dr in dead_records { + self.add_dead_record(dr.0, dr.1); + } + + // Update storage space + self.total_storage_space = new_total_storage_space; + + Ok(()) + } + + pub async fn delete_record(&mut self, key: TypedKey) -> VeilidAPIResult<()> { + // Get the record table key + let rtk = RecordTableKey { key }; + + // Remove record from the index + let Some(record) = self.record_index.remove(&rtk) else { + apibail_key_not_found!(key); + }; + + self.add_dead_record(rtk, record); + + self.purge_dead_records(false).await; + + Ok(()) + } + + pub(super) fn with_record(&mut self, key: TypedKey, f: F) -> Option + where + F: FnOnce(&Record) -> R, + { + // Get record from index + let mut out = None; + let rtk = RecordTableKey { key }; + if let Some(record) = self.record_index.get_mut(&rtk) { + // Callback + out = Some(f(record)); + + // Touch + record.touch(get_aligned_timestamp()); + } + if out.is_some() { + self.mark_record_changed(rtk); + } + + out + } + + pub(super) fn with_record_mut(&mut self, key: TypedKey, f: F) -> Option + where + F: FnOnce(&mut Record) -> R, + { + // Get record from index + let mut out = None; + let rtk = RecordTableKey { key }; + if let Some(record) = self.record_index.get_mut(&rtk) { + // Callback + out = Some(f(record)); + + // Touch + 
record.touch(get_aligned_timestamp()); + } + if out.is_some() { + self.mark_record_changed(rtk); + } + + out + } + + // pub fn get_descriptor(&mut self, key: TypedKey) -> Option { + // self.with_record(key, |record| record.descriptor().clone()) + // } + + pub async fn get_subkey( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, + ) -> VeilidAPIResult> { + // record from index + let Some((subkey_count, opt_descriptor)) = self.with_record(key, |record| { + (record.subkey_count(), if want_descriptor { + Some(record.descriptor().clone()) + } else { + None + }) + }) else { + // Record not available + return Ok(None); + }; + + // Check if the subkey is in range + if subkey as usize >= subkey_count { + apibail_invalid_argument!("subkey out of range", "subkey", subkey); + } + + // Get subkey table + let Some(subkey_table) = self.subkey_table.clone() else { + apibail_internal!("record store not initialized"); + }; + + // If subkey exists in subkey cache, use that + let stk = SubkeyTableKey { key, subkey }; + if let Some(record_data) = self.subkey_cache.get_mut(&stk) { + let out = record_data.signed_value_data().clone(); + + return Ok(Some(SubkeyResult { + value: Some(out), + descriptor: opt_descriptor, + })); + } + // If not in cache, try to pull from table store + if let Some(record_data) = subkey_table + .load_rkyv::(0, &stk.bytes()) + .await + .map_err(VeilidAPIError::internal)? + { + let out = record_data.signed_value_data().clone(); + + // Add to cache, do nothing with lru out + self.add_to_subkey_cache(stk, record_data); + + return Ok(Some(SubkeyResult { + value: Some(out), + descriptor: opt_descriptor, + })); + }; + + // Record was available, but subkey was not found, maybe descriptor gets returned + Ok(Some(SubkeyResult { + value: None, + descriptor: opt_descriptor, + })) + } + + pub async fn set_subkey( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + signed_value_data: SignedValueData, + ) -> VeilidAPIResult<()> { + // Check size limit for data + if signed_value_data.value_data().data().len() > self.limits.max_subkey_size { + apibail_invalid_argument!( + "record subkey too large", + "signed_value_data.value_data.data.len", + signed_value_data.value_data().data().len() + ); + } + + // Get record from index + let Some((subkey_count, total_size)) = self.with_record(key, |record| { + (record.subkey_count(), record.total_size()) + }) else { + apibail_invalid_argument!("no record at this key", "key", key); + }; + + // Check if the subkey is in range + if subkey as usize >= subkey_count { + apibail_invalid_argument!("subkey out of range", "subkey", subkey); + } + + // Get subkey table + let Some(subkey_table) = self.subkey_table.clone() else { + apibail_internal!("record store not initialized"); + }; + + // Get the previous subkey and ensure we aren't going over the record size limit + let mut prior_record_data_size = 0usize; + + // If subkey exists in subkey cache, use that + let stk = SubkeyTableKey { key, subkey }; + let stk_bytes = stk.bytes(); + + if let Some(record_data) = self.subkey_cache.peek(&stk) { + prior_record_data_size = record_data.total_size(); + } else { + // If not in cache, try to pull from table store + if let Some(record_data) = subkey_table + .load_rkyv::(0, &stk_bytes) + .await + .map_err(VeilidAPIError::internal)? 
+ { + prior_record_data_size = record_data.total_size(); + } + } + + // Make new record data + let record_data = RecordData::new(signed_value_data); + + // Check new total record size + let new_record_data_size = record_data.total_size(); + let new_total_size = total_size + new_record_data_size - prior_record_data_size; + if new_total_size > self.limits.max_record_total_size { + apibail_generic!("dht record too large"); + } + + // Check new total storage space + let new_total_storage_space = + self.total_storage_space + new_record_data_size - prior_record_data_size; + if let Some(max_storage_space_mb) = self.limits.max_storage_space_mb { + if new_total_storage_space > (max_storage_space_mb * 1_048_576usize) { + apibail_try_again!(); + } + } + + // Write subkey + subkey_table + .store_rkyv(0, &stk_bytes, &record_data) + .await + .map_err(VeilidAPIError::internal)?; + + // Write to subkey cache + self.add_to_subkey_cache(stk, record_data); + + // Update record + self.with_record_mut(key, |record| { + record.set_record_data_size(new_record_data_size); + }) + .expect("record should still be here"); + + Ok(()) + } + + /// LRU out some records until we reclaim the amount of space requested + /// This will force a garbage collection of the space immediately + /// If zero is passed in here, a garbage collection will be performed of dead records + /// without removing any live records + pub async fn reclaim_space(&mut self, space: usize) { + let mut reclaimed = 0usize; + while reclaimed < space { + if let Some((k, v)) = self.record_index.remove_lru() { + reclaimed += mem::size_of::(); + reclaimed += v.total_size(); + self.add_dead_record(k, v); + } + } + self.purge_dead_records(false).await; + } + + pub(super) fn debug_records(&self) -> String { + // Dump fields in an abbreviated way + let mut out = String::new(); + + out += "Record Index:\n"; + for (rik, rec) in &self.record_index { + out += &format!( + " {} @ {} len={}\n", + rik.key.to_string(), + rec.last_touched().as_u64(), + rec.record_data_size() + ); + } + out += &format!("Subkey Cache Count: {}\n", self.subkey_cache.len()); + out += &format!( + "Subkey Cache Total Size: {}\n", + self.subkey_cache_total_size + ); + out += &format!("Total Storage Space: {}\n", self.total_storage_space); + out += &format!("Dead Records: {}\n", self.dead_records.len()); + for dr in &self.dead_records { + out += &format!(" {}\n", dr.0.key.to_string()); + } + out += &format!("Changed Records: {}\n", self.changed_records.len()); + for cr in &self.changed_records { + out += &format!(" {}\n", cr.key.to_string()); + } + + out + } +} diff --git a/veilid-core/src/storage_manager/record_store_limits.rs b/veilid-core/src/storage_manager/record_store_limits.rs new file mode 100644 index 00000000..5dfb25d4 --- /dev/null +++ b/veilid-core/src/storage_manager/record_store_limits.rs @@ -0,0 +1,16 @@ +/// Configuration for the record store +#[derive(Debug, Default, Copy, Clone)] +pub struct RecordStoreLimits { + /// Number of subkeys to keep in the memory cache + pub subkey_cache_size: usize, + /// Maximum size of an individual subkey + pub max_subkey_size: usize, + /// Maximum total record data size per record + pub max_record_total_size: usize, + /// Limit on the total number of records in the table store + pub max_records: Option, + /// Limit on the amount of subkey cache memory to use before evicting cache items + pub max_subkey_cache_memory_mb: Option, + /// Limit on the amount of storage space to use for subkey data and record data + pub max_storage_space_mb: Option, +} 
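The RecordStoreLimits fields above are expressed as counts and megabytes; RecordStore converts each optional megabyte limit into a byte budget (N * 1_048_576) before comparing it against its running totals in new_record, set_subkey, and the subkey-cache eviction loop. The following is a minimal sketch of that conversion for reference only; it is not part of the patch, and the helper names (storage_budget_bytes, record_would_fit) are illustrative rather than anything defined in veilid-core.

/// Illustrative only: mirrors how RecordStore interprets the optional
/// megabyte limits as byte budgets (1 MiB == 1_048_576 bytes).
fn storage_budget_bytes(limits: &RecordStoreLimits) -> Option<usize> {
    limits.max_storage_space_mb.map(|mb| mb * 1_048_576usize)
}

/// Illustrative only: the same acceptance check new_record and set_subkey
/// perform before taking on more data; a None budget means no configured limit.
fn record_would_fit(limits: &RecordStoreLimits, current_total: usize, added: usize) -> bool {
    match storage_budget_bytes(limits) {
        Some(budget) => current_total + added <= budget,
        None => true,
    }
}

For example, with max_storage_space_mb set to Some(1), a store already holding 1_000_000 bytes would reject another 100_000 bytes (1_100_000 > 1_048_576) and surface the try-again error used in new_record above; the subkey cache applies the same MiB-to-bytes conversion when deciding how far to evict.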
diff --git a/veilid-core/src/storage_manager/set_value.rs b/veilid-core/src/storage_manager/set_value.rs new file mode 100644 index 00000000..2ea9ddf2 --- /dev/null +++ b/veilid-core/src/storage_manager/set_value.rs @@ -0,0 +1,225 @@ +use super::*; + +/// The context of the do_set_value operation +struct DoSetValueContext { + /// The latest value of the subkey, may be the value passed in + pub value: SignedValueData, + /// The consensus count for the value we have received + pub value_count: usize, + /// The parsed schema from the descriptor if we have one + pub schema: DHTSchema, +} + +impl StorageManager { + + /// Perform a 'set value' query on the network + pub async fn outbound_set_value( + &self, + rpc_processor: RPCProcessor, + key: TypedKey, + subkey: ValueSubkey, + safety_selection: SafetySelection, + value: SignedValueData, + descriptor: SignedValueDescriptor, + ) -> VeilidAPIResult { + let routing_table = rpc_processor.routing_table(); + + // Get the DHT parameters for 'SetValue' + let (key_count, consensus_count, fanout, timeout_us) = { + let c = self.unlocked_inner.config.get(); + ( + c.network.dht.max_find_node_count as usize, + c.network.dht.set_value_count as usize, + c.network.dht.set_value_fanout as usize, + TimestampDuration::from(ms_to_us(c.network.dht.set_value_timeout_ms)), + ) + }; + + // Make do-set-value answer context + let schema = descriptor.schema()?; + let context = Arc::new(Mutex::new(DoSetValueContext { + value, + value_count: 0, + schema, + })); + + // Routine to call to generate fanout + let call_routine = |next_node: NodeRef| { + let rpc_processor = rpc_processor.clone(); + let context = context.clone(); + let descriptor = descriptor.clone(); + async move { + + let send_descriptor = true; // xxx check if next_node needs the descriptor or not + + // get most recent value to send + let value = { + let ctx = context.lock(); + ctx.value.clone() + }; + + // send across the wire + let vres = rpc_processor + .clone() + .rpc_call_set_value( + Destination::direct(next_node).with_safety(safety_selection), + key, + subkey, + value, + descriptor.clone(), + send_descriptor, + ) + .await?; + let sva = network_result_value_or_log!(vres => { + // Any other failures, just try the next node + return Ok(None); + }); + + // If the node was close enough to possibly set the value + if sva.answer.set { + let mut ctx = context.lock(); + + // Keep the value if we got one and it is newer and it passes schema validation + if let Some(value) = sva.answer.value { + + // Validate with schema + if !ctx.schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value + // Move to the next node + return Ok(None); + } + + // We have a prior value, ensure this is a newer sequence number + let prior_seq = ctx.value.value_data().seq(); + let new_seq = value.value_data().seq(); + if new_seq > prior_seq { + // If the sequence number is greater, keep it + ctx.value = value; + // One node has shown us this value so far + ctx.value_count = 1; + } else { + // If the sequence number is older, or an equal sequence number, + // the node should not have returned a value here. 
+ // Skip this node's closer list because it is misbehaving + return Ok(None); + } + } + else + { + // It was set on this node and no newer value was found and returned, + // so increase our consensus count + ctx.value_count += 1; + } + } + + // Return peers if we have some + Ok(Some(sva.answer.peers)) + } + }; + + // Routine to call to check if we're done at each step + let check_done = |_closest_nodes: &[NodeRef]| { + // If we have reached sufficient consensus, return done + let ctx = context.lock(); + if ctx.value_count >= consensus_count { + return Some(()); + } + None + }; + + // Call the fanout + let fanout_call = FanoutCall::new( + routing_table.clone(), + key, + key_count, + fanout, + timeout_us, + call_routine, + check_done, + ); + + match fanout_call.run().await { + // If we don't finish in the timeout (too much time passed checking for consensus) + TimeoutOr::Timeout | + // If we finished with consensus (enough nodes returning the same value) + TimeoutOr::Value(Ok(Some(()))) | + // If we finished without consensus (ran out of nodes before getting consensus) + TimeoutOr::Value(Ok(None)) => { + // Return the best answer we've got + let ctx = context.lock(); + Ok(ctx.value.clone()) + } + // Failed + TimeoutOr::Value(Err(e)) => { + // If we finished with an error, return that + Err(e.into()) + } + } + } + + /// Handle a received 'Set Value' query + /// Returns None if the value passed in was set + /// Returns Some(current value) if the value passed in was older and the current value was kept + pub async fn inbound_set_value(&self, key: TypedKey, subkey: ValueSubkey, value: SignedValueData, descriptor: Option<SignedValueDescriptor>) -> VeilidAPIResult<NetworkResult<Option<SignedValueData>>> { + let mut inner = self.lock().await?; + + // See if the subkey we are modifying has a last known local value + let last_subkey_result = inner.handle_get_local_value(key, subkey, true).await?; + + // Make sure this value would actually be newer + if let Some(last_value) = &last_subkey_result.value { + if value.value_data().seq() < last_value.value_data().seq() { + // inbound value is older than the one we have, just return the one we have + return Ok(NetworkResult::value(Some(last_value.clone()))); + } + } + + // Get the descriptor and schema for the key + let actual_descriptor = match last_subkey_result.descriptor { + Some(last_descriptor) => { + if let Some(descriptor) = descriptor { + // Descriptor must match the last one if it is provided + if descriptor.cmp_no_sig(&last_descriptor) != cmp::Ordering::Equal { + return Ok(NetworkResult::invalid_message("setvalue descriptor does not match last descriptor")); + } + } else { + // Descriptor was not provided, so always go with the last descriptor + } + last_descriptor + } + None => { + if let Some(descriptor) = descriptor { + descriptor + } else { + // No descriptor + return Ok(NetworkResult::invalid_message("descriptor must be provided")); + } + } + }; + let Ok(schema) = actual_descriptor.schema() else { + return Ok(NetworkResult::invalid_message("invalid schema")); + }; + + // Validate new value with schema + if !schema.check_subkey_value_data(actual_descriptor.owner(), subkey, value.value_data()) { + // Validation failed, ignore this value + return Ok(NetworkResult::invalid_message("failed schema validation")); + } + + // Do the set and return no new value + match inner.handle_set_remote_value(key, subkey, value, actual_descriptor).await { + Ok(()) => {}, + Err(VeilidAPIError::Internal { message }) => { + apibail_internal!(message); + }, + Err(e) => { + return Ok(NetworkResult::invalid_message(e)); + }, + } + 
Ok(NetworkResult::value(None)) + } +} diff --git a/veilid-core/src/storage_manager/storage_manager_inner.rs b/veilid-core/src/storage_manager/storage_manager_inner.rs new file mode 100644 index 00000000..e50cbcc3 --- /dev/null +++ b/veilid-core/src/storage_manager/storage_manager_inner.rs @@ -0,0 +1,439 @@ +use super::*; + +/// Locked structure for storage manager +pub(super) struct StorageManagerInner { + unlocked_inner: Arc, + /// If we are started up + pub initialized: bool, + /// Records that have been 'opened' and are not yet closed + pub opened_records: HashMap, + /// Records that have ever been 'created' or 'opened' by this node, things we care about that we must republish to keep alive + pub local_record_store: Option>, + /// Records that have been pushed to this node for distribution by other nodes, that we make an effort to republish + pub remote_record_store: Option>, + /// Record subkeys that have not been pushed to the network because they were written to offline + pub offline_subkey_writes: HashMap, + /// Storage manager metadata that is persistent, including copy of offline subkey writes + pub metadata_db: Option, + /// RPC processor if it is available + pub rpc_processor: Option, + /// Background processing task (not part of attachment manager tick tree so it happens when detached too) + pub tick_future: Option>, +} + +fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { + let c = config.get(); + RecordStoreLimits { + subkey_cache_size: c.network.dht.local_subkey_cache_size as usize, + max_subkey_size: MAX_SUBKEY_SIZE, + max_record_total_size: MAX_RECORD_DATA_SIZE, + max_records: None, + max_subkey_cache_memory_mb: Some( + c.network.dht.local_max_subkey_cache_memory_mb as usize, + ), + max_storage_space_mb: None, + } +} + +fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits { + let c = config.get(); + RecordStoreLimits { + subkey_cache_size: c.network.dht.remote_subkey_cache_size as usize, + max_subkey_size: MAX_SUBKEY_SIZE, + max_record_total_size: MAX_RECORD_DATA_SIZE, + max_records: Some(c.network.dht.remote_max_records as usize), + max_subkey_cache_memory_mb: Some( + c.network.dht.remote_max_subkey_cache_memory_mb as usize, + ), + max_storage_space_mb: Some(c.network.dht.remote_max_storage_space_mb as usize), + } +} + +impl StorageManagerInner { + pub fn new(unlocked_inner: Arc) -> Self { + Self { + unlocked_inner, + initialized: false, + opened_records: Default::default(), + local_record_store: Default::default(), + remote_record_store: Default::default(), + offline_subkey_writes: Default::default(), + metadata_db: Default::default(), + rpc_processor: Default::default(), + tick_future: Default::default(), + } + } + + pub async fn init(&mut self, outer_self: StorageManager) -> EyreResult<()> { + + let metadata_db = self.unlocked_inner + .table_store + .open(&format!("storage_manager_metadata"), 1) + .await?; + + let local_limits = local_limits_from_config(self.unlocked_inner.config.clone()); + let remote_limits = remote_limits_from_config(self.unlocked_inner.config.clone()); + + let mut local_record_store = RecordStore::new( + self.unlocked_inner.table_store.clone(), + "local", + local_limits, + ); + local_record_store.init().await?; + + let mut remote_record_store = RecordStore::new( + self.unlocked_inner.table_store.clone(), + "remote", + remote_limits, + ); + remote_record_store.init().await?; + + self.metadata_db = Some(metadata_db); + self.local_record_store = Some(local_record_store); + self.remote_record_store = 
Some(remote_record_store); + + self.load_metadata().await?; + + // Schedule tick + let tick_future = interval(1000, move || { + let this = outer_self.clone(); + async move { + if let Err(e) = this.tick().await { + log_stor!(warn "storage manager tick failed: {}", e); + } + } + }); + self.tick_future = Some(tick_future); + + self.initialized = true; + + Ok(()) + } + + pub async fn terminate(&mut self) { + + // Stop ticker + let tick_future = self.tick_future.take(); + if let Some(f) = tick_future { + f.await; + } + + // Final flush on record stores + if let Some(mut local_record_store) = self.local_record_store.take() { + if let Err(e) = local_record_store.tick().await { + log_stor!(error "termination local record store tick failed: {}", e); + } + } + if let Some(mut remote_record_store) = self.remote_record_store.take() { + if let Err(e) = remote_record_store.tick().await { + log_stor!(error "termination remote record store tick failed: {}", e); + } + } + + // Save metadata + if self.metadata_db.is_some() { + if let Err(e) = self.save_metadata().await { + log_stor!(error "termination metadata save failed: {}", e); + } + self.metadata_db = None; + } + self.offline_subkey_writes.clear(); + + // Mark not initialized + self.initialized = false; + } + + async fn save_metadata(&mut self) -> EyreResult<()>{ + if let Some(metadata_db) = &self.metadata_db { + let tx = metadata_db.transact(); + tx.store_rkyv(0, b"offline_subkey_writes", &self.offline_subkey_writes)?; + tx.commit().await.wrap_err("failed to commit")? + } + Ok(()) + } + + async fn load_metadata(&mut self) -> EyreResult<()> { + if let Some(metadata_db) = &self.metadata_db { + self.offline_subkey_writes = match metadata_db.load_rkyv(0, b"offline_subkey_writes").await { + Ok(v) => v.unwrap_or_default(), + Err(_) => { + if let Err(e) = metadata_db.delete(0,b"offline_subkey_writes").await { + debug!("offline_subkey_writes format changed, clearing: {}", e); + } + Default::default() + } + } + } + Ok(()) + } + + pub async fn create_new_owned_local_record( + &mut self, + kind: CryptoKind, + schema: DHTSchema, + safety_selection: SafetySelection, + ) -> VeilidAPIResult<(TypedKey, KeyPair)> { + // Get cryptosystem + let Some(vcrypto) = self.unlocked_inner.crypto.get(kind) else { + apibail_generic!("unsupported cryptosystem"); + }; + + // Get local record store + let Some(local_record_store) = self.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Compile the dht schema + let schema_data = schema.compile(); + + // New values require a new owner key + let owner = vcrypto.generate_keypair(); + + // Make a signed value descriptor for this dht value + let signed_value_descriptor = SignedValueDescriptor::make_signature( + owner.key, + schema_data, + vcrypto.clone(), + owner.secret, + )?; + + // Add new local value record + let cur_ts = get_aligned_timestamp(); + let local_record_detail = LocalRecordDetail { safety_selection }; + let record = + Record::::new(cur_ts, signed_value_descriptor, local_record_detail)?; + + let dht_key = Self::get_key(vcrypto.clone(), &record); + local_record_store.new_record(dht_key, record).await?; + + Ok((dht_key, owner)) + } + + pub fn open_existing_record( + &mut self, + key: TypedKey, + writer: Option, + safety_selection: SafetySelection, + ) -> VeilidAPIResult> { + // Ensure the record is closed + if self.opened_records.contains_key(&key) { + apibail_generic!("record is already open and should be closed first"); + } + + // Get local record store + let Some(local_record_store) = 
self.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // See if we have a local record already or not + let cb = |r: &mut Record| { + // Process local record + + // Keep the safety selection we opened the record with + r.detail_mut().safety_selection = safety_selection; + + // Return record details + (r.owner().clone(), r.schema()) + }; + let Some((owner, schema)) = local_record_store.with_record_mut(key, cb) else { + return Ok(None); + }; + // Had local record + + // If the writer we chose is also the owner, we have the owner secret + // Otherwise this is just another subkey writer + let owner_secret = if let Some(writer) = writer { + if writer.key == owner { + Some(writer.secret) + } else { + None + } + } else { + None + }; + + // Write open record + self.opened_records + .insert(key, OpenedRecord::new(writer, safety_selection)); + + // Make DHT Record Descriptor to return + let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); + Ok(Some(descriptor)) + } + + pub async fn open_new_record( + &mut self, + key: TypedKey, + writer: Option, + subkey: ValueSubkey, + subkey_result: SubkeyResult, + safety_selection: SafetySelection, + ) -> VeilidAPIResult { + // Ensure the record is closed + if self.opened_records.contains_key(&key) { + panic!("new record should never be opened at this point"); + } + + // Must have descriptor + let Some(signed_value_descriptor) = subkey_result.descriptor else { + // No descriptor for new record, can't store this + apibail_generic!("no descriptor"); + }; + // Get owner + let owner = signed_value_descriptor.owner().clone(); + + // If the writer we chose is also the owner, we have the owner secret + // Otherwise this is just another subkey writer + let owner_secret = if let Some(writer) = writer { + if writer.key == owner { + Some(writer.secret) + } else { + None + } + } else { + None + }; + let schema = signed_value_descriptor.schema()?; + + // Get local record store + let Some(local_record_store) = self.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Make and store a new record for this descriptor + let record = Record::::new( + get_aligned_timestamp(), + signed_value_descriptor, + LocalRecordDetail { safety_selection }, + )?; + local_record_store.new_record(key, record).await?; + + // If we got a subkey with the getvalue, it has already been validated against the schema, so store it + if let Some(signed_value_data) = subkey_result.value { + // Write subkey to local store + local_record_store + .set_subkey(key, subkey, signed_value_data) + .await?; + } + + // Write open record + self.opened_records + .insert(key, OpenedRecord::new(writer, safety_selection)); + + // Make DHT Record Descriptor to return + let descriptor = DHTRecordDescriptor::new(key, owner, owner_secret, schema); + Ok(descriptor) + } + + pub fn close_record(&mut self, key: TypedKey) -> VeilidAPIResult<()> { + let Some(_opened_record) = self.opened_records.remove(&key) else { + apibail_generic!("record not open"); + }; + Ok(()) + } + + pub async fn handle_get_local_value( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the local record store + let Some(local_record_store) = self.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(subkey_result) = local_record_store.get_subkey(key, subkey, want_descriptor).await? 
{ + return Ok(subkey_result); + } + + Ok(SubkeyResult { + value: None, + descriptor: None, + }) + } + + pub async fn handle_set_local_value( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + signed_value_data: SignedValueData, + ) -> VeilidAPIResult<()> { + // See if it's in the local record store + let Some(local_record_store) = self.local_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // Write subkey to local store + local_record_store + .set_subkey(key, subkey, signed_value_data) + .await?; + + Ok(()) + } + + pub async fn handle_get_remote_value( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + want_descriptor: bool, + ) -> VeilidAPIResult { + // See if it's in the remote record store + let Some(remote_record_store) = self.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + if let Some(subkey_result) = remote_record_store.get_subkey(key, subkey, want_descriptor).await? { + return Ok(subkey_result); + } + + Ok(SubkeyResult { + value: None, + descriptor: None, + }) + } + + pub async fn handle_set_remote_value( + &mut self, + key: TypedKey, + subkey: ValueSubkey, + signed_value_data: SignedValueData, + signed_value_descriptor: SignedValueDescriptor, + ) -> VeilidAPIResult<()> { + // See if it's in the remote record store + let Some(remote_record_store) = self.remote_record_store.as_mut() else { + apibail_not_initialized!(); + }; + + // See if we have a remote record already or not + if remote_record_store.with_record(key, |_|{}).is_none() { + // record didn't exist, make it + let cur_ts = get_aligned_timestamp(); + let remote_record_detail = RemoteRecordDetail { }; + let record = + Record::::new(cur_ts, signed_value_descriptor, remote_record_detail)?; + remote_record_store.new_record(key, record).await? 
+ }; + + // Write subkey to remote store + remote_record_store + .set_subkey(key, subkey, signed_value_data) + .await?; + + Ok(()) + } + + /// # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ] + fn get_key(vcrypto: CryptoSystemVersion, record: &Record) -> TypedKey + where + D: Clone + RkyvArchive + RkyvSerialize, + for<'t> ::Archived: CheckBytes>, + ::Archived: RkyvDeserialize, + { + let compiled = record.descriptor().schema_data(); + let mut hash_data = Vec::::with_capacity(PUBLIC_KEY_LENGTH + 4 + compiled.len()); + hash_data.extend_from_slice(&vcrypto.kind().0); + hash_data.extend_from_slice(&record.owner().bytes); + hash_data.extend_from_slice(compiled); + let hash = vcrypto.generate_hash(&hash_data); + TypedKey::new(vcrypto.kind(), hash) + } +} diff --git a/veilid-core/src/storage_manager/tasks/flush_record_stores.rs b/veilid-core/src/storage_manager/tasks/flush_record_stores.rs new file mode 100644 index 00000000..c2fb1b0d --- /dev/null +++ b/veilid-core/src/storage_manager/tasks/flush_record_stores.rs @@ -0,0 +1,21 @@ +use super::*; + +impl StorageManager { + // Flush records stores to disk and remove dead records + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn flush_record_stores_task_routine( + self, + stop_token: StopToken, + _last_ts: Timestamp, + _cur_ts: Timestamp, + ) -> EyreResult<()> { + let mut inner = self.inner.lock().await; + if let Some(local_record_store) = &mut inner.local_record_store { + local_record_store.tick().await?; + } + if let Some(remote_record_store) = &mut inner.remote_record_store { + remote_record_store.tick().await?; + } + Ok(()) + } +} diff --git a/veilid-core/src/storage_manager/tasks/mod.rs b/veilid-core/src/storage_manager/tasks/mod.rs new file mode 100644 index 00000000..cd90a82e --- /dev/null +++ b/veilid-core/src/storage_manager/tasks/mod.rs @@ -0,0 +1,43 @@ +pub mod flush_record_stores; + +use super::*; + +impl StorageManager { + pub(crate) fn setup_tasks(&self) { + // Set rolling transfers tick task + debug!("starting flush record stores task"); + { + let this = self.clone(); + self.unlocked_inner + .flush_record_stores_task + .set_routine(move |s, l, t| { + Box::pin( + this.clone() + .flush_record_stores_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + ) + .instrument(trace_span!( + parent: None, + "StorageManager flush record stores task routine" + )), + ) + }); + } + } + + pub async fn tick(&self) -> EyreResult<()> { + // Run the rolling transfers task + self.unlocked_inner.flush_record_stores_task.tick().await?; + + Ok(()) + } + + pub(crate) async fn cancel_tasks(&self) { + debug!("stopping flush record stores task"); + if let Err(e) = self.unlocked_inner.flush_record_stores_task.stop().await { + warn!("flush_record_stores_task not stopped: {}", e); + } + } +} diff --git a/veilid-core/src/storage_manager/types/local_record_detail.rs b/veilid-core/src/storage_manager/types/local_record_detail.rs new file mode 100644 index 00000000..9f16ba80 --- /dev/null +++ b/veilid-core/src/storage_manager/types/local_record_detail.rs @@ -0,0 +1,12 @@ +use super::*; + +/// Information required to handle locally opened records +#[derive( + Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct LocalRecordDetail { + /// The last 'safety selection' used when creating/opening this record. + /// Even when closed, this safety selection applies to re-publication attempts by the system. 
+ pub safety_selection: SafetySelection, +} diff --git a/veilid-core/src/storage_manager/types/mod.rs b/veilid-core/src/storage_manager/types/mod.rs new file mode 100644 index 00000000..a295241b --- /dev/null +++ b/veilid-core/src/storage_manager/types/mod.rs @@ -0,0 +1,17 @@ +mod local_record_detail; +mod opened_record; +mod record; +mod record_data; +mod remote_record_detail; +mod signed_value_data; +mod signed_value_descriptor; + +use super::*; + +pub use local_record_detail::*; +pub use opened_record::*; +pub use record::*; +pub use record_data::*; +pub use remote_record_detail::*; +pub use signed_value_data::*; +pub use signed_value_descriptor::*; diff --git a/veilid-core/src/storage_manager/types/opened_record.rs b/veilid-core/src/storage_manager/types/opened_record.rs new file mode 100644 index 00000000..8f47786c --- /dev/null +++ b/veilid-core/src/storage_manager/types/opened_record.rs @@ -0,0 +1,31 @@ +use super::*; + +/// The state associated with a local record when it is opened +/// This is not serialized to storage as it is ephemeral for the lifetime of the opened record +#[derive(Clone, Debug, Default)] +pub struct OpenedRecord { + /// The key pair used to perform writes to subkey on this opened record + /// Without this, set_value() will fail regardless of which key or subkey is being written to + /// as all writes are signed + writer: Option, + + /// The safety selection in current use + safety_selection: SafetySelection, +} + +impl OpenedRecord { + pub fn new(writer: Option, safety_selection: SafetySelection) -> Self { + Self { + writer, + safety_selection, + } + } + + pub fn writer(&self) -> Option<&KeyPair> { + self.writer.as_ref() + } + + pub fn safety_selection(&self) -> SafetySelection { + self.safety_selection + } +} diff --git a/veilid-core/src/storage_manager/types/record.rs b/veilid-core/src/storage_manager/types/record.rs new file mode 100644 index 00000000..c8ec9cb7 --- /dev/null +++ b/veilid-core/src/storage_manager/types/record.rs @@ -0,0 +1,84 @@ +use super::*; + +#[derive( + Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct Record +where + D: Clone + RkyvArchive + RkyvSerialize, + for<'t> ::Archived: CheckBytes>, + ::Archived: RkyvDeserialize, +{ + descriptor: SignedValueDescriptor, + subkey_count: usize, + last_touched_ts: Timestamp, + record_data_size: usize, + detail: D, +} + +impl Record +where + D: Clone + RkyvArchive + RkyvSerialize, + for<'t> ::Archived: CheckBytes>, + ::Archived: RkyvDeserialize, +{ + pub fn new( + cur_ts: Timestamp, + descriptor: SignedValueDescriptor, + detail: D, + ) -> VeilidAPIResult { + let schema = descriptor.schema()?; + let subkey_count = schema.subkey_count(); + Ok(Self { + descriptor, + subkey_count, + last_touched_ts: cur_ts, + record_data_size: 0, + detail, + }) + } + + pub fn descriptor(&self) -> &SignedValueDescriptor { + &self.descriptor + } + pub fn owner(&self) -> &PublicKey { + self.descriptor.owner() + } + + pub fn subkey_count(&self) -> usize { + self.subkey_count + } + + pub fn touch(&mut self, cur_ts: Timestamp) { + self.last_touched_ts = cur_ts + } + + pub fn last_touched(&self) -> Timestamp { + self.last_touched_ts + } + + pub fn set_record_data_size(&mut self, size: usize) { + self.record_data_size = size; + } + + pub fn record_data_size(&self) -> usize { + self.record_data_size + } + + pub fn schema(&self) -> DHTSchema { + // unwrap is safe here because descriptor is immutable and set in new() + 
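// Record::new() already evaluated descriptor.schema()? successfully, so the schema is known to parse +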
self.descriptor.schema().unwrap() + } + + pub fn total_size(&self) -> usize { + mem::size_of::>() + self.descriptor.total_size() + self.record_data_size + } + + pub fn detail(&self) -> &D { + &self.detail + } + pub fn detail_mut(&mut self) -> &mut D { + &mut self.detail + } +} diff --git a/veilid-core/src/storage_manager/types/record_data.rs b/veilid-core/src/storage_manager/types/record_data.rs new file mode 100644 index 00000000..a9f8ed51 --- /dev/null +++ b/veilid-core/src/storage_manager/types/record_data.rs @@ -0,0 +1,31 @@ +use super::*; + +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct RecordData { + signed_value_data: SignedValueData, +} + +impl RecordData { + pub fn new(signed_value_data: SignedValueData) -> Self { + Self { signed_value_data } + } + pub fn signed_value_data(&self) -> &SignedValueData { + &self.signed_value_data + } + pub fn total_size(&self) -> usize { + mem::size_of::() + self.signed_value_data.value_data().data().len() + } +} diff --git a/veilid-core/src/storage_manager/types/remote_record_detail.rs b/veilid-core/src/storage_manager/types/remote_record_detail.rs new file mode 100644 index 00000000..e835faa6 --- /dev/null +++ b/veilid-core/src/storage_manager/types/remote_record_detail.rs @@ -0,0 +1,7 @@ +use super::*; + +#[derive( + Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct RemoteRecordDetail {} diff --git a/veilid-core/src/storage_manager/types/signed_value_data.rs b/veilid-core/src/storage_manager/types/signed_value_data.rs new file mode 100644 index 00000000..466764c8 --- /dev/null +++ b/veilid-core/src/storage_manager/types/signed_value_data.rs @@ -0,0 +1,95 @@ +use super::*; + +///////////////////////////////////////////////////////////////////////////////////////////////////// +/// + +#[derive( + Clone, + Debug, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SignedValueData { + value_data: ValueData, + signature: Signature, +} +impl SignedValueData { + pub fn new(value_data: ValueData, signature: Signature) -> Self { + Self { + value_data, + signature, + } + } + + pub fn validate( + &self, + owner: &PublicKey, + subkey: ValueSubkey, + vcrypto: CryptoSystemVersion, + ) -> VeilidAPIResult<()> { + let node_info_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?; + // validate signature + vcrypto.verify(&self.value_data.writer(), &node_info_bytes, &self.signature) + } + + pub fn make_signature( + value_data: ValueData, + owner: &PublicKey, + subkey: ValueSubkey, + vcrypto: CryptoSystemVersion, + writer_secret: SecretKey, + ) -> VeilidAPIResult { + let node_info_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?; + + // create signature + let signature = vcrypto.sign(&value_data.writer(), &writer_secret, &node_info_bytes)?; + Ok(Self { + value_data, + signature, + }) + } + + pub fn value_data(&self) -> &ValueData { + &self.value_data + } + + pub fn into_value_data(self) -> ValueData { + self.value_data + } + + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn total_size(&self) -> usize { + (mem::size_of::() - mem::size_of::()) + self.value_data.total_size() + } + + fn make_signature_bytes( + 
value_data: &ValueData, + owner: &PublicKey, + subkey: ValueSubkey, + ) -> VeilidAPIResult> { + let mut node_info_bytes = + Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + 4 + value_data.data().len()); + + // Add owner to signature + node_info_bytes.extend_from_slice(&owner.bytes); + // Add subkey to signature + node_info_bytes.extend_from_slice(&subkey.to_le_bytes()); + // Add sequence number to signature + node_info_bytes.extend_from_slice(&value_data.seq().to_le_bytes()); + // Add data to signature + node_info_bytes.extend_from_slice(value_data.data()); + + Ok(node_info_bytes) + } +} diff --git a/veilid-core/src/storage_manager/types/signed_value_descriptor.rs b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs new file mode 100644 index 00000000..fa718dcb --- /dev/null +++ b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs @@ -0,0 +1,81 @@ +use super::*; + +///////////////////////////////////////////////////////////////////////////////////////////////////// +/// + +#[derive( + Clone, + Debug, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SignedValueDescriptor { + owner: PublicKey, + schema_data: Vec, + signature: Signature, +} +impl SignedValueDescriptor { + pub fn new(owner: PublicKey, schema_data: Vec, signature: Signature) -> Self { + Self { + owner, + schema_data, + signature, + } + } + + pub fn validate(&self, vcrypto: CryptoSystemVersion) -> VeilidAPIResult<()> { + // validate signature + vcrypto.verify(&self.owner, &self.schema_data, &self.signature) + } + + pub fn owner(&self) -> &PublicKey { + &self.owner + } + + pub fn schema_data(&self) -> &[u8] { + &self.schema_data + } + + pub fn schema(&self) -> VeilidAPIResult { + DHTSchema::try_from(self.schema_data.as_slice()) + } + + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn make_signature( + owner: PublicKey, + schema_data: Vec, + vcrypto: CryptoSystemVersion, + owner_secret: SecretKey, + ) -> VeilidAPIResult { + // create signature + let signature = vcrypto.sign(&owner, &owner_secret, &schema_data)?; + Ok(Self { + owner, + schema_data, + signature, + }) + } + + pub fn total_size(&self) -> usize { + mem::size_of::() + self.schema_data.len() + } + + pub fn cmp_no_sig(&self, other: &Self) -> cmp::Ordering { + let o = self.owner.cmp(&other.owner); + if o != cmp::Ordering::Equal { + return o; + } + self.schema_data.cmp(&other.schema_data) + } +} diff --git a/veilid-core/src/supplier_table.rs b/veilid-core/src/supplier_table.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/veilid-core/src/table_store/mod.rs b/veilid-core/src/table_store/mod.rs new file mode 100644 index 00000000..1e2f27e8 --- /dev/null +++ b/veilid-core/src/table_store/mod.rs @@ -0,0 +1,17 @@ +use super::*; + +mod table_db; +mod table_store; +pub use table_db::*; +pub use table_store::*; + +pub mod tests; + +#[cfg(target_arch = "wasm32")] +mod wasm; +#[cfg(target_arch = "wasm32")] +use wasm::*; +#[cfg(not(target_arch = "wasm32"))] +mod native; +#[cfg(not(target_arch = "wasm32"))] +use native::*; diff --git a/veilid-core/src/table_store/native.rs b/veilid-core/src/table_store/native.rs new file mode 100644 index 00000000..9e1186d5 --- /dev/null +++ b/veilid-core/src/table_store/native.rs @@ -0,0 +1,53 @@ +use super::*; +pub use keyvaluedb_sqlite::*; +use std::path::PathBuf; + +#[derive(Clone)] +pub(crate) struct TableStoreDriver { + config: VeilidConfig, +} + +impl 
TableStoreDriver { + pub fn new(config: VeilidConfig) -> Self { + Self { config } + } + + fn get_dbpath(&self, table: &str) -> VeilidAPIResult { + let c = self.config.get(); + let tablestoredir = c.table_store.directory.clone(); + std::fs::create_dir_all(&tablestoredir).map_err(VeilidAPIError::from)?; + + let dbpath: PathBuf = [tablestoredir, String::from(table)].iter().collect(); + Ok(dbpath) + } + + pub async fn open(&self, table_name: &str, column_count: u32) -> VeilidAPIResult { + let dbpath = self.get_dbpath(&table_name)?; + + // Ensure permissions are correct + ensure_file_private_owner(&dbpath).map_err(VeilidAPIError::internal)?; + + let cfg = DatabaseConfig::with_columns(column_count); + let db = Database::open(&dbpath, cfg).map_err(VeilidAPIError::from)?; + + // Ensure permissions are correct + ensure_file_private_owner(&dbpath).map_err(VeilidAPIError::internal)?; + + trace!( + "opened table store '{}' at path '{:?}' with {} columns", + table_name, + dbpath, + column_count + ); + Ok(db) + } + + pub async fn delete(&self, table_name: &str) -> VeilidAPIResult { + let dbpath = self.get_dbpath(&table_name)?; + if !dbpath.exists() { + return Ok(false); + } + std::fs::remove_file(dbpath).map_err(VeilidAPIError::from)?; + Ok(true) + } +} diff --git a/veilid-core/src/table_store/table_db.rs b/veilid-core/src/table_store/table_db.rs new file mode 100644 index 00000000..0702a6de --- /dev/null +++ b/veilid-core/src/table_store/table_db.rs @@ -0,0 +1,382 @@ +use crate::*; + +cfg_if! { + if #[cfg(target_arch = "wasm32")] { + use keyvaluedb_web::*; + use keyvaluedb::*; + } else { + use keyvaluedb_sqlite::*; + use keyvaluedb::*; + } +} + +struct CryptInfo { + vcrypto: CryptoSystemVersion, + key: SharedSecret, +} +impl CryptInfo { + pub fn new(crypto: Crypto, typed_key: TypedSharedSecret) -> Self { + let vcrypto = crypto.get(typed_key.kind).unwrap(); + let key = typed_key.value; + Self { vcrypto, key } + } +} + +pub struct TableDBUnlockedInner { + table: String, + table_store: TableStore, + database: Database, + // Encryption and decryption key will be the same unless configured for an in-place migration + encrypt_info: Option, + decrypt_info: Option, +} + +impl fmt::Debug for TableDBUnlockedInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TableDBInner(table={})", self.table) + } +} + +impl Drop for TableDBUnlockedInner { + fn drop(&mut self) { + self.table_store.on_table_db_drop(self.table.clone()); + } +} + +#[derive(Debug, Clone)] +pub struct TableDB { + unlocked_inner: Arc, +} + +impl TableDB { + pub(super) fn new( + table: String, + table_store: TableStore, + crypto: Crypto, + database: Database, + encryption_key: Option, + decryption_key: Option, + ) -> Self { + let encrypt_info = encryption_key.map(|ek| CryptInfo::new(crypto.clone(), ek)); + let decrypt_info = decryption_key.map(|dk| CryptInfo::new(crypto.clone(), dk)); + + Self { + unlocked_inner: Arc::new(TableDBUnlockedInner { + table, + table_store, + database, + encrypt_info, + decrypt_info, + }), + } + } + + pub(super) fn try_new_from_weak_inner(weak_inner: Weak) -> Option { + weak_inner.upgrade().map(|table_db_unlocked_inner| Self { + unlocked_inner: table_db_unlocked_inner, + }) + } + + pub(super) fn weak_inner(&self) -> Weak { + Arc::downgrade(&self.unlocked_inner) + } + + /// Get the total number of columns in the TableDB + pub fn get_column_count(&self) -> VeilidAPIResult { + let db = &self.unlocked_inner.database; + db.num_columns().map_err(VeilidAPIError::from) + } + + /// Encrypt buffer using 
encrypt key and prepend nonce to output + /// Keyed nonces are unique because keys must be unique + /// Normally they must be sequential or random, but the critical + /// requirement is that they are different for each encryption + /// but if the contents are guaranteed to be unique, then a nonce + /// can be generated from the hash of the contents and the encryption key itself + fn maybe_encrypt(&self, data: &[u8], keyed_nonce: bool) -> Vec { + if let Some(ei) = &self.unlocked_inner.encrypt_info { + let mut out = unsafe { unaligned_u8_vec_uninit(NONCE_LENGTH + data.len()) }; + + if keyed_nonce { + // Key content nonce + let mut noncedata = Vec::with_capacity(data.len() + PUBLIC_KEY_LENGTH); + noncedata.extend_from_slice(data); + noncedata.extend_from_slice(&ei.key.bytes); + let noncehash = ei.vcrypto.generate_hash(&noncedata); + out[0..NONCE_LENGTH].copy_from_slice(&noncehash[0..NONCE_LENGTH]) + } else { + // Random nonce + random_bytes(&mut out[0..NONCE_LENGTH]); + } + + let (nonce, encout) = out.split_at_mut(NONCE_LENGTH); + ei.vcrypto.crypt_b2b_no_auth( + data, + encout, + (nonce as &[u8]).try_into().unwrap(), + &ei.key, + ); + out + } else { + data.to_vec() + } + } + + /// Decrypt buffer using decrypt key with nonce prepended to input + fn maybe_decrypt(&self, data: &[u8]) -> Vec { + if let Some(di) = &self.unlocked_inner.decrypt_info { + assert!(data.len() >= NONCE_LENGTH); + if data.len() == NONCE_LENGTH { + return Vec::new(); + } + + let mut out = unsafe { unaligned_u8_vec_uninit(data.len() - NONCE_LENGTH) }; + + di.vcrypto.crypt_b2b_no_auth( + &data[NONCE_LENGTH..], + &mut out, + (&data[0..NONCE_LENGTH]).try_into().unwrap(), + &di.key, + ); + out + } else { + data.to_vec() + } + } + + /// Get the list of keys in a column of the TableDB + pub async fn get_keys(&self, col: u32) -> VeilidAPIResult>> { + let db = self.unlocked_inner.database.clone(); + let mut out = Vec::new(); + db.iter_keys(col, None, |k| { + out.push(self.maybe_decrypt(k)); + Ok(Option::<()>::None) + }) + .await + .map_err(VeilidAPIError::from)?; + Ok(out) + } + + /// Start a TableDB write transaction. The transaction object must be committed or rolled back before dropping. + pub fn transact(&self) -> TableDBTransaction { + let dbt = self.unlocked_inner.database.transaction(); + TableDBTransaction::new(self.clone(), dbt) + } + + /// Store a key with a value in a column in the TableDB. Performs a single transaction immediately. + pub async fn store(&self, col: u32, key: &[u8], value: &[u8]) -> VeilidAPIResult<()> { + let db = self.unlocked_inner.database.clone(); + let mut dbt = db.transaction(); + dbt.put( + col, + self.maybe_encrypt(key, true), + self.maybe_encrypt(value, false), + ); + db.write(dbt).await.map_err(VeilidAPIError::generic) + } + + /// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately. + pub async fn store_rkyv(&self, col: u32, key: &[u8], value: &T) -> VeilidAPIResult<()> + where + T: RkyvSerialize, + { + let value = to_rkyv(value)?; + self.store(col, key, &value).await + } + + /// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately. + pub async fn store_json(&self, col: u32, key: &[u8], value: &T) -> VeilidAPIResult<()> + where + T: serde::Serialize, + { + let value = serde_json::to_vec(value).map_err(VeilidAPIError::internal)?; + self.store(col, key, &value).await + } + + /// Read a key from a column in the TableDB immediately. 
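+ /// Returns None if the key is not present; when an encryption key is configured, the key is encrypted with a keyed nonce before lookup and the stored value is decrypted on the way out.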
+ pub async fn load(&self, col: u32, key: &[u8]) -> VeilidAPIResult>> { + let db = self.unlocked_inner.database.clone(); + let key = self.maybe_encrypt(key, true); + Ok(db + .get(col, &key) + .await + .map_err(VeilidAPIError::from)? + .map(|v| self.maybe_decrypt(&v))) + } + + /// Read an rkyv key from a column in the TableDB immediately + pub async fn load_rkyv(&self, col: u32, key: &[u8]) -> VeilidAPIResult> + where + T: RkyvArchive, + ::Archived: + for<'t> CheckBytes>, + ::Archived: RkyvDeserialize, + { + let out = match self.load(col, key).await? { + Some(v) => Some(from_rkyv(v)?), + None => None, + }; + Ok(out) + } + + /// Read an serde-json key from a column in the TableDB immediately + pub async fn load_json(&self, col: u32, key: &[u8]) -> VeilidAPIResult> + where + T: for<'de> serde::Deserialize<'de>, + { + let out = match self.load(col, key).await? { + Some(v) => Some(serde_json::from_slice(&v).map_err(VeilidAPIError::internal)?), + None => None, + }; + Ok(out) + } + + /// Delete key with from a column in the TableDB + pub async fn delete(&self, col: u32, key: &[u8]) -> VeilidAPIResult>> { + let key = self.maybe_encrypt(key, true); + + let db = self.unlocked_inner.database.clone(); + let old_value = db + .delete(col, &key) + .await + .map_err(VeilidAPIError::from)? + .map(|v| self.maybe_decrypt(&v)); + Ok(old_value) + } + + /// Delete rkyv key with from a column in the TableDB + pub async fn delete_rkyv(&self, col: u32, key: &[u8]) -> VeilidAPIResult> + where + T: RkyvArchive, + ::Archived: + for<'t> CheckBytes>, + ::Archived: RkyvDeserialize, + { + let old_value = match self.delete(col, key).await? { + Some(v) => Some(from_rkyv(v)?), + None => None, + }; + Ok(old_value) + } + + /// Delete serde-json key with from a column in the TableDB + pub async fn delete_json(&self, col: u32, key: &[u8]) -> VeilidAPIResult> + where + T: for<'de> serde::Deserialize<'de>, + { + let old_value = match self.delete(col, key).await? { + Some(v) => Some(serde_json::from_slice(&v).map_err(VeilidAPIError::internal)?), + None => None, + }; + Ok(old_value) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +struct TableDBTransactionInner { + dbt: Option, +} + +impl fmt::Debug for TableDBTransactionInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "TableDBTransactionInner({})", + match &self.dbt { + Some(dbt) => format!("len={}", dbt.ops.len()), + None => "".to_owned(), + } + ) + } +} + +/// A TableDB transaction +/// Atomically commits a group of writes or deletes to the TableDB +#[derive(Debug, Clone)] +pub struct TableDBTransaction { + db: TableDB, + inner: Arc>, +} + +impl TableDBTransaction { + fn new(db: TableDB, dbt: DBTransaction) -> Self { + Self { + db, + inner: Arc::new(Mutex::new(TableDBTransactionInner { dbt: Some(dbt) })), + } + } + + /// Commit the transaction. Performs all actions atomically. + pub async fn commit(self) -> VeilidAPIResult<()> { + let dbt = { + let mut inner = self.inner.lock(); + inner + .dbt + .take() + .ok_or_else(|| VeilidAPIError::generic("transaction already completed"))? + }; + + let db = self.db.unlocked_inner.database.clone(); + db.write(dbt) + .await + .map_err(|e| VeilidAPIError::generic(format!("commit failed, transaction lost: {}", e))) + } + + /// Rollback the transaction. Does nothing to the TableDB. 
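+ /// Pending puts and deletes are simply discarded; nothing is written to the underlying database.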
+ pub fn rollback(self) { + let mut inner = self.inner.lock(); + inner.dbt = None; + } + + /// Store a key with a value in a column in the TableDB + pub fn store(&self, col: u32, key: &[u8], value: &[u8]) { + let key = self.db.maybe_encrypt(key, true); + let value = self.db.maybe_encrypt(value, false); + let mut inner = self.inner.lock(); + inner.dbt.as_mut().unwrap().put_owned(col, key, value); + } + + /// Store a key in rkyv format with a value in a column in the TableDB + pub fn store_rkyv(&self, col: u32, key: &[u8], value: &T) -> VeilidAPIResult<()> + where + T: RkyvSerialize, + { + let value = to_rkyv(value)?; + let key = self.db.maybe_encrypt(key, true); + let value = self.db.maybe_encrypt(&value, false); + + let mut inner = self.inner.lock(); + inner.dbt.as_mut().unwrap().put_owned(col, key, value); + Ok(()) + } + + /// Store a key in rkyv format with a value in a column in the TableDB + pub fn store_json(&self, col: u32, key: &[u8], value: &T) -> VeilidAPIResult<()> + where + T: serde::Serialize, + { + let value = serde_json::to_vec(value).map_err(VeilidAPIError::internal)?; + let key = self.db.maybe_encrypt(key, true); + let value = self.db.maybe_encrypt(&value, false); + + let mut inner = self.inner.lock(); + inner.dbt.as_mut().unwrap().put_owned(col, key, value); + Ok(()) + } + + /// Delete key with from a column in the TableDB + pub fn delete(&self, col: u32, key: &[u8]) { + let key = self.db.maybe_encrypt(key, true); + let mut inner = self.inner.lock(); + inner.dbt.as_mut().unwrap().delete_owned(col, key); + } +} + +impl Drop for TableDBTransactionInner { + fn drop(&mut self) { + if self.dbt.is_some() { + warn!("Dropped transaction without commit or rollback"); + } + } +} diff --git a/veilid-core/src/table_store/table_store.rs b/veilid-core/src/table_store/table_store.rs new file mode 100644 index 00000000..648fa65c --- /dev/null +++ b/veilid-core/src/table_store/table_store.rs @@ -0,0 +1,557 @@ +use super::*; +use keyvaluedb::*; + +struct TableStoreInner { + opened: BTreeMap>, + encryption_key: Option, + all_table_names: HashMap, + all_tables_db: Option, + crypto: Option, +} + +/// Veilid Table Storage +/// Database for storing key value pairs persistently and securely across runs +#[derive(Clone)] +pub struct TableStore { + config: VeilidConfig, + protected_store: ProtectedStore, + table_store_driver: TableStoreDriver, + inner: Arc>, // Sync mutex here because TableDB drops can happen at any time + async_lock: Arc>, // Async mutex for operations +} + +impl TableStore { + fn new_inner() -> TableStoreInner { + TableStoreInner { + opened: BTreeMap::new(), + encryption_key: None, + all_table_names: HashMap::new(), + all_tables_db: None, + crypto: None, + } + } + pub(crate) fn new(config: VeilidConfig, protected_store: ProtectedStore) -> Self { + let inner = Self::new_inner(); + let table_store_driver = TableStoreDriver::new(config.clone()); + + Self { + config, + protected_store, + inner: Arc::new(Mutex::new(inner)), + table_store_driver, + async_lock: Arc::new(AsyncMutex::new(())), + } + } + + pub(crate) fn set_crypto(&self, crypto: Crypto) { + let mut inner = self.inner.lock(); + inner.crypto = Some(crypto); + } + + // Flush internal control state (must not use crypto) + async fn flush(&self) { + let (all_table_names_value, all_tables_db) = { + let inner = self.inner.lock(); + let all_table_names_value = + to_rkyv(&inner.all_table_names).expect("failed to archive all_table_names"); + (all_table_names_value, inner.all_tables_db.clone().unwrap()) + }; + let mut dbt = 
DBTransaction::new(); + dbt.put(0, b"all_table_names", &all_table_names_value); + if let Err(e) = all_tables_db.write(dbt).await { + error!("failed to write all tables db: {}", e); + } + } + + // Internal naming support + // Adds rename capability and ensures names of tables are totally unique and valid + + fn namespaced_name(&self, table: &str) -> VeilidAPIResult { + if !table + .chars() + .all(|c| char::is_alphanumeric(c) || c == '_' || c == '-') + { + apibail_invalid_argument!("table name is invalid", "table", table); + } + let c = self.config.get(); + let namespace = c.namespace.clone(); + Ok(if namespace.is_empty() { + table.to_string() + } else { + format!("_ns_{}_{}", namespace, table) + }) + } + + async fn name_get_or_create(&self, table: &str) -> VeilidAPIResult { + let name = self.namespaced_name(table)?; + + let mut inner = self.inner.lock(); + // Do we have this name yet? + if let Some(real_name) = inner.all_table_names.get(&name) { + return Ok(real_name.clone()); + } + + // If not, make a new low level name mapping + let mut real_name_bytes = [0u8; 32]; + random_bytes(&mut real_name_bytes); + let real_name = data_encoding::BASE64URL_NOPAD.encode(&real_name_bytes); + + if inner + .all_table_names + .insert(name.to_owned(), real_name.clone()) + .is_some() + { + panic!("should not have had some value"); + }; + + Ok(real_name) + } + + async fn name_delete(&self, table: &str) -> VeilidAPIResult> { + let name = self.namespaced_name(table)?; + let mut inner = self.inner.lock(); + let real_name = inner.all_table_names.remove(&name); + Ok(real_name) + } + + async fn name_get(&self, table: &str) -> VeilidAPIResult> { + let name = self.namespaced_name(table)?; + let inner = self.inner.lock(); + let real_name = inner.all_table_names.get(&name).cloned(); + Ok(real_name) + } + + async fn name_rename(&self, old_table: &str, new_table: &str) -> VeilidAPIResult<()> { + let old_name = self.namespaced_name(old_table)?; + let new_name = self.namespaced_name(new_table)?; + + let mut inner = self.inner.lock(); + // Ensure new name doesn't exist + if inner.all_table_names.contains_key(&new_name) { + return Err(VeilidAPIError::generic("new table already exists")); + } + // Do we have this name yet? 
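+ // (remove() both checks that the old name exists and frees it for the rename in one step)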
+ let Some(real_name) = inner.all_table_names.remove(&old_name) else { + return Err(VeilidAPIError::generic("table does not exist")); + }; + // Insert with new name + inner.all_table_names.insert(new_name.to_owned(), real_name); + + Ok(()) + } + + /// Delete all known tables + async fn delete_all(&self) { + // Get all tables + let real_names = { + let mut inner = self.inner.lock(); + let real_names = inner + .all_table_names + .values() + .cloned() + .collect::>(); + inner.all_table_names.clear(); + real_names + }; + + // Delete all tables + for table_name in real_names { + if let Err(e) = self.table_store_driver.delete(&table_name).await { + error!("error deleting table: {}", e); + } + } + self.flush().await; + } + + pub fn maybe_unprotect_device_encryption_key( + &self, + dek_bytes: &[u8], + device_encryption_key_password: &str, + ) -> EyreResult { + // Ensure the key is at least as long as necessary if unencrypted + if dek_bytes.len() < (4 + SHARED_SECRET_LENGTH) { + bail!("device encryption key is not valid"); + } + + // Get cryptosystem + let kind = FourCC::try_from(&dek_bytes[0..4]).unwrap(); + let crypto = self.inner.lock().crypto.as_ref().unwrap().clone(); + let Some(vcrypto) = crypto.get(kind) else { + bail!("unsupported cryptosystem"); + }; + + if !device_encryption_key_password.is_empty() { + if dek_bytes.len() + != (4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead() + NONCE_LENGTH) + { + bail!("password protected device encryption key is not valid"); + } + let protected_key = &dek_bytes[4..(4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead())]; + let nonce = &dek_bytes[(4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead())..]; + + let shared_secret = vcrypto + .derive_shared_secret(device_encryption_key_password.as_bytes(), &nonce) + .wrap_err("failed to derive shared secret")?; + let unprotected_key = vcrypto + .decrypt_aead( + &protected_key, + &Nonce::try_from(nonce).wrap_err("invalid nonce")?, + &shared_secret, + None, + ) + .wrap_err("failed to decrypt device encryption key")?; + return Ok(TypedSharedSecret::new( + kind, + SharedSecret::try_from(unprotected_key.as_slice()) + .wrap_err("invalid shared secret")?, + )); + } + + if dek_bytes.len() != (4 + SHARED_SECRET_LENGTH) { + bail!("password protected device encryption key is not valid"); + } + + Ok(TypedSharedSecret::new( + kind, + SharedSecret::try_from(&dek_bytes[4..])?, + )) + } + + pub fn maybe_protect_device_encryption_key( + &self, + dek: TypedSharedSecret, + device_encryption_key_password: &str, + ) -> EyreResult> { + // Check if we are to protect the key + if device_encryption_key_password.is_empty() { + debug!("no dek password"); + // Return the unprotected key bytes + let mut out = Vec::with_capacity(4 + SHARED_SECRET_LENGTH); + out.extend_from_slice(&dek.kind.0); + out.extend_from_slice(&dek.value.bytes); + return Ok(out); + } + + // Get cryptosystem + let crypto = self.inner.lock().crypto.as_ref().unwrap().clone(); + let Some(vcrypto) = crypto.get(dek.kind) else { + bail!("unsupported cryptosystem"); + }; + + let nonce = vcrypto.random_nonce(); + let shared_secret = vcrypto + .derive_shared_secret(device_encryption_key_password.as_bytes(), &nonce.bytes) + .wrap_err("failed to derive shared secret")?; + let mut protected_key = vcrypto + .encrypt_aead( + &dek.value.bytes, + &Nonce::try_from(nonce).wrap_err("invalid nonce")?, + &shared_secret, + None, + ) + .wrap_err("failed to decrypt device encryption key")?; + let mut out = + Vec::with_capacity(4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead() + NONCE_LENGTH); 
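+ // Protected key layout: [4-byte crypto kind][AEAD-encrypted secret + tag][nonce]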
+ out.extend_from_slice(&dek.kind.0); + out.append(&mut protected_key); + out.extend_from_slice(&nonce.bytes); + assert!(out.len() == 4 + SHARED_SECRET_LENGTH + vcrypto.aead_overhead() + NONCE_LENGTH); + Ok(out) + } + + async fn load_device_encryption_key(&self) -> EyreResult> { + let dek_bytes: Option> = self + .protected_store + .load_user_secret("device_encryption_key") + .await?; + let Some(dek_bytes) = dek_bytes else { + debug!("no device encryption key"); + return Ok(None); + }; + + // Get device encryption key protection password if we have it + let device_encryption_key_password = { + let c = self.config.get(); + c.protected_store.device_encryption_key_password.clone() + }; + + Ok(Some(self.maybe_unprotect_device_encryption_key( + &dek_bytes, + &device_encryption_key_password, + )?)) + } + async fn save_device_encryption_key( + &self, + device_encryption_key: Option, + ) -> EyreResult<()> { + let Some(device_encryption_key) = device_encryption_key else { + // Remove the device encryption key + let existed = self + .protected_store + .remove_user_secret("device_encryption_key") + .await?; + debug!("removed device encryption key. existed: {}", existed); + return Ok(()); + }; + + // Get new device encryption key protection password if we are changing it + let new_device_encryption_key_password = { + let c = self.config.get(); + c.protected_store.new_device_encryption_key_password.clone() + }; + let device_encryption_key_password = + if let Some(new_device_encryption_key_password) = new_device_encryption_key_password { + // Change password + debug!("changing dek password"); + self.config + .with_mut(|c| { + c.protected_store.device_encryption_key_password = + new_device_encryption_key_password.clone(); + Ok(new_device_encryption_key_password) + }) + .unwrap() + } else { + // Get device encryption key protection password if we have it + debug!("saving with existing dek password"); + let c = self.config.get(); + c.protected_store.device_encryption_key_password.clone() + }; + + let dek_bytes = self.maybe_protect_device_encryption_key( + device_encryption_key, + &device_encryption_key_password, + )?; + + // Save the new device encryption key + let existed = self + .protected_store + .save_user_secret("device_encryption_key", &dek_bytes) + .await?; + debug!("saving device encryption key. existed: {}", existed); + Ok(()) + } + + pub(crate) async fn init(&self) -> EyreResult<()> { + let _async_guard = self.async_lock.lock().await; + + // Get device encryption key from protected store + let mut device_encryption_key = self.load_device_encryption_key().await?; + let mut device_encryption_key_changed = false; + if let Some(device_encryption_key) = device_encryption_key { + // If encryption in current use is not the best encryption, then run table migration + let best_kind = best_crypto_kind(); + if device_encryption_key.kind != best_kind { + // XXX: Run migration. 
See issue #209 + } + } else { + // If we don't have an encryption key yet, then make one with the best cryptography and save it + let best_kind = best_crypto_kind(); + let mut shared_secret = SharedSecret::default(); + random_bytes(&mut shared_secret.bytes); + + device_encryption_key = Some(TypedSharedSecret::new(best_kind, shared_secret)); + device_encryption_key_changed = true; + } + + // Check for password change + let changing_password = self + .config + .get() + .protected_store + .new_device_encryption_key_password + .is_some(); + + // Save encryption key if it has changed or if the protecting password wants to change + if device_encryption_key_changed || changing_password { + self.save_device_encryption_key(device_encryption_key) + .await?; + } + + // Deserialize all table names + let all_tables_db = self + .table_store_driver + .open("__veilid_all_tables", 1) + .await + .wrap_err("failed to create all tables table")?; + match all_tables_db.get(0, b"all_table_names").await { + Ok(Some(v)) => match from_rkyv::>(v) { + Ok(all_table_names) => { + let mut inner = self.inner.lock(); + inner.all_table_names = all_table_names; + } + Err(e) => { + error!("could not deserialize __veilid_all_tables: {}", e); + } + }, + Ok(None) => { + // No table names yet, that's okay + trace!("__veilid_all_tables is empty"); + } + Err(e) => { + error!("could not get __veilid_all_tables: {}", e); + } + }; + + { + let mut inner = self.inner.lock(); + inner.encryption_key = device_encryption_key; + inner.all_tables_db = Some(all_tables_db); + } + + let do_delete = { + let c = self.config.get(); + c.table_store.delete + }; + + if do_delete { + self.delete_all().await; + } + + Ok(()) + } + + pub(crate) async fn terminate(&self) { + let _async_guard = self.async_lock.lock().await; + + self.flush().await; + + let mut inner = self.inner.lock(); + if !inner.opened.is_empty() { + panic!( + "all open databases should have been closed: {:?}", + inner.opened + ); + } + inner.all_tables_db = None; + inner.all_table_names.clear(); + inner.encryption_key = None; + } + + pub(crate) fn on_table_db_drop(&self, table: String) { + let mut inner = self.inner.lock(); + if inner.opened.remove(&table).is_none() { + unreachable!("should have removed an item"); + } + } + + /// Get or create a TableDB database table. 
If the column count is greater than an + /// existing TableDB's column count, the database will be upgraded to add the missing columns + pub async fn open(&self, name: &str, column_count: u32) -> VeilidAPIResult { + let _async_guard = self.async_lock.lock().await; + + // If we aren't initialized yet, bail + { + let inner = self.inner.lock(); + if inner.all_tables_db.is_none() { + apibail_not_initialized!(); + } + } + + let table_name = self.name_get_or_create(name).await?; + + // See if this table is already opened + { + let mut inner = self.inner.lock(); + if let Some(table_db_weak_inner) = inner.opened.get(&table_name) { + match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) { + Some(tdb) => { + return Ok(tdb); + } + None => { + inner.opened.remove(&table_name); + } + }; + } + } + + // Open table db using platform-specific driver + let db = match self + .table_store_driver + .open(&table_name, column_count) + .await + { + Ok(db) => db, + Err(e) => { + self.name_delete(name).await.expect("cleanup failed"); + self.flush().await; + return Err(e); + } + }; + + // Flush table names to disk + self.flush().await; + + // Wrap low-level Database in TableDB object + let mut inner = self.inner.lock(); + let table_db = TableDB::new( + table_name.clone(), + self.clone(), + inner.crypto.as_ref().unwrap().clone(), + db, + inner.encryption_key.clone(), + inner.encryption_key.clone(), + ); + + // Keep track of opened DBs + inner + .opened + .insert(table_name.clone(), table_db.weak_inner()); + + Ok(table_db) + } + + /// Delete a TableDB table by name + pub async fn delete(&self, name: &str) -> VeilidAPIResult { + let _async_guard = self.async_lock.lock().await; + // If we aren't initialized yet, bail + { + let inner = self.inner.lock(); + if inner.all_tables_db.is_none() { + apibail_not_initialized!(); + } + } + + let Some(table_name) = self.name_get(name).await? else { + // Did not exist in name table + return Ok(false); + }; + + // See if this table is opened + { + let inner = self.inner.lock(); + if inner.opened.contains_key(&table_name) { + apibail_generic!("Not deleting table that is still opened"); + } + } + + // Delete table db using platform-specific driver + let deleted = self.table_store_driver.delete(&table_name).await?; + if !deleted { + // Table missing? 
Just remove name + self.name_delete(&name) + .await + .expect("failed to delete name"); + warn!( + "table existed in name table but not in storage: {} : {}", + name, table_name + ); + return Ok(false); + } + + Ok(true) + } + + /// Rename a TableDB table + pub async fn rename(&self, old_name: &str, new_name: &str) -> VeilidAPIResult<()> { + let _async_guard = self.async_lock.lock().await; + // If we aren't initialized yet, bail + { + let inner = self.inner.lock(); + if inner.all_tables_db.is_none() { + apibail_not_initialized!(); + } + } + trace!("TableStore::rename {} -> {}", old_name, new_name); + self.name_rename(old_name, new_name).await + } +} diff --git a/veilid-core/src/table_store/tests/mod.rs b/veilid-core/src/table_store/tests/mod.rs new file mode 100644 index 00000000..e749760f --- /dev/null +++ b/veilid-core/src/table_store/tests/mod.rs @@ -0,0 +1 @@ +pub mod test_table_store; diff --git a/veilid-core/src/table_store/tests/test_table_store.rs b/veilid-core/src/table_store/tests/test_table_store.rs new file mode 100644 index 00000000..41bfd022 --- /dev/null +++ b/veilid-core/src/table_store/tests/test_table_store.rs @@ -0,0 +1,277 @@ +use crate::tests::test_veilid_config::*; +use crate::*; + +async fn startup() -> VeilidAPI { + trace!("test_table_store: starting"); + let (update_callback, config_callback) = setup_veilid_core(); + api_startup(update_callback, config_callback) + .await + .expect("startup failed") +} + +async fn shutdown(api: VeilidAPI) { + trace!("test_table_store: shutting down"); + api.shutdown().await; + trace!("test_table_store: finished"); +} + +pub async fn test_delete_open_delete(ts: TableStore) { + trace!("test_delete_open_delete"); + + let _ = ts.delete("test"); + let db = ts.open("test", 3).await.expect("should have opened"); + assert!( + ts.delete("test").await.is_err(), + "should fail because file is opened" + ); + drop(db); + assert!( + ts.delete("test").await.is_ok(), + "should succeed because file is closed" + ); + let db = ts.open("test", 3).await.expect("should have opened"); + assert!( + ts.delete("test").await.is_err(), + "should fail because file is opened" + ); + drop(db); + let db = ts.open("test", 3).await.expect("should have opened"); + assert!( + ts.delete("test").await.is_err(), + "should fail because file is opened" + ); + drop(db); + assert!( + ts.delete("test").await.is_ok(), + "should succeed because file is closed" + ); +} + +pub async fn test_store_delete_load(ts: TableStore) { + trace!("test_store_delete_load"); + + let _ = ts.delete("test"); + let db = ts.open("test", 3).await.expect("should have opened"); + assert!( + ts.delete("test").await.is_err(), + "should fail because file is opened" + ); + + assert_eq!( + db.load(0, b"foo").await.unwrap(), + None, + "should not load missing key" + ); + assert!( + db.store(1, b"foo", b"1234567890").await.is_ok(), + "should store new key" + ); + assert_eq!( + db.load(0, b"foo").await.unwrap(), + None, + "should not load missing key" + ); + assert_eq!( + db.load(1, b"foo").await.unwrap(), + Some(b"1234567890".to_vec()) + ); + + assert!( + db.store(1, b"bar", b"FNORD").await.is_ok(), + "should store new key" + ); + assert!( + db.store(0, b"bar", b"ABCDEFGHIJKLMNOPQRSTUVWXYZ") + .await + .is_ok(), + "should store new key" + ); + assert!( + db.store(2, b"bar", b"FNORD").await.is_ok(), + "should store new key" + ); + assert!( + db.store(2, b"baz", b"QWERTY").await.is_ok(), + "should store new key" + ); + assert!( + db.store(2, b"bar", b"QWERTYUIOP").await.is_ok(), + "should store new key" + ); + + 
assert_eq!(db.load(1, b"bar").await.unwrap(), Some(b"FNORD".to_vec())); + assert_eq!( + db.load(0, b"bar").await.unwrap(), + Some(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ".to_vec()) + ); + assert_eq!( + db.load(2, b"bar").await.unwrap(), + Some(b"QWERTYUIOP".to_vec()) + ); + assert_eq!(db.load(2, b"baz").await.unwrap(), Some(b"QWERTY".to_vec())); + + assert_eq!(db.delete(1, b"bar").await.unwrap(), Some(b"FNORD".to_vec())); + assert_eq!(db.delete(1, b"bar").await.unwrap(), None); + assert!( + db.delete(4, b"bar").await.is_err(), + "can't delete from column that doesn't exist" + ); + + drop(db); + let db = ts.open("test", 3).await.expect("should have opened"); + + assert_eq!(db.load(1, b"bar").await.unwrap(), None); + assert_eq!( + db.load(0, b"bar").await.unwrap(), + Some(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ".to_vec()) + ); + assert_eq!( + db.load(2, b"bar").await.unwrap(), + Some(b"QWERTYUIOP".to_vec()) + ); + assert_eq!(db.load(2, b"baz").await.unwrap(), Some(b"QWERTY".to_vec())); +} + +pub async fn test_rkyv(vcrypto: CryptoSystemVersion, ts: TableStore) { + trace!("test_rkyv"); + + let _ = ts.delete("test"); + let db = ts.open("test", 3).await.expect("should have opened"); + let keypair = vcrypto.generate_keypair(); + + assert!(db.store_rkyv(0, b"asdf", &keypair).await.is_ok()); + + assert_eq!(db.load_rkyv::(0, b"qwer").await.unwrap(), None); + + let d = match db.load_rkyv::(0, b"asdf").await { + Ok(x) => x, + Err(e) => { + panic!("couldn't decode: {}", e); + } + }; + assert_eq!(d, Some(keypair), "keys should be equal"); + + let d = match db.delete_rkyv::(0, b"asdf").await { + Ok(x) => x, + Err(e) => { + panic!("couldn't decode: {}", e); + } + }; + assert_eq!(d, Some(keypair), "keys should be equal"); + + assert!( + db.store(1, b"foo", b"1234567890").await.is_ok(), + "should store new key" + ); + + assert!( + db.load_rkyv::(1, b"foo").await.is_err(), + "should fail to unfreeze" + ); +} + +pub async fn test_json(vcrypto: CryptoSystemVersion, ts: TableStore) { + trace!("test_json"); + + let _ = ts.delete("test"); + let db = ts.open("test", 3).await.expect("should have opened"); + let keypair = vcrypto.generate_keypair(); + + assert!(db.store_json(0, b"asdf", &keypair).await.is_ok()); + + assert_eq!(db.load_json::(0, b"qwer").await.unwrap(), None); + + let d = match db.load_json::(0, b"asdf").await { + Ok(x) => x, + Err(e) => { + panic!("couldn't decode: {}", e); + } + }; + assert_eq!(d, Some(keypair), "keys should be equal"); + + let d = match db.delete_json::(0, b"asdf").await { + Ok(x) => x, + Err(e) => { + panic!("couldn't decode: {}", e); + } + }; + assert_eq!(d, Some(keypair), "keys should be equal"); + + assert!( + db.store(1, b"foo", b"1234567890").await.is_ok(), + "should store new key" + ); + + assert!( + db.load_json::(1, b"foo").await.is_err(), + "should fail to unfreeze" + ); +} + +pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore) { + trace!("test_protect_unprotect"); + + let dek1 = TypedSharedSecret::new( + vcrypto.kind(), + SharedSecret::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + ); + let dek2 = TypedSharedSecret::new( + vcrypto.kind(), + SharedSecret::new([ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0xFF, + ]), + ); + let dek3 = TypedSharedSecret::new( + vcrypto.kind(), + SharedSecret::new([0x80u8; SHARED_SECRET_LENGTH]), + ); + + let deks = [dek1, dek2, dek3]; + let passwords = ["", " ", " ", "12345678", "|/\\!@#$%^&*()_+", "Ⓜ️", "🔥🔥♾️"]; 
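+    // For every combination of device encryption key and password below, protecting and
+    // then unprotecting must round-trip the key exactly; a modified password must fail to
+    // unprotect, and the empty password must only succeed when it was the password used.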
+ + for dek in deks { + for password in passwords { + let dek_bytes = ts + .maybe_protect_device_encryption_key(dek, password) + .expect(&format!("protect: dek: '{}' pw: '{}'", dek, password)); + let unprotected = ts + .maybe_unprotect_device_encryption_key(&dek_bytes, password) + .expect(&format!("unprotect: dek: '{}' pw: '{}'", dek, password)); + assert_eq!(unprotected, dek); + let invalid_password = format!("{}x", password); + let _ = ts + .maybe_unprotect_device_encryption_key(&dek_bytes, &invalid_password) + .expect_err(&format!( + "invalid_password: dek: '{}' pw: '{}'", + dek, &invalid_password + )); + if password != "" { + let _ = ts + .maybe_unprotect_device_encryption_key(&dek_bytes, "") + .expect_err(&format!("empty_password: dek: '{}' pw: ''", dek)); + } + } + } +} + +pub async fn test_all() { + let api = startup().await; + let crypto = api.crypto().unwrap(); + let ts = api.table_store().unwrap(); + + for ck in VALID_CRYPTO_KINDS { + let vcrypto = crypto.get(ck).unwrap(); + test_protect_unprotect(vcrypto.clone(), ts.clone()).await; + test_delete_open_delete(ts.clone()).await; + test_store_delete_load(ts.clone()).await; + test_rkyv(vcrypto.clone(), ts.clone()).await; + test_json(vcrypto, ts.clone()).await; + let _ = ts.delete("test").await; + } + + shutdown(api).await; +} diff --git a/veilid-core/src/table_store/wasm.rs b/veilid-core/src/table_store/wasm.rs new file mode 100644 index 00000000..71b2b4fa --- /dev/null +++ b/veilid-core/src/table_store/wasm.rs @@ -0,0 +1,40 @@ +use super::*; +pub use keyvaluedb_web::*; + +#[derive(Clone)] +pub struct TableStoreDriver { + _config: VeilidConfig, +} + +impl TableStoreDriver { + pub(crate) fn new(config: VeilidConfig) -> Self { + Self { _config: config } + } + + pub async fn open(&self, table_name: &str, column_count: u32) -> VeilidAPIResult { + let db = Database::open(table_name, column_count, false) + .await + .map_err(VeilidAPIError::generic)?; + trace!( + "opened table store '{}' with {} columns", + table_name, + column_count + ); + Ok(db) + } + + /// Delete a TableDB table by name + pub async fn delete(&self, table_name: &str) -> VeilidAPIResult { + if is_browser() { + let out = Database::delete(table_name).await.is_ok(); + if out { + trace!("TableStore::delete {} deleted", table_name); + } else { + debug!("TableStore::delete {} not deleted", table_name); + } + Ok(out) + } else { + unimplemented!(); + } + } +} diff --git a/veilid-core/src/tests/common/mod.rs b/veilid-core/src/tests/common/mod.rs index f0fbc066..13d151cd 100644 --- a/veilid-core/src/tests/common/mod.rs +++ b/veilid-core/src/tests/common/mod.rs @@ -1,5 +1,4 @@ pub mod test_host_interface; pub mod test_protected_store; -pub mod test_table_store; pub mod test_veilid_config; pub mod test_veilid_core; diff --git a/veilid-core/src/tests/common/test_table_store.rs b/veilid-core/src/tests/common/test_table_store.rs deleted file mode 100644 index d7af7094..00000000 --- a/veilid-core/src/tests/common/test_table_store.rs +++ /dev/null @@ -1,170 +0,0 @@ -use super::test_veilid_config::*; -use crate::*; - -async fn startup() -> VeilidAPI { - trace!("test_table_store: starting"); - let (update_callback, config_callback) = setup_veilid_core(); - api_startup(update_callback, config_callback) - .await - .expect("startup failed") -} - -async fn shutdown(api: VeilidAPI) { - trace!("test_table_store: shutting down"); - api.shutdown().await; - trace!("test_table_store: finished"); -} - -pub async fn test_delete_open_delete(ts: TableStore) { - trace!("test_delete_open_delete"); - - let _ 
= ts.delete("test"); - let db = ts.open("test", 3).await.expect("should have opened"); - assert!( - ts.delete("test").await.is_err(), - "should fail because file is opened" - ); - drop(db); - assert!( - ts.delete("test").await.is_ok(), - "should succeed because file is closed" - ); - let db = ts.open("test", 3).await.expect("should have opened"); - assert!( - ts.delete("test").await.is_err(), - "should fail because file is opened" - ); - drop(db); - let db = ts.open("test", 3).await.expect("should have opened"); - assert!( - ts.delete("test").await.is_err(), - "should fail because file is opened" - ); - drop(db); - assert!( - ts.delete("test").await.is_ok(), - "should succeed because file is closed" - ); -} - -pub async fn test_store_delete_load(ts: TableStore) { - trace!("test_store_delete_load"); - - let _ = ts.delete("test"); - let db = ts.open("test", 3).await.expect("should have opened"); - assert!( - ts.delete("test").await.is_err(), - "should fail because file is opened" - ); - - assert_eq!( - db.load(0, b"foo").unwrap(), - None, - "should not load missing key" - ); - assert!( - db.store(1, b"foo", b"1234567890").await.is_ok(), - "should store new key" - ); - assert_eq!( - db.load(0, b"foo").unwrap(), - None, - "should not load missing key" - ); - assert_eq!(db.load(1, b"foo").unwrap(), Some(b"1234567890".to_vec())); - - assert!( - db.store(1, b"bar", b"FNORD").await.is_ok(), - "should store new key" - ); - assert!( - db.store(0, b"bar", b"ABCDEFGHIJKLMNOPQRSTUVWXYZ") - .await - .is_ok(), - "should store new key" - ); - assert!( - db.store(2, b"bar", b"FNORD").await.is_ok(), - "should store new key" - ); - assert!( - db.store(2, b"baz", b"QWERTY").await.is_ok(), - "should store new key" - ); - assert!( - db.store(2, b"bar", b"QWERTYUIOP").await.is_ok(), - "should store new key" - ); - - assert_eq!(db.load(1, b"bar").unwrap(), Some(b"FNORD".to_vec())); - assert_eq!( - db.load(0, b"bar").unwrap(), - Some(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ".to_vec()) - ); - assert_eq!(db.load(2, b"bar").unwrap(), Some(b"QWERTYUIOP".to_vec())); - assert_eq!(db.load(2, b"baz").unwrap(), Some(b"QWERTY".to_vec())); - - assert_eq!(db.delete(1, b"bar").await.unwrap(), true); - assert_eq!(db.delete(1, b"bar").await.unwrap(), false); - assert!( - db.delete(4, b"bar").await.is_err(), - "can't delete from column that doesn't exist" - ); - - drop(db); - let db = ts.open("test", 3).await.expect("should have opened"); - - assert_eq!(db.load(1, b"bar").unwrap(), None); - assert_eq!( - db.load(0, b"bar").unwrap(), - Some(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ".to_vec()) - ); - assert_eq!(db.load(2, b"bar").unwrap(), Some(b"QWERTYUIOP".to_vec())); - assert_eq!(db.load(2, b"baz").unwrap(), Some(b"QWERTY".to_vec())); -} - -pub async fn test_frozen(vcrypto: CryptoSystemVersion, ts: TableStore) { - trace!("test_frozen"); - - let _ = ts.delete("test"); - let db = ts.open("test", 3).await.expect("should have opened"); - let keypair = vcrypto.generate_keypair(); - - assert!(db.store_rkyv(0, b"asdf", &keypair).await.is_ok()); - - assert_eq!(db.load_rkyv::(0, b"qwer").unwrap(), None); - - let d = match db.load_rkyv::(0, b"asdf") { - Ok(x) => x, - Err(e) => { - panic!("couldn't decode: {}", e); - } - }; - assert_eq!(d, Some(keypair), "keys should be equal"); - - assert!( - db.store(1, b"foo", b"1234567890").await.is_ok(), - "should store new key" - ); - - assert!( - db.load_rkyv::(1, b"foo").is_err(), - "should fail to unfreeze" - ); -} - -pub async fn test_all() { - let api = startup().await; - let crypto = api.crypto().unwrap(); - let ts = 
api.table_store().unwrap(); - - for ck in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(ck).unwrap(); - test_delete_open_delete(ts.clone()).await; - test_store_delete_load(ts.clone()).await; - test_frozen(vcrypto, ts.clone()).await; - let _ = ts.delete("test").await; - } - - shutdown(api).await; -} diff --git a/veilid-core/src/tests/common/test_veilid_config.rs b/veilid-core/src/tests/common/test_veilid_config.rs index 9347a7a2..f7951d6d 100644 --- a/veilid-core/src/tests/common/test_veilid_config.rs +++ b/veilid-core/src/tests/common/test_veilid_config.rs @@ -166,7 +166,7 @@ pub fn setup_veilid_core() -> (UpdateCallback, ConfigCallback) { fn config_callback(key: String) -> ConfigCallbackReturn { match key.as_str() { - "program_name" => Ok(Box::new(String::from("Veilid"))), + "program_name" => Ok(Box::new(String::from("VeilidCoreTests"))), "namespace" => Ok(Box::new(String::from(""))), "capabilities.protocol_udp" => Ok(Box::new(true)), "capabilities.protocol_connect_tcp" => Ok(Box::new(true)), @@ -176,13 +176,17 @@ fn config_callback(key: String) -> ConfigCallbackReturn { "capabilities.protocol_connect_wss" => Ok(Box::new(true)), "capabilities.protocol_accept_wss" => Ok(Box::new(true)), "table_store.directory" => Ok(Box::new(get_table_store_path())), - "table_store.delete" => Ok(Box::new(false)), + "table_store.delete" => Ok(Box::new(true)), "block_store.directory" => Ok(Box::new(get_block_store_path())), - "block_store.delete" => Ok(Box::new(false)), + "block_store.delete" => Ok(Box::new(true)), "protected_store.allow_insecure_fallback" => Ok(Box::new(true)), "protected_store.always_use_insecure_storage" => Ok(Box::new(false)), - "protected_store.insecure_fallback_directory" => Ok(Box::new(get_protected_store_path())), - "protected_store.delete" => Ok(Box::new(false)), + "protected_store.directory" => Ok(Box::new(get_protected_store_path())), + "protected_store.delete" => Ok(Box::new(true)), + "protected_store.device_encryption_key_password" => Ok(Box::new("".to_owned())), + "protected_store.new_device_encryption_key_password" => { + Ok(Box::new(Option::::None)) + } "network.connection_initial_timeout_ms" => Ok(Box::new(2_000u32)), "network.connection_inactivity_timeout_ms" => Ok(Box::new(60_000u32)), "network.max_connections_per_ip4" => Ok(Box::new(8u32)), @@ -204,22 +208,28 @@ fn config_callback(key: String) -> ConfigCallbackReturn { "network.rpc.queue_size" => Ok(Box::new(1024u32)), "network.rpc.max_timestamp_behind_ms" => Ok(Box::new(Some(10_000u32))), "network.rpc.max_timestamp_ahead_ms" => Ok(Box::new(Some(10_000u32))), - "network.rpc.timeout_ms" => Ok(Box::new(10_000u32)), + "network.rpc.timeout_ms" => Ok(Box::new(5_000u32)), "network.rpc.max_route_hop_count" => Ok(Box::new(4u8)), "network.rpc.default_route_hop_count" => Ok(Box::new(1u8)), - "network.dht.resolve_node_timeout_ms" => Ok(Box::new(Option::::None)), - "network.dht.resolve_node_count" => Ok(Box::new(20u32)), - "network.dht.resolve_node_fanout" => Ok(Box::new(3u32)), "network.dht.max_find_node_count" => Ok(Box::new(20u32)), - "network.dht.get_value_timeout_ms" => Ok(Box::new(Option::::None)), - "network.dht.get_value_count" => Ok(Box::new(20u32)), - "network.dht.get_value_fanout" => Ok(Box::new(3u32)), - "network.dht.set_value_timeout_ms" => Ok(Box::new(Option::::None)), - "network.dht.set_value_count" => Ok(Box::new(20u32)), - "network.dht.set_value_fanout" => Ok(Box::new(5u32)), + "network.dht.resolve_node_timeout_ms" => Ok(Box::new(10_000u32)), + "network.dht.resolve_node_count" => Ok(Box::new(1u32)), + 
"network.dht.resolve_node_fanout" => Ok(Box::new(4u32)), + "network.dht.get_value_timeout_ms" => Ok(Box::new(10_000u32)), + "network.dht.get_value_count" => Ok(Box::new(3u32)), + "network.dht.get_value_fanout" => Ok(Box::new(4u32)), + "network.dht.set_value_timeout_ms" => Ok(Box::new(10_000u32)), + "network.dht.set_value_count" => Ok(Box::new(5u32)), + "network.dht.set_value_fanout" => Ok(Box::new(4u32)), "network.dht.min_peer_count" => Ok(Box::new(20u32)), "network.dht.min_peer_refresh_time_ms" => Ok(Box::new(2_000u32)), "network.dht.validate_dial_info_receipt_time_ms" => Ok(Box::new(5_000u32)), + "network.dht.local_subkey_cache_size" => Ok(Box::new(128u32)), + "network.dht.local_max_subkey_cache_memory_mb" => Ok(Box::new(256u32)), + "network.dht.remote_subkey_cache_size" => Ok(Box::new(1024u32)), + "network.dht.remote_max_records" => Ok(Box::new(4096u32)), + "network.dht.remote_max_subkey_cache_memory_mb" => Ok(Box::new(64u32)), + "network.dht.remote_max_storage_space_mb" => Ok(Box::new(64u32)), "network.upnp" => Ok(Box::new(false)), "network.detect_address_changes" => Ok(Box::new(true)), "network.restricted_nat_retries" => Ok(Box::new(3u32)), @@ -286,7 +296,7 @@ pub async fn test_config() { } let inner = vc.get(); - assert_eq!(inner.program_name, String::from("Veilid")); + assert_eq!(inner.program_name, String::from("VeilidCoreTests")); assert_eq!(inner.namespace, String::from("")); assert_eq!(inner.capabilities.protocol_udp, true); assert_eq!(inner.capabilities.protocol_connect_tcp, true); @@ -296,16 +306,21 @@ pub async fn test_config() { assert_eq!(inner.capabilities.protocol_connect_wss, true); assert_eq!(inner.capabilities.protocol_accept_wss, true); assert_eq!(inner.table_store.directory, get_table_store_path()); - assert_eq!(inner.table_store.delete, false); + assert_eq!(inner.table_store.delete, true); assert_eq!(inner.block_store.directory, get_block_store_path()); - assert_eq!(inner.block_store.delete, false); + assert_eq!(inner.block_store.delete, true); assert_eq!(inner.protected_store.allow_insecure_fallback, true); assert_eq!(inner.protected_store.always_use_insecure_storage, false); + assert_eq!(inner.protected_store.directory, get_protected_store_path()); + assert_eq!(inner.protected_store.delete, true); assert_eq!( - inner.protected_store.insecure_fallback_directory, - get_protected_store_path() + inner.protected_store.device_encryption_key_password, + "".to_owned() + ); + assert_eq!( + inner.protected_store.new_device_encryption_key_password, + Option::::None ); - assert_eq!(inner.protected_store.delete, false); assert_eq!(inner.network.connection_initial_timeout_ms, 2_000u32); assert_eq!(inner.network.connection_inactivity_timeout_ms, 60_000u32); assert_eq!(inner.network.max_connections_per_ip4, 8u32); @@ -317,7 +332,7 @@ pub async fn test_config() { assert_eq!(inner.network.hole_punch_receipt_time_ms, 5_000u32); assert_eq!(inner.network.rpc.concurrency, 2u32); assert_eq!(inner.network.rpc.queue_size, 1024u32); - assert_eq!(inner.network.rpc.timeout_ms, 10_000u32); + assert_eq!(inner.network.rpc.timeout_ms, 5_000u32); assert_eq!(inner.network.rpc.max_route_hop_count, 4u8); assert_eq!(inner.network.rpc.default_route_hop_count, 1u8); assert_eq!(inner.network.routing_table.node_id.len(), 0); @@ -329,18 +344,16 @@ pub async fn test_config() { assert_eq!(inner.network.routing_table.limit_attached_good, 8u32); assert_eq!(inner.network.routing_table.limit_attached_weak, 4u32); - assert_eq!( - inner.network.dht.resolve_node_timeout_ms, - Option::::None - ); - 
assert_eq!(inner.network.dht.resolve_node_count, 20u32); - assert_eq!(inner.network.dht.resolve_node_fanout, 3u32); - assert_eq!(inner.network.dht.get_value_timeout_ms, Option::::None); - assert_eq!(inner.network.dht.get_value_count, 20u32); - assert_eq!(inner.network.dht.get_value_fanout, 3u32); - assert_eq!(inner.network.dht.set_value_timeout_ms, Option::::None); - assert_eq!(inner.network.dht.set_value_count, 20u32); - assert_eq!(inner.network.dht.set_value_fanout, 5u32); + assert_eq!(inner.network.dht.max_find_node_count, 20u32); + assert_eq!(inner.network.dht.resolve_node_timeout_ms, 10_000u32); + assert_eq!(inner.network.dht.resolve_node_count, 1u32); + assert_eq!(inner.network.dht.resolve_node_fanout, 4u32); + assert_eq!(inner.network.dht.get_value_timeout_ms, 10_000u32); + assert_eq!(inner.network.dht.get_value_count, 3u32); + assert_eq!(inner.network.dht.get_value_fanout, 4u32); + assert_eq!(inner.network.dht.set_value_timeout_ms, 10_000u32); + assert_eq!(inner.network.dht.set_value_count, 5u32); + assert_eq!(inner.network.dht.set_value_fanout, 4u32); assert_eq!(inner.network.dht.min_peer_count, 20u32); assert_eq!(inner.network.dht.min_peer_refresh_time_ms, 2_000u32); assert_eq!( diff --git a/veilid-core/src/tests/common/test_veilid_core.rs b/veilid-core/src/tests/common/test_veilid_core.rs index eab1dde3..e1f20f3e 100644 --- a/veilid-core/src/tests/common/test_veilid_core.rs +++ b/veilid-core/src/tests/common/test_veilid_core.rs @@ -42,169 +42,7 @@ pub async fn test_attach_detach() { api.shutdown().await; } -pub async fn test_signed_node_info() { - info!("--- test_signed_node_info ---"); - - let (update_callback, config_callback) = setup_veilid_core(); - let api = api_startup(update_callback, config_callback) - .await - .expect("startup failed"); - - let crypto = api.crypto().unwrap(); - for ck in VALID_CRYPTO_KINDS { - let vcrypto = crypto.get(ck).unwrap(); - - // Test direct - let node_info = NodeInfo { - network_class: NetworkClass::InboundCapable, - outbound_protocols: ProtocolTypeSet::all(), - address_types: AddressTypeSet::all(), - envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), - crypto_support: VALID_CRYPTO_KINDS.to_vec(), - dial_info_detail_list: vec![DialInfoDetail { - class: DialInfoClass::Mapped, - dial_info: DialInfo::udp(SocketAddress::default()), - }], - }; - - // Test correct validation - let keypair = vcrypto.generate_keypair(); - let sni = SignedDirectNodeInfo::make_signatures( - crypto.clone(), - vec![TypedKeyPair::new(ck, keypair)], - node_info.clone(), - ) - .unwrap(); - let mut tks: TypedKeySet = TypedKey::new(ck, keypair.key).into(); - let oldtkslen = tks.len(); - let _ = SignedDirectNodeInfo::new( - crypto.clone(), - &mut tks, - node_info.clone(), - sni.timestamp, - sni.signatures.clone(), - ) - .unwrap(); - assert_eq!(tks.len(), oldtkslen); - assert_eq!(tks.len(), sni.signatures.len()); - - // Test incorrect validation - let keypair1 = vcrypto.generate_keypair(); - let mut tks1: TypedKeySet = TypedKey::new(ck, keypair1.key).into(); - let oldtks1len = tks1.len(); - let _ = SignedDirectNodeInfo::new( - crypto.clone(), - &mut tks1, - node_info.clone(), - sni.timestamp, - sni.signatures.clone(), - ) - .unwrap_err(); - assert_eq!(tks1.len(), oldtks1len); - assert_eq!(tks1.len(), sni.signatures.len()); - - // Test unsupported cryptosystem validation - let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); - let mut tksfake: TypedKeySet = TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); - let mut sigsfake = sni.signatures.clone(); - 
sigsfake.push(TypedSignature::new(fake_crypto_kind, Signature::default())); - tksfake.add(TypedKey::new(ck, keypair.key)); - let sdnifake = SignedDirectNodeInfo::new( - crypto.clone(), - &mut tksfake, - node_info.clone(), - sni.timestamp, - sigsfake.clone(), - ) - .unwrap(); - assert_eq!(tksfake.len(), 1); - assert_eq!(sdnifake.signatures.len(), sigsfake.len()); - - // Test relayed - let node_info2 = NodeInfo { - network_class: NetworkClass::OutboundOnly, - outbound_protocols: ProtocolTypeSet::all(), - address_types: AddressTypeSet::all(), - envelope_support: VALID_ENVELOPE_VERSIONS.to_vec(), - crypto_support: VALID_CRYPTO_KINDS.to_vec(), - dial_info_detail_list: vec![DialInfoDetail { - class: DialInfoClass::Blocked, - dial_info: DialInfo::udp(SocketAddress::default()), - }], - }; - - // Test correct validation - let keypair2 = vcrypto.generate_keypair(); - let mut tks2: TypedKeySet = TypedKey::new(ck, keypair2.key).into(); - let oldtks2len = tks2.len(); - - let sni2 = SignedRelayedNodeInfo::make_signatures( - crypto.clone(), - vec![TypedKeyPair::new(ck, keypair2)], - node_info2.clone(), - tks.clone(), - sni.clone(), - ) - .unwrap(); - let _ = SignedRelayedNodeInfo::new( - crypto.clone(), - &mut tks2, - node_info2.clone(), - tks.clone(), - sni.clone(), - sni2.timestamp, - sni2.signatures.clone(), - ) - .unwrap(); - - assert_eq!(tks2.len(), oldtks2len); - assert_eq!(tks2.len(), sni2.signatures.len()); - - // Test incorrect validation - let keypair3 = vcrypto.generate_keypair(); - let mut tks3: TypedKeySet = TypedKey::new(ck, keypair3.key).into(); - let oldtks3len = tks3.len(); - - let _ = SignedRelayedNodeInfo::new( - crypto.clone(), - &mut tks3, - node_info2.clone(), - tks.clone(), - sni.clone(), - sni2.timestamp, - sni2.signatures.clone(), - ) - .unwrap_err(); - - assert_eq!(tks3.len(), oldtks3len); - assert_eq!(tks3.len(), sni2.signatures.len()); - - // Test unsupported cryptosystem validation - let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]); - let mut tksfake3: TypedKeySet = - TypedKey::new(fake_crypto_kind, PublicKey::default()).into(); - let mut sigsfake3 = sni2.signatures.clone(); - sigsfake3.push(TypedSignature::new(fake_crypto_kind, Signature::default())); - tksfake3.add(TypedKey::new(ck, keypair2.key)); - let srnifake = SignedRelayedNodeInfo::new( - crypto.clone(), - &mut tksfake3, - node_info2.clone(), - tks.clone(), - sni.clone(), - sni2.timestamp, - sigsfake3.clone(), - ) - .unwrap(); - assert_eq!(tksfake3.len(), 1); - assert_eq!(srnifake.signatures.len(), sigsfake3.len()); - } - - api.shutdown().await; -} - pub async fn test_all() { test_startup_shutdown().await; test_attach_detach().await; - test_signed_node_info().await; } diff --git a/veilid-core/src/tests/mod.rs b/veilid-core/src/tests/mod.rs index 2a050ac5..3ef5396d 100644 --- a/veilid-core/src/tests/mod.rs +++ b/veilid-core/src/tests/mod.rs @@ -12,3 +12,6 @@ use super::*; pub use common::*; pub use crypto::tests::*; pub use network_manager::tests::*; +pub use routing_table::tests::test_serialize as test_routing_table_serialize; +pub use table_store::tests::*; +pub use veilid_api::tests::*; diff --git a/veilid-core/src/tests/native/mod.rs b/veilid-core/src/tests/native/mod.rs index ccb7d420..f2c5ab07 100644 --- a/veilid-core/src/tests/native/mod.rs +++ b/veilid-core/src/tests/native/mod.rs @@ -2,16 +2,20 @@ #![cfg(not(target_arch = "wasm32"))] use crate::crypto::tests::*; use crate::network_manager::tests::*; +use crate::routing_table; +use crate::table_store::tests::*; use crate::tests::common::*; +use 
crate::veilid_api; use crate::*; /////////////////////////////////////////////////////////////////////////// #[allow(dead_code)] pub async fn run_all_tests() { + // iOS and Android tests also run these. info!("TEST: test_host_interface"); test_host_interface::test_all().await; - info!("TEST: test_dht_key"); + info!("TEST: test_types"); test_types::test_all().await; info!("TEST: test_veilid_core"); test_veilid_core::test_all().await; @@ -19,6 +23,8 @@ pub async fn run_all_tests() { test_veilid_config::test_all().await; info!("TEST: test_connection_table"); test_connection_table::test_all().await; + info!("TEST: test_signed_node_info"); + test_signed_node_info::test_all().await; info!("TEST: test_table_store"); test_table_store::test_all().await; info!("TEST: test_protected_store"); @@ -27,6 +33,10 @@ pub async fn run_all_tests() { test_crypto::test_all().await; info!("TEST: test_envelope_receipt"); test_envelope_receipt::test_all().await; + info!("TEST: veilid_api::test_serialize"); + veilid_api::tests::test_serialize_rkyv::test_all().await; + info!("TEST: routing_table::test_serialize"); + routing_table::tests::test_serialize::test_all().await; info!("Finished unit tests"); } @@ -116,6 +126,15 @@ cfg_if! { }) } + #[test] + #[serial] + fn run_test_signed_node_info() { + setup(); + block_on(async { + test_signed_node_info::test_all().await; + }) + } + #[test] #[serial] fn run_test_table_store() { @@ -152,5 +171,22 @@ cfg_if! { }) } + #[test] + #[serial] + fn run_test_serialize_rkyv() { + setup(); + block_on(async { + veilid_api::tests::test_serialize_rkyv::test_all().await; + }) + } + + #[test] + #[serial] + fn run_test_routing_table_serialize() { + setup(); + block_on(async { + routing_table::tests::test_serialize::test_all().await; + }) + } } } diff --git a/veilid-core/src/veilid_api/api.rs b/veilid-core/src/veilid_api/api.rs index ec50636e..ab40f11f 100644 --- a/veilid-core/src/veilid_api/api.rs +++ b/veilid-core/src/veilid_api/api.rs @@ -49,75 +49,82 @@ impl VeilidAPI { //////////////////////////////////////////////////////////////// // Accessors - pub fn config(&self) -> Result { + pub fn config(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.config.clone()); } Err(VeilidAPIError::NotInitialized) } - pub fn crypto(&self) -> Result { + pub fn crypto(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.crypto.clone()); } Err(VeilidAPIError::NotInitialized) } - pub fn table_store(&self) -> Result { + pub fn table_store(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.table_store.clone()); } Err(VeilidAPIError::not_initialized()) } - pub fn block_store(&self) -> Result { + pub fn block_store(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.block_store.clone()); } Err(VeilidAPIError::not_initialized()) } - pub fn protected_store(&self) -> Result { + pub fn protected_store(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.protected_store.clone()); } Err(VeilidAPIError::not_initialized()) } - pub fn attachment_manager(&self) -> Result { + pub fn attachment_manager(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.attachment_manager.clone()); } Err(VeilidAPIError::not_initialized()) } - pub fn 
network_manager(&self) -> Result { + pub fn network_manager(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.attachment_manager.network_manager()); } Err(VeilidAPIError::not_initialized()) } - pub fn rpc_processor(&self) -> Result { + pub fn rpc_processor(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.attachment_manager.network_manager().rpc_processor()); } Err(VeilidAPIError::NotInitialized) } - pub fn routing_table(&self) -> Result { + pub fn routing_table(&self) -> VeilidAPIResult { let inner = self.inner.lock(); if let Some(context) = &inner.context { return Ok(context.attachment_manager.network_manager().routing_table()); } Err(VeilidAPIError::NotInitialized) } + pub fn storage_manager(&self) -> VeilidAPIResult { + let inner = self.inner.lock(); + if let Some(context) = &inner.context { + return Ok(context.storage_manager.clone()); + } + Err(VeilidAPIError::NotInitialized) + } //////////////////////////////////////////////////////////////// // Attach/Detach /// Get a full copy of the current state - pub async fn get_state(&self) -> Result { + pub async fn get_state(&self) -> VeilidAPIResult { let attachment_manager = self.attachment_manager()?; let network_manager = attachment_manager.network_manager(); let config = self.config()?; @@ -135,7 +142,7 @@ impl VeilidAPI { /// Connect to the network #[instrument(level = "debug", err, skip_all)] - pub async fn attach(&self) -> Result<(), VeilidAPIError> { + pub async fn attach(&self) -> VeilidAPIResult<()> { let attachment_manager = self.attachment_manager()?; if !attachment_manager.attach().await { apibail_generic!("Already attached"); @@ -145,7 +152,7 @@ impl VeilidAPI { /// Disconnect from the network #[instrument(level = "debug", err, skip_all)] - pub async fn detach(&self) -> Result<(), VeilidAPIError> { + pub async fn detach(&self) -> VeilidAPIResult<()> { let attachment_manager = self.attachment_manager()?; if !attachment_manager.detach().await { apibail_generic!("Already detached"); @@ -168,7 +175,7 @@ impl VeilidAPI { /// Returns a route id and a publishable 'blob' with the route encrypted with each crypto kind /// Those nodes importing the blob will have their choice of which crypto kind to use #[instrument(level = "debug", skip(self))] - pub async fn new_private_route(&self) -> Result<(RouteId, Vec), VeilidAPIError> { + pub async fn new_private_route(&self) -> VeilidAPIResult<(RouteId, Vec)> { self.new_custom_private_route( &VALID_CRYPTO_KINDS, Stability::default(), @@ -184,7 +191,7 @@ impl VeilidAPI { crypto_kinds: &[CryptoKind], stability: Stability, sequencing: Sequencing, - ) -> Result<(RouteId, Vec), VeilidAPIError> { + ) -> VeilidAPIResult<(RouteId, Vec)> { let default_route_hop_count: usize = { let config = self.config()?; let c = config.get(); @@ -231,14 +238,14 @@ impl VeilidAPI { } #[instrument(level = "debug", skip(self))] - pub fn import_remote_private_route(&self, blob: Vec) -> Result { + pub fn import_remote_private_route(&self, blob: Vec) -> VeilidAPIResult { let rss = self.routing_table()?.route_spec_store(); rss.import_remote_private_route(blob) .map_err(|e| VeilidAPIError::invalid_argument(e, "blob", "private route blob")) } #[instrument(level = "debug", skip(self))] - pub fn release_private_route(&self, route_id: RouteId) -> Result<(), VeilidAPIError> { + pub fn release_private_route(&self, route_id: RouteId) -> VeilidAPIResult<()> { let rss = 
self.routing_table()?.route_spec_store(); if !rss.release_route(route_id) { apibail_invalid_argument!("release_private_route", "key", route_id); @@ -250,11 +257,7 @@ impl VeilidAPI { // App Calls #[instrument(level = "debug", skip(self))] - pub async fn app_call_reply( - &self, - id: OperationId, - message: Vec, - ) -> Result<(), VeilidAPIError> { + pub async fn app_call_reply(&self, id: OperationId, message: Vec) -> VeilidAPIResult<()> { let rpc_processor = self.rpc_processor()?; rpc_processor .app_call_reply(id, message) @@ -270,7 +273,7 @@ impl VeilidAPI { &self, _endpoint_mode: TunnelMode, _depth: u8, - ) -> Result { + ) -> VeilidAPIResult { panic!("unimplemented"); } @@ -280,12 +283,12 @@ impl VeilidAPI { _endpoint_mode: TunnelMode, _depth: u8, _partial_tunnel: PartialTunnel, - ) -> Result { + ) -> VeilidAPIResult { panic!("unimplemented"); } #[instrument(level = "debug", err, skip(self))] - pub async fn cancel_tunnel(&self, _tunnel_id: TunnelId) -> Result { + pub async fn cancel_tunnel(&self, _tunnel_id: TunnelId) -> VeilidAPIResult { panic!("unimplemented"); } } diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index c744fa3b..7151b680 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -3,6 +3,7 @@ use super::*; use data_encoding::BASE64URL_NOPAD; +use network_manager::*; use routing_table::*; #[derive(Default, Debug)] @@ -304,7 +305,7 @@ fn get_debug_argument Option>( context: &str, argument: &str, getter: G, -) -> Result { +) -> VeilidAPIResult { let Some(val) = getter(value) else { apibail_invalid_argument!(context, argument, value); }; @@ -316,7 +317,7 @@ fn get_debug_argument_at Option>( context: &str, argument: &str, getter: G, -) -> Result { +) -> VeilidAPIResult { if pos >= debug_args.len() { apibail_missing_argument!(context, argument); } @@ -328,7 +329,7 @@ fn get_debug_argument_at Option>( } impl VeilidAPI { - async fn debug_buckets(&self, args: String) -> Result { + async fn debug_buckets(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); let mut min_state = BucketEntryState::Unreliable; if args.len() == 1 { @@ -344,19 +345,19 @@ impl VeilidAPI { Ok(routing_table.debug_info_buckets(min_state)) } - async fn debug_dialinfo(&self, _args: String) -> Result { + async fn debug_dialinfo(&self, _args: String) -> VeilidAPIResult { // Dump routing table dialinfo let routing_table = self.network_manager()?.routing_table(); Ok(routing_table.debug_info_dialinfo()) } - async fn debug_txtrecord(&self, _args: String) -> Result { + async fn debug_txtrecord(&self, _args: String) -> VeilidAPIResult { // Dump routing table txt record let routing_table = self.network_manager()?.routing_table(); Ok(routing_table.debug_info_txtrecord().await) } - async fn debug_entries(&self, args: String) -> Result { + async fn debug_entries(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); let mut min_state = BucketEntryState::Unreliable; @@ -373,7 +374,7 @@ impl VeilidAPI { Ok(routing_table.debug_info_entries(min_state)) } - async fn debug_entry(&self, args: String) -> Result { + async fn debug_entry(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); let routing_table = self.network_manager()?.routing_table(); @@ -390,13 +391,13 @@ impl VeilidAPI { Ok(routing_table.debug_info_entry(node_ref)) } - async fn debug_nodeinfo(&self, _args: 
String) -> Result { + async fn debug_nodeinfo(&self, _args: String) -> VeilidAPIResult { // Dump routing table entry let routing_table = self.network_manager()?.routing_table(); Ok(routing_table.debug_info_nodeinfo()) } - async fn debug_config(&self, args: String) -> Result { + async fn debug_config(&self, args: String) -> VeilidAPIResult { let config = self.config()?; let args = args.trim_start(); if args.is_empty() { @@ -425,7 +426,7 @@ impl VeilidAPI { Ok("Config value set".to_owned()) } - async fn debug_restart(&self, args: String) -> Result { + async fn debug_restart(&self, args: String) -> VeilidAPIResult { let args = args.trim_start(); if args.is_empty() { apibail_missing_argument!("debug_restart", "arg_0"); @@ -451,7 +452,7 @@ impl VeilidAPI { } } - async fn debug_purge(&self, args: String) -> Result { + async fn debug_purge(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); if !args.is_empty() { if args[0] == "buckets" { @@ -503,7 +504,7 @@ impl VeilidAPI { } } - async fn debug_attach(&self, _args: String) -> Result { + async fn debug_attach(&self, _args: String) -> VeilidAPIResult { if !matches!( self.get_state().await?.attachment.state, AttachmentState::Detached @@ -516,7 +517,7 @@ impl VeilidAPI { Ok("Attached".to_owned()) } - async fn debug_detach(&self, _args: String) -> Result { + async fn debug_detach(&self, _args: String) -> VeilidAPIResult { if matches!( self.get_state().await?.attachment.state, AttachmentState::Detaching @@ -529,7 +530,7 @@ impl VeilidAPI { Ok("Detached".to_owned()) } - async fn debug_contact(&self, args: String) -> Result { + async fn debug_contact(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); let network_manager = self.network_manager()?; @@ -550,7 +551,7 @@ impl VeilidAPI { Ok(format!("{:#?}", cm)) } - async fn debug_ping(&self, args: String) -> Result { + async fn debug_ping(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; let routing_table = netman.routing_table(); let rpc = netman.rpc_processor(); @@ -592,7 +593,7 @@ impl VeilidAPI { Ok(format!("{:#?}", out)) } - async fn debug_route_allocate(&self, args: Vec) -> Result { + async fn debug_route_allocate(&self, args: Vec) -> VeilidAPIResult { // [ord|*ord] [rel] [] [in|out] [avoid_node_id] let netman = self.network_manager()?; @@ -651,7 +652,7 @@ impl VeilidAPI { Ok(out) } - async fn debug_route_release(&self, args: Vec) -> Result { + async fn debug_route_release(&self, args: Vec) -> VeilidAPIResult { // let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -683,7 +684,7 @@ impl VeilidAPI { Ok(out) } - async fn debug_route_publish(&self, args: Vec) -> Result { + async fn debug_route_publish(&self, args: Vec) -> VeilidAPIResult { // [full] let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -735,7 +736,7 @@ impl VeilidAPI { Ok(out) } - async fn debug_route_unpublish(&self, args: Vec) -> Result { + async fn debug_route_unpublish(&self, args: Vec) -> VeilidAPIResult { // let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -757,7 +758,7 @@ impl VeilidAPI { }; Ok(out) } - async fn debug_route_print(&self, args: Vec) -> Result { + async fn debug_route_print(&self, args: Vec) -> VeilidAPIResult { // let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -776,7 +777,7 @@ impl VeilidAPI { None => Ok("Route does not 
exist".to_owned()), } } - async fn debug_route_list(&self, _args: Vec) -> Result { + async fn debug_route_list(&self, _args: Vec) -> VeilidAPIResult { // let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -799,7 +800,7 @@ impl VeilidAPI { Ok(out) } - async fn debug_route_import(&self, args: Vec) -> Result { + async fn debug_route_import(&self, args: Vec) -> VeilidAPIResult { // let blob = get_debug_argument_at(&args, 1, "debug_route", "blob", get_string)?; @@ -819,7 +820,7 @@ impl VeilidAPI { return Ok(out); } - async fn debug_route_test(&self, args: Vec) -> Result { + async fn debug_route_test(&self, args: Vec) -> VeilidAPIResult { // let netman = self.network_manager()?; let routing_table = netman.routing_table(); @@ -847,7 +848,7 @@ impl VeilidAPI { return Ok(out); } - async fn debug_route(&self, args: String) -> Result { + async fn debug_route(&self, args: String) -> VeilidAPIResult { let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); let command = get_debug_argument_at(&args, 0, "debug_route", "command", get_string)?; @@ -873,7 +874,40 @@ impl VeilidAPI { } } - pub async fn debug_help(&self, _args: String) -> Result { + async fn debug_record_list(&self, args: Vec) -> VeilidAPIResult { + // + let storage_manager = self.storage_manager()?; + + let scope = get_debug_argument_at(&args, 1, "debug_record_list", "scope", get_string)?; + let out = match scope.as_str() { + "local" => { + let mut out = format!("Local Records:\n"); + out += &storage_manager.debug_local_records().await; + out + } + "remote" => { + let mut out = format!("Remote Records:\n"); + out += &storage_manager.debug_remote_records().await; + out + } + _ => "Invalid scope\n".to_owned(), + }; + return Ok(out); + } + + async fn debug_record(&self, args: String) -> VeilidAPIResult { + let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); + + let command = get_debug_argument_at(&args, 0, "debug_record", "command", get_string)?; + + if command == "list" { + self.debug_record_list(args).await + } else { + Ok(">>> Unknown command\n".to_owned()) + } + } + + pub async fn debug_help(&self, _args: String) -> VeilidAPIResult { Ok(r#">>> Debug commands: help buckets [dead|reliable] @@ -896,6 +930,7 @@ impl VeilidAPI { list import test + record list is: * direct: [+][] @@ -912,7 +947,7 @@ impl VeilidAPI { .to_owned()) } - pub async fn debug(&self, args: String) -> Result { + pub async fn debug(&self, args: String) -> VeilidAPIResult { let res = { let args = args.trim_start(); if args.is_empty() { @@ -952,6 +987,8 @@ impl VeilidAPI { self.debug_restart(rest).await } else if arg == "route" { self.debug_route(rest).await + } else if arg == "record" { + self.debug_record(rest).await } else { Err(VeilidAPIError::generic("Unknown debug command")) } diff --git a/veilid-core/src/veilid_api/error.rs b/veilid-core/src/veilid_api/error.rs index 7e04b947..778b4830 100644 --- a/veilid-core/src/veilid_api/error.rs +++ b/veilid-core/src/veilid_api/error.rs @@ -1,5 +1,13 @@ use super::*; +#[allow(unused_macros)] +#[macro_export] +macro_rules! apibail_not_initialized { + () => { + return Err(VeilidAPIError::not_initialized()) + }; +} + #[allow(unused_macros)] #[macro_export] macro_rules! apibail_timeout { @@ -64,6 +72,14 @@ macro_rules! apibail_no_connection { }; } +#[allow(unused_macros)] +#[macro_export] +macro_rules! apibail_key_not_found { + ($x:expr) => { + return Err(VeilidAPIError::key_not_found($x)) + }; +} + #[allow(unused_macros)] #[macro_export] macro_rules! 
apibail_invalid_target { @@ -119,8 +135,8 @@ pub enum VeilidAPIError { InvalidTarget, #[error("No connection: {message}")] NoConnection { message: String }, - #[error("No peer info: {node_id}")] - NoPeerInfo { node_id: TypedKey }, + #[error("Key not found: {key}")] + KeyNotFound { key: TypedKey }, #[error("Internal: {message}")] Internal { message: String }, #[error("Unimplemented: {message}")] @@ -163,8 +179,8 @@ impl VeilidAPIError { message: msg.to_string(), } } - pub fn no_peer_info(node_id: TypedKey) -> Self { - Self::NoPeerInfo { node_id } + pub fn key_not_found(key: TypedKey) -> Self { + Self::KeyNotFound { key } } pub fn internal(msg: T) -> Self { Self::Internal { @@ -205,3 +221,36 @@ impl VeilidAPIError { } } } + +pub type VeilidAPIResult = Result; + +impl From for VeilidAPIError { + fn from(e: std::io::Error) -> Self { + match e.kind() { + std::io::ErrorKind::TimedOut => VeilidAPIError::timeout(), + std::io::ErrorKind::ConnectionRefused => VeilidAPIError::no_connection(e.to_string()), + std::io::ErrorKind::ConnectionReset => VeilidAPIError::no_connection(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::HostUnreachable => VeilidAPIError::no_connection(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::NetworkUnreachable => VeilidAPIError::no_connection(e.to_string()), + std::io::ErrorKind::ConnectionAborted => VeilidAPIError::no_connection(e.to_string()), + std::io::ErrorKind::NotConnected => VeilidAPIError::no_connection(e.to_string()), + std::io::ErrorKind::AddrInUse => VeilidAPIError::no_connection(e.to_string()), + std::io::ErrorKind::AddrNotAvailable => VeilidAPIError::no_connection(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::NetworkDown => VeilidAPIError::no_connection(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::ReadOnlyFilesystem => VeilidAPIError::internal(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::NotSeekable => VeilidAPIError::internal(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::FilesystemQuotaExceeded => VeilidAPIError::internal(e.to_string()), + #[cfg(feature = "io_error_more")] + std::io::ErrorKind::Deadlock => VeilidAPIError::internal(e.to_string()), + std::io::ErrorKind::Unsupported => VeilidAPIError::internal(e.to_string()), + std::io::ErrorKind::OutOfMemory => VeilidAPIError::internal(e.to_string()), + _ => VeilidAPIError::generic(e.to_string()), + } + } +} diff --git a/veilid-core/src/veilid_api/mod.rs b/veilid-core/src/veilid_api/mod.rs index 02e1e308..d046b71f 100644 --- a/veilid-core/src/veilid_api/mod.rs +++ b/veilid-core/src/veilid_api/mod.rs @@ -1,6 +1,5 @@ #![allow(dead_code)] -mod aligned_u64; mod api; mod debug; mod error; @@ -8,7 +7,8 @@ mod routing_context; mod serialize_helpers; mod types; -pub use aligned_u64::*; +pub mod tests; + pub use api::*; pub use debug::*; pub use error::*; @@ -22,17 +22,15 @@ pub use core::str::FromStr; pub use crypto::*; pub use intf::BlockStore; pub use intf::ProtectedStore; -pub use intf::{TableDB, TableDBTransaction, TableStore}; pub use network_manager::NetworkManager; pub use routing_table::{NodeRef, NodeRefBase}; +pub use table_store::{TableDB, TableDBTransaction, TableStore}; use crate::*; use core::fmt; use core_context::{api_shutdown, VeilidCoreContext}; -use enumset::*; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; -use routing_table::{RouteSpecStore, RoutingTable}; +use routing_table::{Direction, 
RouteSpecStore, RoutingTable}; use rpc_processor::*; -use serde::*; +use storage_manager::StorageManager; ///////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index 981fb197..952a42b7 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -4,7 +4,7 @@ use super::*; #[derive(Clone, Debug)] pub enum Target { - NodeId(PublicKey), // Node by any of its public keys + NodeId(TypedKey), // Node by its public key PrivateRoute(RouteId), // Remote private route by its id } @@ -45,11 +45,11 @@ impl RoutingContext { } } - pub fn with_privacy(self) -> Result { + pub fn with_privacy(self) -> VeilidAPIResult { self.with_custom_privacy(Stability::default()) } - pub fn with_custom_privacy(self, stability: Stability) -> Result { + pub fn with_custom_privacy(self, stability: Stability) -> VeilidAPIResult { let config = self.api.config()?; let c = config.get(); @@ -96,16 +96,16 @@ impl RoutingContext { self.api.clone() } - async fn get_destination( - &self, - target: Target, - ) -> Result { + async fn get_destination(&self, target: Target) -> VeilidAPIResult { let rpc_processor = self.api.rpc_processor()?; match target { Target::NodeId(node_id) => { // Resolve node - let mut nr = match rpc_processor.resolve_node(node_id).await { + let mut nr = match rpc_processor + .resolve_node(node_id, self.unlocked_inner.safety_selection) + .await + { Ok(Some(nr)) => nr, Ok(None) => apibail_invalid_target!(), Err(e) => return Err(e.into()), @@ -138,11 +138,7 @@ impl RoutingContext { // App-level Messaging #[instrument(level = "debug", err, skip(self))] - pub async fn app_call( - &self, - target: Target, - request: Vec, - ) -> Result, VeilidAPIError> { + pub async fn app_call(&self, target: Target, request: Vec) -> VeilidAPIResult> { let rpc_processor = self.api.rpc_processor()?; // Get destination @@ -167,11 +163,7 @@ impl RoutingContext { } #[instrument(level = "debug", err, skip(self))] - pub async fn app_message( - &self, - target: Target, - message: Vec, - ) -> Result<(), VeilidAPIError> { + pub async fn app_message(&self, target: Target, message: Vec) -> VeilidAPIResult<()> { let rpc_processor = self.api.rpc_processor()?; // Get destination @@ -195,51 +187,114 @@ impl RoutingContext { } /////////////////////////////////// - /// DHT Values + /// DHT Records - pub async fn get_value( + /// Creates a new DHT record a specified crypto kind and schema + /// Returns the newly allocated DHT record's key if successful. The records is considered 'open' after the create operation succeeds. + pub async fn create_dht_record( &self, - _key: TypedKey, - _subkey: ValueSubkey, - ) -> Result { - panic!("unimplemented"); + kind: CryptoKind, + schema: DHTSchema, + ) -> VeilidAPIResult { + let storage_manager = self.api.storage_manager()?; + storage_manager + .create_record(kind, schema, self.unlocked_inner.safety_selection) + .await } - pub async fn set_value( + /// Opens a DHT record at a specific key. Associates a secret if one is provided to provide writer capability. + /// Returns the DHT record descriptor for the opened record if successful + /// Records may only be opened or created . To re-open with a different routing context, first close the value. 
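+    ///
+    /// An illustrative usage sketch (not part of this change), assuming `rc` is a
+    /// RoutingContext and `key`/`owner` came from an earlier create_dht_record call:
+    /// ```ignore
+    /// let _desc = rc.open_dht_record(key, Some(owner)).await?;
+    /// let _value = rc.get_dht_value(key, 0, false).await?;
+    /// rc.close_dht_record(key).await?;
+    /// ```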
+ pub async fn open_dht_record( &self, - _key: TypedKey, - _subkey: ValueSubkey, - _value: ValueData, - ) -> Result { - panic!("unimplemented"); + key: TypedKey, + writer: Option, + ) -> VeilidAPIResult { + let storage_manager = self.api.storage_manager()?; + storage_manager + .open_record(key, writer, self.unlocked_inner.safety_selection) + .await } - pub async fn watch_value( - &self, - _key: TypedKey, - _subkeys: &[ValueSubkeyRange], - _expiration: Timestamp, - _count: u32, - ) -> Result { - panic!("unimplemented"); + /// Closes a DHT record at a specific key that was opened with create_dht_record or open_dht_record. + /// Closing a record allows you to re-open it with a different routing context + pub async fn close_dht_record(&self, key: TypedKey) -> VeilidAPIResult<()> { + let storage_manager = self.api.storage_manager()?; + storage_manager.close_record(key).await } - pub async fn cancel_watch_value( + /// Deletes a DHT record at a specific key. If the record is opened, it must be closed before it is deleted. + /// Deleting a record does not delete it from the network, but will remove the storage of the record + /// locally, and will prevent its value from being refreshed on the network by this node. + pub async fn delete_dht_record(&self, key: TypedKey) -> VeilidAPIResult<()> { + let storage_manager = self.api.storage_manager()?; + storage_manager.delete_record(key).await + } + + /// Gets the latest value of a subkey + /// May pull the latest value from the network, but by settings 'force_refresh' you can force a network data refresh + /// Returns None if the value subkey has not yet been set + /// Returns Some(data) if the value subkey has valid data + pub async fn get_dht_value( &self, - _key: TypedKey, - _subkeys: &[ValueSubkeyRange], - ) -> Result { - panic!("unimplemented"); + key: TypedKey, + subkey: ValueSubkey, + force_refresh: bool, + ) -> VeilidAPIResult> { + let storage_manager = self.api.storage_manager()?; + storage_manager.get_value(key, subkey, force_refresh).await + } + + /// Pushes a changed subkey value to the network + /// Returns None if the value was successfully put + /// Returns Some(data) if the value put was older than the one available on the network + pub async fn set_dht_value( + &self, + key: TypedKey, + subkey: ValueSubkey, + data: Vec, + ) -> VeilidAPIResult> { + let storage_manager = self.api.storage_manager()?; + storage_manager.set_value(key, subkey, data).await + } + + /// Watches changes to an opened or created value + /// Changes to subkeys within the subkey range are returned via a ValueChanged callback + /// If the subkey range is empty, all subkey changes are considered + /// Expiration can be infinite to keep the watch for the maximum amount of time + /// Return value upon success is the amount of time allowed for the watch + pub async fn watch_dht_values( + &self, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + expiration: Timestamp, + count: u32, + ) -> VeilidAPIResult { + let storage_manager = self.api.storage_manager()?; + storage_manager + .watch_values(key, subkeys, expiration, count) + .await + } + + /// Cancels a watch early + /// This is a convenience function that cancels watching all subkeys in a range + pub async fn cancel_dht_watch( + &self, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + ) -> VeilidAPIResult { + let storage_manager = self.api.storage_manager()?; + storage_manager.cancel_watch_values(key, subkeys).await } /////////////////////////////////// /// Block Store - pub async fn find_block(&self, _block_id: 
PublicKey) -> Result, VeilidAPIError> { + pub async fn find_block(&self, _block_id: PublicKey) -> VeilidAPIResult> { panic!("unimplemented"); } - pub async fn supply_block(&self, _block_id: PublicKey) -> Result { + pub async fn supply_block(&self, _block_id: PublicKey) -> VeilidAPIResult { panic!("unimplemented"); } } diff --git a/veilid-core/src/veilid_api/serialize_helpers/mod.rs b/veilid-core/src/veilid_api/serialize_helpers/mod.rs new file mode 100644 index 00000000..5a3805fd --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/mod.rs @@ -0,0 +1,14 @@ +mod rkyv_enum_set; +mod rkyv_range_set_blaze; +pub mod serialize_arc; +mod serialize_json; +pub mod serialize_range_set_blaze; +mod veilid_rkyv; + +use super::*; +use core::fmt::Debug; + +pub use rkyv_enum_set::*; +pub use rkyv_range_set_blaze::*; +pub use serialize_json::*; +pub use veilid_rkyv::*; diff --git a/veilid-core/src/veilid_api/serialize_helpers/rkyv_enum_set.rs b/veilid-core/src/veilid_api/serialize_helpers/rkyv_enum_set.rs new file mode 100644 index 00000000..a3422b48 --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/rkyv_enum_set.rs @@ -0,0 +1,53 @@ +use super::*; + +pub struct RkyvEnumSet; + +impl rkyv::with::ArchiveWith> for RkyvEnumSet +where + T: EnumSetType + EnumSetTypeWithRepr, + ::Repr: rkyv::Archive, +{ + type Archived = rkyv::Archived<::Repr>; + type Resolver = rkyv::Resolver<::Repr>; + + #[inline] + unsafe fn resolve_with( + field: &EnumSet, + pos: usize, + resolver: Self::Resolver, + out: *mut Self::Archived, + ) { + let r = field.as_repr(); + r.resolve(pos, resolver, out); + } +} + +impl rkyv::with::SerializeWith, S> for RkyvEnumSet +where + S: rkyv::Fallible + ?Sized, + T: EnumSetType + EnumSetTypeWithRepr, + ::Repr: rkyv::Serialize, +{ + fn serialize_with(field: &EnumSet, serializer: &mut S) -> Result { + let r = field.as_repr(); + r.serialize(serializer) + } +} + +impl + rkyv::with::DeserializeWith::Repr>, EnumSet, D> + for RkyvEnumSet +where + D: rkyv::Fallible + ?Sized, + T: EnumSetType + EnumSetTypeWithRepr, + ::Repr: rkyv::Archive, + rkyv::Archived<::Repr>: + rkyv::Deserialize<::Repr, D>, +{ + fn deserialize_with( + field: &rkyv::Archived<::Repr>, + deserializer: &mut D, + ) -> Result, D::Error> { + Ok(EnumSet::::from_repr(field.deserialize(deserializer)?)) + } +} diff --git a/veilid-core/src/veilid_api/serialize_helpers/rkyv_range_set_blaze.rs b/veilid-core/src/veilid_api/serialize_helpers/rkyv_range_set_blaze.rs new file mode 100644 index 00000000..67388e4a --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/rkyv_range_set_blaze.rs @@ -0,0 +1,73 @@ +use super::*; + +use range_set_blaze::*; + +pub struct RkyvRangeSetBlaze; + +impl rkyv::with::ArchiveWith> for RkyvRangeSetBlaze +where + T: rkyv::Archive + Integer, +{ + type Archived = rkyv::Archived>; + type Resolver = rkyv::Resolver>; + + #[inline] + unsafe fn resolve_with( + field: &RangeSetBlaze, + pos: usize, + resolver: Self::Resolver, + out: *mut Self::Archived, + ) { + let mut r = Vec::::with_capacity(field.ranges_len() * 2); + for range in field.ranges() { + r.push(*range.start()); + r.push(*range.end()); + } + r.resolve(pos, resolver, out); + } +} + +impl rkyv::with::SerializeWith, S> for RkyvRangeSetBlaze +where + S: rkyv::Fallible + ?Sized, + Vec: rkyv::Serialize, + T: rkyv::Archive + Integer, +{ + fn serialize_with( + field: &RangeSetBlaze, + serializer: &mut S, + ) -> Result { + let mut r = Vec::::with_capacity(field.ranges_len() * 2); + for range in field.ranges() { + r.push(*range.start()); + 
r.push(*range.end()); + } + r.serialize(serializer) + } +} + +impl rkyv::with::DeserializeWith>, RangeSetBlaze, D> + for RkyvRangeSetBlaze +where + D: rkyv::Fallible + ?Sized, + T: rkyv::Archive + Integer, + rkyv::Archived: rkyv::Deserialize, + D::Error: From, +{ + fn deserialize_with( + field: &rkyv::Archived>, + deserializer: &mut D, + ) -> Result, D::Error> { + let mut out = RangeSetBlaze::::new(); + if field.len() % 2 == 1 { + return Err("invalid range set length".to_owned().into()); + } + let f = field.as_slice(); + for i in 0..field.len() / 2 { + let l: T = f[i * 2].deserialize(deserializer)?; + let u: T = f[i * 2 + 1].deserialize(deserializer)?; + out.ranges_insert(l..=u); + } + Ok(out) + } +} diff --git a/veilid-core/src/veilid_api/serialize_helpers/serialize_arc.rs b/veilid-core/src/veilid_api/serialize_helpers/serialize_arc.rs new file mode 100644 index 00000000..cf8097b2 --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/serialize_arc.rs @@ -0,0 +1,12 @@ +use alloc::sync::Arc; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +pub fn serialize(v: &Arc, s: S) -> Result { + T::serialize(v.as_ref(), s) +} + +pub fn deserialize<'de, T: Deserialize<'de>, D: Deserializer<'de>>( + d: D, +) -> Result, D::Error> { + Ok(Arc::new(T::deserialize(d)?)) +} diff --git a/veilid-core/src/veilid_api/serialize_helpers.rs b/veilid-core/src/veilid_api/serialize_helpers/serialize_json.rs similarity index 50% rename from veilid-core/src/veilid_api/serialize_helpers.rs rename to veilid-core/src/veilid_api/serialize_helpers/serialize_json.rs index 91a3b283..5e98624c 100644 --- a/veilid-core/src/veilid_api/serialize_helpers.rs +++ b/veilid-core/src/veilid_api/serialize_helpers/serialize_json.rs @@ -1,16 +1,9 @@ use super::*; -pub use bytecheck::CheckBytes; -use core::fmt::Debug; -use rkyv::Archive as RkyvArchive; -use rkyv::Deserialize as RkyvDeserialize; -use rkyv::Serialize as RkyvSerialize; // Don't trace these functions as they are used in the transfer of API logs, which will recurse! // #[instrument(level = "trace", ret, err)] -pub fn deserialize_json<'a, T: de::Deserialize<'a> + Debug>( - arg: &'a str, -) -> Result { +pub fn deserialize_json<'a, T: de::Deserialize<'a> + Debug>(arg: &'a str) -> VeilidAPIResult { serde_json::from_str(arg).map_err(|e| VeilidAPIError::ParseError { message: e.to_string(), value: format!( @@ -24,7 +17,7 @@ pub fn deserialize_json<'a, T: de::Deserialize<'a> + Debug>( // #[instrument(level = "trace", ret, err)] pub fn deserialize_opt_json( arg: Option, -) -> Result { +) -> VeilidAPIResult { let arg = arg.as_ref().ok_or_else(|| VeilidAPIError::ParseError { message: "invalid null string".to_owned(), value: format!( @@ -117,95 +110,3 @@ pub mod opt_json_as_string { } } } - -pub mod arc_serialize { - use alloc::sync::Arc; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - pub fn serialize(v: &Arc, s: S) -> Result { - T::serialize(v.as_ref(), s) - } - - pub fn deserialize<'de, T: Deserialize<'de>, D: Deserializer<'de>>( - d: D, - ) -> Result, D::Error> { - Ok(Arc::new(T::deserialize(d)?)) - } -} - -pub fn to_rkyv(v: &T) -> EyreResult> -where - T: RkyvSerialize>, -{ - Ok(rkyv::to_bytes::(v) - .wrap_err("failed to freeze object")? 
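// --- Editor's sketch (not part of the patch): the flat [start, end, start, end, ...]
// layout that RkyvRangeSetBlaze above encodes to and decodes from, expressed with only
// the range_set_blaze crate so the shape is easy to see.
use range_set_blaze::RangeSetBlaze;

fn flatten_ranges(set: &RangeSetBlaze<u32>) -> Vec<u32> {
    let mut flat = Vec::with_capacity(set.ranges_len() * 2);
    for range in set.ranges() {
        flat.push(*range.start());
        flat.push(*range.end());
    }
    flat
}

fn unflatten_ranges(flat: &[u32]) -> RangeSetBlaze<u32> {
    // Inverse of flatten_ranges; assumes an even-length, pairwise-ordered input
    let mut set = RangeSetBlaze::new();
    for pair in flat.chunks_exact(2) {
        set.ranges_insert(pair[0]..=pair[1]);
    }
    set
}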
- .to_vec()) -} - -pub fn from_rkyv(v: Vec) -> EyreResult -where - T: RkyvArchive, - ::Archived: - for<'t> bytecheck::CheckBytes>, - ::Archived: - rkyv::Deserialize, -{ - match rkyv::from_bytes::(&v) { - Ok(v) => Ok(v), - Err(e) => { - bail!("failed to deserialize frozen object: {}", e); - } - } -} - -pub struct RkyvEnumSet; - -impl rkyv::with::ArchiveWith> for RkyvEnumSet -where - T: EnumSetType + EnumSetTypeWithRepr, - ::Repr: rkyv::Archive, -{ - type Archived = rkyv::Archived<::Repr>; - type Resolver = rkyv::Resolver<::Repr>; - - #[inline] - unsafe fn resolve_with( - field: &EnumSet, - pos: usize, - resolver: Self::Resolver, - out: *mut Self::Archived, - ) { - let r = field.as_repr(); - r.resolve(pos, resolver, out); - } -} - -impl rkyv::with::SerializeWith, S> for RkyvEnumSet -where - S: rkyv::Fallible + ?Sized, - T: EnumSetType + EnumSetTypeWithRepr, - ::Repr: rkyv::Serialize, -{ - fn serialize_with(field: &EnumSet, serializer: &mut S) -> Result { - let r = field.as_repr(); - r.serialize(serializer) - } -} - -impl - rkyv::with::DeserializeWith::Repr>, EnumSet, D> - for RkyvEnumSet -where - D: rkyv::Fallible + ?Sized, - T: EnumSetType + EnumSetTypeWithRepr, - ::Repr: rkyv::Archive, - rkyv::Archived<::Repr>: - rkyv::Deserialize<::Repr, D>, -{ - fn deserialize_with( - field: &rkyv::Archived<::Repr>, - deserializer: &mut D, - ) -> Result, D::Error> { - Ok(EnumSet::::from_repr(field.deserialize(deserializer)?)) - } -} diff --git a/veilid-core/src/veilid_api/serialize_helpers/serialize_range_set_blaze.rs b/veilid-core/src/veilid_api/serialize_helpers/serialize_range_set_blaze.rs new file mode 100644 index 00000000..5ef5fa46 --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/serialize_range_set_blaze.rs @@ -0,0 +1,60 @@ +use core::fmt; +use core::marker::PhantomData; +use range_set_blaze::*; +use serde::{ + de::SeqAccess, de::Visitor, ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer, +}; + +pub fn serialize( + v: &RangeSetBlaze, + s: S, +) -> Result { + let cnt = v.ranges_len() * 2; + let mut seq = s.serialize_seq(Some(cnt))?; + for range in v.ranges() { + seq.serialize_element(range.start())?; + seq.serialize_element(range.end())?; + } + seq.end() +} + +pub fn deserialize<'de, T: Integer + Deserialize<'de>, D: Deserializer<'de>>( + d: D, +) -> Result, D::Error> { + struct RangeSetBlazeVisitor { + marker: PhantomData, + } + + impl<'de, T> Visitor<'de> for RangeSetBlazeVisitor + where + T: Deserialize<'de> + Integer, + { + type Value = RangeSetBlaze; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a RangeSetBlaze") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let mut values = RangeSetBlaze::::new(); + + while let Some(start) = seq.next_element()? { + let Some(end) = seq.next_element()? 
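// --- Editor's sketch (not part of the patch): the serialize/deserialize pair in
// serialize_range_set_blaze.rs above follows serde's `with` module convention, so a
// field can opt into the flat-pair encoding like this. The struct is hypothetical and
// the module path assumes the re-export from serialize_helpers/mod.rs earlier in this
// patch.
use range_set_blaze::RangeSetBlaze;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct WatchedSubkeys {
    #[serde(with = "super::serialize_range_set_blaze")]
    subkeys: RangeSetBlaze<u32>,
}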
else { + break; + }; + values.ranges_insert(start..=end); + } + + Ok(values) + } + } + + let visitor = RangeSetBlazeVisitor { + marker: PhantomData, + }; + + d.deserialize_seq(visitor) +} diff --git a/veilid-core/src/veilid_api/serialize_helpers/veilid_rkyv.rs b/veilid-core/src/veilid_api/serialize_helpers/veilid_rkyv.rs new file mode 100644 index 00000000..cff3c7f8 --- /dev/null +++ b/veilid-core/src/veilid_api/serialize_helpers/veilid_rkyv.rs @@ -0,0 +1,151 @@ +use super::*; +use rkyv::ser::Serializer; + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +pub struct VeilidRkyvSerializer { + inner: S, +} + +impl VeilidRkyvSerializer { + pub fn into_inner(self) -> S { + self.inner + } +} + +impl rkyv::Fallible for VeilidRkyvSerializer { + type Error = VeilidRkyvError; +} + +impl rkyv::ser::ScratchSpace for VeilidRkyvSerializer { + unsafe fn push_scratch( + &mut self, + layout: core::alloc::Layout, + ) -> Result, Self::Error> { + self.inner + .push_scratch(layout) + .map_err(VeilidRkyvError::Inner) + } + unsafe fn pop_scratch( + &mut self, + ptr: core::ptr::NonNull, + layout: core::alloc::Layout, + ) -> Result<(), Self::Error> { + self.inner + .pop_scratch(ptr, layout) + .map_err(VeilidRkyvError::Inner) + } +} + +impl rkyv::ser::Serializer for VeilidRkyvSerializer { + #[inline] + fn pos(&self) -> usize { + self.inner.pos() + } + + #[inline] + fn write(&mut self, bytes: &[u8]) -> Result<(), Self::Error> { + self.inner.write(bytes).map_err(VeilidRkyvError::Inner) + } +} + +impl Default for VeilidRkyvSerializer { + fn default() -> Self { + Self { + inner: S::default(), + } + } +} + +pub type DefaultVeilidRkyvSerializer = + VeilidRkyvSerializer>; + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug, Default)] +pub struct VeilidSharedDeserializeMap { + inner: SharedDeserializeMap, +} + +impl VeilidSharedDeserializeMap { + #[inline] + pub fn new() -> Self { + Self { + inner: SharedDeserializeMap::new(), + } + } +} +impl rkyv::Fallible for VeilidSharedDeserializeMap { + type Error = VeilidRkyvError; +} + +impl rkyv::de::SharedDeserializeRegistry for VeilidSharedDeserializeMap { + fn get_shared_ptr(&mut self, ptr: *const u8) -> Option<&dyn rkyv::de::SharedPointer> { + self.inner.get_shared_ptr(ptr) + } + + fn add_shared_ptr( + &mut self, + ptr: *const u8, + shared: Box, + ) -> Result<(), Self::Error> { + self.inner + .add_shared_ptr(ptr, shared) + .map_err(VeilidRkyvError::Inner) + } +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#[derive(Debug)] +pub enum VeilidRkyvError { + Inner(E), + StringError(String), +} + +impl From for VeilidRkyvError { + fn from(s: String) -> Self { + Self::StringError(s) + } +} + +impl fmt::Display for VeilidRkyvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + VeilidRkyvError::Inner(e) => write!(f, "Inner: {}", e), + VeilidRkyvError::StringError(s) => write!(f, "StringError: {}", s), + } + } +} + +impl std::error::Error for VeilidRkyvError {} + +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +pub fn to_rkyv(value: &T) -> VeilidAPIResult> +where + T: RkyvSerialize, +{ + let mut serializer = DefaultVeilidRkyvSerializer::default(); + serializer + .serialize_value(value) + .map_err(|e| VeilidAPIError::generic(format!("failed to serialize 
object: {}", e)))?; + Ok(serializer + .into_inner() + .into_serializer() + .into_inner() + .to_vec()) +} + +pub fn from_rkyv(bytes: Vec) -> VeilidAPIResult +where + T: RkyvArchive, + ::Archived: + for<'t> CheckBytes>, + ::Archived: RkyvDeserialize, +{ + rkyv::check_archived_root::(&bytes) + .map_err(|e| VeilidAPIError::generic(format!("checkbytes failed: {}", e)))? + .deserialize(&mut VeilidSharedDeserializeMap::default()) + .map_err(|e| VeilidAPIError::generic(format!("failed to deserialize: {}", e))) +} diff --git a/veilid-core/src/veilid_api/tests/mod.rs b/veilid-core/src/veilid_api/tests/mod.rs new file mode 100644 index 00000000..3c3d2f33 --- /dev/null +++ b/veilid-core/src/veilid_api/tests/mod.rs @@ -0,0 +1 @@ +pub mod test_serialize_rkyv; diff --git a/veilid-core/src/veilid_api/tests/test_serialize_rkyv.rs b/veilid-core/src/veilid_api/tests/test_serialize_rkyv.rs new file mode 100644 index 00000000..0566f7b7 --- /dev/null +++ b/veilid-core/src/veilid_api/tests/test_serialize_rkyv.rs @@ -0,0 +1,16 @@ +use crate::*; + +pub async fn test_simple_string() { + let plain = "basic string".to_string(); + let serialized = b"basic string\x0c\x00\x00\x00\xf4\xff\xff\xff".to_vec(); + + let a = to_rkyv(&plain); + assert_eq!(a.unwrap(), serialized); + + let b = from_rkyv::(serialized); + assert_eq!(b.unwrap(), plain); +} + +pub async fn test_all() { + test_simple_string().await; +} diff --git a/veilid-core/src/veilid_api/types.rs b/veilid-core/src/veilid_api/types.rs deleted file mode 100644 index 73abb9a7..00000000 --- a/veilid-core/src/veilid_api/types.rs +++ /dev/null @@ -1,2499 +0,0 @@ -use super::*; - -///////////////////////////////////////////////////////////////////////////////////////////////////// - -/// Microseconds since epoch -pub type Timestamp = AlignedU64; -pub fn get_aligned_timestamp() -> Timestamp { - get_timestamp().into() -} -/// Microseconds duration -pub type TimestampDuration = AlignedU64; -/// Request/Response matching id -pub type OperationId = AlignedU64; -/// Number of bytes -pub type ByteCount = AlignedU64; -/// Tunnel identifier -pub type TunnelId = AlignedU64; -/// Value schema -pub type ValueSchema = FourCC; -/// Value subkey -pub type ValueSubkey = u32; -/// Value subkey range -pub type ValueSubkeyRange = (u32, u32); -/// Value sequence number -pub type ValueSeqNum = u32; - -/// FOURCC code -#[derive( - Copy, - Default, - Clone, - Hash, - PartialOrd, - Ord, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq, Hash))] -pub struct FourCC(pub [u8; 4]); - -impl From<[u8; 4]> for FourCC { - fn from(b: [u8; 4]) -> Self { - Self(b) - } -} -impl TryFrom<&[u8]> for FourCC { - type Error = VeilidAPIError; - fn try_from(b: &[u8]) -> Result { - Ok(Self(b.try_into().map_err(VeilidAPIError::generic)?)) - } -} - -impl fmt::Display for FourCC { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", String::from_utf8_lossy(&self.0)) - } -} -impl fmt::Debug for FourCC { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", String::from_utf8_lossy(&self.0)) - } -} - -impl FromStr for FourCC { - type Err = VeilidAPIError; - fn from_str(s: &str) -> Result { - Ok(Self( - s.as_bytes().try_into().map_err(VeilidAPIError::generic)?, - )) - } -} - -/// Log level for VeilidCore -#[derive( - Debug, - Clone, - PartialEq, - Eq, - PartialOrd, - Ord, - Copy, - Serialize, - Deserialize, - RkyvArchive, - 
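// --- Editor's sketch (not part of the patch): a round trip through the new
// to_rkyv/from_rkyv helpers, in the spirit of test_simple_string above; assumes the
// serialize_helpers exports are in scope.
fn rkyv_roundtrip_sketch() -> VeilidAPIResult<()> {
    let original = "basic string".to_string();
    let bytes = to_rkyv(&original)?;
    // from_rkyv validates with check_archived_root before deserializing
    let decoded: String = from_rkyv(bytes)?;
    assert_eq!(decoded, original);
    Ok(())
}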
RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum VeilidLogLevel { - Error = 1, - Warn, - Info, - Debug, - Trace, -} - -impl VeilidLogLevel { - pub fn from_tracing_level(level: tracing::Level) -> VeilidLogLevel { - match level { - tracing::Level::ERROR => VeilidLogLevel::Error, - tracing::Level::WARN => VeilidLogLevel::Warn, - tracing::Level::INFO => VeilidLogLevel::Info, - tracing::Level::DEBUG => VeilidLogLevel::Debug, - tracing::Level::TRACE => VeilidLogLevel::Trace, - } - } - pub fn from_log_level(level: log::Level) -> VeilidLogLevel { - match level { - log::Level::Error => VeilidLogLevel::Error, - log::Level::Warn => VeilidLogLevel::Warn, - log::Level::Info => VeilidLogLevel::Info, - log::Level::Debug => VeilidLogLevel::Debug, - log::Level::Trace => VeilidLogLevel::Trace, - } - } - pub fn to_tracing_level(&self) -> tracing::Level { - match self { - Self::Error => tracing::Level::ERROR, - Self::Warn => tracing::Level::WARN, - Self::Info => tracing::Level::INFO, - Self::Debug => tracing::Level::DEBUG, - Self::Trace => tracing::Level::TRACE, - } - } - pub fn to_log_level(&self) -> log::Level { - match self { - Self::Error => log::Level::Error, - Self::Warn => log::Level::Warn, - Self::Info => log::Level::Info, - Self::Debug => log::Level::Debug, - Self::Trace => log::Level::Trace, - } - } -} - -impl fmt::Display for VeilidLogLevel { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - let text = match self { - Self::Error => "ERROR", - Self::Warn => "WARN", - Self::Info => "INFO", - Self::Debug => "DEBUG", - Self::Trace => "TRACE", - }; - write!(f, "{}", text) - } -} - -/// A VeilidCore log message with optional backtrace -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidLog { - pub log_level: VeilidLogLevel, - pub message: String, - pub backtrace: Option, -} - -/// Direct statement blob passed to hosting application for processing -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidAppMessage { - /// Some(sender) if the message was sent directly, None if received via a private/safety route - #[serde(with = "opt_json_as_string")] - pub sender: Option, - /// The content of the message to deliver to the application - #[serde(with = "json_as_base64")] - pub message: Vec, -} - -/// Direct question blob passed to hosting application for processing to send an eventual AppReply -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidAppCall { - /// Some(sender) if the request was sent directly, None if received via a private/safety route - #[serde(with = "opt_json_as_string")] - pub sender: Option, - /// The content of the request to deliver to the application - #[serde(with = "json_as_base64")] - pub message: Vec, - /// The id to reply to - #[serde(with = "json_as_string")] - pub id: OperationId, -} - -/// Attachment abstraction for network 'signal strength' -#[derive( - Debug, - PartialEq, - Eq, - Clone, - Copy, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum AttachmentState { - Detached, - Attaching, - AttachedWeak, - AttachedGood, - AttachedStrong, - 
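// --- Editor's sketch (not part of the patch): the VeilidLogLevel conversions above are
// symmetric with tracing's levels; assumes veilid-core types in scope.
fn log_level_sketch() {
    let level = VeilidLogLevel::from_tracing_level(tracing::Level::INFO);
    assert_eq!(level, VeilidLogLevel::Info);
    assert_eq!(level.to_tracing_level(), tracing::Level::INFO);
}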
FullyAttached, - OverAttached, - Detaching, -} - -impl fmt::Display for AttachmentState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - let out = match self { - AttachmentState::Attaching => "attaching".to_owned(), - AttachmentState::AttachedWeak => "attached_weak".to_owned(), - AttachmentState::AttachedGood => "attached_good".to_owned(), - AttachmentState::AttachedStrong => "attached_strong".to_owned(), - AttachmentState::FullyAttached => "fully_attached".to_owned(), - AttachmentState::OverAttached => "over_attached".to_owned(), - AttachmentState::Detaching => "detaching".to_owned(), - AttachmentState::Detached => "detached".to_owned(), - }; - write!(f, "{}", out) - } -} - -impl TryFrom for AttachmentState { - type Error = (); - - fn try_from(s: String) -> Result { - Ok(match s.as_str() { - "attaching" => AttachmentState::Attaching, - "attached_weak" => AttachmentState::AttachedWeak, - "attached_good" => AttachmentState::AttachedGood, - "attached_strong" => AttachmentState::AttachedStrong, - "fully_attached" => AttachmentState::FullyAttached, - "over_attached" => AttachmentState::OverAttached, - "detaching" => AttachmentState::Detaching, - "detached" => AttachmentState::Detached, - _ => return Err(()), - }) - } -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidStateAttachment { - pub state: AttachmentState, - pub public_internet_ready: bool, - pub local_network_ready: bool, -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PeerTableData { - pub node_ids: Vec, - pub peer_address: PeerAddress, - pub peer_stats: PeerStats, -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidStateNetwork { - pub started: bool, - #[serde(with = "json_as_string")] - pub bps_down: ByteCount, - #[serde(with = "json_as_string")] - pub bps_up: ByteCount, - pub peers: Vec, -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidStateRoute { - pub dead_routes: Vec, - pub dead_remote_routes: Vec, -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidStateConfig { - pub config: VeilidConfigInner, -} - -#[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct VeilidValueChange { - key: TypedKey, - subkeys: Vec, - count: u32, - value: ValueData, -} - -#[derive(Debug, Clone, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(u8), derive(CheckBytes))] -#[serde(tag = "kind")] -pub enum VeilidUpdate { - Log(VeilidLog), - AppMessage(VeilidAppMessage), - AppCall(VeilidAppCall), - Attachment(VeilidStateAttachment), - Network(VeilidStateNetwork), - Config(VeilidStateConfig), - Route(VeilidStateRoute), - ValueChange(VeilidValueChange), - Shutdown, -} - -#[derive(Debug, Clone, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct 
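// --- Editor's sketch (not part of the patch): dispatching on the VeilidUpdate enum as
// laid out in the types.rs hunk above, the way an application's update callback might;
// the field names used (state, started, peers, message) come from the structs in this
// hunk.
fn handle_update_sketch(update: VeilidUpdate) {
    match update {
        VeilidUpdate::Attachment(att) => {
            println!("attachment: {}", att.state);
        }
        VeilidUpdate::Network(net) => {
            println!("network started={} peers={}", net.started, net.peers.len());
        }
        VeilidUpdate::AppMessage(msg) => {
            println!("app message: {} bytes", msg.message.len());
        }
        VeilidUpdate::Shutdown => {
            println!("shutting down");
        }
        _ => {}
    }
}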
VeilidState { - pub attachment: VeilidStateAttachment, - pub network: VeilidStateNetwork, - pub config: VeilidStateConfig, -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -/// - -#[derive( - Clone, - Debug, - Default, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct ValueData { - pub seq: ValueSeqNum, - pub schema: ValueSchema, - pub data: Vec, -} -impl ValueData { - pub fn new(schema: ValueSchema, data: Vec) -> Self { - Self { - seq: 0, - schema, - data, - } - } - pub fn new_with_seq(seq: ValueSeqNum, schema: ValueSchema, data: Vec) -> Self { - Self { seq, schema, data } - } - pub fn change(&mut self, data: Vec) { - self.data = data; - self.seq += 1; - } -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// - -// Keep member order appropriate for sorting < preference -#[derive( - Copy, - Clone, - Debug, - Eq, - PartialEq, - Ord, - PartialOrd, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum DialInfoClass { - Direct = 0, // D = Directly reachable with public IP and no firewall, with statically configured port - Mapped = 1, // M = Directly reachable with via portmap behind any NAT or firewalled with dynamically negotiated port - FullConeNAT = 2, // F = Directly reachable device without portmap behind full-cone NAT - Blocked = 3, // B = Inbound blocked at firewall but may hole punch with public address - AddressRestrictedNAT = 4, // A = Device without portmap behind address-only restricted NAT - PortRestrictedNAT = 5, // P = Device without portmap behind address-and-port restricted NAT -} - -impl DialInfoClass { - // Is a signal required to do an inbound hole-punch? - pub fn requires_signal(&self) -> bool { - matches!( - self, - Self::Blocked | Self::AddressRestrictedNAT | Self::PortRestrictedNAT - ) - } - - // Does a relay node need to be allocated for this dial info? 
- // For full cone NAT, the relay itself may not be used but the keepalive sent to it - // is required to keep the NAT mapping valid in the router state table - pub fn requires_relay(&self) -> bool { - matches!( - self, - Self::FullConeNAT - | Self::Blocked - | Self::AddressRestrictedNAT - | Self::PortRestrictedNAT - ) - } -} - -// Ordering here matters, >= is used to check strength of sequencing requirement -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum Sequencing { - NoPreference, - PreferOrdered, - EnsureOrdered, -} - -impl Default for Sequencing { - fn default() -> Self { - Self::NoPreference - } -} - -// Ordering here matters, >= is used to check strength of stability requirement -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum Stability { - LowLatency, - Reliable, -} - -impl Default for Stability { - fn default() -> Self { - Self::LowLatency - } -} - -/// The choice of safety route to include in compiled routes -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum SafetySelection { - /// Don't use a safety route, only specify the sequencing preference - Unsafe(Sequencing), - /// Use a safety route and parameters specified by a SafetySpec - Safe(SafetySpec), -} - -impl SafetySelection { - pub fn get_sequencing(&self) -> Sequencing { - match self { - SafetySelection::Unsafe(seq) => *seq, - SafetySelection::Safe(ss) => ss.sequencing, - } - } -} - -/// Options for safety routes (sender privacy) -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct SafetySpec { - /// preferred safety route set id if it still exists - pub preferred_route: Option, - /// must be greater than 0 - pub hop_count: usize, - /// prefer reliability over speed - pub stability: Stability, - /// prefer connection-oriented sequenced protocols - pub sequencing: Sequencing, -} - -// Keep member order appropriate for sorting < preference -#[derive( - Debug, - Clone, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoDetail { - pub class: DialInfoClass, - pub dial_info: DialInfo, -} - -impl MatchesDialInfoFilter for DialInfoDetail { - fn matches_filter(&self, filter: &DialInfoFilter) -> bool { - self.dial_info.matches_filter(filter) - } -} - -impl DialInfoDetail { - pub fn ordered_sequencing_sort(a: &DialInfoDetail, b: &DialInfoDetail) -> core::cmp::Ordering { - if a.class < b.class { - return core::cmp::Ordering::Less; - } - if a.class > b.class { - return core::cmp::Ordering::Greater; - } - DialInfo::ordered_sequencing_sort(&a.dial_info, &b.dial_info) - } - pub const NO_SORT: std::option::Option< - for<'r, 's> fn( - &'r veilid_api::DialInfoDetail, - &'s veilid_api::DialInfoDetail, - ) -> std::cmp::Ordering, - > = None:: core::cmp::Ordering>; -} - -#[derive( - Copy, - Clone, - Debug, - Eq, - 
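// --- Editor's sketch (not part of the patch): constructing the SafetySelection variants
// defined above; the values are arbitrary.
fn safety_selection_sketch() -> SafetySelection {
    // Sender privacy via a 2-hop safety route, preferring reliability and ordering
    let sel = SafetySelection::Safe(SafetySpec {
        preferred_route: None,
        hop_count: 2,
        stability: Stability::Reliable,
        sequencing: Sequencing::EnsureOrdered,
    });
    // get_sequencing() reads through either variant
    assert_eq!(sel.get_sequencing(), Sequencing::EnsureOrdered);
    sel
}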
PartialEq, - Ord, - PartialOrd, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum NetworkClass { - InboundCapable = 0, // I = Inbound capable without relay, may require signal - OutboundOnly = 1, // O = Outbound only, inbound relay required except with reverse connect signal - WebApp = 2, // W = PWA, outbound relay is required in most cases - Invalid = 3, // X = Invalid network class, we don't know how to reach this node -} - -impl Default for NetworkClass { - fn default() -> Self { - Self::Invalid - } -} - -impl NetworkClass { - // Should an outbound relay be kept available? - pub fn outbound_wants_relay(&self) -> bool { - matches!(self, Self::WebApp) - } -} - -/// RoutingDomain-specific status for each node -/// is returned by the StatusA call - -/// PublicInternet RoutingDomain Status -#[derive( - Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PublicInternetNodeStatus { - pub will_route: bool, - pub will_tunnel: bool, - pub will_signal: bool, - pub will_relay: bool, - pub will_validate_dial_info: bool, -} - -#[derive( - Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct LocalNetworkNodeStatus { - pub will_relay: bool, - pub will_validate_dial_info: bool, -} - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum NodeStatus { - PublicInternet(PublicInternetNodeStatus), - LocalNetwork(LocalNetworkNodeStatus), -} - -impl NodeStatus { - pub fn will_route(&self) -> bool { - match self { - NodeStatus::PublicInternet(pi) => pi.will_route, - NodeStatus::LocalNetwork(_) => false, - } - } - pub fn will_tunnel(&self) -> bool { - match self { - NodeStatus::PublicInternet(pi) => pi.will_tunnel, - NodeStatus::LocalNetwork(_) => false, - } - } - pub fn will_signal(&self) -> bool { - match self { - NodeStatus::PublicInternet(pi) => pi.will_signal, - NodeStatus::LocalNetwork(_) => false, - } - } - pub fn will_relay(&self) -> bool { - match self { - NodeStatus::PublicInternet(pi) => pi.will_relay, - NodeStatus::LocalNetwork(ln) => ln.will_relay, - } - } - pub fn will_validate_dial_info(&self) -> bool { - match self { - NodeStatus::PublicInternet(pi) => pi.will_validate_dial_info, - NodeStatus::LocalNetwork(ln) => ln.will_validate_dial_info, - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct NodeInfo { - pub network_class: NetworkClass, - #[with(RkyvEnumSet)] - pub outbound_protocols: ProtocolTypeSet, - #[with(RkyvEnumSet)] - pub address_types: AddressTypeSet, - pub envelope_support: Vec, - pub crypto_support: Vec, - pub dial_info_detail_list: Vec, -} - -impl NodeInfo { - pub fn first_filtered_dial_info_detail( - &self, - sort: Option, - filter: F, - ) -> Option - where - S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering, - F: Fn(&DialInfoDetail) -> bool, - { - if let Some(sort) = sort { - let mut dids = self.dial_info_detail_list.clone(); - dids.sort_by(sort); - for did in dids { - if filter(&did) { - return Some(did); - } - } - } else { - for did in &self.dial_info_detail_list { - if filter(did) { - return Some(did.clone()); - } - } - }; - None - } - - pub fn 
all_filtered_dial_info_details( - &self, - sort: Option, - filter: F, - ) -> Vec - where - S: Fn(&DialInfoDetail, &DialInfoDetail) -> std::cmp::Ordering, - F: Fn(&DialInfoDetail) -> bool, - { - let mut dial_info_detail_list = Vec::new(); - - if let Some(sort) = sort { - let mut dids = self.dial_info_detail_list.clone(); - dids.sort_by(sort); - for did in dids { - if filter(&did) { - dial_info_detail_list.push(did); - } - } - } else { - for did in &self.dial_info_detail_list { - if filter(did) { - dial_info_detail_list.push(did.clone()); - } - } - }; - dial_info_detail_list - } - - /// Does this node has some dial info - pub fn has_dial_info(&self) -> bool { - !self.dial_info_detail_list.is_empty() - } - - /// Is some relay required either for signal or inbound relay or outbound relay? - pub fn requires_relay(&self) -> bool { - match self.network_class { - NetworkClass::InboundCapable => { - for did in &self.dial_info_detail_list { - if did.class.requires_relay() { - return true; - } - } - } - NetworkClass::OutboundOnly => { - return true; - } - NetworkClass::WebApp => { - return true; - } - NetworkClass::Invalid => {} - } - false - } - - /// Can this node assist with signalling? Yes but only if it doesn't require signalling, itself. - pub fn can_signal(&self) -> bool { - // Must be inbound capable - if !matches!(self.network_class, NetworkClass::InboundCapable) { - return false; - } - // Do any of our dial info require signalling? if so, we can't offer signalling - for did in &self.dial_info_detail_list { - if did.class.requires_signal() { - return false; - } - } - true - } - - /// Can this node relay be an inbound relay? - pub fn can_inbound_relay(&self) -> bool { - // For now this is the same - self.can_signal() - } - - /// Is this node capable of validating dial info - pub fn can_validate_dial_info(&self) -> bool { - // For now this is the same - self.can_signal() - } -} - -#[allow(clippy::derive_hash_xor_eq)] -#[derive( - Debug, - PartialOrd, - Ord, - Hash, - EnumSetType, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[enumset(repr = "u8")] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum Direction { - Inbound, - Outbound, -} -pub type DirectionSet = EnumSet; - -// Keep member order appropriate for sorting < preference -// Must match DialInfo order -#[allow(clippy::derive_hash_xor_eq)] -#[derive( - Debug, - PartialOrd, - Ord, - Hash, - EnumSetType, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[enumset(repr = "u8")] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum LowLevelProtocolType { - UDP, - TCP, -} - -impl LowLevelProtocolType { - pub fn is_connection_oriented(&self) -> bool { - matches!(self, LowLevelProtocolType::TCP) - } -} -pub type LowLevelProtocolTypeSet = EnumSet; - -// Keep member order appropriate for sorting < preference -// Must match DialInfo order -#[allow(clippy::derive_hash_xor_eq)] -#[derive( - Debug, - PartialOrd, - Ord, - Hash, - EnumSetType, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[enumset(repr = "u8")] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum ProtocolType { - UDP, - TCP, - WS, - WSS, -} - -impl ProtocolType { - pub fn is_connection_oriented(&self) -> bool { - matches!( - self, - ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS - ) - } - pub fn low_level_protocol_type(&self) -> LowLevelProtocolType { - match self { - ProtocolType::UDP => LowLevelProtocolType::UDP, - ProtocolType::TCP | ProtocolType::WS 
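// --- Editor's sketch (not part of the patch): using the NodeInfo helpers above to pick
// a connection-oriented dial info detail, sorted by the class/dial-info ordering;
// `node_info` is assumed to be a populated NodeInfo.
fn pick_ordered_dial_info_sketch(node_info: &NodeInfo) -> Option<DialInfoDetail> {
    node_info.first_filtered_dial_info_detail(
        Some(DialInfoDetail::ordered_sequencing_sort),
        |did| did.dial_info.protocol_type().is_connection_oriented(),
    )
}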
| ProtocolType::WSS => LowLevelProtocolType::TCP, - } - } - pub fn sort_order(&self, sequencing: Sequencing) -> usize { - match self { - ProtocolType::UDP => { - if sequencing != Sequencing::NoPreference { - 3 - } else { - 0 - } - } - ProtocolType::TCP => { - if sequencing != Sequencing::NoPreference { - 0 - } else { - 1 - } - } - ProtocolType::WS => { - if sequencing != Sequencing::NoPreference { - 1 - } else { - 2 - } - } - ProtocolType::WSS => { - if sequencing != Sequencing::NoPreference { - 2 - } else { - 3 - } - } - } - } - pub fn all_ordered_set() -> ProtocolTypeSet { - ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS - } -} - -pub type ProtocolTypeSet = EnumSet; - -#[allow(clippy::derive_hash_xor_eq)] -#[derive( - Debug, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, - EnumSetType, -)] -#[enumset(repr = "u8")] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum AddressType { - IPV4, - IPV6, -} -pub type AddressTypeSet = EnumSet; - -// Routing domain here is listed in order of preference, keep in order -#[allow(clippy::derive_hash_xor_eq)] -#[derive( - Debug, - Ord, - PartialOrd, - Hash, - EnumSetType, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[enumset(repr = "u8")] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum RoutingDomain { - LocalNetwork = 0, - PublicInternet = 1, -} -impl RoutingDomain { - pub const fn count() -> usize { - 2 - } - pub const fn all() -> [RoutingDomain; RoutingDomain::count()] { - // Routing domain here is listed in order of preference, keep in order - [RoutingDomain::LocalNetwork, RoutingDomain::PublicInternet] - } -} -pub type RoutingDomainSet = EnumSet; - -#[derive( - Copy, - Clone, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum Address { - IPV4(Ipv4Addr), - IPV6(Ipv6Addr), -} - -impl Default for Address { - fn default() -> Self { - Address::IPV4(Ipv4Addr::new(0, 0, 0, 0)) - } -} - -impl Address { - pub fn from_socket_addr(sa: SocketAddr) -> Address { - match sa { - SocketAddr::V4(v4) => Address::IPV4(*v4.ip()), - SocketAddr::V6(v6) => Address::IPV6(*v6.ip()), - } - } - pub fn from_ip_addr(addr: IpAddr) -> Address { - match addr { - IpAddr::V4(v4) => Address::IPV4(v4), - IpAddr::V6(v6) => Address::IPV6(v6), - } - } - pub fn address_type(&self) -> AddressType { - match self { - Address::IPV4(_) => AddressType::IPV4, - Address::IPV6(_) => AddressType::IPV6, - } - } - pub fn address_string(&self) -> String { - match self { - Address::IPV4(v4) => v4.to_string(), - Address::IPV6(v6) => v6.to_string(), - } - } - pub fn address_string_with_port(&self, port: u16) -> String { - match self { - Address::IPV4(v4) => format!("{}:{}", v4, port), - Address::IPV6(v6) => format!("[{}]:{}", v6, port), - } - } - pub fn is_unspecified(&self) -> bool { - match self { - Address::IPV4(v4) => ipv4addr_is_unspecified(v4), - Address::IPV6(v6) => ipv6addr_is_unspecified(v6), - } - } - pub fn is_global(&self) -> bool { - match self { - Address::IPV4(v4) => ipv4addr_is_global(v4) && !ipv4addr_is_multicast(v4), - Address::IPV6(v6) => ipv6addr_is_unicast_global(v6), - } - } - pub fn is_local(&self) -> bool { - match self { - Address::IPV4(v4) => { - ipv4addr_is_private(v4) - || ipv4addr_is_link_local(v4) - || ipv4addr_is_ietf_protocol_assignment(v4) - } - Address::IPV6(v6) => { - ipv6addr_is_unicast_site_local(v6) - 
|| ipv6addr_is_unicast_link_local(v6) - || ipv6addr_is_unique_local(v6) - } - } - } - pub fn to_ip_addr(&self) -> IpAddr { - match self { - Self::IPV4(a) => IpAddr::V4(*a), - Self::IPV6(a) => IpAddr::V6(*a), - } - } - pub fn to_socket_addr(&self, port: u16) -> SocketAddr { - SocketAddr::new(self.to_ip_addr(), port) - } - pub fn to_canonical(&self) -> Address { - match self { - Address::IPV4(v4) => Address::IPV4(*v4), - Address::IPV6(v6) => match v6.to_ipv4() { - Some(v4) => Address::IPV4(v4), - None => Address::IPV6(*v6), - }, - } - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Address::IPV4(v4) => write!(f, "{}", v4), - Address::IPV6(v6) => write!(f, "{}", v6), - } - } -} - -impl FromStr for Address { - type Err = VeilidAPIError; - fn from_str(host: &str) -> Result { - if let Ok(addr) = Ipv4Addr::from_str(host) { - Ok(Address::IPV4(addr)) - } else if let Ok(addr) = Ipv6Addr::from_str(host) { - Ok(Address::IPV6(addr)) - } else { - Err(VeilidAPIError::parse_error( - "Address::from_str failed", - host, - )) - } - } -} - -#[derive( - Copy, - Default, - Clone, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct SocketAddress { - address: Address, - port: u16, -} - -impl SocketAddress { - pub fn new(address: Address, port: u16) -> Self { - Self { address, port } - } - pub fn from_socket_addr(sa: SocketAddr) -> SocketAddress { - Self { - address: Address::from_socket_addr(sa), - port: sa.port(), - } - } - pub fn address(&self) -> Address { - self.address - } - pub fn address_type(&self) -> AddressType { - self.address.address_type() - } - pub fn port(&self) -> u16 { - self.port - } - pub fn to_canonical(&self) -> SocketAddress { - SocketAddress { - address: self.address.to_canonical(), - port: self.port, - } - } - pub fn to_ip_addr(&self) -> IpAddr { - self.address.to_ip_addr() - } - pub fn to_socket_addr(&self) -> SocketAddr { - self.address.to_socket_addr(self.port) - } -} - -impl fmt::Display for SocketAddress { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.to_socket_addr()) - } -} - -impl FromStr for SocketAddress { - type Err = VeilidAPIError; - fn from_str(s: &str) -> Result { - let sa = SocketAddr::from_str(s) - .map_err(|e| VeilidAPIError::parse_error("Failed to parse SocketAddress", e))?; - Ok(SocketAddress::from_socket_addr(sa)) - } -} - -////////////////////////////////////////////////////////////////// - -#[derive( - Copy, - Clone, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoFilter { - #[with(RkyvEnumSet)] - pub protocol_type_set: ProtocolTypeSet, - #[with(RkyvEnumSet)] - pub address_type_set: AddressTypeSet, -} - -impl Default for DialInfoFilter { - fn default() -> Self { - Self { - protocol_type_set: ProtocolTypeSet::all(), - address_type_set: AddressTypeSet::all(), - } - } -} - -impl DialInfoFilter { - pub fn all() -> Self { - Self { - protocol_type_set: ProtocolTypeSet::all(), - address_type_set: AddressTypeSet::all(), - } - } - pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self { - self.protocol_type_set = ProtocolTypeSet::only(protocol_type); - self - } - pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self { - 
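// --- Editor's sketch (not part of the patch): exercising the Address and SocketAddress
// parsing and accessors defined above.
use core::str::FromStr;

fn address_sketch() -> Result<(), VeilidAPIError> {
    let addr = Address::from_str("192.168.1.10")?;
    assert_eq!(addr.address_type(), AddressType::IPV4);
    assert!(addr.is_local()); // RFC1918 space counts as local, not global

    let sa = SocketAddress::new(addr, 5150);
    assert_eq!(sa.port(), 5150);
    let _std_sa: std::net::SocketAddr = sa.to_socket_addr();
    Ok(())
}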
self.protocol_type_set = protocol_set; - self - } - pub fn with_address_type(mut self, address_type: AddressType) -> Self { - self.address_type_set = AddressTypeSet::only(address_type); - self - } - pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self { - self.address_type_set = address_set; - self - } - pub fn filtered(mut self, other_dif: &DialInfoFilter) -> Self { - self.protocol_type_set &= other_dif.protocol_type_set; - self.address_type_set &= other_dif.address_type_set; - self - } - pub fn is_dead(&self) -> bool { - self.protocol_type_set.is_empty() || self.address_type_set.is_empty() - } -} - -impl fmt::Debug for DialInfoFilter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - let mut out = String::new(); - if self.protocol_type_set != ProtocolTypeSet::all() { - out += &format!("+{:?}", self.protocol_type_set); - } else { - out += "*"; - } - if self.address_type_set != AddressTypeSet::all() { - out += &format!("+{:?}", self.address_type_set); - } else { - out += "*"; - } - write!(f, "[{}]", out) - } -} - -pub trait MatchesDialInfoFilter { - fn matches_filter(&self, filter: &DialInfoFilter) -> bool; -} - -#[derive( - Clone, - Default, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoUDP { - pub socket_address: SocketAddress, -} - -#[derive( - Clone, - Default, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoTCP { - pub socket_address: SocketAddress, -} - -#[derive( - Clone, - Default, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoWS { - pub socket_address: SocketAddress, - pub request: String, -} - -#[derive( - Clone, - Default, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct DialInfoWSS { - pub socket_address: SocketAddress, - pub request: String, -} - -// Keep member order appropriate for sorting < preference -// Must match ProtocolType order -#[derive( - Clone, - Debug, - PartialEq, - PartialOrd, - Ord, - Eq, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -#[serde(tag = "kind")] -pub enum DialInfo { - UDP(DialInfoUDP), - TCP(DialInfoTCP), - WS(DialInfoWS), - WSS(DialInfoWSS), -} -impl Default for DialInfo { - fn default() -> Self { - DialInfo::UDP(DialInfoUDP::default()) - } -} - -impl fmt::Display for DialInfo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - match self { - DialInfo::UDP(di) => write!(f, "udp|{}", di.socket_address), - DialInfo::TCP(di) => write!(f, "tcp|{}", di.socket_address), - DialInfo::WS(di) => { - let url = format!("ws://{}", di.request); - let split_url = SplitUrl::from_str(&url).unwrap(); - match split_url.host { - SplitUrlHost::Hostname(_) => { - write!(f, "ws|{}|{}", di.socket_address.to_ip_addr(), di.request) - } - SplitUrlHost::IpAddr(a) => { - if di.socket_address.to_ip_addr() == a { - write!(f, "ws|{}", di.request) - } else { - panic!("resolved address does not match url: 
{}", di.request); - } - } - } - } - DialInfo::WSS(di) => { - let url = format!("wss://{}", di.request); - let split_url = SplitUrl::from_str(&url).unwrap(); - match split_url.host { - SplitUrlHost::Hostname(_) => { - write!(f, "wss|{}|{}", di.socket_address.to_ip_addr(), di.request) - } - SplitUrlHost::IpAddr(_) => { - panic!( - "secure websockets can not use ip address in request: {}", - di.request - ); - } - } - } - } - } -} - -impl FromStr for DialInfo { - type Err = VeilidAPIError; - fn from_str(s: &str) -> Result { - let (proto, rest) = s.split_once('|').ok_or_else(|| { - VeilidAPIError::parse_error("DialInfo::from_str missing protocol '|' separator", s) - })?; - match proto { - "udp" => { - let socket_address = SocketAddress::from_str(rest)?; - Ok(DialInfo::udp(socket_address)) - } - "tcp" => { - let socket_address = SocketAddress::from_str(rest)?; - Ok(DialInfo::tcp(socket_address)) - } - "ws" => { - let url = format!("ws://{}", rest); - let split_url = SplitUrl::from_str(&url).map_err(|e| { - VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url) - })?; - if split_url.scheme != "ws" || !url.starts_with("ws://") { - apibail_parse_error!("incorrect scheme for WS dialinfo", url); - } - let url_port = split_url.port.unwrap_or(80u16); - - match rest.split_once('|') { - Some((sa, rest)) => { - let address = Address::from_str(sa)?; - - DialInfo::try_ws( - SocketAddress::new(address, url_port), - format!("ws://{}", rest), - ) - } - None => { - let address = Address::from_str(&split_url.host.to_string())?; - DialInfo::try_ws( - SocketAddress::new(address, url_port), - format!("ws://{}", rest), - ) - } - } - } - "wss" => { - let url = format!("wss://{}", rest); - let split_url = SplitUrl::from_str(&url).map_err(|e| { - VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url) - })?; - if split_url.scheme != "wss" || !url.starts_with("wss://") { - apibail_parse_error!("incorrect scheme for WSS dialinfo", url); - } - let url_port = split_url.port.unwrap_or(443u16); - - let (a, rest) = rest.split_once('|').ok_or_else(|| { - VeilidAPIError::parse_error( - "DialInfo::from_str missing socket address '|' separator", - s, - ) - })?; - - let address = Address::from_str(a)?; - DialInfo::try_wss( - SocketAddress::new(address, url_port), - format!("wss://{}", rest), - ) - } - _ => Err(VeilidAPIError::parse_error( - "DialInfo::from_str has invalid scheme", - s, - )), - } - } -} - -impl DialInfo { - pub fn udp_from_socketaddr(socket_addr: SocketAddr) -> Self { - Self::UDP(DialInfoUDP { - socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(), - }) - } - pub fn tcp_from_socketaddr(socket_addr: SocketAddr) -> Self { - Self::TCP(DialInfoTCP { - socket_address: SocketAddress::from_socket_addr(socket_addr).to_canonical(), - }) - } - pub fn udp(socket_address: SocketAddress) -> Self { - Self::UDP(DialInfoUDP { - socket_address: socket_address.to_canonical(), - }) - } - pub fn tcp(socket_address: SocketAddress) -> Self { - Self::TCP(DialInfoTCP { - socket_address: socket_address.to_canonical(), - }) - } - pub fn try_ws(socket_address: SocketAddress, url: String) -> Result { - let split_url = SplitUrl::from_str(&url).map_err(|e| { - VeilidAPIError::parse_error(format!("unable to split WS url: {}", e), &url) - })?; - if split_url.scheme != "ws" || !url.starts_with("ws://") { - apibail_parse_error!("incorrect scheme for WS dialinfo", url); - } - let url_port = split_url.port.unwrap_or(80u16); - if url_port != socket_address.port() { - 
apibail_parse_error!("socket address port doesn't match url port", url); - } - if let SplitUrlHost::IpAddr(a) = split_url.host { - if socket_address.to_ip_addr() != a { - apibail_parse_error!( - format!("request address does not match socket address: {}", a), - socket_address - ); - } - } - Ok(Self::WS(DialInfoWS { - socket_address: socket_address.to_canonical(), - request: url[5..].to_string(), - })) - } - pub fn try_wss(socket_address: SocketAddress, url: String) -> Result { - let split_url = SplitUrl::from_str(&url).map_err(|e| { - VeilidAPIError::parse_error(format!("unable to split WSS url: {}", e), &url) - })?; - if split_url.scheme != "wss" || !url.starts_with("wss://") { - apibail_parse_error!("incorrect scheme for WSS dialinfo", url); - } - let url_port = split_url.port.unwrap_or(443u16); - if url_port != socket_address.port() { - apibail_parse_error!("socket address port doesn't match url port", url); - } - if !matches!(split_url.host, SplitUrlHost::Hostname(_)) { - apibail_parse_error!( - "WSS url can not use address format, only hostname format", - url - ); - } - Ok(Self::WSS(DialInfoWSS { - socket_address: socket_address.to_canonical(), - request: url[6..].to_string(), - })) - } - pub fn protocol_type(&self) -> ProtocolType { - match self { - Self::UDP(_) => ProtocolType::UDP, - Self::TCP(_) => ProtocolType::TCP, - Self::WS(_) => ProtocolType::WS, - Self::WSS(_) => ProtocolType::WSS, - } - } - pub fn address_type(&self) -> AddressType { - self.socket_address().address_type() - } - pub fn address(&self) -> Address { - match self { - Self::UDP(di) => di.socket_address.address, - Self::TCP(di) => di.socket_address.address, - Self::WS(di) => di.socket_address.address, - Self::WSS(di) => di.socket_address.address, - } - } - pub fn socket_address(&self) -> SocketAddress { - match self { - Self::UDP(di) => di.socket_address, - Self::TCP(di) => di.socket_address, - Self::WS(di) => di.socket_address, - Self::WSS(di) => di.socket_address, - } - } - pub fn to_ip_addr(&self) -> IpAddr { - match self { - Self::UDP(di) => di.socket_address.to_ip_addr(), - Self::TCP(di) => di.socket_address.to_ip_addr(), - Self::WS(di) => di.socket_address.to_ip_addr(), - Self::WSS(di) => di.socket_address.to_ip_addr(), - } - } - pub fn port(&self) -> u16 { - match self { - Self::UDP(di) => di.socket_address.port, - Self::TCP(di) => di.socket_address.port, - Self::WS(di) => di.socket_address.port, - Self::WSS(di) => di.socket_address.port, - } - } - pub fn set_port(&mut self, port: u16) { - match self { - Self::UDP(di) => di.socket_address.port = port, - Self::TCP(di) => di.socket_address.port = port, - Self::WS(di) => di.socket_address.port = port, - Self::WSS(di) => di.socket_address.port = port, - } - } - pub fn to_socket_addr(&self) -> SocketAddr { - match self { - Self::UDP(di) => di.socket_address.to_socket_addr(), - Self::TCP(di) => di.socket_address.to_socket_addr(), - Self::WS(di) => di.socket_address.to_socket_addr(), - Self::WSS(di) => di.socket_address.to_socket_addr(), - } - } - pub fn to_peer_address(&self) -> PeerAddress { - match self { - Self::UDP(di) => PeerAddress::new(di.socket_address, ProtocolType::UDP), - Self::TCP(di) => PeerAddress::new(di.socket_address, ProtocolType::TCP), - Self::WS(di) => PeerAddress::new(di.socket_address, ProtocolType::WS), - Self::WSS(di) => PeerAddress::new(di.socket_address, ProtocolType::WSS), - } - } - pub fn request(&self) -> Option { - match self { - Self::UDP(_) => None, - Self::TCP(_) => None, - Self::WS(di) => Some(format!("ws://{}", di.request)), - 
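// --- Editor's sketch (not part of the patch): building a UDP DialInfo both by parsing
// the "proto|address" form handled by FromStr above and directly from a SocketAddress,
// then reading back its parts.
use core::str::FromStr;

fn dial_info_sketch() -> Result<(), VeilidAPIError> {
    let parsed = DialInfo::from_str("udp|203.0.113.7:5150")?;
    assert_eq!(parsed.protocol_type(), ProtocolType::UDP);
    assert_eq!(parsed.port(), 5150);

    // Equivalent construction from a SocketAddress
    let built = DialInfo::udp(SocketAddress::from_str("203.0.113.7:5150")?);
    assert_eq!(parsed, built);
    Ok(())
}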
Self::WSS(di) => Some(format!("wss://{}", di.request)), - } - } - pub fn is_valid(&self) -> bool { - let socket_address = self.socket_address(); - let address = socket_address.address(); - let port = socket_address.port(); - (address.is_global() || address.is_local()) && port > 0 - } - - pub fn make_filter(&self) -> DialInfoFilter { - DialInfoFilter { - protocol_type_set: ProtocolTypeSet::only(self.protocol_type()), - address_type_set: AddressTypeSet::only(self.address_type()), - } - } - - pub fn try_vec_from_short, H: AsRef>( - short: S, - hostname: H, - ) -> Result, VeilidAPIError> { - let short = short.as_ref(); - let hostname = hostname.as_ref(); - - if short.len() < 2 { - apibail_parse_error!("invalid short url length", short); - } - let url = match &short[0..1] { - "U" => { - format!("udp://{}:{}", hostname, &short[1..]) - } - "T" => { - format!("tcp://{}:{}", hostname, &short[1..]) - } - "W" => { - format!("ws://{}:{}", hostname, &short[1..]) - } - "S" => { - format!("wss://{}:{}", hostname, &short[1..]) - } - _ => { - apibail_parse_error!("invalid short url type", short); - } - }; - Self::try_vec_from_url(url) - } - - pub fn try_vec_from_url>(url: S) -> Result, VeilidAPIError> { - let url = url.as_ref(); - let split_url = SplitUrl::from_str(url) - .map_err(|e| VeilidAPIError::parse_error(format!("unable to split url: {}", e), url))?; - - let port = match split_url.scheme.as_str() { - "udp" | "tcp" => split_url - .port - .ok_or_else(|| VeilidAPIError::parse_error("Missing port in udp url", url))?, - "ws" => split_url.port.unwrap_or(80u16), - "wss" => split_url.port.unwrap_or(443u16), - _ => { - apibail_parse_error!("Invalid dial info url scheme", split_url.scheme); - } - }; - - let socket_addrs = { - // Resolve if possible, WASM doesn't support resolution and doesn't need it to connect to the dialinfo - // This will not be used on signed dialinfo, only for bootstrapping, so we don't need to worry about - // the '0.0.0.0' address being propagated across the routing table - cfg_if::cfg_if! { - if #[cfg(target_arch = "wasm32")] { - vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0,0,0,0)), port)] - } else { - match split_url.host { - SplitUrlHost::Hostname(_) => split_url - .host_port(port) - .to_socket_addrs() - .map_err(|_| VeilidAPIError::parse_error("couldn't resolve hostname in url", url))? 
- .collect(), - SplitUrlHost::IpAddr(a) => vec![SocketAddr::new(a, port)], - } - } - } - }; - - let mut out = Vec::new(); - for sa in socket_addrs { - out.push(match split_url.scheme.as_str() { - "udp" => Self::udp_from_socketaddr(sa), - "tcp" => Self::tcp_from_socketaddr(sa), - "ws" => Self::try_ws( - SocketAddress::from_socket_addr(sa).to_canonical(), - url.to_string(), - )?, - "wss" => Self::try_wss( - SocketAddress::from_socket_addr(sa).to_canonical(), - url.to_string(), - )?, - _ => { - unreachable!("Invalid dial info url scheme") - } - }); - } - Ok(out) - } - - pub async fn to_short(&self) -> (String, String) { - match self { - DialInfo::UDP(di) => ( - format!("U{}", di.socket_address.port()), - intf::ptr_lookup(di.socket_address.to_ip_addr()) - .await - .unwrap_or_else(|_| di.socket_address.to_string()), - ), - DialInfo::TCP(di) => ( - format!("T{}", di.socket_address.port()), - intf::ptr_lookup(di.socket_address.to_ip_addr()) - .await - .unwrap_or_else(|_| di.socket_address.to_string()), - ), - DialInfo::WS(di) => { - let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap(); - if let SplitUrlHost::IpAddr(a) = split_url.host { - if let Ok(host) = intf::ptr_lookup(a).await { - split_url.host = SplitUrlHost::Hostname(host); - } - } - ( - format!( - "W{}{}", - split_url.port.unwrap_or(80), - split_url - .path - .map(|p| format!("/{}", p)) - .unwrap_or_default() - ), - split_url.host.to_string(), - ) - } - DialInfo::WSS(di) => { - let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap(); - if let SplitUrlHost::IpAddr(a) = split_url.host { - if let Ok(host) = intf::ptr_lookup(a).await { - split_url.host = SplitUrlHost::Hostname(host); - } - } - ( - format!( - "S{}{}", - split_url.port.unwrap_or(443), - split_url - .path - .map(|p| format!("/{}", p)) - .unwrap_or_default() - ), - split_url.host.to_string(), - ) - } - } - } - pub async fn to_url(&self) -> String { - match self { - DialInfo::UDP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr()) - .await - .map(|h| format!("udp://{}:{}", h, di.socket_address.port())) - .unwrap_or_else(|_| format!("udp://{}", di.socket_address)), - DialInfo::TCP(di) => intf::ptr_lookup(di.socket_address.to_ip_addr()) - .await - .map(|h| format!("tcp://{}:{}", h, di.socket_address.port())) - .unwrap_or_else(|_| format!("tcp://{}", di.socket_address)), - DialInfo::WS(di) => { - let mut split_url = SplitUrl::from_str(&format!("ws://{}", di.request)).unwrap(); - if let SplitUrlHost::IpAddr(a) = split_url.host { - if let Ok(host) = intf::ptr_lookup(a).await { - split_url.host = SplitUrlHost::Hostname(host); - } - } - split_url.to_string() - } - DialInfo::WSS(di) => { - let mut split_url = SplitUrl::from_str(&format!("wss://{}", di.request)).unwrap(); - if let SplitUrlHost::IpAddr(a) = split_url.host { - if let Ok(host) = intf::ptr_lookup(a).await { - split_url.host = SplitUrlHost::Hostname(host); - } - } - split_url.to_string() - } - } - } - - pub fn ordered_sequencing_sort(a: &DialInfo, b: &DialInfo) -> core::cmp::Ordering { - let ca = a.protocol_type().sort_order(Sequencing::EnsureOrdered); - let cb = b.protocol_type().sort_order(Sequencing::EnsureOrdered); - if ca < cb { - return core::cmp::Ordering::Less; - } - if ca > cb { - return core::cmp::Ordering::Greater; - } - match (a, b) { - (DialInfo::UDP(a), DialInfo::UDP(b)) => a.cmp(b), - (DialInfo::TCP(a), DialInfo::TCP(b)) => a.cmp(b), - (DialInfo::WS(a), DialInfo::WS(b)) => a.cmp(b), - (DialInfo::WSS(a), DialInfo::WSS(b)) => a.cmp(b), - _ => 
unreachable!(), - } - } -} - -impl MatchesDialInfoFilter for DialInfo { - fn matches_filter(&self, filter: &DialInfoFilter) -> bool { - if !filter.protocol_type_set.contains(self.protocol_type()) { - return false; - } - if !filter.address_type_set.contains(self.address_type()) { - return false; - } - true - } -} - -////////////////////////////////////////////////////////////////////////// - -/// Signed NodeInfo that can be passed around amongst peers and verifiable -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct SignedDirectNodeInfo { - pub node_info: NodeInfo, - pub timestamp: Timestamp, - pub signatures: Vec, -} -impl SignedDirectNodeInfo { - /// Returns a new SignedDirectNodeInfo that has its signatures validated. - /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. - /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. - pub fn new( - crypto: Crypto, - node_ids: &mut TypedKeySet, - node_info: NodeInfo, - timestamp: Timestamp, - typed_signatures: Vec, - ) -> Result { - let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?; - - // Verify the signatures that we can - let validated_node_ids = - crypto.verify_signatures(node_ids, &node_info_bytes, &typed_signatures)?; - *node_ids = validated_node_ids; - if node_ids.len() == 0 { - apibail_generic!("no valid node ids in direct node info"); - } - - Ok(Self { - node_info, - timestamp, - signatures: typed_signatures, - }) - } - - pub fn make_signatures( - crypto: Crypto, - typed_key_pairs: Vec, - node_info: NodeInfo, - ) -> Result { - let timestamp = get_aligned_timestamp(); - let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?; - let typed_signatures = - crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { - TypedSignature::new(kp.kind, s) - })?; - Ok(Self { - node_info, - timestamp, - signatures: typed_signatures, - }) - } - - fn make_signature_bytes( - node_info: &NodeInfo, - timestamp: Timestamp, - ) -> Result, VeilidAPIError> { - let mut node_info_bytes = Vec::new(); - - // Add nodeinfo to signature - let mut ni_msg = ::capnp::message::Builder::new_default(); - let mut ni_builder = ni_msg.init_root::(); - encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?; - node_info_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?); - - // Add timestamp to signature - node_info_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec()); - - Ok(node_info_bytes) - } - - pub fn with_no_signature(node_info: NodeInfo) -> Self { - Self { - node_info, - timestamp: get_aligned_timestamp(), - signatures: Vec::new(), - } - } - - pub fn has_any_signature(&self) -> bool { - !self.signatures.is_empty() - } -} - -/// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct SignedRelayedNodeInfo { - pub node_info: NodeInfo, - pub relay_ids: TypedKeySet, - pub relay_info: SignedDirectNodeInfo, - pub timestamp: Timestamp, - pub signatures: Vec, -} - -impl SignedRelayedNodeInfo { - /// Returns a new SignedRelayedNodeInfo that has its signatures validated. - /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. 
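// --- Editor's sketch (not part of the patch): combining ordered_sequencing_sort with
// the MatchesDialInfoFilter impl above: order candidates by sequencing preference, then
// keep only TCP entries.
fn sort_and_filter_sketch(mut dial_infos: Vec<DialInfo>) -> Vec<DialInfo> {
    // Under EnsureOrdered, connection-oriented protocols (TCP/WS/WSS) sort ahead of UDP
    dial_infos.sort_by(DialInfo::ordered_sequencing_sort);

    let filter = DialInfoFilter::all().with_protocol_type(ProtocolType::TCP);
    dial_infos
        .into_iter()
        .filter(|di| di.matches_filter(&filter))
        .collect()
}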
- /// All signatures are stored however, as this can be passed to other nodes that may be able to validate those signatures. - pub fn new( - crypto: Crypto, - node_ids: &mut TypedKeySet, - node_info: NodeInfo, - relay_ids: TypedKeySet, - relay_info: SignedDirectNodeInfo, - timestamp: Timestamp, - typed_signatures: Vec, - ) -> Result { - let node_info_bytes = - Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?; - let validated_node_ids = - crypto.verify_signatures(node_ids, &node_info_bytes, &typed_signatures)?; - *node_ids = validated_node_ids; - if node_ids.len() == 0 { - apibail_generic!("no valid node ids in relayed node info"); - } - - Ok(Self { - node_info, - relay_ids, - relay_info, - timestamp, - signatures: typed_signatures, - }) - } - - pub fn make_signatures( - crypto: Crypto, - typed_key_pairs: Vec, - node_info: NodeInfo, - relay_ids: TypedKeySet, - relay_info: SignedDirectNodeInfo, - ) -> Result { - let timestamp = get_aligned_timestamp(); - let node_info_bytes = - Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?; - let typed_signatures = - crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| { - TypedSignature::new(kp.kind, s) - })?; - Ok(Self { - node_info, - relay_ids, - relay_info, - timestamp, - signatures: typed_signatures, - }) - } - - fn make_signature_bytes( - node_info: &NodeInfo, - relay_ids: &[TypedKey], - relay_info: &SignedDirectNodeInfo, - timestamp: Timestamp, - ) -> Result, VeilidAPIError> { - let mut sig_bytes = Vec::new(); - - // Add nodeinfo to signature - let mut ni_msg = ::capnp::message::Builder::new_default(); - let mut ni_builder = ni_msg.init_root::(); - encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?; - sig_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?); - - // Add relay ids to signature - for relay_id in relay_ids { - let mut rid_msg = ::capnp::message::Builder::new_default(); - let mut rid_builder = rid_msg.init_root::(); - encode_typed_key(relay_id, &mut rid_builder); - sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?); - } - - // Add relay info to signature - let mut ri_msg = ::capnp::message::Builder::new_default(); - let mut ri_builder = ri_msg.init_root::(); - encode_signed_direct_node_info(relay_info, &mut ri_builder) - .map_err(VeilidAPIError::internal)?; - sig_bytes.append(&mut builder_to_vec(ri_msg).map_err(VeilidAPIError::internal)?); - - // Add timestamp to signature - sig_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec()); - - Ok(sig_bytes) - } - - pub fn has_any_signature(&self) -> bool { - !self.signatures.is_empty() - } -} - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum SignedNodeInfo { - Direct(SignedDirectNodeInfo), - Relayed(SignedRelayedNodeInfo), -} - -impl SignedNodeInfo { - pub fn has_any_signature(&self) -> bool { - match self { - SignedNodeInfo::Direct(d) => d.has_any_signature(), - SignedNodeInfo::Relayed(r) => r.has_any_signature(), - } - } - - pub fn timestamp(&self) -> Timestamp { - match self { - SignedNodeInfo::Direct(d) => d.timestamp, - SignedNodeInfo::Relayed(r) => r.timestamp, - } - } - pub fn node_info(&self) -> &NodeInfo { - match self { - SignedNodeInfo::Direct(d) => &d.node_info, - SignedNodeInfo::Relayed(r) => &r.node_info, - } - } - pub fn relay_ids(&self) -> TypedKeySet { - match self { - SignedNodeInfo::Direct(_) => 
TypedKeySet::new(), - SignedNodeInfo::Relayed(r) => r.relay_ids.clone(), - } - } - pub fn relay_info(&self) -> Option<&NodeInfo> { - match self { - SignedNodeInfo::Direct(_) => None, - SignedNodeInfo::Relayed(r) => Some(&r.relay_info.node_info), - } - } - pub fn relay_peer_info(&self) -> Option { - match self { - SignedNodeInfo::Direct(_) => None, - SignedNodeInfo::Relayed(r) => Some(PeerInfo::new( - r.relay_ids.clone(), - SignedNodeInfo::Direct(r.relay_info.clone()), - )), - } - } - pub fn has_any_dial_info(&self) -> bool { - self.node_info().has_dial_info() - || self - .relay_info() - .map(|relay_ni| relay_ni.has_dial_info()) - .unwrap_or_default() - } - - pub fn has_sequencing_matched_dial_info(&self, sequencing: Sequencing) -> bool { - // Check our dial info - for did in &self.node_info().dial_info_detail_list { - match sequencing { - Sequencing::NoPreference | Sequencing::PreferOrdered => return true, - Sequencing::EnsureOrdered => { - if did.dial_info.protocol_type().is_connection_oriented() { - return true; - } - } - } - } - // Check our relay if we have one - return self - .relay_info() - .map(|relay_ni| { - for did in &relay_ni.dial_info_detail_list { - match sequencing { - Sequencing::NoPreference | Sequencing::PreferOrdered => return true, - Sequencing::EnsureOrdered => { - if did.dial_info.protocol_type().is_connection_oriented() { - return true; - } - } - } - } - false - }) - .unwrap_or_default(); - } -} - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PeerInfo { - pub node_ids: TypedKeySet, - pub signed_node_info: SignedNodeInfo, -} - -impl PeerInfo { - pub fn new(node_ids: TypedKeySet, signed_node_info: SignedNodeInfo) -> Self { - assert!(node_ids.len() > 0 && node_ids.len() <= MAX_CRYPTO_KINDS); - Self { - node_ids, - signed_node_info, - } - } -} - -#[derive( - Copy, - Clone, - Debug, - PartialEq, - PartialOrd, - Eq, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PeerAddress { - protocol_type: ProtocolType, - #[serde(with = "json_as_string")] - socket_address: SocketAddress, -} - -impl PeerAddress { - pub fn new(socket_address: SocketAddress, protocol_type: ProtocolType) -> Self { - Self { - socket_address: socket_address.to_canonical(), - protocol_type, - } - } - - pub fn socket_address(&self) -> &SocketAddress { - &self.socket_address - } - - pub fn protocol_type(&self) -> ProtocolType { - self.protocol_type - } - - pub fn to_socket_addr(&self) -> SocketAddr { - self.socket_address.to_socket_addr() - } - - pub fn address_type(&self) -> AddressType { - self.socket_address.address_type() - } -} - -/// Represents the 5-tuple of an established connection -/// Not used to specify connections to create, that is reserved for DialInfo -/// -/// ConnectionDescriptors should never be from unspecified local addresses for connection oriented protocols -/// If the medium does not allow local addresses, None should have been used or 'new_no_local' -/// If we are specifying only a port, then the socket's 'local_address()' should have been used, since an -/// established connection is always from a real address to another real address. 
-#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct ConnectionDescriptor { - remote: PeerAddress, - local: Option, -} - -impl ConnectionDescriptor { - pub fn new(remote: PeerAddress, local: SocketAddress) -> Self { - assert!( - !remote.protocol_type().is_connection_oriented() || !local.address().is_unspecified() - ); - - Self { - remote, - local: Some(local), - } - } - pub fn new_no_local(remote: PeerAddress) -> Self { - Self { - remote, - local: None, - } - } - pub fn remote(&self) -> PeerAddress { - self.remote - } - pub fn remote_address(&self) -> &SocketAddress { - self.remote.socket_address() - } - pub fn local(&self) -> Option { - self.local - } - pub fn protocol_type(&self) -> ProtocolType { - self.remote.protocol_type - } - pub fn address_type(&self) -> AddressType { - self.remote.address_type() - } - pub fn make_dial_info_filter(&self) -> DialInfoFilter { - DialInfoFilter::all() - .with_protocol_type(self.protocol_type()) - .with_address_type(self.address_type()) - } -} - -impl MatchesDialInfoFilter for ConnectionDescriptor { - fn matches_filter(&self, filter: &DialInfoFilter) -> bool { - if !filter.protocol_type_set.contains(self.protocol_type()) { - return false; - } - if !filter.address_type_set.contains(self.address_type()) { - return false; - } - true - } -} - -////////////////////////////////////////////////////////////////////////// - -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct LatencyStats { - #[serde(with = "json_as_string")] - pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies - #[serde(with = "json_as_string")] - pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies - #[serde(with = "json_as_string")] - pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies -} - -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct TransferStats { - #[serde(with = "json_as_string")] - pub total: ByteCount, // total amount transferred ever - #[serde(with = "json_as_string")] - pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts - #[serde(with = "json_as_string")] - pub average: ByteCount, // average rate over the ROLLING_TRANSFERS_SIZE last amounts - #[serde(with = "json_as_string")] - pub minimum: ByteCount, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts -} - -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct TransferStatsDownUp { - pub down: TransferStats, - pub up: TransferStats, -} - -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct RPCStats { - pub messages_sent: u32, // number of rpcs that have been sent in the total_time range - pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range - pub questions_in_flight: 
u32, // number of questions issued that have yet to be answered - #[serde(with = "opt_json_as_string")] - pub last_question_ts: Option, // when the peer was last questioned (either successfully or not) and we wanted an answer - #[serde(with = "opt_json_as_string")] - pub last_seen_ts: Option, // when the peer was last seen for any reason, including when we first attempted to reach out to it - #[serde(with = "opt_json_as_string")] - pub first_consecutive_seen_ts: Option, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question) - pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability - pub failed_to_send: u32, // number of messages that have failed to send since we last successfully sent one -} - -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PeerStats { - #[serde(with = "json_as_string")] - pub time_added: Timestamp, // when the peer was added to the routing table - pub rpc_stats: RPCStats, // information about RPCs - pub latency: Option, // latencies for communications with the peer - pub transfer: TransferStatsDownUp, // Stats for communications with the peer -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum SignalInfo { - HolePunch { - // UDP Hole Punch Request - receipt: Vec, // Receipt to be returned after the hole punch - peer_info: PeerInfo, // Sender's peer info - }, - ReverseConnect { - // Reverse Connection Request - receipt: Vec, // Receipt to be returned by the reverse connection - peer_info: PeerInfo, // Sender's peer info - }, - // XXX: WebRTC -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -#[derive( - Copy, - Clone, - Debug, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum TunnelMode { - Raw, - Turn, -} - -#[derive( - Copy, - Clone, - Debug, - PartialOrd, - PartialEq, - Eq, - Ord, - Serialize, - Deserialize, - RkyvArchive, - RkyvSerialize, - RkyvDeserialize, -)] -#[archive_attr(repr(u8), derive(CheckBytes))] -pub enum TunnelError { - BadId, // Tunnel ID was rejected - NoEndpoint, // Endpoint was unreachable - RejectedMode, // Endpoint couldn't provide mode - NoCapacity, // Endpoint is full -} - -#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct TunnelEndpoint { - pub mode: TunnelMode, - pub description: String, // XXX: TODO -} - -impl Default for TunnelEndpoint { - fn default() -> Self { - Self { - mode: TunnelMode::Raw, - description: "".to_string(), - } - } -} - -#[derive( - Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct FullTunnel { - pub id: TunnelId, - pub timeout: TimestampDuration, - pub local: TunnelEndpoint, - pub remote: TunnelEndpoint, -} - -#[derive( - Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, -)] -#[archive_attr(repr(C), derive(CheckBytes))] -pub struct PartialTunnel { - pub id: 
TunnelId, - pub timeout: TimestampDuration, - pub local: TunnelEndpoint, -} diff --git a/veilid-core/src/veilid_api/aligned_u64.rs b/veilid-core/src/veilid_api/types/aligned_u64.rs similarity index 85% rename from veilid-core/src/veilid_api/aligned_u64.rs rename to veilid-core/src/veilid_api/types/aligned_u64.rs index 31c161aa..6a64b86d 100644 --- a/veilid-core/src/veilid_api/aligned_u64.rs +++ b/veilid-core/src/veilid_api/types/aligned_u64.rs @@ -120,3 +120,17 @@ impl AlignedU64 { Self(self.0.saturating_sub(rhs.0)) } } + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Microseconds since epoch +pub type Timestamp = AlignedU64; +pub fn get_aligned_timestamp() -> Timestamp { + get_timestamp().into() +} +/// Microseconds duration +pub type TimestampDuration = AlignedU64; +/// Request/Response matching id +pub type OperationId = AlignedU64; +/// Number of bytes +pub type ByteCount = AlignedU64; diff --git a/veilid-core/src/veilid_api/types/app_message_call.rs b/veilid-core/src/veilid_api/types/app_message_call.rs new file mode 100644 index 00000000..6f0805fe --- /dev/null +++ b/veilid-core/src/veilid_api/types/app_message_call.rs @@ -0,0 +1,65 @@ +use super::*; + +/// Direct statement blob passed to hosting application for processing +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidAppMessage { + /// Some(sender) if the message was sent directly, None if received via a private/safety route + #[serde(with = "opt_json_as_string")] + sender: Option, + /// The content of the message to deliver to the application + #[serde(with = "json_as_base64")] + message: Vec, +} + +impl VeilidAppMessage { + pub fn new(sender: Option, message: Vec) -> Self { + Self { sender, message } + } + + pub fn sender(&self) -> Option<&TypedKey> { + self.sender.as_ref() + } + pub fn message(&self) -> &[u8] { + &self.message + } +} + +/// Direct question blob passed to hosting application for processing to send an eventual AppReply +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidAppCall { + /// Some(sender) if the request was sent directly, None if received via a private/safety route + #[serde(with = "opt_json_as_string")] + sender: Option, + /// The content of the request to deliver to the application + #[serde(with = "json_as_base64")] + message: Vec, + /// The id to reply to + #[serde(with = "json_as_string")] + id: OperationId, +} + +impl VeilidAppCall { + pub fn new(sender: Option, message: Vec, id: OperationId) -> Self { + Self { + sender, + message, + id, + } + } + + pub fn sender(&self) -> Option<&TypedKey> { + self.sender.as_ref() + } + pub fn message(&self) -> &[u8] { + &self.message + } + pub fn id(&self) -> OperationId { + self.id + } +} diff --git a/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs b/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs new file mode 100644 index 00000000..1c4d114e --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/dht_record_descriptor.rs @@ -0,0 +1,56 @@ +use super::*; + +/// DHT Record Descriptor +#[derive( + Debug, + Clone, + PartialOrd, + Ord, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DHTRecordDescriptor { + /// 
DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ] + key: TypedKey, + /// The public key of the owner + owner: PublicKey, + /// If this key is being created: Some(the secret key of the owner) + /// If this key is just being opened: None + owner_secret: Option, + /// The schema in use associated with the key + schema: DHTSchema, +} + +impl DHTRecordDescriptor { + pub fn new( + key: TypedKey, + owner: PublicKey, + owner_secret: Option, + schema: DHTSchema, + ) -> Self { + Self { + key, + owner, + owner_secret, + schema, + } + } + + pub fn owner(&self) -> &PublicKey { + &self.owner + } + + pub fn owner_secret(&self) -> Option<&SecretKey> { + self.owner_secret.as_ref() + } + + pub fn schema(&self) -> &DHTSchema { + &self.schema + } +} diff --git a/veilid-core/src/veilid_api/types/dht/mod.rs b/veilid-core/src/veilid_api/types/dht/mod.rs new file mode 100644 index 00000000..3a830b47 --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/mod.rs @@ -0,0 +1,18 @@ +mod dht_record_descriptor; +mod schema; +mod value_data; +mod value_subkey_range_set; + +use super::*; + +pub use dht_record_descriptor::*; +pub use schema::*; +pub use value_data::*; +pub use value_subkey_range_set::*; + +/// Value subkey +pub type ValueSubkey = u32; +/// Value subkey range +pub type ValueSubkeyRange = (u32, u32); +/// Value sequence number +pub type ValueSeqNum = u32; diff --git a/veilid-core/src/veilid_api/types/dht/schema/dflt.rs b/veilid-core/src/veilid_api/types/dht/schema/dflt.rs new file mode 100644 index 00000000..bd9c0858 --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/schema/dflt.rs @@ -0,0 +1,84 @@ +use super::*; + +/// Default DHT Schema (DFLT) +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Ord, + PartialOrd, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DHTSchemaDFLT { + /// Owner subkey count + pub o_cnt: u16, +} + +impl DHTSchemaDFLT { + pub const FCC: [u8; 4] = *b"DFLT"; + pub const FIXED_SIZE: usize = 6; + + /// Build the data representation of the schema + pub fn compile(&self) -> Vec { + let mut out = Vec::::with_capacity(Self::FIXED_SIZE); + // kind + out.extend_from_slice(&Self::FCC); + // o_cnt + out.extend_from_slice(&self.o_cnt.to_le_bytes()); + out + } + + /// Get the number of subkeys this schema allocates + pub fn subkey_count(&self) -> usize { + self.o_cnt as usize + } + /// Get the data size of this schema beyond the size of the structure itself + pub fn data_size(&self) -> usize { + 0 + } + + /// Check a subkey value data against the schema + pub fn check_subkey_value_data( + &self, + owner: &PublicKey, + subkey: ValueSubkey, + value_data: &ValueData, + ) -> bool { + let subkey = subkey as usize; + + // Check is subkey is in owner range + if subkey < (self.o_cnt as usize) { + // Check value data has valid writer + if value_data.writer() == owner { + return true; + } + // Wrong writer + return false; + } + + // Subkey out of range + false + } +} + +impl TryFrom<&[u8]> for DHTSchemaDFLT { + type Error = VeilidAPIError; + fn try_from(b: &[u8]) -> Result { + if b.len() != Self::FIXED_SIZE { + apibail_generic!("invalid size"); + } + if &b[0..4] != &Self::FCC { + apibail_generic!("wrong fourcc"); + } + + let o_cnt = u16::from_le_bytes(b[4..6].try_into().map_err(VeilidAPIError::internal)?); + + Ok(Self { o_cnt }) + } +} diff --git a/veilid-core/src/veilid_api/types/dht/schema/mod.rs b/veilid-core/src/veilid_api/types/dht/schema/mod.rs new file mode 100644 index 00000000..b043afcc --- 
/dev/null +++ b/veilid-core/src/veilid_api/types/dht/schema/mod.rs @@ -0,0 +1,97 @@ +mod dflt; +mod smpl; + +use super::*; + +pub use dflt::*; +pub use smpl::*; + +/// Enum over all the supported DHT Schemas +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Ord, + PartialOrd, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +#[serde(tag = "kind")] +pub enum DHTSchema { + DFLT(DHTSchemaDFLT), + SMPL(DHTSchemaSMPL), +} + +impl DHTSchema { + pub fn dflt(o_cnt: u16) -> DHTSchema { + DHTSchema::DFLT(DHTSchemaDFLT { o_cnt }) + } + pub fn smpl(o_cnt: u16, members: Vec) -> DHTSchema { + DHTSchema::SMPL(DHTSchemaSMPL { o_cnt, members }) + } + + /// Build the data representation of the schema + pub fn compile(&self) -> Vec { + match self { + DHTSchema::DFLT(d) => d.compile(), + DHTSchema::SMPL(s) => s.compile(), + } + } + + /// Get the number of subkeys this schema allocates + pub fn subkey_count(&self) -> usize { + match self { + DHTSchema::DFLT(d) => d.subkey_count(), + DHTSchema::SMPL(s) => s.subkey_count(), + } + } + + /// Get the data size of this schema beyond the size of the structure itself + pub fn data_size(&self) -> usize { + match self { + DHTSchema::DFLT(d) => d.data_size(), + DHTSchema::SMPL(s) => s.data_size(), + } + } + + /// Check a subkey value data against the schema + pub fn check_subkey_value_data( + &self, + owner: &PublicKey, + subkey: ValueSubkey, + value_data: &ValueData, + ) -> bool { + match self { + DHTSchema::DFLT(d) => d.check_subkey_value_data(owner, subkey, value_data), + DHTSchema::SMPL(s) => s.check_subkey_value_data(owner, subkey, value_data), + } + } +} + +impl Default for DHTSchema { + fn default() -> Self { + Self::dflt(1) + } +} + +impl TryFrom<&[u8]> for DHTSchema { + type Error = VeilidAPIError; + fn try_from(b: &[u8]) -> Result { + if b.len() < 4 { + apibail_generic!("invalid size"); + } + let fcc: [u8; 4] = b[0..4].try_into().unwrap(); + match fcc { + DHTSchemaDFLT::FCC => Ok(DHTSchema::DFLT(DHTSchemaDFLT::try_from(b)?)), + DHTSchemaSMPL::FCC => Ok(DHTSchema::SMPL(DHTSchemaSMPL::try_from(b)?)), + _ => { + apibail_generic!("unknown fourcc"); + } + } + } +} diff --git a/veilid-core/src/veilid_api/types/dht/schema/smpl.rs b/veilid-core/src/veilid_api/types/dht/schema/smpl.rs new file mode 100644 index 00000000..90b20b86 --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/schema/smpl.rs @@ -0,0 +1,152 @@ +use super::*; + +/// Simple DHT Schema (SMPL) Member +#[derive( + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DHTSchemaSMPLMember { + /// Member key + pub m_key: PublicKey, + /// Member subkey count + pub m_cnt: u16, +} + +/// Simple DHT Schema (SMPL) +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Ord, + PartialOrd, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct DHTSchemaSMPL { + /// Owner subkey count + pub o_cnt: u16, + /// Members + pub members: Vec, +} + +impl DHTSchemaSMPL { + pub const FCC: [u8; 4] = *b"SMPL"; + pub const FIXED_SIZE: usize = 6; + + /// Build the data representation of the schema + pub fn compile(&self) -> Vec { + let mut out = Vec::::with_capacity( + Self::FIXED_SIZE + (self.members.len() * (PUBLIC_KEY_LENGTH + 2)), + ); + // kind + out.extend_from_slice(&Self::FCC); + // o_cnt + 
out.extend_from_slice(&self.o_cnt.to_le_bytes()); + // members + for m in &self.members { + // m_key + out.extend_from_slice(&m.m_key.bytes); + // m_cnt + out.extend_from_slice(&m.m_cnt.to_le_bytes()); + } + out + } + + /// Get the number of subkeys this schema allocates + pub fn subkey_count(&self) -> usize { + self.members + .iter() + .fold(self.o_cnt as usize, |acc, x| acc + (x.m_cnt as usize)) + } + + /// Get the data size of this schema beyond the size of the structure itself + pub fn data_size(&self) -> usize { + self.members.len() * mem::size_of::() + } + + /// Check a subkey value data against the schema + pub fn check_subkey_value_data( + &self, + owner: &PublicKey, + subkey: ValueSubkey, + value_data: &ValueData, + ) -> bool { + let mut cur_subkey = subkey as usize; + + // Check is subkey is in owner range + if cur_subkey < (self.o_cnt as usize) { + // Check value data has valid writer + if value_data.writer() == owner { + return true; + } + // Wrong writer + return false; + } + cur_subkey -= self.o_cnt as usize; + + // Check all member ranges + for m in &self.members { + // Check if subkey is in member range + if cur_subkey < (m.m_cnt as usize) { + // Check value data has valid writer + if value_data.writer() == &m.m_key { + return true; + } + // Wrong writer + return false; + } + cur_subkey -= m.m_cnt as usize; + } + + // Subkey out of range + false + } +} + +impl TryFrom<&[u8]> for DHTSchemaSMPL { + type Error = VeilidAPIError; + fn try_from(b: &[u8]) -> Result { + if b.len() != Self::FIXED_SIZE { + apibail_generic!("invalid size"); + } + if &b[0..4] != &Self::FCC { + apibail_generic!("wrong fourcc"); + } + if (b.len() - Self::FIXED_SIZE) % (PUBLIC_KEY_LENGTH + 2) != 0 { + apibail_generic!("invalid member length"); + } + + let o_cnt = u16::from_le_bytes(b[4..6].try_into().map_err(VeilidAPIError::internal)?); + + let members_len = (b.len() - Self::FIXED_SIZE) / (PUBLIC_KEY_LENGTH + 2); + let mut members: Vec = Vec::with_capacity(members_len); + for n in 0..members_len { + let mstart = Self::FIXED_SIZE + n * (PUBLIC_KEY_LENGTH + 2); + let m_key = PublicKey::try_from(&b[mstart..mstart + PUBLIC_KEY_LENGTH]) + .map_err(VeilidAPIError::internal)?; + let m_cnt = u16::from_le_bytes( + b[mstart + PUBLIC_KEY_LENGTH..mstart + PUBLIC_KEY_LENGTH + 2] + .try_into() + .map_err(VeilidAPIError::internal)?, + ); + members.push(DHTSchemaSMPLMember { m_key, m_cnt }); + } + + Ok(Self { o_cnt, members }) + } +} diff --git a/veilid-core/src/veilid_api/types/dht/value_data.rs b/veilid-core/src/veilid_api/types/dht/value_data.rs new file mode 100644 index 00000000..734e735d --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/value_data.rs @@ -0,0 +1,54 @@ +use super::*; + +#[derive( + Clone, + Debug, + Default, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct ValueData { + seq: ValueSeqNum, + data: Vec, + writer: PublicKey, +} +impl ValueData { + pub const MAX_LEN: usize = 32768; + + pub fn new(data: Vec, writer: PublicKey) -> Self { + assert!(data.len() <= Self::MAX_LEN); + Self { + seq: 0, + data, + writer, + } + } + pub fn new_with_seq(seq: ValueSeqNum, data: Vec, writer: PublicKey) -> Self { + assert!(data.len() <= Self::MAX_LEN); + Self { seq, data, writer } + } + + pub fn seq(&self) -> ValueSeqNum { + self.seq + } + + pub fn writer(&self) -> &PublicKey { + &self.writer + } + + pub fn data(&self) -> &[u8] { + &self.data + } + + pub fn total_size(&self) -> usize 
{ + mem::size_of::() + self.data.len() + } +} diff --git a/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs b/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs new file mode 100644 index 00000000..3dd40f67 --- /dev/null +++ b/veilid-core/src/veilid_api/types/dht/value_subkey_range_set.rs @@ -0,0 +1,51 @@ +use super::*; +use core::ops::{Deref, DerefMut}; +use range_set_blaze::*; + +#[derive( + Clone, + Debug, + Default, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct ValueSubkeyRangeSet { + #[with(RkyvRangeSetBlaze)] + #[serde(with = "serialize_range_set_blaze")] + data: RangeSetBlaze, +} + +impl ValueSubkeyRangeSet { + pub fn new() -> Self { + Self { + data: Default::default(), + } + } + pub fn single(value: ValueSubkey) -> Self { + let mut data = RangeSetBlaze::new(); + data.insert(value); + Self { data } + } +} + +impl Deref for ValueSubkeyRangeSet { + type Target = RangeSetBlaze; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for ValueSubkeyRangeSet { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} diff --git a/veilid-core/src/veilid_api/types/fourcc.rs b/veilid-core/src/veilid_api/types/fourcc.rs new file mode 100644 index 00000000..edce2b03 --- /dev/null +++ b/veilid-core/src/veilid_api/types/fourcc.rs @@ -0,0 +1,65 @@ +use super::*; + +/// FOURCC code +#[derive( + Copy, + Default, + Clone, + Hash, + PartialOrd, + Ord, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq, Hash))] +pub struct FourCC(pub [u8; 4]); + +impl From<[u8; 4]> for FourCC { + fn from(b: [u8; 4]) -> Self { + Self(b) + } +} + +impl From for FourCC { + fn from(u: u32) -> Self { + Self(u.to_be_bytes()) + } +} + +impl From for u32 { + fn from(u: FourCC) -> Self { + u32::from_be_bytes(u.0) + } +} + +impl TryFrom<&[u8]> for FourCC { + type Error = VeilidAPIError; + fn try_from(b: &[u8]) -> Result { + Ok(Self(b.try_into().map_err(VeilidAPIError::generic)?)) + } +} + +impl fmt::Display for FourCC { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", String::from_utf8_lossy(&self.0)) + } +} +impl fmt::Debug for FourCC { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", String::from_utf8_lossy(&self.0)) + } +} + +impl FromStr for FourCC { + type Err = VeilidAPIError; + fn from_str(s: &str) -> Result { + Ok(Self( + s.as_bytes().try_into().map_err(VeilidAPIError::generic)?, + )) + } +} diff --git a/veilid-core/src/veilid_api/types/mod.rs b/veilid-core/src/veilid_api/types/mod.rs new file mode 100644 index 00000000..6dea3db5 --- /dev/null +++ b/veilid-core/src/veilid_api/types/mod.rs @@ -0,0 +1,21 @@ +mod aligned_u64; +mod app_message_call; +mod dht; +mod fourcc; +mod safety; +mod stats; +mod tunnel; +mod veilid_log; +mod veilid_state; + +use super::*; + +pub use aligned_u64::*; +pub use app_message_call::*; +pub use dht::*; +pub use fourcc::*; +pub use safety::*; +pub use stats::*; +pub use tunnel::*; +pub use veilid_log::*; +pub use veilid_state::*; diff --git a/veilid-core/src/veilid_api/types/safety.rs b/veilid-core/src/veilid_api/types/safety.rs new file mode 100644 index 00000000..27cb46ba --- /dev/null +++ b/veilid-core/src/veilid_api/types/safety.rs @@ -0,0 +1,125 @@ +use super::*; + +// Ordering here matters, 
>= is used to check strength of sequencing requirement +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum Sequencing { + NoPreference, + PreferOrdered, + EnsureOrdered, +} + +impl Default for Sequencing { + fn default() -> Self { + Self::NoPreference + } +} + +// Ordering here matters, >= is used to check strength of stability requirement +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum Stability { + LowLatency, + Reliable, +} + +impl Default for Stability { + fn default() -> Self { + Self::LowLatency + } +} + +/// The choice of safety route to include in compiled routes +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum SafetySelection { + /// Don't use a safety route, only specify the sequencing preference + Unsafe(Sequencing), + /// Use a safety route and parameters specified by a SafetySpec + Safe(SafetySpec), +} + +impl SafetySelection { + pub fn get_sequencing(&self) -> Sequencing { + match self { + SafetySelection::Unsafe(seq) => *seq, + SafetySelection::Safe(ss) => ss.sequencing, + } + } +} + +impl Default for SafetySelection { + fn default() -> Self { + Self::Unsafe(Sequencing::NoPreference) + } +} + +/// Options for safety routes (sender privacy) +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct SafetySpec { + /// preferred safety route set id if it still exists + pub preferred_route: Option, + /// must be greater than 0 + pub hop_count: usize, + /// prefer reliability over speed + pub stability: Stability, + /// prefer connection-oriented sequenced protocols + pub sequencing: Sequencing, +} diff --git a/veilid-core/src/veilid_api/types/stats.rs b/veilid-core/src/veilid_api/types/stats.rs new file mode 100644 index 00000000..8ea79e11 --- /dev/null +++ b/veilid-core/src/veilid_api/types/stats.rs @@ -0,0 +1,113 @@ +use super::*; + +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct LatencyStats { + #[serde(with = "json_as_string")] + pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies + #[serde(with = "json_as_string")] + pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies + #[serde(with = "json_as_string")] + pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies +} + +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct TransferStats { + #[serde(with = "json_as_string")] + pub total: ByteCount, // total amount transferred ever + #[serde(with = "json_as_string")] + pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts + #[serde(with = 
"json_as_string")] + pub average: ByteCount, // average rate over the ROLLING_TRANSFERS_SIZE last amounts + #[serde(with = "json_as_string")] + pub minimum: ByteCount, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts +} + +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct TransferStatsDownUp { + pub down: TransferStats, + pub up: TransferStats, +} + +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct RPCStats { + pub messages_sent: u32, // number of rpcs that have been sent in the total_time range + pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range + pub questions_in_flight: u32, // number of questions issued that have yet to be answered + #[serde(with = "opt_json_as_string")] + pub last_question_ts: Option, // when the peer was last questioned (either successfully or not) and we wanted an answer + #[serde(with = "opt_json_as_string")] + pub last_seen_ts: Option, // when the peer was last seen for any reason, including when we first attempted to reach out to it + #[serde(with = "opt_json_as_string")] + pub first_consecutive_seen_ts: Option, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question) + pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability + pub failed_to_send: u32, // number of messages that have failed to send since we last successfully sent one +} + +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PeerStats { + #[serde(with = "json_as_string")] + pub time_added: Timestamp, // when the peer was added to the routing table + pub rpc_stats: RPCStats, // information about RPCs + pub latency: Option, // latencies for communications with the peer + pub transfer: TransferStatsDownUp, // Stats for communications with the peer +} diff --git a/veilid-core/src/veilid_api/types/tunnel.rs b/veilid-core/src/veilid_api/types/tunnel.rs new file mode 100644 index 00000000..968c7695 --- /dev/null +++ b/veilid-core/src/veilid_api/types/tunnel.rs @@ -0,0 +1,83 @@ +use super::*; + +/// Tunnel identifier +pub type TunnelId = AlignedU64; + +#[derive( + Copy, + Clone, + Debug, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum TunnelMode { + Raw, + Turn, +} + +#[derive( + Copy, + Clone, + Debug, + PartialOrd, + PartialEq, + Eq, + Ord, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum TunnelError { + BadId, // Tunnel ID was rejected + NoEndpoint, // Endpoint was unreachable + RejectedMode, // Endpoint couldn't provide mode + NoCapacity, // Endpoint is full +} + +#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct TunnelEndpoint { + pub mode: TunnelMode, + pub description: String, // XXX: TODO +} + +impl Default for TunnelEndpoint { + fn default() -> Self { + Self { + mode: TunnelMode::Raw, + description: 
"".to_string(), + } + } +} + +#[derive( + Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct FullTunnel { + pub id: TunnelId, + pub timeout: TimestampDuration, + pub local: TunnelEndpoint, + pub remote: TunnelEndpoint, +} + +#[derive( + Clone, Debug, Default, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PartialTunnel { + pub id: TunnelId, + pub timeout: TimestampDuration, + pub local: TunnelEndpoint, +} diff --git a/veilid-core/src/veilid_api/types/veilid_log.rs b/veilid-core/src/veilid_api/types/veilid_log.rs new file mode 100644 index 00000000..5eab945c --- /dev/null +++ b/veilid-core/src/veilid_api/types/veilid_log.rs @@ -0,0 +1,88 @@ +use super::*; + +/// Log level for VeilidCore +#[derive( + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Copy, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum VeilidLogLevel { + Error = 1, + Warn, + Info, + Debug, + Trace, +} + +impl VeilidLogLevel { + pub fn from_tracing_level(level: tracing::Level) -> VeilidLogLevel { + match level { + tracing::Level::ERROR => VeilidLogLevel::Error, + tracing::Level::WARN => VeilidLogLevel::Warn, + tracing::Level::INFO => VeilidLogLevel::Info, + tracing::Level::DEBUG => VeilidLogLevel::Debug, + tracing::Level::TRACE => VeilidLogLevel::Trace, + } + } + pub fn from_log_level(level: log::Level) -> VeilidLogLevel { + match level { + log::Level::Error => VeilidLogLevel::Error, + log::Level::Warn => VeilidLogLevel::Warn, + log::Level::Info => VeilidLogLevel::Info, + log::Level::Debug => VeilidLogLevel::Debug, + log::Level::Trace => VeilidLogLevel::Trace, + } + } + pub fn to_tracing_level(&self) -> tracing::Level { + match self { + Self::Error => tracing::Level::ERROR, + Self::Warn => tracing::Level::WARN, + Self::Info => tracing::Level::INFO, + Self::Debug => tracing::Level::DEBUG, + Self::Trace => tracing::Level::TRACE, + } + } + pub fn to_log_level(&self) -> log::Level { + match self { + Self::Error => log::Level::Error, + Self::Warn => log::Level::Warn, + Self::Info => log::Level::Info, + Self::Debug => log::Level::Debug, + Self::Trace => log::Level::Trace, + } + } +} + +impl fmt::Display for VeilidLogLevel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + let text = match self { + Self::Error => "ERROR", + Self::Warn => "WARN", + Self::Info => "INFO", + Self::Debug => "DEBUG", + Self::Trace => "TRACE", + }; + write!(f, "{}", text) + } +} + +/// A VeilidCore log message with optional backtrace +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidLog { + pub log_level: VeilidLogLevel, + pub message: String, + pub backtrace: Option, +} diff --git a/veilid-core/src/veilid_api/types/veilid_state.rs b/veilid-core/src/veilid_api/types/veilid_state.rs new file mode 100644 index 00000000..09f21a24 --- /dev/null +++ b/veilid-core/src/veilid_api/types/veilid_state.rs @@ -0,0 +1,144 @@ +use super::*; + +/// Attachment abstraction for network 'signal strength' +#[derive( + Debug, + PartialEq, + Eq, + Clone, + Copy, + Serialize, + Deserialize, + RkyvArchive, + RkyvSerialize, + RkyvDeserialize, +)] +#[archive_attr(repr(u8), derive(CheckBytes))] +pub enum AttachmentState { + Detached, + Attaching, + 
AttachedWeak, + AttachedGood, + AttachedStrong, + FullyAttached, + OverAttached, + Detaching, +} + +impl fmt::Display for AttachmentState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + let out = match self { + AttachmentState::Attaching => "attaching".to_owned(), + AttachmentState::AttachedWeak => "attached_weak".to_owned(), + AttachmentState::AttachedGood => "attached_good".to_owned(), + AttachmentState::AttachedStrong => "attached_strong".to_owned(), + AttachmentState::FullyAttached => "fully_attached".to_owned(), + AttachmentState::OverAttached => "over_attached".to_owned(), + AttachmentState::Detaching => "detaching".to_owned(), + AttachmentState::Detached => "detached".to_owned(), + }; + write!(f, "{}", out) + } +} + +impl TryFrom for AttachmentState { + type Error = (); + + fn try_from(s: String) -> Result { + Ok(match s.as_str() { + "attaching" => AttachmentState::Attaching, + "attached_weak" => AttachmentState::AttachedWeak, + "attached_good" => AttachmentState::AttachedGood, + "attached_strong" => AttachmentState::AttachedStrong, + "fully_attached" => AttachmentState::FullyAttached, + "over_attached" => AttachmentState::OverAttached, + "detaching" => AttachmentState::Detaching, + "detached" => AttachmentState::Detached, + _ => return Err(()), + }) + } +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidStateAttachment { + pub state: AttachmentState, + pub public_internet_ready: bool, + pub local_network_ready: bool, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct PeerTableData { + pub node_ids: Vec, + pub peer_address: String, + pub peer_stats: PeerStats, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidStateNetwork { + pub started: bool, + #[serde(with = "json_as_string")] + pub bps_down: ByteCount, + #[serde(with = "json_as_string")] + pub bps_up: ByteCount, + pub peers: Vec, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidRouteChange { + pub dead_routes: Vec, + pub dead_remote_routes: Vec, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidStateConfig { + pub config: VeilidConfigInner, +} + +#[derive( + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize, +)] +#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidValueChange { + key: TypedKey, + subkeys: Vec, + count: u32, + value: ValueData, +} + +#[derive(Debug, Clone, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] +#[archive_attr(repr(u8), derive(CheckBytes))] +#[serde(tag = "kind")] +pub enum VeilidUpdate { + Log(VeilidLog), + AppMessage(VeilidAppMessage), + AppCall(VeilidAppCall), + Attachment(VeilidStateAttachment), + Network(VeilidStateNetwork), + Config(VeilidStateConfig), + RouteChange(VeilidRouteChange), + ValueChange(VeilidValueChange), + Shutdown, +} + +#[derive(Debug, Clone, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)] 
+#[archive_attr(repr(C), derive(CheckBytes))] +pub struct VeilidState { + pub attachment: VeilidStateAttachment, + pub network: VeilidStateNetwork, + pub config: VeilidStateConfig, +} diff --git a/veilid-core/src/veilid_config.rs b/veilid-core/src/veilid_config.rs index ce5f07c5..68c06e6f 100644 --- a/veilid-core/src/veilid_config.rs +++ b/veilid-core/src/veilid_config.rs @@ -1,9 +1,7 @@ use crate::*; -use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize}; -use serde::*; //////////////////////////////////////////////////////////////////////////////////////////////// -pub type ConfigCallbackReturn = Result, VeilidAPIError>; +pub type ConfigCallbackReturn = VeilidAPIResult>; pub type ConfigCallback = Arc ConfigCallbackReturn + Send + Sync>; /// Enable and configure HTTPS access to the Veilid node @@ -277,19 +275,25 @@ pub struct VeilidConfigTLS { RkyvDeserialize, )] pub struct VeilidConfigDHT { - pub resolve_node_timeout_ms: Option, + pub max_find_node_count: u32, + pub resolve_node_timeout_ms: u32, pub resolve_node_count: u32, pub resolve_node_fanout: u32, - pub max_find_node_count: u32, - pub get_value_timeout_ms: Option, + pub get_value_timeout_ms: u32, pub get_value_count: u32, pub get_value_fanout: u32, - pub set_value_timeout_ms: Option, + pub set_value_timeout_ms: u32, pub set_value_count: u32, pub set_value_fanout: u32, pub min_peer_count: u32, pub min_peer_refresh_time_ms: u32, pub validate_dial_info_receipt_time_ms: u32, + pub local_subkey_cache_size: u32, + pub local_max_subkey_cache_memory_mb: u32, + pub remote_subkey_cache_size: u32, + pub remote_max_records: u32, + pub remote_max_subkey_cache_memory_mb: u32, + pub remote_max_storage_space_mb: u32, } /// Configure RPC @@ -425,8 +429,10 @@ pub struct VeilidConfigBlockStore { pub struct VeilidConfigProtectedStore { pub allow_insecure_fallback: bool, pub always_use_insecure_storage: bool, - pub insecure_fallback_directory: String, + pub directory: String, pub delete: bool, + pub device_encryption_key_password: String, + pub new_device_encryption_key_password: Option, } #[derive( @@ -581,7 +587,7 @@ impl VeilidConfig { &mut self, config: String, update_cb: UpdateCallback, - ) -> Result<(), VeilidAPIError> { + ) -> VeilidAPIResult<()> { self.update_cb = Some(update_cb); self.with_mut(|inner| { @@ -590,11 +596,7 @@ impl VeilidConfig { }) } - pub fn setup( - &mut self, - cb: ConfigCallback, - update_cb: UpdateCallback, - ) -> Result<(), VeilidAPIError> { + pub fn setup(&mut self, cb: ConfigCallback, update_cb: UpdateCallback) -> VeilidAPIResult<()> { self.update_cb = Some(update_cb); self.with_mut(|inner| { // Simple config transformation @@ -630,8 +632,10 @@ impl VeilidConfig { get_config!(inner.block_store.delete); get_config!(inner.protected_store.allow_insecure_fallback); get_config!(inner.protected_store.always_use_insecure_storage); - get_config!(inner.protected_store.insecure_fallback_directory); + get_config!(inner.protected_store.directory); get_config!(inner.protected_store.delete); + get_config!(inner.protected_store.device_encryption_key_password); + get_config!(inner.protected_store.new_device_encryption_key_password); get_config!(inner.network.connection_initial_timeout_ms); get_config!(inner.network.connection_inactivity_timeout_ms); get_config!(inner.network.max_connections_per_ip4); @@ -649,10 +653,10 @@ impl VeilidConfig { get_config!(inner.network.routing_table.limit_attached_strong); get_config!(inner.network.routing_table.limit_attached_good); 
get_config!(inner.network.routing_table.limit_attached_weak); + get_config!(inner.network.dht.max_find_node_count); get_config!(inner.network.dht.resolve_node_timeout_ms); get_config!(inner.network.dht.resolve_node_count); get_config!(inner.network.dht.resolve_node_fanout); - get_config!(inner.network.dht.max_find_node_count); get_config!(inner.network.dht.get_value_timeout_ms); get_config!(inner.network.dht.get_value_count); get_config!(inner.network.dht.get_value_fanout); @@ -662,6 +666,12 @@ impl VeilidConfig { get_config!(inner.network.dht.min_peer_count); get_config!(inner.network.dht.min_peer_refresh_time_ms); get_config!(inner.network.dht.validate_dial_info_receipt_time_ms); + get_config!(inner.network.dht.local_subkey_cache_size); + get_config!(inner.network.dht.local_max_subkey_cache_memory_mb); + get_config!(inner.network.dht.remote_subkey_cache_size); + get_config!(inner.network.dht.remote_max_records); + get_config!(inner.network.dht.remote_max_subkey_cache_memory_mb); + get_config!(inner.network.dht.remote_max_storage_space_mb); get_config!(inner.network.rpc.concurrency); get_config!(inner.network.rpc.queue_size); get_config!(inner.network.rpc.max_timestamp_behind_ms); @@ -724,13 +734,16 @@ impl VeilidConfig { // Remove secrets safe_cfg.network.routing_table.node_id_secret = TypedSecretSet::new(); + safe_cfg.protected_store.device_encryption_key_password = "".to_owned(); + safe_cfg.protected_store.new_device_encryption_key_password = None; + safe_cfg } - pub fn with_mut(&self, f: F) -> Result + pub fn with_mut(&self, f: F) -> VeilidAPIResult where - F: FnOnce(&mut VeilidConfigInner) -> Result, + F: FnOnce(&mut VeilidConfigInner) -> VeilidAPIResult, { let out = { let inner = &mut *self.inner.write(); @@ -754,7 +767,7 @@ impl VeilidConfig { Ok(out) } - pub fn get_key_json(&self, key: &str) -> Result { + pub fn get_key_json(&self, key: &str) -> VeilidAPIResult { let c = self.get(); // Generate json from whole config @@ -777,7 +790,7 @@ impl VeilidConfig { Ok(out.to_string()) } } - pub fn set_key_json(&self, key: &str, value: &str) -> Result<(), VeilidAPIError> { + pub fn set_key_json(&self, key: &str, value: &str) -> VeilidAPIResult<()> { self.with_mut(|c| { // Split key into path parts let keypath: Vec<&str> = key.split('.').collect(); @@ -814,7 +827,7 @@ impl VeilidConfig { }) } - fn validate(inner: &VeilidConfigInner) -> Result<(), VeilidAPIError> { + fn validate(inner: &VeilidConfigInner) -> VeilidAPIResult<()> { if inner.program_name.is_empty() { apibail_generic!("Program name must not be empty in 'program_name'"); } @@ -923,8 +936,8 @@ impl VeilidConfig { async fn init_node_id( &self, vcrypto: CryptoSystemVersion, - protected_store: intf::ProtectedStore, - ) -> Result<(TypedKey, TypedSecret), VeilidAPIError> { + table_store: TableStore, + ) -> VeilidAPIResult<(TypedKey, TypedSecret)> { let ck = vcrypto.kind(); let mut node_id = self.inner.read().network.routing_table.node_id.get(ck); let mut node_id_secret = self @@ -935,45 +948,36 @@ impl VeilidConfig { .node_id_secret .get(ck); - // See if node id was previously stored in the protected store + // See if node id was previously stored in the table store + let config_table = table_store.open("__veilid_config", 1).await?; + + let table_key_node_id = format!("node_id_{}", ck); + let table_key_node_id_secret = format!("node_id_secret_{}", ck); + if node_id.is_none() { - debug!("pulling node_id_{} from storage", ck); - if let Some(s) = protected_store - .load_user_secret_string(format!("node_id_{}", ck)) + debug!("pulling {} 
from storage", table_key_node_id); + if let Ok(Some(stored_node_id)) = config_table + .load_json::(0, table_key_node_id.as_bytes()) .await - .map_err(VeilidAPIError::internal)? { - debug!("node_id_{} found in storage", ck); - node_id = match TypedKey::from_str(s.as_str()) { - Ok(v) => Some(v), - Err(_) => { - debug!("node id in protected store is not valid"); - None - } - } + debug!("{} found in storage", table_key_node_id); + node_id = Some(stored_node_id); } else { - debug!("node_id_{} not found in storage", ck); + debug!("{} not found in storage", table_key_node_id); } } // See if node id secret was previously stored in the protected store if node_id_secret.is_none() { - debug!("pulling node id secret from storage"); - if let Some(s) = protected_store - .load_user_secret_string(format!("node_id_secret_{}", ck)) + debug!("pulling {} from storage", table_key_node_id_secret); + if let Ok(Some(stored_node_id_secret)) = config_table + .load_json::(0, table_key_node_id_secret.as_bytes()) .await - .map_err(VeilidAPIError::internal)? { - debug!("node_id_secret_{} found in storage", ck); - node_id_secret = match TypedSecret::from_str(s.as_str()) { - Ok(v) => Some(v), - Err(_) => { - debug!("node id secret in protected store is not valid"); - None - } - } + debug!("{} found in storage", table_key_node_id_secret); + node_id_secret = Some(stored_node_id_secret); } else { - debug!("node_id_secret_{} not found in storage", ck); + debug!("{} not found in storage", table_key_node_id_secret); } } @@ -997,14 +1001,12 @@ impl VeilidConfig { info!("Node Id: {}", node_id); // Save the node id / secret in storage - protected_store - .save_user_secret_string(format!("node_id_{}", ck), node_id.to_string()) - .await - .map_err(VeilidAPIError::internal)?; - protected_store - .save_user_secret_string(format!("node_id_secret_{}", ck), node_id_secret.to_string()) - .await - .map_err(VeilidAPIError::internal)?; + config_table + .store_json(0, table_key_node_id.as_bytes(), &node_id) + .await?; + config_table + .store_json(0, table_key_node_id_secret.as_bytes(), &node_id_secret) + .await?; Ok((node_id, node_id_secret)) } @@ -1015,8 +1017,8 @@ impl VeilidConfig { pub async fn init_node_ids( &self, crypto: Crypto, - protected_store: intf::ProtectedStore, - ) -> Result<(), VeilidAPIError> { + table_store: TableStore, + ) -> VeilidAPIResult<()> { let mut out_node_id = TypedKeySet::new(); let mut out_node_id_secret = TypedSecretSet::new(); @@ -1031,8 +1033,7 @@ impl VeilidConfig { (TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret)) }; #[cfg(not(test))] - let (node_id, node_id_secret) = - self.init_node_id(vcrypto, protected_store.clone()).await?; + let (node_id, node_id_secret) = self.init_node_id(vcrypto, table_store.clone()).await?; // Save for config out_node_id.add(node_id); diff --git a/veilid-core/src/watcher_table.rs b/veilid-core/src/watcher_table.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/veilid-core/tests/web.rs b/veilid-core/tests/web.rs index 5024dfd8..f8a2b6fd 100644 --- a/veilid-core/tests/web.rs +++ b/veilid-core/tests/web.rs @@ -31,55 +31,73 @@ pub fn setup() -> () { } #[wasm_bindgen_test] -async fn run_test_host_interface() { +async fn wasm_test_host_interface() { setup(); test_host_interface::test_all().await; } #[wasm_bindgen_test] -async fn run_test_dht_key() { +async fn wasm_test_types() { setup(); - test_dht_key::test_all().await; + test_types::test_all().await; } #[wasm_bindgen_test] -async fn run_test_veilid_core() { +async fn wasm_test_veilid_core() { setup(); 
test_veilid_core::test_all().await; } #[wasm_bindgen_test] -async fn test_veilid_config() { +async fn wasm_test_veilid_config() { setup(); test_veilid_config::test_all().await; } #[wasm_bindgen_test] -async fn run_test_connection_table() { +async fn wasm_test_connection_table() { setup(); test_connection_table::test_all().await; } #[wasm_bindgen_test] -async fn exec_test_table_store() { +async fn wasm_test_signed_node_info() { + setup(); + test_signed_node_info::test_all().await; +} + +#[wasm_bindgen_test] +async fn wasm_test_table_store() { setup(); test_table_store::test_all().await; } #[wasm_bindgen_test] -async fn exec_test_protected_store() { +async fn wasm_test_protected_store() { setup(); test_protected_store::test_all().await; } #[wasm_bindgen_test] -async fn exec_test_crypto() { +async fn wasm_test_crypto() { setup(); test_crypto::test_all().await; } #[wasm_bindgen_test] -async fn exec_test_envelope_receipt() { +async fn wasm_test_envelope_receipt() { setup(); test_envelope_receipt::test_all().await; } + +#[wasm_bindgen_test] +async fn wasm_test_serialize_rkyv() { + setup(); + test_serialize_rkyv::test_all().await; +} + +#[wasm_bindgen_test] +async fn wasm_test_routing_table_serialize() { + setup(); + test_routing_table_serialize::test_all().await; +} diff --git a/veilid-flutter/example/.gitignore b/veilid-flutter/example/.gitignore index 0fa6b675..a1345d01 100644 --- a/veilid-flutter/example/.gitignore +++ b/veilid-flutter/example/.gitignore @@ -32,7 +32,6 @@ /build/ # Web related -lib/generated_plugin_registrant.dart # Symbolication related app.*.symbols diff --git a/veilid-flutter/example/lib/app.dart b/veilid-flutter/example/lib/app.dart index 00e6600a..06576424 100644 --- a/veilid-flutter/example/lib/app.dart +++ b/veilid-flutter/example/lib/app.dart @@ -1,4 +1,5 @@ import 'dart:async'; +import 'dart:convert'; import 'package:flutter/material.dart'; import 'package:flutter/services.dart'; @@ -97,11 +98,11 @@ class _MyAppState extends State with UiLoggy { if (update is VeilidLog) { await processLog(update); } else if (update is VeilidAppMessage) { - loggy.info("AppMessage: ${update.json}"); + loggy.info("AppMessage: ${jsonEncode(update)}"); } else if (update is VeilidAppCall) { - loggy.info("AppCall: ${update.json}"); + loggy.info("AppCall: ${jsonEncode(update)}"); } else { - loggy.trace("Update: ${update.json}"); + loggy.trace("Update: ${jsonEncode(update)}"); } } } diff --git a/veilid-flutter/example/lib/log_terminal.dart b/veilid-flutter/example/lib/log_terminal.dart index b19d4af6..b9c8eacd 100644 --- a/veilid-flutter/example/lib/log_terminal.dart +++ b/veilid-flutter/example/lib/log_terminal.dart @@ -1,6 +1,3 @@ -import 'dart:convert'; -import 'dart:io'; - import 'package:flutter/material.dart'; import 'package:flutter/services.dart'; import 'package:xterm/xterm.dart'; diff --git a/veilid-flutter/example/lib/main.dart b/veilid-flutter/example/lib/main.dart index b93e91bd..08dfb159 100644 --- a/veilid-flutter/example/lib/main.dart +++ b/veilid-flutter/example/lib/main.dart @@ -2,7 +2,6 @@ import 'dart:async'; import 'package:flutter/material.dart'; import 'package:flutter/foundation.dart'; -import 'package:veilid/veilid.dart'; import 'package:flutter_acrylic/flutter_acrylic.dart'; import 'veilid_theme.dart'; diff --git a/veilid-flutter/example/lib/veilid_init.dart b/veilid-flutter/example/lib/veilid_init.dart index 7d0f8fb3..a7d6b5b7 100644 --- a/veilid-flutter/example/lib/veilid_init.dart +++ b/veilid-flutter/example/lib/veilid_init.dart @@ -14,7 +14,7 @@ void 
veilidInit() { logsInConsole: false), api: VeilidWASMConfigLoggingApi( enabled: true, level: VeilidConfigLogLevel.info))); - Veilid.instance.initializeVeilidCore(platformConfig.json); + Veilid.instance.initializeVeilidCore(platformConfig.toJson()); } else { var platformConfig = VeilidFFIConfig( logging: VeilidFFIConfigLogging( @@ -29,6 +29,6 @@ void veilidInit() { serviceName: "VeilidExample"), api: VeilidFFIConfigLoggingApi( enabled: true, level: VeilidConfigLogLevel.info))); - Veilid.instance.initializeVeilidCore(platformConfig.json); + Veilid.instance.initializeVeilidCore(platformConfig.toJson()); } } diff --git a/veilid-flutter/example/macos/Flutter/GeneratedPluginRegistrant.swift b/veilid-flutter/example/macos/Flutter/GeneratedPluginRegistrant.swift index 298c79e7..bcc47afb 100644 --- a/veilid-flutter/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/veilid-flutter/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -5,12 +5,12 @@ import FlutterMacOS import Foundation -import flutter_acrylic -import path_provider_macos +import macos_window_utils +import path_provider_foundation import veilid func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { - FlutterAcrylicPlugin.register(with: registry.registrar(forPlugin: "FlutterAcrylicPlugin")) + MacOSWindowUtilsPlugin.register(with: registry.registrar(forPlugin: "MacOSWindowUtilsPlugin")) PathProviderPlugin.register(with: registry.registrar(forPlugin: "PathProviderPlugin")) VeilidPlugin.register(with: registry.registrar(forPlugin: "VeilidPlugin")) } diff --git a/veilid-flutter/example/macos/Podfile b/veilid-flutter/example/macos/Podfile index b5c9c07c..404ad4ea 100644 --- a/veilid-flutter/example/macos/Podfile +++ b/veilid-flutter/example/macos/Podfile @@ -1,4 +1,4 @@ -platform :osx, '10.12.2' +platform :osx, '10.14.6' # CocoaPods analytics sends network stats synchronously affecting flutter build latency. 
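Note on the serialization convention in the app.dart and veilid_init.dart hunks above: the old `.json` getters are replaced by Dart's standard `toJson()` / `jsonEncode()` pattern, with the platform config handed to `initializeVeilidCore()` as the map returned by `toJson()` and updates rendered through `jsonEncode()`. The following is only a minimal sketch of that convention; the logging levels are illustrative, and the assumption that `toJson()` yields the map `initializeVeilidCore()` expects is inferred from the diff, not from a published API reference.

import 'dart:convert';

import 'package:veilid/veilid.dart';

// Sketch only: mirrors the veilid_init.dart hunk above.
// Field values are illustrative, not the example app's exact settings.
void initVeilidForWeb() {
  final platformConfig = VeilidWASMConfig(
      logging: VeilidWASMConfigLogging(
          performance: VeilidWASMConfigLoggingPerformance(
              enabled: true,
              level: VeilidConfigLogLevel.debug,
              logsInTimings: true,
              logsInConsole: false),
          api: VeilidWASMConfigLoggingApi(
              enabled: true, level: VeilidConfigLogLevel.info)));
  // The config is now passed as the Map produced by toJson(),
  // replacing the removed `.json` getter.
  Veilid.instance.initializeVeilidCore(platformConfig.toJson());
}

// Updates are likewise rendered with jsonEncode(), which calls the
// object's toJson() under the hood.
void logUpdate(VeilidUpdate update) {
  print("Update: ${jsonEncode(update)}");
}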
ENV['COCOAPODS_DISABLE_STATS'] = 'true' diff --git a/veilid-flutter/example/macos/Podfile.lock b/veilid-flutter/example/macos/Podfile.lock index 38ac07f6..dad885d8 100644 --- a/veilid-flutter/example/macos/Podfile.lock +++ b/veilid-flutter/example/macos/Podfile.lock @@ -1,34 +1,35 @@ PODS: - - flutter_acrylic (0.1.0): - - FlutterMacOS - FlutterMacOS (1.0.0) - - path_provider_macos (0.0.1): + - macos_window_utils (1.0.0): + - FlutterMacOS + - path_provider_foundation (0.0.1): + - Flutter - FlutterMacOS - veilid (0.0.1): - FlutterMacOS DEPENDENCIES: - - flutter_acrylic (from `Flutter/ephemeral/.symlinks/plugins/flutter_acrylic/macos`) - FlutterMacOS (from `Flutter/ephemeral`) - - path_provider_macos (from `Flutter/ephemeral/.symlinks/plugins/path_provider_macos/macos`) + - macos_window_utils (from `Flutter/ephemeral/.symlinks/plugins/macos_window_utils/macos`) + - path_provider_foundation (from `Flutter/ephemeral/.symlinks/plugins/path_provider_foundation/darwin`) - veilid (from `Flutter/ephemeral/.symlinks/plugins/veilid/macos`) EXTERNAL SOURCES: - flutter_acrylic: - :path: Flutter/ephemeral/.symlinks/plugins/flutter_acrylic/macos FlutterMacOS: :path: Flutter/ephemeral - path_provider_macos: - :path: Flutter/ephemeral/.symlinks/plugins/path_provider_macos/macos + macos_window_utils: + :path: Flutter/ephemeral/.symlinks/plugins/macos_window_utils/macos + path_provider_foundation: + :path: Flutter/ephemeral/.symlinks/plugins/path_provider_foundation/darwin veilid: :path: Flutter/ephemeral/.symlinks/plugins/veilid/macos SPEC CHECKSUMS: - flutter_acrylic: c3df24ae52ab6597197837ce59ef2a8542640c17 - FlutterMacOS: ae6af50a8ea7d6103d888583d46bd8328a7e9811 - path_provider_macos: 3c0c3b4b0d4a76d2bf989a913c2de869c5641a19 - veilid: ef97d3a2d5fda3b25a4017eae65c37afa8035203 + FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24 + macos_window_utils: 933f91f64805e2eb91a5bd057cf97cd097276663 + path_provider_foundation: eaf5b3e458fc0e5fbb9940fb09980e853fe058b8 + veilid: a54f57b7bcf0e4e072fe99272d76ca126b2026d0 -PODFILE CHECKSUM: baac1aaddb7c3a00c396723592a2ffb396a7fed7 +PODFILE CHECKSUM: 73d2f470b1d889e27fcfda1d6e6efec66f98af3f -COCOAPODS: 1.11.3 +COCOAPODS: 1.12.1 diff --git a/veilid-flutter/example/macos/Runner.xcodeproj/project.pbxproj b/veilid-flutter/example/macos/Runner.xcodeproj/project.pbxproj index 2211295b..d8402d5b 100644 --- a/veilid-flutter/example/macos/Runner.xcodeproj/project.pbxproj +++ b/veilid-flutter/example/macos/Runner.xcodeproj/project.pbxproj @@ -3,7 +3,7 @@ archiveVersion = 1; classes = { }; - objectVersion = 51; + objectVersion = 54; objects = { /* Begin PBXAggregateTarget section */ @@ -261,6 +261,7 @@ /* Begin PBXShellScriptBuildPhase section */ 3399D490228B24CF009A79C7 /* ShellScript */ = { isa = PBXShellScriptBuildPhase; + alwaysOutOfDate = 1; buildActionMask = 2147483647; files = ( ); @@ -409,7 +410,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.12.2; + MACOSX_DEPLOYMENT_TARGET = 10.14.6; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; @@ -496,7 +497,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.12.2; + MACOSX_DEPLOYMENT_TARGET = 10.14.6; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; @@ -543,7 +544,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - 
MACOSX_DEPLOYMENT_TARGET = 10.12.2; + MACOSX_DEPLOYMENT_TARGET = 10.14.6; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; diff --git a/veilid-flutter/example/pubspec.lock b/veilid-flutter/example/pubspec.lock index 74aca102..1daa306a 100644 --- a/veilid-flutter/example/pubspec.lock +++ b/veilid-flutter/example/pubspec.lock @@ -5,93 +5,122 @@ packages: dependency: "direct main" description: name: ansicolor - url: "https://pub.dartlang.org" + sha256: "607f8fa9786f392043f169898923e6c59b4518242b68b8862eb8a8b7d9c30b4a" + url: "https://pub.dev" source: hosted version: "2.0.1" async: dependency: transitive description: name: async - url: "https://pub.dartlang.org" + sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" source: hosted - version: "2.9.0" + version: "2.11.0" boolean_selector: dependency: transitive description: name: boolean_selector - url: "https://pub.dartlang.org" + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" source: hosted - version: "2.1.0" + version: "2.1.1" change_case: dependency: transitive description: name: change_case - url: "https://pub.dartlang.org" + sha256: f4e08feaa845e75e4f5ad2b0e15f24813d7ea6c27e7b78252f0c17f752cf1157 + url: "https://pub.dev" source: hosted - version: "1.0.2" + version: "1.1.0" characters: dependency: transitive description: name: characters - url: "https://pub.dartlang.org" + sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.3.0" + charcode: + dependency: transitive + description: + name: charcode + sha256: fb98c0f6d12c920a02ee2d998da788bca066ca5f148492b7085ee23372b12306 + url: "https://pub.dev" + source: hosted + version: "1.3.1" clock: dependency: transitive description: name: clock - url: "https://pub.dartlang.org" + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" source: hosted version: "1.1.1" collection: dependency: transitive description: name: collection - url: "https://pub.dartlang.org" + sha256: "4a07be6cb69c84d677a6c3096fcf960cc3285a8330b4603e0d463d15d9bd934c" + url: "https://pub.dev" source: hosted - version: "1.16.0" + version: "1.17.1" convert: dependency: transitive description: name: convert - url: "https://pub.dartlang.org" + sha256: "0f08b14755d163f6e2134cb58222dd25ea2a2ee8a195e53983d57c075324d592" + url: "https://pub.dev" source: hosted version: "3.1.1" cupertino_icons: dependency: "direct main" description: name: cupertino_icons - url: "https://pub.dartlang.org" + sha256: e35129dc44c9118cee2a5603506d823bab99c68393879edb440e0090d07586be + url: "https://pub.dev" source: hosted version: "1.0.5" equatable: dependency: transitive description: name: equatable - url: "https://pub.dartlang.org" + sha256: c2b87cb7756efdf69892005af546c56c0b5037f54d2a88269b4f347a505e3ca2 + url: "https://pub.dev" source: hosted version: "2.0.5" fake_async: dependency: transitive description: name: fake_async - url: "https://pub.dartlang.org" + sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" + url: "https://pub.dev" source: hosted version: "1.3.1" ffi: dependency: transitive description: name: ffi - url: "https://pub.dartlang.org" + sha256: ed5337a5660c506388a9f012be0288fb38b49020ce2b45fe1f8b8323fe429f99 + url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "2.0.2" file: dependency: transitive description: name: file - url: 
"https://pub.dartlang.org" + sha256: "1b92bec4fc2a72f59a8e15af5f52cd441e4a7860b49499d69dfa817af20e925d" + url: "https://pub.dev" source: hosted version: "6.1.4" + file_utils: + dependency: transitive + description: + name: file_utils + sha256: d1e64389a22649095c8405c9e177272caf05139255931c9ff30d53b5c9bcaa34 + url: "https://pub.dev" + source: hosted + version: "1.0.1" flutter: dependency: "direct main" description: flutter @@ -101,14 +130,16 @@ packages: dependency: "direct main" description: name: flutter_acrylic - url: "https://pub.dartlang.org" + sha256: "5aea2c850c560c07717a62434ea9cb1565c2282dc78dd2e60f98a78c05f13d7b" + url: "https://pub.dev" source: hosted - version: "1.0.0+2" + version: "1.1.2" flutter_lints: dependency: "direct dev" description: name: flutter_lints - url: "https://pub.dartlang.org" + sha256: aeb0b80a8b3709709c9cc496cdc027c5b3216796bc0af0ce1007eaf24464fd4c + url: "https://pub.dev" source: hosted version: "2.0.1" flutter_test: @@ -121,139 +152,166 @@ packages: description: flutter source: sdk version: "0.0.0" + globbing: + dependency: transitive + description: + name: globbing + sha256: "4f89cfaf6fa74c9c1740a96259da06bd45411ede56744e28017cc534a12b6e2d" + url: "https://pub.dev" + source: hosted + version: "1.0.0" js: dependency: transitive description: name: js - url: "https://pub.dartlang.org" + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + url: "https://pub.dev" source: hosted - version: "0.6.4" + version: "0.6.7" lints: dependency: transitive description: name: lints - url: "https://pub.dartlang.org" + sha256: "6b0206b0bf4f04961fc5438198ccb3a885685cd67d4d4a32cc20ad7f8adbe015" + url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "2.1.0" loggy: dependency: "direct main" description: name: loggy - url: "https://pub.dartlang.org" + sha256: "981e03162bbd3a5a843026f75f73d26e4a0d8aa035ae060456ca7b30dfd1e339" + url: "https://pub.dev" source: hosted version: "2.0.3" + macos_window_utils: + dependency: transitive + description: + name: macos_window_utils + sha256: "510de576b5432dd9ef9e4c258abcc021c6dfbb17a78a344688848a6784b352b8" + url: "https://pub.dev" + source: hosted + version: "1.1.2" matcher: dependency: transitive description: name: matcher - url: "https://pub.dartlang.org" + sha256: "6501fbd55da300384b768785b83e5ce66991266cec21af89ab9ae7f5ce1c4cbb" + url: "https://pub.dev" source: hosted - version: "0.12.12" + version: "0.12.15" material_color_utilities: dependency: transitive description: name: material_color_utilities - url: "https://pub.dartlang.org" + sha256: d92141dc6fe1dad30722f9aa826c7fbc896d021d792f80678280601aff8cf724 + url: "https://pub.dev" source: hosted - version: "0.1.5" + version: "0.2.0" meta: dependency: transitive description: name: meta - url: "https://pub.dartlang.org" + sha256: "3c74dbf8763d36539f114c799d8a2d87343b5067e9d796ca22b5eb8437090ee3" + url: "https://pub.dev" source: hosted - version: "1.8.0" + version: "1.9.1" path: dependency: "direct main" description: name: path - url: "https://pub.dartlang.org" + sha256: "8829d8a55c13fc0e37127c29fedf290c102f4e40ae94ada574091fe0ff96c917" + url: "https://pub.dev" source: hosted - version: "1.8.2" + version: "1.8.3" path_provider: dependency: "direct main" description: name: path_provider - url: "https://pub.dartlang.org" + sha256: "3087813781ab814e4157b172f1a11c46be20179fcc9bea043e0fba36bc0acaa2" + url: "https://pub.dev" source: hosted - version: "2.0.11" + version: "2.0.15" path_provider_android: dependency: transitive description: name: 
path_provider_android - url: "https://pub.dartlang.org" + sha256: "2cec049d282c7f13c594b4a73976b0b4f2d7a1838a6dd5aaf7bd9719196bee86" + url: "https://pub.dev" source: hosted - version: "2.0.22" - path_provider_ios: + version: "2.0.27" + path_provider_foundation: dependency: transitive description: - name: path_provider_ios - url: "https://pub.dartlang.org" + name: path_provider_foundation + sha256: "1995d88ec2948dac43edf8fe58eb434d35d22a2940ecee1a9fefcd62beee6eb3" + url: "https://pub.dev" source: hosted - version: "2.0.11" + version: "2.2.3" path_provider_linux: dependency: transitive description: name: path_provider_linux - url: "https://pub.dartlang.org" + sha256: "2ae08f2216225427e64ad224a24354221c2c7907e448e6e0e8b57b1eb9f10ad1" + url: "https://pub.dev" source: hosted - version: "2.1.7" - path_provider_macos: - dependency: transitive - description: - name: path_provider_macos - url: "https://pub.dartlang.org" - source: hosted - version: "2.0.6" + version: "2.1.10" path_provider_platform_interface: dependency: transitive description: name: path_provider_platform_interface - url: "https://pub.dartlang.org" + sha256: "57585299a729335f1298b43245842678cb9f43a6310351b18fb577d6e33165ec" + url: "https://pub.dev" source: hosted - version: "2.0.5" + version: "2.0.6" path_provider_windows: dependency: transitive description: name: path_provider_windows - url: "https://pub.dartlang.org" + sha256: d3f80b32e83ec208ac95253e0cd4d298e104fbc63cb29c5c69edaed43b0c69d6 + url: "https://pub.dev" source: hosted - version: "2.1.3" + version: "2.1.6" platform: dependency: transitive description: name: platform - url: "https://pub.dartlang.org" + sha256: "4a451831508d7d6ca779f7ac6e212b4023dd5a7d08a27a63da33756410e32b76" + url: "https://pub.dev" source: hosted version: "3.1.0" platform_info: dependency: transitive description: name: platform_info - url: "https://pub.dartlang.org" + sha256: "012e73712166cf0b56d3eb95c0d33491f56b428c169eca385f036448474147e4" + url: "https://pub.dev" source: hosted version: "3.2.0" plugin_platform_interface: dependency: transitive description: name: plugin_platform_interface - url: "https://pub.dartlang.org" + sha256: "6a2128648c854906c53fa8e33986fc0247a1116122f9534dd20e3ab9e16a32bc" + url: "https://pub.dev" source: hosted - version: "2.1.3" + version: "2.1.4" process: dependency: transitive description: name: process - url: "https://pub.dartlang.org" + sha256: "53fd8db9cec1d37b0574e12f07520d582019cb6c44abf5479a01505099a34a09" + url: "https://pub.dev" source: hosted version: "4.2.4" quiver: dependency: transitive description: name: quiver - url: "https://pub.dartlang.org" + sha256: b1c1ac5ce6688d77f65f3375a9abb9319b3cb32486bdc7a1e0fdf004d7ba4e47 + url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "3.2.1" sky_engine: dependency: transitive description: flutter @@ -263,58 +321,74 @@ packages: dependency: transitive description: name: source_span - url: "https://pub.dartlang.org" + sha256: dd904f795d4b4f3b870833847c461801f6750a9fa8e61ea5ac53f9422b31f250 + url: "https://pub.dev" source: hosted - version: "1.9.0" + version: "1.9.1" stack_trace: dependency: transitive description: name: stack_trace - url: "https://pub.dartlang.org" + sha256: c3c7d8edb15bee7f0f74debd4b9c5f3c2ea86766fe4178eb2a18eb30a0bdaed5 + url: "https://pub.dev" source: hosted - version: "1.10.0" + version: "1.11.0" stream_channel: dependency: transitive description: name: stream_channel - url: "https://pub.dartlang.org" + sha256: "83615bee9045c1d322bbbd1ba209b7a749c2cbcdcb3fdd1df8eb488b3279c1c8" + url: 
"https://pub.dev" source: hosted - version: "2.1.0" + version: "2.1.1" string_scanner: dependency: transitive description: name: string_scanner - url: "https://pub.dartlang.org" + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" source: hosted - version: "1.1.1" + version: "1.2.0" + system_info2: + dependency: transitive + description: + name: system_info2 + sha256: af2f948e3f31a3367a049932a8ad59faf0063ecf836a020d975b9f41566d8bc9 + url: "https://pub.dev" + source: hosted + version: "3.0.2" term_glyph: dependency: transitive description: name: term_glyph - url: "https://pub.dartlang.org" + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" source: hosted version: "1.2.1" test_api: dependency: transitive description: name: test_api - url: "https://pub.dartlang.org" + sha256: eb6ac1540b26de412b3403a163d919ba86f6a973fe6cc50ae3541b80092fdcfb + url: "https://pub.dev" source: hosted - version: "0.4.12" + version: "0.5.1" typed_data: dependency: transitive description: name: typed_data - url: "https://pub.dartlang.org" + sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c + url: "https://pub.dev" source: hosted - version: "1.3.1" + version: "1.3.2" vector_math: dependency: transitive description: name: vector_math - url: "https://pub.dartlang.org" + sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" + url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.4" veilid: dependency: "direct main" description: @@ -326,23 +400,26 @@ packages: dependency: transitive description: name: win32 - url: "https://pub.dartlang.org" + sha256: "5a751eddf9db89b3e5f9d50c20ab8612296e4e8db69009788d6c8b060a84191c" + url: "https://pub.dev" source: hosted - version: "3.1.2" + version: "4.1.4" xdg_directories: dependency: transitive description: name: xdg_directories - url: "https://pub.dartlang.org" + sha256: ee1505df1426458f7f60aac270645098d318a8b4766d85fde75f76f2e21807d1 + url: "https://pub.dev" source: hosted - version: "0.2.0+2" + version: "1.0.0" xterm: dependency: "direct main" description: name: xterm - url: "https://pub.dartlang.org" + sha256: "6a02b15d03152b8186e12790902ff28c8a932fc441e89fa7255a7491661a8e69" + url: "https://pub.dev" source: hosted - version: "3.4.0" + version: "3.5.0" sdks: - dart: ">=2.18.0 <3.0.0" - flutter: ">=3.0.0" + dart: ">=3.0.0 <4.0.0" + flutter: ">=3.7.0" diff --git a/veilid-flutter/example/pubspec.yaml b/veilid-flutter/example/pubspec.yaml index a781e59b..a3ac6915 100644 --- a/veilid-flutter/example/pubspec.yaml +++ b/veilid-flutter/example/pubspec.yaml @@ -7,7 +7,7 @@ version: 1.0.0+1 publish_to: "none" # Remove this line if you wish to publish to pub.dev environment: - sdk: ">=2.17.0 <3.0.0" + sdk: '>=3.0.0 <4.0.0' # Dependencies specify other packages that your package needs in order to work. 
# To automatically upgrade your package dependencies to the latest versions diff --git a/veilid-flutter/lib/base64url_no_pad.dart b/veilid-flutter/lib/base64url_no_pad.dart deleted file mode 100644 index 81f97a2d..00000000 --- a/veilid-flutter/lib/base64url_no_pad.dart +++ /dev/null @@ -1,15 +0,0 @@ -import 'dart:convert'; -import 'dart:typed_data'; - -String base64UrlNoPadEncode(List bytes) { - var x = base64Url.encode(bytes); - while (x.endsWith('=')) { - x = x.substring(0, x.length - 1); - } - return x; -} - -Uint8List base64UrlNoPadDecode(String source) { - source = base64.normalize(source); - return base64.decode(source); -} diff --git a/veilid-flutter/lib/default_config.dart b/veilid-flutter/lib/default_config.dart index 90a5f245..30f08c68 100644 --- a/veilid-flutter/lib/default_config.dart +++ b/veilid-flutter/lib/default_config.dart @@ -1,8 +1,53 @@ import 'package:flutter/foundation.dart' show kIsWeb; import 'package:path_provider/path_provider.dart'; import 'package:path/path.dart' as p; +import 'package:system_info2/system_info2.dart' as sysinfo; import 'veilid.dart'; +const int megaByte = 1024 * 1024; + +int getLocalSubkeyCacheSize() { + if (kIsWeb) { + return 128; + } + return 1024; +} + +int getLocalMaxSubkeyCacheMemoryMb() { + if (kIsWeb) { + return 256; + } + return sysinfo.SysInfo.getTotalPhysicalMemory() ~/ 32 ~/ megaByte; +} + +int getRemoteSubkeyCacheSize() { + if (kIsWeb) { + return 64; + } + return 128; +} + +int getRemoteMaxRecords() { + if (kIsWeb) { + return 64; + } + return 128; +} + +int getRemoteMaxSubkeyCacheMemoryMb() { + if (kIsWeb) { + return 256; + } + return sysinfo.SysInfo.getTotalPhysicalMemory() ~/ 32 ~/ megaByte; +} + +int getRemoteMaxStorageSpaceMb() { + if (kIsWeb) { + return 128; + } + return 256; +} + Future getDefaultVeilidConfig(String programName) async { return VeilidConfig( programName: programName, @@ -19,8 +64,10 @@ Future getDefaultVeilidConfig(String programName) async { protectedStore: VeilidConfigProtectedStore( allowInsecureFallback: false, alwaysUseInsecureStorage: false, - insecureFallbackDirectory: "", + directory: "", delete: false, + deviceEncryptionKey: "", + newDeviceEncryptionKey: null, ), tableStore: VeilidConfigTableStore( directory: kIsWeb @@ -63,25 +110,30 @@ Future getDefaultVeilidConfig(String programName) async { queueSize: 1024, maxTimestampBehindMs: 10000, maxTimestampAheadMs: 10000, - timeoutMs: 10000, + timeoutMs: 5000, maxRouteHopCount: 4, defaultRouteHopCount: 1, ), dht: VeilidConfigDHT( - resolveNodeTimeoutMs: null, - resolveNodeCount: 20, - resolveNodeFanout: 3, - maxFindNodeCount: 20, - getValueTimeoutMs: null, - getValueCount: 20, - getValueFanout: 3, - setValueTimeoutMs: null, - setValueCount: 20, - setValueFanout: 5, - minPeerCount: 20, - minPeerRefreshTimeMs: 2000, - validateDialInfoReceiptTimeMs: 2000, - ), + resolveNodeTimeoutMs: 10000, + resolveNodeCount: 20, + resolveNodeFanout: 3, + maxFindNodeCount: 20, + getValueTimeoutMs: 10000, + getValueCount: 20, + getValueFanout: 3, + setValueTimeoutMs: 10000, + setValueCount: 20, + setValueFanout: 5, + minPeerCount: 20, + minPeerRefreshTimeMs: 2000, + validateDialInfoReceiptTimeMs: 2000, + localSubkeyCacheSize: getLocalSubkeyCacheSize(), + localMaxSubkeyCacheMemoryMb: getLocalMaxSubkeyCacheMemoryMb(), + remoteSubkeyCacheSize: getRemoteSubkeyCacheSize(), + remoteMaxRecords: getRemoteMaxRecords(), + remoteMaxSubkeyCacheMemoryMb: getRemoteMaxSubkeyCacheMemoryMb(), + remoteMaxStorageSpaceMb: getRemoteMaxStorageSpaceMb()), upnp: true, detectAddressChanges: true, 
restrictedNatRetries: 0, diff --git a/veilid-flutter/lib/routing_context.dart b/veilid-flutter/lib/routing_context.dart new file mode 100644 index 00000000..39956503 --- /dev/null +++ b/veilid-flutter/lib/routing_context.dart @@ -0,0 +1,272 @@ +import 'dart:async'; +import 'dart:typed_data'; + +import 'package:change_case/change_case.dart'; + +import 'veilid_encoding.dart'; +import 'veilid.dart'; + +////////////////////////////////////// + +////////////////////////////////////// +/// DHT Schema + +abstract class DHTSchema { + factory DHTSchema.fromJson(dynamic json) { + switch (json["kind"]) { + case "DFLT": + { + return DHTSchemaDFLT(oCnt: json["o_cnt"]); + } + case "SMPL": + { + return DHTSchemaSMPL( + oCnt: json["o_cnt"], + members: List.from( + json['members'].map((j) => DHTSchemaMember.fromJson(j)))); + } + default: + { + throw VeilidAPIExceptionInternal( + "Invalid VeilidAPIException type: ${json['kind']}"); + } + } + } + Map toJson(); +} + +class DHTSchemaDFLT implements DHTSchema { + final int oCnt; + // + DHTSchemaDFLT({ + required this.oCnt, + }) { + if (oCnt < 0 || oCnt > 65535) { + throw VeilidAPIExceptionInvalidArgument( + "value out of range", "oCnt", oCnt.toString()); + } + } + + @override + Map toJson() { + return { + 'kind': "DFLT", + 'o_cnt': oCnt, + }; + } +} + +class DHTSchemaMember { + PublicKey mKey; + int mCnt; + + DHTSchemaMember({ + required this.mKey, + required this.mCnt, + }) { + if (mCnt < 0 || mCnt > 65535) { + throw VeilidAPIExceptionInvalidArgument( + "value out of range", "mCnt", mCnt.toString()); + } + } + + Map toJson() { + return { + 'm_key': mKey, + 'm_cnt': mCnt, + }; + } + + DHTSchemaMember.fromJson(dynamic json) + : mKey = json['m_key'], + mCnt = json['m_cnt']; +} + +class DHTSchemaSMPL implements DHTSchema { + final int oCnt; + final List members; + // + DHTSchemaSMPL({ + required this.oCnt, + required this.members, + }) { + if (oCnt < 0 || oCnt > 65535) { + throw VeilidAPIExceptionInvalidArgument( + "value out of range", "oCnt", oCnt.toString()); + } + } + @override + Map toJson() { + return { + 'kind': "SMPL", + 'o_cnt': oCnt, + 'members': members.map((p) => p.toJson()).toList(), + }; + } +} + +////////////////////////////////////// +/// DHTRecordDescriptor + +class DHTRecordDescriptor { + TypedKey key; + PublicKey owner; + PublicKey? 
ownerSecret; + DHTSchema schema; + + DHTRecordDescriptor({ + required this.key, + required this.owner, + this.ownerSecret, + required this.schema, + }); + + Map toJson() { + return { + 'key': key.toString(), + 'owner': owner, + 'owner_secret': ownerSecret, + 'schema': schema.toJson(), + }; + } + + DHTRecordDescriptor.fromJson(dynamic json) + : key = TypedKey.fromString(json['key']), + owner = json['owner'], + ownerSecret = json['owner_secret'], + schema = DHTSchema.fromJson(json['schema']); +} + +////////////////////////////////////// +/// ValueSubkeyRange + +class ValueSubkeyRange { + final int low; + final int high; + + ValueSubkeyRange({ + required this.low, + required this.high, + }) { + if (low < 0 || low > high) { + throw VeilidAPIExceptionInvalidArgument( + "invalid range", "low", low.toString()); + } + if (high < 0) { + throw VeilidAPIExceptionInvalidArgument( + "invalid range", "high", high.toString()); + } + } + + ValueSubkeyRange.fromJson(dynamic json) + : low = json[0], + high = json[1] { + if ((json as List).length != 2) { + throw VeilidAPIExceptionInvalidArgument( + "not a pair of integers", "json", json.toString()); + } + } + + List toJson() { + return [low, high]; + } +} + +////////////////////////////////////// +/// ValueData + +class ValueData { + final int seq; + final Uint8List data; + final PublicKey writer; + + ValueData({ + required this.seq, + required this.data, + required this.writer, + }); + + ValueData.fromJson(dynamic json) + : seq = json['seq'], + data = base64UrlNoPadDecode(json['data']), + writer = json['writer']; + + Map toJson() { + return {'seq': seq, 'data': base64UrlNoPadEncode(data), 'writer': writer}; + } +} + +/// Stability + +enum Stability { + lowLatency, + reliable; + + String toJson() { + return name.toPascalCase(); + } + + factory Stability.fromJson(String j) { + return Stability.values.byName(j.toCamelCase()); + } +} + +////////////////////////////////////// +/// Sequencing + +enum Sequencing { + noPreference, + preferOrdered, + ensureOrdered; + + String toJson() { + return name.toPascalCase(); + } + + factory Sequencing.fromJson(String j) { + return Sequencing.values.byName(j.toCamelCase()); + } +} + +////////////////////////////////////// +/// RouteBlob +class RouteBlob { + final String routeId; + final Uint8List blob; + + RouteBlob(this.routeId, this.blob); + + RouteBlob.fromJson(dynamic json) + : routeId = json['route_id'], + blob = base64UrlNoPadDecode(json['blob']); + + Map toJson() { + return {'route_id': routeId, 'blob': base64UrlNoPadEncode(blob)}; + } +} + +////////////////////////////////////// +/// VeilidRoutingContext + +abstract class VeilidRoutingContext { + // Modifiers + VeilidRoutingContext withPrivacy(); + VeilidRoutingContext withCustomPrivacy(Stability stability); + VeilidRoutingContext withSequencing(Sequencing sequencing); + + // App call/message + Future appCall(String target, Uint8List request); + Future appMessage(String target, Uint8List message); + + // DHT Operations + Future createDHTRecord( + CryptoKind kind, DHTSchema schema); + Future openDHTRecord(TypedKey key, KeyPair? 
writer); + Future closeDHTRecord(TypedKey key); + Future deleteDHTRecord(TypedKey key); + Future getDHTValue(TypedKey key, int subkey, bool forceRefresh); + Future setDHTValue(TypedKey key, int subkey, Uint8List data); + Future watchDHTValues( + TypedKey key, ValueSubkeyRange subkeys, Timestamp expiration, int count); + Future cancelDHTWatch(TypedKey key, ValueSubkeyRange subkeys); +} diff --git a/veilid-flutter/lib/veilid.dart b/veilid-flutter/lib/veilid.dart index 8d9a8c90..650513a7 100644 --- a/veilid-flutter/lib/veilid.dart +++ b/veilid-flutter/lib/veilid.dart @@ -1,220 +1,26 @@ import 'dart:async'; import 'dart:typed_data'; -import 'dart:convert'; - -import 'package:change_case/change_case.dart'; import 'veilid_stub.dart' if (dart.library.io) 'veilid_ffi.dart' if (dart.library.js) 'veilid_js.dart'; -import 'base64url_no_pad.dart'; - ////////////////////////////////////////////////////////// +import 'routing_context.dart'; +import 'veilid_config.dart'; +import 'veilid_crypto.dart'; +import 'veilid_table_db.dart'; +import 'veilid_state.dart'; + export 'default_config.dart'; - -////////////////////////////////////////////////////////// -// FFI Platform-specific config - -class VeilidFFIConfigLoggingTerminal { - bool enabled; - VeilidConfigLogLevel level; - - VeilidFFIConfigLoggingTerminal({ - required this.enabled, - required this.level, - }); - - Map get json { - return { - 'enabled': enabled, - 'level': level.json, - }; - } - - VeilidFFIConfigLoggingTerminal.fromJson(dynamic json) - : enabled = json['enabled'], - level = veilidConfigLogLevelFromJson(json['level']); -} - -class VeilidFFIConfigLoggingOtlp { - bool enabled; - VeilidConfigLogLevel level; - String grpcEndpoint; - String serviceName; - - VeilidFFIConfigLoggingOtlp({ - required this.enabled, - required this.level, - required this.grpcEndpoint, - required this.serviceName, - }); - - Map get json { - return { - 'enabled': enabled, - 'level': level.json, - 'grpc_endpoint': grpcEndpoint, - 'service_name': serviceName, - }; - } - - VeilidFFIConfigLoggingOtlp.fromJson(dynamic json) - : enabled = json['enabled'], - level = veilidConfigLogLevelFromJson(json['level']), - grpcEndpoint = json['grpc_endpoint'], - serviceName = json['service_name']; -} - -class VeilidFFIConfigLoggingApi { - bool enabled; - VeilidConfigLogLevel level; - - VeilidFFIConfigLoggingApi({ - required this.enabled, - required this.level, - }); - - Map get json { - return { - 'enabled': enabled, - 'level': level.json, - }; - } - - VeilidFFIConfigLoggingApi.fromJson(dynamic json) - : enabled = json['enabled'], - level = veilidConfigLogLevelFromJson(json['level']); -} - -class VeilidFFIConfigLogging { - VeilidFFIConfigLoggingTerminal terminal; - VeilidFFIConfigLoggingOtlp otlp; - VeilidFFIConfigLoggingApi api; - - VeilidFFIConfigLogging( - {required this.terminal, required this.otlp, required this.api}); - - Map get json { - return { - 'terminal': terminal.json, - 'otlp': otlp.json, - 'api': api.json, - }; - } - - VeilidFFIConfigLogging.fromJson(dynamic json) - : terminal = VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']), - otlp = VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']), - api = VeilidFFIConfigLoggingApi.fromJson(json['api']); -} - -class VeilidFFIConfig { - VeilidFFIConfigLogging logging; - - VeilidFFIConfig({ - required this.logging, - }); - - Map get json { - return { - 'logging': logging.json, - }; - } - - VeilidFFIConfig.fromJson(Map json) - : logging = VeilidFFIConfigLogging.fromJson(json['logging']); -} - 
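The new routing_context.dart above defines the public DHT surface (DHTSchema, DHTRecordDescriptor, ValueData, Stability, Sequencing, VeilidRoutingContext). The sketch below shows how those pieces fit together. It is an illustration under two assumptions not contained in this patch: that a context is obtained via Veilid.instance.routingContext(), and that cryptoKindVLD0 names the default crypto kind. The concrete generic return types (Future<DHTRecordDescriptor>, Future<ValueData?>) are also filled in here as a best guess, since the patch text elides them.

import 'dart:convert';
import 'dart:typed_data';

import 'package:veilid/veilid.dart';

// Sketch only: exercises the VeilidRoutingContext DHT methods declared above.
Future<void> dhtRoundTrip() async {
  // Assumed entry point; a private, ordered routing context as declared
  // by the withPrivacy()/withSequencing() modifiers above.
  final rc = (await Veilid.instance.routingContext())
      .withPrivacy()
      .withSequencing(Sequencing.ensureOrdered);

  // Allocate a record with the default schema and a single owner subkey.
  // cryptoKindVLD0 is an assumed constant for the default crypto kind.
  final DHTRecordDescriptor rec =
      await rc.createDHTRecord(cryptoKindVLD0, DHTSchemaDFLT(oCnt: 1));

  // Write subkey 0, then read it back without forcing a network refresh.
  await rc.setDHTValue(rec.key, 0, Uint8List.fromList(utf8.encode("hello")));
  final ValueData? value = await rc.getDHTValue(rec.key, 0, false);
  if (value != null) {
    print("seq=${value.seq} data=${utf8.decode(value.data)}");
  }

  await rc.closeDHTRecord(rec.key);
  await rc.deleteDHTRecord(rec.key);
}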
-////////////////////////////////////////////////////////// -// WASM Platform-specific config - -class VeilidWASMConfigLoggingPerformance { - bool enabled; - VeilidConfigLogLevel level; - bool logsInTimings; - bool logsInConsole; - - VeilidWASMConfigLoggingPerformance({ - required this.enabled, - required this.level, - required this.logsInTimings, - required this.logsInConsole, - }); - - Map get json { - return { - 'enabled': enabled, - 'level': level.json, - 'logs_in_timings': logsInTimings, - 'logs_in_console': logsInConsole, - }; - } - - VeilidWASMConfigLoggingPerformance.fromJson(dynamic json) - : enabled = json['enabled'], - level = veilidConfigLogLevelFromJson(json['level']), - logsInTimings = json['logs_in_timings'], - logsInConsole = json['logs_in_console']; -} - -class VeilidWASMConfigLoggingApi { - bool enabled; - VeilidConfigLogLevel level; - - VeilidWASMConfigLoggingApi({ - required this.enabled, - required this.level, - }); - - Map get json { - return { - 'enabled': enabled, - 'level': level.json, - }; - } - - VeilidWASMConfigLoggingApi.fromJson(dynamic json) - : enabled = json['enabled'], - level = veilidConfigLogLevelFromJson(json['level']); -} - -class VeilidWASMConfigLogging { - VeilidWASMConfigLoggingPerformance performance; - VeilidWASMConfigLoggingApi api; - - VeilidWASMConfigLogging({required this.performance, required this.api}); - - Map get json { - return { - 'performance': performance.json, - 'api': api.json, - }; - } - - VeilidWASMConfigLogging.fromJson(dynamic json) - : performance = - VeilidWASMConfigLoggingPerformance.fromJson(json['performance']), - api = VeilidWASMConfigLoggingApi.fromJson(json['api']); -} - -class VeilidWASMConfig { - VeilidWASMConfigLogging logging; - - VeilidWASMConfig({ - required this.logging, - }); - - Map get json { - return { - 'logging': logging.json, - }; - } - - VeilidWASMConfig.fromJson(dynamic json) - : logging = VeilidWASMConfigLogging.fromJson(json['logging']); -} +export 'routing_context.dart'; +export 'veilid_config.dart'; +export 'veilid_crypto.dart'; +export 'veilid_table_db.dart'; +export 'veilid_api_exception.dart'; +export 'veilid_state.dart'; +export 'veilid.dart'; ////////////////////////////////////// /// JSON Encode Helper @@ -224,1549 +30,27 @@ Object? veilidApiToEncodable(Object? 
value) { return value; } switch (value.runtimeType) { - case AttachmentState: - return (value as AttachmentState).json; - case VeilidLogLevel: - return (value as VeilidLogLevel).json; - case VeilidConfigLogLevel: - return (value as VeilidConfigLogLevel).json; + // case KeyPair: + // return (value as KeyPair).json; } throw UnsupportedError('Cannot convert to JSON: $value'); } -////////////////////////////////////// -/// AttachmentState - -enum AttachmentState { - detached, - attaching, - attachedWeak, - attachedGood, - attachedStrong, - fullyAttached, - overAttached, - detaching, -} - -extension AttachmentStateExt on AttachmentState { - String get json { - return name.toPascalCase(); - } -} - -AttachmentState attachmentStateFromJson(String j) { - return AttachmentState.values.byName(j.toCamelCase()); -} - -////////////////////////////////////// -/// VeilidLogLevel - -enum VeilidLogLevel { - error, - warn, - info, - debug, - trace, -} - -extension VeilidLogLevelExt on VeilidLogLevel { - String get json { - return name.toPascalCase(); - } -} - -VeilidLogLevel veilidLogLevelFromJson(String j) { - return VeilidLogLevel.values.byName(j.toCamelCase()); -} - -////////////////////////////////////// -/// VeilidConfigLogLevel - -enum VeilidConfigLogLevel { - off, - error, - warn, - info, - debug, - trace, -} - -extension VeilidConfigLogLevelExt on VeilidConfigLogLevel { - String get json { - return name.toPascalCase(); - } -} - -VeilidConfigLogLevel veilidConfigLogLevelFromJson(String j) { - return VeilidConfigLogLevel.values.byName(j.toCamelCase()); -} - -////////////////////////////////////// -/// VeilidConfig - -class VeilidConfigHTTPS { - bool enabled; - String listenAddress; - String path; - String? url; - - VeilidConfigHTTPS({ - required this.enabled, - required this.listenAddress, - required this.path, - this.url, - }); - - Map get json { - return { - 'enabled': enabled, - 'listen_address': listenAddress, - 'path': path, - 'url': url - }; - } - - VeilidConfigHTTPS.fromJson(dynamic json) - : enabled = json['enabled'], - listenAddress = json['listen_address'], - path = json['path'], - url = json['url']; -} - -//////////// - -class VeilidConfigHTTP { - bool enabled; - String listenAddress; - String path; - String? url; - - VeilidConfigHTTP({ - required this.enabled, - required this.listenAddress, - required this.path, - this.url, - }); - - Map get json { - return { - 'enabled': enabled, - 'listen_address': listenAddress, - 'path': path, - 'url': url - }; - } - - VeilidConfigHTTP.fromJson(dynamic json) - : enabled = json['enabled'], - listenAddress = json['listen_address'], - path = json['path'], - url = json['url']; -} - -//////////// - -class VeilidConfigApplication { - VeilidConfigHTTPS https; - VeilidConfigHTTP http; - - VeilidConfigApplication({ - required this.https, - required this.http, - }); - - Map get json { - return { - 'https': https.json, - 'http': http.json, - }; - } - - VeilidConfigApplication.fromJson(dynamic json) - : https = VeilidConfigHTTPS.fromJson(json['https']), - http = VeilidConfigHTTP.fromJson(json['http']); -} - -//////////// - -class VeilidConfigUDP { - bool enabled; - int socketPoolSize; - String listenAddress; - String? 
publicAddress; - - VeilidConfigUDP( - {required this.enabled, - required this.socketPoolSize, - required this.listenAddress, - this.publicAddress}); - - Map get json { - return { - 'enabled': enabled, - 'socket_pool_size': socketPoolSize, - 'listen_address': listenAddress, - 'public_address': publicAddress, - }; - } - - VeilidConfigUDP.fromJson(dynamic json) - : enabled = json['enabled'], - socketPoolSize = json['socket_pool_size'], - listenAddress = json['listen_address'], - publicAddress = json['publicAddress']; -} - -//////////// - -class VeilidConfigTCP { - bool connect; - bool listen; - int maxConnections; - String listenAddress; - String? publicAddress; - - VeilidConfigTCP( - {required this.connect, - required this.listen, - required this.maxConnections, - required this.listenAddress, - this.publicAddress}); - - Map get json { - return { - 'connect': connect, - 'listen': listen, - 'max_connections': maxConnections, - 'listen_address': listenAddress, - 'public_address': publicAddress, - }; - } - - VeilidConfigTCP.fromJson(dynamic json) - : connect = json['connect'], - listen = json['listen'], - maxConnections = json['max_connections'], - listenAddress = json['listen_address'], - publicAddress = json['publicAddress']; -} - -//////////// - -class VeilidConfigWS { - bool connect; - bool listen; - int maxConnections; - String listenAddress; - String path; - String? url; - - VeilidConfigWS( - {required this.connect, - required this.listen, - required this.maxConnections, - required this.listenAddress, - required this.path, - this.url}); - - Map get json { - return { - 'connect': connect, - 'listen': listen, - 'max_connections': maxConnections, - 'listen_address': listenAddress, - 'path': path, - 'url': url, - }; - } - - VeilidConfigWS.fromJson(dynamic json) - : connect = json['connect'], - listen = json['listen'], - maxConnections = json['max_connections'], - listenAddress = json['listen_address'], - path = json['path'], - url = json['url']; -} - -//////////// - -class VeilidConfigWSS { - bool connect; - bool listen; - int maxConnections; - String listenAddress; - String path; - String? 
url; - - VeilidConfigWSS( - {required this.connect, - required this.listen, - required this.maxConnections, - required this.listenAddress, - required this.path, - this.url}); - - Map get json { - return { - 'connect': connect, - 'listen': listen, - 'max_connections': maxConnections, - 'listen_address': listenAddress, - 'path': path, - 'url': url, - }; - } - - VeilidConfigWSS.fromJson(dynamic json) - : connect = json['connect'], - listen = json['listen'], - maxConnections = json['max_connections'], - listenAddress = json['listen_address'], - path = json['path'], - url = json['url']; -} - -//////////// - -class VeilidConfigProtocol { - VeilidConfigUDP udp; - VeilidConfigTCP tcp; - VeilidConfigWS ws; - VeilidConfigWSS wss; - - VeilidConfigProtocol({ - required this.udp, - required this.tcp, - required this.ws, - required this.wss, - }); - - Map get json { - return { - 'udp': udp.json, - 'tcp': tcp.json, - 'ws': ws.json, - 'wss': wss.json, - }; - } - - VeilidConfigProtocol.fromJson(dynamic json) - : udp = VeilidConfigUDP.fromJson(json['udp']), - tcp = VeilidConfigTCP.fromJson(json['tcp']), - ws = VeilidConfigWS.fromJson(json['ws']), - wss = VeilidConfigWSS.fromJson(json['wss']); -} - -//////////// - -class VeilidConfigTLS { - String certificatePath; - String privateKeyPath; - int connectionInitialTimeoutMs; - - VeilidConfigTLS({ - required this.certificatePath, - required this.privateKeyPath, - required this.connectionInitialTimeoutMs, - }); - - Map get json { - return { - 'certificate_path': certificatePath, - 'private_key_path': privateKeyPath, - 'connection_initial_timeout_ms': connectionInitialTimeoutMs, - }; - } - - VeilidConfigTLS.fromJson(dynamic json) - : certificatePath = json['certificate_path'], - privateKeyPath = json['private_key_path'], - connectionInitialTimeoutMs = json['connection_initial_timeout_ms']; -} - -//////////// - -class VeilidConfigDHT { - int? resolveNodeTimeoutMs; - int resolveNodeCount; - int resolveNodeFanout; - int maxFindNodeCount; - int? getValueTimeoutMs; - int getValueCount; - int getValueFanout; - int? 
setValueTimeoutMs; - int setValueCount; - int setValueFanout; - int minPeerCount; - int minPeerRefreshTimeMs; - int validateDialInfoReceiptTimeMs; - - VeilidConfigDHT( - {this.resolveNodeTimeoutMs, - required this.resolveNodeCount, - required this.resolveNodeFanout, - required this.maxFindNodeCount, - this.getValueTimeoutMs, - required this.getValueCount, - required this.getValueFanout, - this.setValueTimeoutMs, - required this.setValueCount, - required this.setValueFanout, - required this.minPeerCount, - required this.minPeerRefreshTimeMs, - required this.validateDialInfoReceiptTimeMs}); - - Map get json { - return { - 'resolve_node_timeout_ms': resolveNodeTimeoutMs, - 'resolve_node_count': resolveNodeCount, - 'resolve_node_fanout': resolveNodeFanout, - 'max_find_node_count': maxFindNodeCount, - 'get_value_timeout_ms': getValueTimeoutMs, - 'get_value_count': getValueCount, - 'get_value_fanout': getValueFanout, - 'set_value_timeout_ms': setValueTimeoutMs, - 'set_value_count': setValueCount, - 'set_value_fanout': setValueFanout, - 'min_peer_count': minPeerCount, - 'min_peer_refresh_time_ms': minPeerRefreshTimeMs, - 'validate_dial_info_receipt_time_ms': validateDialInfoReceiptTimeMs - }; - } - - VeilidConfigDHT.fromJson(dynamic json) - : resolveNodeTimeoutMs = json['resolve_node_timeout_ms'], - resolveNodeCount = json['resolve_node_count'], - resolveNodeFanout = json['resolve_node_fanout'], - maxFindNodeCount = json['max_find_node_count'], - getValueTimeoutMs = json['get_value_timeout_ms'], - getValueCount = json['get_value_count'], - getValueFanout = json['get_value_fanout'], - setValueTimeoutMs = json['set_value_timeout_ms'], - setValueCount = json['set_value_count'], - setValueFanout = json['set_value_fanout'], - minPeerCount = json['min_peer_count'], - minPeerRefreshTimeMs = json['min_peer_refresh_time_ms'], - validateDialInfoReceiptTimeMs = - json['validate_dial_info_receipt_time_ms']; -} - -//////////// - -class VeilidConfigRPC { - int concurrency; - int queueSize; - int? maxTimestampBehindMs; - int? 
maxTimestampAheadMs; - int timeoutMs; - int maxRouteHopCount; - int defaultRouteHopCount; - - VeilidConfigRPC( - {required this.concurrency, - required this.queueSize, - this.maxTimestampBehindMs, - this.maxTimestampAheadMs, - required this.timeoutMs, - required this.maxRouteHopCount, - required this.defaultRouteHopCount}); - - Map get json { - return { - 'concurrency': concurrency, - 'queue_size': queueSize, - 'max_timestamp_behind_ms': maxTimestampBehindMs, - 'max_timestamp_ahead_ms': maxTimestampAheadMs, - 'timeout_ms': timeoutMs, - 'max_route_hop_count': maxRouteHopCount, - 'default_route_hop_count': defaultRouteHopCount, - }; - } - - VeilidConfigRPC.fromJson(dynamic json) - : concurrency = json['concurrency'], - queueSize = json['queue_size'], - maxTimestampBehindMs = json['max_timestamp_behind_ms'], - maxTimestampAheadMs = json['max_timestamp_ahead_ms'], - timeoutMs = json['timeout_ms'], - maxRouteHopCount = json['max_route_hop_count'], - defaultRouteHopCount = json['default_route_hop_count']; -} - -//////////// - -class VeilidConfigRoutingTable { - List nodeId; - List nodeIdSecret; - List bootstrap; - int limitOverAttached; - int limitFullyAttached; - int limitAttachedStrong; - int limitAttachedGood; - int limitAttachedWeak; - - VeilidConfigRoutingTable({ - required this.nodeId, - required this.nodeIdSecret, - required this.bootstrap, - required this.limitOverAttached, - required this.limitFullyAttached, - required this.limitAttachedStrong, - required this.limitAttachedGood, - required this.limitAttachedWeak, - }); - - Map get json { - return { - 'node_id': nodeId.map((p) => p).toList(), - 'node_id_secret': nodeIdSecret.map((p) => p).toList(), - 'bootstrap': bootstrap.map((p) => p).toList(), - 'limit_over_attached': limitOverAttached, - 'limit_fully_attached': limitFullyAttached, - 'limit_attached_strong': limitAttachedStrong, - 'limit_attached_good': limitAttachedGood, - 'limit_attached_weak': limitAttachedWeak, - }; - } - - VeilidConfigRoutingTable.fromJson(dynamic json) - : nodeId = List.from(json['node_id'].map((j) => j)), - nodeIdSecret = List.from(json['node_id_secret'].map((j) => j)), - bootstrap = List.from(json['bootstrap'].map((j) => j)), - limitOverAttached = json['limit_over_attached'], - limitFullyAttached = json['limit_fully_attached'], - limitAttachedStrong = json['limit_attached_strong'], - limitAttachedGood = json['limit_attached_good'], - limitAttachedWeak = json['limit_attached_weak']; -} - -//////////// - -class VeilidConfigNetwork { - int connectionInitialTimeoutMs; - int connectionInactivityTimeoutMs; - int maxConnectionsPerIp4; - int maxConnectionsPerIp6Prefix; - int maxConnectionsPerIp6PrefixSize; - int maxConnectionFrequencyPerMin; - int clientWhitelistTimeoutMs; - int reverseConnectionReceiptTimeMs; - int holePunchReceiptTimeMs; - VeilidConfigRoutingTable routingTable; - VeilidConfigRPC rpc; - VeilidConfigDHT dht; - bool upnp; - bool detectAddressChanges; - int restrictedNatRetries; - VeilidConfigTLS tls; - VeilidConfigApplication application; - VeilidConfigProtocol protocol; - - VeilidConfigNetwork({ - required this.connectionInitialTimeoutMs, - required this.connectionInactivityTimeoutMs, - required this.maxConnectionsPerIp4, - required this.maxConnectionsPerIp6Prefix, - required this.maxConnectionsPerIp6PrefixSize, - required this.maxConnectionFrequencyPerMin, - required this.clientWhitelistTimeoutMs, - required this.reverseConnectionReceiptTimeMs, - required this.holePunchReceiptTimeMs, - required this.routingTable, - required this.rpc, - required 
this.dht, - required this.upnp, - required this.detectAddressChanges, - required this.restrictedNatRetries, - required this.tls, - required this.application, - required this.protocol, - }); - - Map get json { - return { - 'connection_initial_timeout_ms': connectionInitialTimeoutMs, - 'connection_inactivity_timeout_ms': connectionInactivityTimeoutMs, - 'max_connections_per_ip4': maxConnectionsPerIp4, - 'max_connections_per_ip6_prefix': maxConnectionsPerIp6Prefix, - 'max_connections_per_ip6_prefix_size': maxConnectionsPerIp6PrefixSize, - 'max_connection_frequency_per_min': maxConnectionFrequencyPerMin, - 'client_whitelist_timeout_ms': clientWhitelistTimeoutMs, - 'reverse_connection_receipt_time_ms': reverseConnectionReceiptTimeMs, - 'hole_punch_receipt_time_ms': holePunchReceiptTimeMs, - 'routing_table': routingTable.json, - 'rpc': rpc.json, - 'dht': dht.json, - 'upnp': upnp, - 'detect_address_changes': detectAddressChanges, - 'restricted_nat_retries': restrictedNatRetries, - 'tls': tls.json, - 'application': application.json, - 'protocol': protocol.json, - }; - } - - VeilidConfigNetwork.fromJson(dynamic json) - : connectionInitialTimeoutMs = json['connection_initial_timeout_ms'], - connectionInactivityTimeoutMs = - json['connection_inactivity_timeout_ms'], - maxConnectionsPerIp4 = json['max_connections_per_ip4'], - maxConnectionsPerIp6Prefix = json['max_connections_per_ip6_prefix'], - maxConnectionsPerIp6PrefixSize = - json['max_connections_per_ip6_prefix_size'], - maxConnectionFrequencyPerMin = json['max_connection_frequency_per_min'], - clientWhitelistTimeoutMs = json['client_whitelist_timeout_ms'], - reverseConnectionReceiptTimeMs = - json['reverse_connection_receipt_time_ms'], - holePunchReceiptTimeMs = json['hole_punch_receipt_time_ms'], - routingTable = VeilidConfigRoutingTable.fromJson(json['routing_table']), - rpc = VeilidConfigRPC.fromJson(json['rpc']), - dht = VeilidConfigDHT.fromJson(json['dht']), - upnp = json['upnp'], - detectAddressChanges = json['detect_address_changes'], - restrictedNatRetries = json['restricted_nat_retries'], - tls = VeilidConfigTLS.fromJson(json['tls']), - application = VeilidConfigApplication.fromJson(json['application']), - protocol = VeilidConfigProtocol.fromJson(json['protocol']); -} - -//////////// - -class VeilidConfigTableStore { - String directory; - bool delete; - - VeilidConfigTableStore({ - required this.directory, - required this.delete, - }); - - Map get json { - return {'directory': directory, 'delete': delete}; - } - - VeilidConfigTableStore.fromJson(dynamic json) - : directory = json['directory'], - delete = json['delete']; -} - -//////////// - -class VeilidConfigBlockStore { - String directory; - bool delete; - - VeilidConfigBlockStore({ - required this.directory, - required this.delete, - }); - - Map get json { - return {'directory': directory, 'delete': delete}; - } - - VeilidConfigBlockStore.fromJson(dynamic json) - : directory = json['directory'], - delete = json['delete']; -} - -//////////// - -class VeilidConfigProtectedStore { - bool allowInsecureFallback; - bool alwaysUseInsecureStorage; - String insecureFallbackDirectory; - bool delete; - - VeilidConfigProtectedStore({ - required this.allowInsecureFallback, - required this.alwaysUseInsecureStorage, - required this.insecureFallbackDirectory, - required this.delete, - }); - - Map get json { - return { - 'allow_insecure_fallback': allowInsecureFallback, - 'always_use_insecure_storage': alwaysUseInsecureStorage, - 'insecure_fallback_directory': insecureFallbackDirectory, - 'delete': 
delete, - }; - } - - VeilidConfigProtectedStore.fromJson(dynamic json) - : allowInsecureFallback = json['allow_insecure_fallback'], - alwaysUseInsecureStorage = json['always_use_insecure_storage'], - insecureFallbackDirectory = json['insecure_fallback_directory'], - delete = json['delete']; -} - -//////////// - -class VeilidConfigCapabilities { - bool protocolUDP; - bool protocolConnectTCP; - bool protocolAcceptTCP; - bool protocolConnectWS; - bool protocolAcceptWS; - bool protocolConnectWSS; - bool protocolAcceptWSS; - - VeilidConfigCapabilities({ - required this.protocolUDP, - required this.protocolConnectTCP, - required this.protocolAcceptTCP, - required this.protocolConnectWS, - required this.protocolAcceptWS, - required this.protocolConnectWSS, - required this.protocolAcceptWSS, - }); - - Map get json { - return { - 'protocol_udp': protocolUDP, - 'protocol_connect_tcp': protocolConnectTCP, - 'protocol_accept_tcp': protocolAcceptTCP, - 'protocol_connect_ws': protocolConnectWS, - 'protocol_accept_ws': protocolAcceptWS, - 'protocol_connect_wss': protocolConnectWSS, - 'protocol_accept_wss': protocolAcceptWSS, - }; - } - - VeilidConfigCapabilities.fromJson(dynamic json) - : protocolUDP = json['protocol_udp'], - protocolConnectTCP = json['protocol_connect_tcp'], - protocolAcceptTCP = json['protocol_accept_tcp'], - protocolConnectWS = json['protocol_connect_ws'], - protocolAcceptWS = json['protocol_accept_ws'], - protocolConnectWSS = json['protocol_connect_wss'], - protocolAcceptWSS = json['protocol_accept_wss']; -} - -//////////// - -class VeilidConfig { - String programName; - String namespace; - VeilidConfigCapabilities capabilities; - VeilidConfigProtectedStore protectedStore; - VeilidConfigTableStore tableStore; - VeilidConfigBlockStore blockStore; - VeilidConfigNetwork network; - - VeilidConfig({ - required this.programName, - required this.namespace, - required this.capabilities, - required this.protectedStore, - required this.tableStore, - required this.blockStore, - required this.network, - }); - - Map get json { - return { - 'program_name': programName, - 'namespace': namespace, - 'capabilities': capabilities.json, - 'protected_store': protectedStore.json, - 'table_store': tableStore.json, - 'block_store': blockStore.json, - 'network': network.json - }; - } - - VeilidConfig.fromJson(dynamic json) - : programName = json['program_name'], - namespace = json['namespace'], - capabilities = VeilidConfigCapabilities.fromJson(json['capabilities']), - protectedStore = - VeilidConfigProtectedStore.fromJson(json['protected_store']), - tableStore = VeilidConfigTableStore.fromJson(json['table_store']), - blockStore = VeilidConfigBlockStore.fromJson(json['block_store']), - network = VeilidConfigNetwork.fromJson(json['network']); -} - -//////////// - -class LatencyStats { - BigInt fastest; - BigInt average; - BigInt slowest; - - LatencyStats({ - required this.fastest, - required this.average, - required this.slowest, - }); - - Map get json { - return { - 'fastest': fastest.toString(), - 'average': average.toString(), - 'slowest': slowest.toString(), - }; - } - - LatencyStats.fromJson(dynamic json) - : fastest = BigInt.parse(json['fastest']), - average = BigInt.parse(json['average']), - slowest = BigInt.parse(json['slowest']); -} - -//////////// - -class TransferStats { - BigInt total; - BigInt maximum; - BigInt average; - BigInt minimum; - - TransferStats({ - required this.total, - required this.maximum, - required this.average, - required this.minimum, - }); - - Map get json { - return { - 
'total': total.toString(), - 'maximum': maximum.toString(), - 'average': average.toString(), - 'minimum': minimum.toString(), - }; - } - - TransferStats.fromJson(dynamic json) - : total = BigInt.parse(json['total']), - maximum = BigInt.parse(json['maximum']), - average = BigInt.parse(json['average']), - minimum = BigInt.parse(json['minimum']); -} - -//////////// - -class TransferStatsDownUp { - TransferStats down; - TransferStats up; - - TransferStatsDownUp({ - required this.down, - required this.up, - }); - - Map get json { - return { - 'down': down.json, - 'up': up.json, - }; - } - - TransferStatsDownUp.fromJson(dynamic json) - : down = TransferStats.fromJson(json['down']), - up = TransferStats.fromJson(json['up']); -} - -//////////// - -class RPCStats { - int messagesSent; - int messagesRcvd; - int questionsInFlight; - BigInt? lastQuestion; - BigInt? lastSeenTs; - BigInt? firstConsecutiveSeenTs; - int recentLostAnswers; - int failedToSend; - - RPCStats({ - required this.messagesSent, - required this.messagesRcvd, - required this.questionsInFlight, - required this.lastQuestion, - required this.lastSeenTs, - required this.firstConsecutiveSeenTs, - required this.recentLostAnswers, - required this.failedToSend, - }); - - Map get json { - return { - 'messages_sent': messagesSent, - 'messages_rcvd': messagesRcvd, - 'questions_in_flight': questionsInFlight, - 'last_question': lastQuestion?.toString(), - 'last_seen_ts': lastSeenTs?.toString(), - 'first_consecutive_seen_ts': firstConsecutiveSeenTs?.toString(), - 'recent_lost_answers': recentLostAnswers, - 'failed_to_send': failedToSend, - }; - } - - RPCStats.fromJson(dynamic json) - : messagesSent = json['messages_sent'], - messagesRcvd = json['messages_rcvd'], - questionsInFlight = json['questions_in_flight'], - lastQuestion = json['last_question'] != null - ? BigInt.parse(json['last_question']) - : null, - lastSeenTs = json['last_seen_ts'] != null - ? BigInt.parse(json['last_seen_ts']) - : null, - firstConsecutiveSeenTs = json['first_consecutive_seen_ts'] != null - ? BigInt.parse(json['first_consecutive_seen_ts']) - : null, - recentLostAnswers = json['recent_lost_answers'], - failedToSend = json['failed_to_send']; -} - -//////////// - -class PeerStats { - BigInt timeAdded; - RPCStats rpcStats; - LatencyStats? latency; - TransferStatsDownUp transfer; - - PeerStats({ - required this.timeAdded, - required this.rpcStats, - required this.latency, - required this.transfer, - }); - - Map get json { - return { - 'time_added': timeAdded.toString(), - 'rpc_stats': rpcStats.json, - 'latency': latency?.json, - 'transfer': transfer.json, - }; - } - - PeerStats.fromJson(dynamic json) - : timeAdded = BigInt.parse(json['time_added']), - rpcStats = RPCStats.fromJson(json['rpc_stats']), - latency = json['latency'] != null - ? 
LatencyStats.fromJson(json['latency']) - : null, - transfer = TransferStatsDownUp.fromJson(json['transfer']); -} - -//////////// - -class PeerTableData { - List nodeIds; - PeerAddress peerAddress; - PeerStats peerStats; - - PeerTableData({ - required this.nodeIds, - required this.peerAddress, - required this.peerStats, - }); - - Map get json { - return { - 'node_ids': nodeIds.map((p) => p).toList(), - 'peer_address': peerAddress.json, - 'peer_stats': peerStats.json, - }; - } - - PeerTableData.fromJson(dynamic json) - : nodeIds = List.from(json['node_ids'].map((j) => j)), - peerAddress = PeerAddress.fromJson(json['peer_address']), - peerStats = PeerStats.fromJson(json['peer_stats']); -} - -////////////////////////////////////// -/// AttachmentState - -enum ProtocolType { - udp, - tcp, - ws, - wss, -} - -extension ProtocolTypeExt on ProtocolType { - String get json { - return name.toUpperCase(); - } -} - -ProtocolType protocolTypeFromJson(String j) { - return ProtocolType.values.byName(j.toLowerCase()); -} - -//////////// - -class PeerAddress { - ProtocolType protocolType; - String socketAddress; - - PeerAddress({ - required this.protocolType, - required this.socketAddress, - }); - - Map get json { - return { - 'protocol_type': protocolType.json, - 'socket_address': socketAddress, - }; - } - - PeerAddress.fromJson(dynamic json) - : protocolType = protocolTypeFromJson(json['protocol_type']), - socketAddress = json['socket_address']; -} - -////////////////////////////////////// -/// VeilidUpdate - -abstract class VeilidUpdate { - factory VeilidUpdate.fromJson(dynamic json) { - switch (json["kind"]) { - case "Log": - { - return VeilidLog( - logLevel: veilidLogLevelFromJson(json["log_level"]), - message: json["message"], - backtrace: json["backtrace"]); - } - case "AppMessage": - { - return VeilidAppMessage( - sender: json["sender"], message: json["message"]); - } - case "AppCall": - { - return VeilidAppCall( - sender: json["sender"], message: json["message"], id: json["id"]); - } - case "Attachment": - { - return VeilidUpdateAttachment( - state: VeilidStateAttachment.fromJson(json)); - } - case "Network": - { - return VeilidUpdateNetwork(state: VeilidStateNetwork.fromJson(json)); - } - case "Config": - { - return VeilidUpdateConfig(state: VeilidStateConfig.fromJson(json)); - } - case "Route": - { - return VeilidUpdateRoute(state: VeilidStateRoute.fromJson(json)); - } - default: - { - throw VeilidAPIExceptionInternal( - "Invalid VeilidAPIException type: ${json['kind']}"); - } +T? Function(dynamic) optFromJson(T Function(dynamic) jsonConstructor) { + return (dynamic j) { + if (j == null) { + return null; + } else { + return jsonConstructor(j); } - } - Map get json; + }; } -class VeilidLog implements VeilidUpdate { - final VeilidLogLevel logLevel; - final String message; - final String? backtrace; - // - VeilidLog({ - required this.logLevel, - required this.message, - required this.backtrace, - }); - - @override - Map get json { - return { - 'kind': "Log", - 'log_level': logLevel.json, - 'message': message, - 'backtrace': backtrace - }; - } -} - -class VeilidAppMessage implements VeilidUpdate { - final String? sender; - final Uint8List message; - - // - VeilidAppMessage({ - required this.sender, - required this.message, - }); - - @override - Map get json { - return { - 'kind': "AppMessage", - 'sender': sender, - 'message': base64UrlNoPadEncode(message) - }; - } -} - -class VeilidAppCall implements VeilidUpdate { - final String? 
sender; - final Uint8List message; - final String id; - - // - VeilidAppCall({ - required this.sender, - required this.message, - required this.id, - }); - - @override - Map get json { - return { - 'kind': "AppMessage", - 'sender': sender, - 'message': base64UrlNoPadEncode(message), - 'id': id, - }; - } -} - -class VeilidUpdateAttachment implements VeilidUpdate { - final VeilidStateAttachment state; - // - VeilidUpdateAttachment({required this.state}); - - @override - Map get json { - var jsonRep = state.json; - jsonRep['kind'] = "Attachment"; - return jsonRep; - } -} - -class VeilidUpdateNetwork implements VeilidUpdate { - final VeilidStateNetwork state; - // - VeilidUpdateNetwork({required this.state}); - - @override - Map get json { - var jsonRep = state.json; - jsonRep['kind'] = "Network"; - return jsonRep; - } -} - -class VeilidUpdateConfig implements VeilidUpdate { - final VeilidStateConfig state; - // - VeilidUpdateConfig({required this.state}); - - @override - Map get json { - var jsonRep = state.json; - jsonRep['kind'] = "Config"; - return jsonRep; - } -} - -class VeilidUpdateRoute implements VeilidUpdate { - final VeilidStateRoute state; - // - VeilidUpdateRoute({required this.state}); - - @override - Map get json { - var jsonRep = state.json; - jsonRep['kind'] = "Route"; - return jsonRep; - } -} - -////////////////////////////////////// -/// VeilidStateAttachment - -class VeilidStateAttachment { - final AttachmentState state; - final bool publicInternetReady; - final bool localNetworkReady; - - VeilidStateAttachment( - this.state, this.publicInternetReady, this.localNetworkReady); - - VeilidStateAttachment.fromJson(dynamic json) - : state = attachmentStateFromJson(json['state']), - publicInternetReady = json['public_internet_ready'], - localNetworkReady = json['local_network_ready']; - - Map get json { - return { - 'state': state.json, - 'public_internet_ready': publicInternetReady, - 'local_network_ready': localNetworkReady, - }; - } -} - -////////////////////////////////////// -/// VeilidStateNetwork - -class VeilidStateNetwork { - final bool started; - final BigInt bpsDown; - final BigInt bpsUp; - final List peers; - - VeilidStateNetwork( - {required this.started, - required this.bpsDown, - required this.bpsUp, - required this.peers}); - - VeilidStateNetwork.fromJson(dynamic json) - : started = json['started'], - bpsDown = BigInt.parse(json['bps_down']), - bpsUp = BigInt.parse(json['bps_up']), - peers = List.from( - json['peers'].map((j) => PeerTableData.fromJson(j))); - - Map get json { - return { - 'started': started, - 'bps_down': bpsDown.toString(), - 'bps_up': bpsUp.toString(), - 'peers': peers.map((p) => p.json).toList(), - }; - } -} - -////////////////////////////////////// -/// VeilidStateConfig - -class VeilidStateConfig { - final Map config; - - VeilidStateConfig({ - required this.config, - }); - - VeilidStateConfig.fromJson(dynamic json) : config = json['config']; - - Map get json { - return {'config': config}; - } -} - -////////////////////////////////////// -/// VeilidStateRoute - -class VeilidStateRoute { - final List deadRoutes; - final List deadRemoteRoutes; - - VeilidStateRoute({ - required this.deadRoutes, - required this.deadRemoteRoutes, - }); - - VeilidStateRoute.fromJson(dynamic json) - : deadRoutes = List.from(json['dead_routes'].map((j) => j)), - deadRemoteRoutes = - List.from(json['dead_remote_routes'].map((j) => j)); - - Map get json { - return { - 'dead_routes': deadRoutes.map((p) => p).toList(), - 'dead_remote_routes': deadRemoteRoutes.map((p) => 
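// Illustrative sketch (not from the Veilid sources): summarizing a
// VeilidStateNetwork decoded by the fromJson constructor above. The bps
// fields are BigInt values parsed from strings; peers is a list of
// PeerTableData.
String describeNetwork(VeilidStateNetwork net) {
  return 'started=${net.started} '
      'peers=${net.peers.length} '
      'bpsDown=${net.bpsDown} bpsUp=${net.bpsUp}';
}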
p).toList() - }; - } -} - -////////////////////////////////////// -/// VeilidState - -class VeilidState { - final VeilidStateAttachment attachment; - final VeilidStateNetwork network; - final VeilidStateConfig config; - - VeilidState.fromJson(dynamic json) - : attachment = VeilidStateAttachment.fromJson(json['attachment']), - network = VeilidStateNetwork.fromJson(json['network']), - config = VeilidStateConfig.fromJson(json['config']); - - Map get json { - return { - 'attachment': attachment.json, - 'network': network.json, - 'config': config.json - }; - } -} - -////////////////////////////////////// -/// VeilidAPIException - -abstract class VeilidAPIException implements Exception { - factory VeilidAPIException.fromJson(dynamic json) { - switch (json["kind"]) { - case "NotInitialized": - { - return VeilidAPIExceptionNotInitialized(); - } - case "AlreadyInitialized": - { - return VeilidAPIExceptionAlreadyInitialized(); - } - case "Timeout": - { - return VeilidAPIExceptionTimeout(); - } - case "Shutdown": - { - return VeilidAPIExceptionShutdown(); - } - case "NodeNotFound": - { - return VeilidAPIExceptionNodeNotFound(json["node_id"]); - } - case "NoDialInfo": - { - return VeilidAPIExceptionNoDialInfo(json["node_id"]); - } - case "Internal": - { - return VeilidAPIExceptionInternal(json["message"]); - } - case "Unimplemented": - { - return VeilidAPIExceptionUnimplemented(json["unimplemented"]); - } - case "ParseError": - { - return VeilidAPIExceptionParseError(json["message"], json["value"]); - } - case "InvalidArgument": - { - return VeilidAPIExceptionInvalidArgument( - json["context"], json["argument"], json["value"]); - } - case "MissingArgument": - { - return VeilidAPIExceptionMissingArgument( - json["context"], json["argument"]); - } - case "Generic": - { - return VeilidAPIExceptionGeneric(json["message"]); - } - default: - { - throw VeilidAPIExceptionInternal( - "Invalid VeilidAPIException type: ${json['kind']}"); - } - } - } - - String toDisplayError(); -} - -class VeilidAPIExceptionNotInitialized implements VeilidAPIException { - @override - String toString() { - return "VeilidAPIException: NotInitialized"; - } - - @override - String toDisplayError() { - return "Not initialized"; - } -} - -class VeilidAPIExceptionAlreadyInitialized implements VeilidAPIException { - @override - String toString() { - return "VeilidAPIException: AlreadyInitialized"; - } - - @override - String toDisplayError() { - return "Already initialized"; - } -} - -class VeilidAPIExceptionTimeout implements VeilidAPIException { - @override - String toString() { - return "VeilidAPIException: Timeout"; - } - - @override - String toDisplayError() { - return "Timeout"; - } -} - -class VeilidAPIExceptionShutdown implements VeilidAPIException { - @override - String toString() { - return "VeilidAPIException: Shutdown"; - } - - @override - String toDisplayError() { - return "Currently shut down"; - } -} - -class VeilidAPIExceptionNodeNotFound implements VeilidAPIException { - final String nodeId; - - @override - String toString() { - return "VeilidAPIException: NodeNotFound (nodeId: $nodeId)"; - } - - @override - String toDisplayError() { - return "Node node found: $nodeId"; - } - - // - VeilidAPIExceptionNodeNotFound(this.nodeId); -} - -class VeilidAPIExceptionNoDialInfo implements VeilidAPIException { - final String nodeId; - - @override - String toString() { - return "VeilidAPIException: NoDialInfo (nodeId: $nodeId)"; - } - - @override - String toDisplayError() { - return "No dial info: $nodeId"; - } - - // - 
VeilidAPIExceptionNoDialInfo(this.nodeId); -} - -class VeilidAPIExceptionInternal implements VeilidAPIException { - final String message; - - @override - String toString() { - return "VeilidAPIException: Internal ($message)"; - } - - @override - String toDisplayError() { - return "Internal error: $message"; - } - - // - VeilidAPIExceptionInternal(this.message); -} - -class VeilidAPIExceptionUnimplemented implements VeilidAPIException { - final String message; - - @override - String toString() { - return "VeilidAPIException: Unimplemented ($message)"; - } - - @override - String toDisplayError() { - return "Unimplemented: $message"; - } - - // - VeilidAPIExceptionUnimplemented(this.message); -} - -class VeilidAPIExceptionParseError implements VeilidAPIException { - final String message; - final String value; - - @override - String toString() { - return "VeilidAPIException: ParseError ($message)\n value: $value"; - } - - @override - String toDisplayError() { - return "Parse error: $message"; - } - - // - VeilidAPIExceptionParseError(this.message, this.value); -} - -class VeilidAPIExceptionInvalidArgument implements VeilidAPIException { - final String context; - final String argument; - final String value; - - @override - String toString() { - return "VeilidAPIException: InvalidArgument ($context:$argument)\n value: $value"; - } - - @override - String toDisplayError() { - return "Invalid argument for $context: $argument"; - } - - // - VeilidAPIExceptionInvalidArgument(this.context, this.argument, this.value); -} - -class VeilidAPIExceptionMissingArgument implements VeilidAPIException { - final String context; - final String argument; - - @override - String toString() { - return "VeilidAPIException: MissingArgument ($context:$argument)"; - } - - @override - String toDisplayError() { - return "Missing argument for $context: $argument"; - } - - // - VeilidAPIExceptionMissingArgument(this.context, this.argument); -} - -class VeilidAPIExceptionGeneric implements VeilidAPIException { - final String message; - - @override - String toString() { - return "VeilidAPIException: Generic (message: $message)"; - } - - @override - String toDisplayError() { - return message; - } - - // - VeilidAPIExceptionGeneric(this.message); +List Function(dynamic) jsonListConstructor( + T Function(dynamic) jsonConstructor) { + return (dynamic j) { + return (j as List).map((e) => jsonConstructor(e)).toList(); + }; } ////////////////////////////////////// @@ -1781,122 +65,54 @@ class VeilidVersion { } ////////////////////////////////////// -/// Stability +/// Timestamp +class Timestamp { + final BigInt value; + Timestamp({required this.value}); -enum Stability { - lowLatency, - reliable, -} + @override + String toString() { + return value.toString(); + } -extension StabilityExt on Stability { - String get json { - return name.toPascalCase(); + Timestamp.fromString(String s) : value = BigInt.parse(s); + + Timestamp.fromJson(dynamic json) : this.fromString(json as String); + String toJson() { + return toString(); + } + + TimestampDuration diff(Timestamp other) { + return TimestampDuration(value: value - other.value); + } + + Timestamp offset(TimestampDuration dur) { + return Timestamp(value: value + dur.value); } } -Stability stabilityFromJson(String j) { - return Stability.values.byName(j.toCamelCase()); -} +class TimestampDuration { + final BigInt value; + TimestampDuration({required this.value}); -////////////////////////////////////// -/// Sequencing - -enum Sequencing { - noPreference, - preferOrdered, - ensureOrdered, -} 
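// Illustrative sketch (not from the Veilid sources): the enum <-> JSON
// casing convention used above. Dart enum values are camelCase; on the wire
// they are PascalCase, so the extensions convert with toPascalCase() and
// toCamelCase() from package:change_case. The same pattern applies to
// Sequencing just below.
void enumJsonCasingExample() {
  assert(Stability.lowLatency.json == 'LowLatency');
  assert(stabilityFromJson('Reliable') == Stability.reliable);
}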
- -extension SequencingExt on Sequencing { - String get json { - return name.toPascalCase(); - } -} - -Sequencing sequencingFromJson(String j) { - return Sequencing.values.byName(j.toCamelCase()); -} - -////////////////////////////////////// -/// RouteBlob -class RouteBlob { - final String routeId; - final Uint8List blob; - - RouteBlob(this.routeId, this.blob); - - RouteBlob.fromJson(dynamic json) - : routeId = json['route_id'], - blob = base64UrlNoPadDecode(json['blob']); - - Map get json { - return {'route_id': routeId, 'blob': base64UrlNoPadEncode(blob)}; - } -} - -////////////////////////////////////// -/// VeilidRoutingContext -abstract class VeilidRoutingContext { - VeilidRoutingContext withPrivacy(); - VeilidRoutingContext withCustomPrivacy(Stability stability); - VeilidRoutingContext withSequencing(Sequencing sequencing); - Future appCall(String target, Uint8List request); - Future appMessage(String target, Uint8List message); -} - -///////////////////////////////////// -/// VeilidTableDB -abstract class VeilidTableDBTransaction { - Future commit(); - Future rollback(); - Future store(int col, Uint8List key, Uint8List value); - Future delete(int col, Uint8List key); - - Future storeJson(int col, Uint8List key, Object? object, - {Object? Function(Object? nonEncodable)? toEncodable}) async { - return store(col, key, - utf8.encoder.convert(jsonEncode(object, toEncodable: toEncodable))); + @override + String toString() { + return value.toString(); } - Future storeStringJson(int col, String key, Object? object, - {Object? Function(Object? nonEncodable)? toEncodable}) { - return storeJson(col, utf8.encoder.convert(key), object, - toEncodable: toEncodable); - } -} + TimestampDuration.fromString(String s) : value = BigInt.parse(s); -abstract class VeilidTableDB { - int getColumnCount(); - List getKeys(int col); - VeilidTableDBTransaction transact(); - Future store(int col, Uint8List key, Uint8List value); - Future load(int col, Uint8List key); - Future delete(int col, Uint8List key); - - Future storeJson(int col, Uint8List key, Object? object, - {Object? Function(Object? nonEncodable)? toEncodable}) { - return store(col, key, - utf8.encoder.convert(jsonEncode(object, toEncodable: toEncodable))); + TimestampDuration.fromJson(dynamic json) : this.fromString(json as String); + String toJson() { + return toString(); } - Future storeStringJson(int col, String key, Object? object, - {Object? Function(Object? nonEncodable)? toEncodable}) { - return storeJson(col, utf8.encoder.convert(key), object, - toEncodable: toEncodable); + int toMillis() { + return (value ~/ BigInt.from(1000)).toInt(); } - Future loadJson(int col, Uint8List key, - {Object? Function(Object? key, Object? value)? reviver}) async { - var s = await load(col, key); - if (s == null) { - return null; - } - return jsonDecode(utf8.decode(s, allowMalformed: false), reviver: reviver); - } - - Future loadStringJson(int col, String key, - {Object? Function(Object? key, Object? value)? 
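// Illustrative sketch (not from the Veilid sources): the new Timestamp and
// TimestampDuration types above wrap microsecond counts in BigInt and
// serialize as decimal strings. The starting value is hypothetical.
void timestampExample() {
  final start = Timestamp.fromString('1685000000000000');
  final end = start.offset(TimestampDuration(value: BigInt.from(2500000)));
  final elapsed = end.diff(start); // TimestampDuration of 2.5 seconds
  assert(elapsed.toMillis() == 2500); // integer milliseconds
  assert(end.toJson() == '1685000002500000'); // round-trips as a string
}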
reviver}) { - return loadJson(col, utf8.encoder.convert(key), reviver: reviver); + BigInt toMicros(Timestamp other) { + return value; } } @@ -1904,7 +120,7 @@ abstract class VeilidTableDB { /// Veilid singleton factory abstract class Veilid { - static late Veilid instance = getVeilid(); + static Veilid instance = getVeilid(); void initializeVeilidCore(Map platformConfigJson); void changeLogLevel(String layer, VeilidConfigLogLevel logLevel); @@ -1914,6 +130,16 @@ abstract class Veilid { Future detach(); Future shutdownVeilidCore(); + // Crypto + List validCryptoKinds(); + Future getCryptoSystem(CryptoKind kind); + Future bestCryptoSystem(); + Future> verifySignatures( + List nodeIds, Uint8List data, List signatures); + Future> generateSignatures( + Uint8List data, List keyPairs); + Future generateKeyPair(CryptoKind kind); + // Routing context Future routingContext(); @@ -1932,6 +158,7 @@ abstract class Veilid { Future deleteTableDB(String name); // Misc + Timestamp now(); String veilidVersionString(); VeilidVersion veilidVersion(); Future debug(String command); diff --git a/veilid-flutter/lib/veilid_api_exception.dart b/veilid-flutter/lib/veilid_api_exception.dart new file mode 100644 index 00000000..40a9db66 --- /dev/null +++ b/veilid-flutter/lib/veilid_api_exception.dart @@ -0,0 +1,286 @@ +////////////////////////////////////// +/// VeilidAPIException + +abstract class VeilidAPIException implements Exception { + factory VeilidAPIException.fromJson(dynamic json) { + switch (json["kind"]) { + case "NotInitialized": + { + return VeilidAPIExceptionNotInitialized(); + } + case "AlreadyInitialized": + { + return VeilidAPIExceptionAlreadyInitialized(); + } + case "Timeout": + { + return VeilidAPIExceptionTimeout(); + } + case "TryAgain": + { + return VeilidAPIExceptionTryAgain(); + } + case "Shutdown": + { + return VeilidAPIExceptionShutdown(); + } + case "InvalidTarget": + { + return VeilidAPIExceptionInvalidTarget(); + } + case "NoConnection": + { + return VeilidAPIExceptionNoConnection(json["message"]); + } + case "KeyNotFound": + { + return VeilidAPIExceptionKeyNotFound(json["key"]); + } + case "Internal": + { + return VeilidAPIExceptionInternal(json["message"]); + } + case "Unimplemented": + { + return VeilidAPIExceptionUnimplemented(json["unimplemented"]); + } + case "ParseError": + { + return VeilidAPIExceptionParseError(json["message"], json["value"]); + } + case "InvalidArgument": + { + return VeilidAPIExceptionInvalidArgument( + json["context"], json["argument"], json["value"]); + } + case "MissingArgument": + { + return VeilidAPIExceptionMissingArgument( + json["context"], json["argument"]); + } + case "Generic": + { + return VeilidAPIExceptionGeneric(json["message"]); + } + default: + { + throw VeilidAPIExceptionInternal( + "Invalid VeilidAPIException type: ${json['kind']}"); + } + } + } + + String toDisplayError(); +} + +class VeilidAPIExceptionNotInitialized implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: NotInitialized"; + } + + @override + String toDisplayError() { + return "Not initialized"; + } +} + +class VeilidAPIExceptionAlreadyInitialized implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: AlreadyInitialized"; + } + + @override + String toDisplayError() { + return "Already initialized"; + } +} + +class VeilidAPIExceptionTimeout implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: Timeout"; + } + + @override + String toDisplayError() { + 
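// Illustrative sketch (not from the Veilid sources): exercising the crypto
// accessors added to the Veilid singleton above. Type parameters are not
// visible in this hunk, so the return types (VeilidCryptoSystem, KeyPair,
// Signature) are assumptions; cryptoKindToString and KeyPair come from
// veilid_crypto.dart later in this patch, and the payload is hypothetical.
Future<void> cryptoAccessorExample() async {
  final api = Veilid.instance;
  final kinds = api.validCryptoKinds(); // e.g. [cryptoKindVLD0]
  print('supported crypto kinds: ${kinds.map(cryptoKindToString)}');

  final cs = await api.bestCryptoSystem();
  final keyPair = await cs.generateKeyPair(); // key + secret

  final data = Uint8List.fromList([1, 2, 3]);
  final signature = await cs.sign(keyPair.key, keyPair.secret, data);
  print('signed ${data.length} bytes with ${cryptoKindToString(cs.kind())}: '
      '$signature');
}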
return "Timeout"; + } +} + +class VeilidAPIExceptionTryAgain implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: TryAgain"; + } + + @override + String toDisplayError() { + return "Try again"; + } +} + +class VeilidAPIExceptionShutdown implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: Shutdown"; + } + + @override + String toDisplayError() { + return "Currently shut down"; + } +} + +class VeilidAPIExceptionInvalidTarget implements VeilidAPIException { + @override + String toString() { + return "VeilidAPIException: InvalidTarget"; + } + + @override + String toDisplayError() { + return "Invalid target"; + } +} + +class VeilidAPIExceptionNoConnection implements VeilidAPIException { + final String message; + + @override + String toString() { + return "VeilidAPIException: NoConnection (message: $message)"; + } + + @override + String toDisplayError() { + return "No connection: $message"; + } + + // + VeilidAPIExceptionNoConnection(this.message); +} + +class VeilidAPIExceptionKeyNotFound implements VeilidAPIException { + final String key; + + @override + String toString() { + return "VeilidAPIException: KeyNotFound (key: $key)"; + } + + @override + String toDisplayError() { + return "Key not found: $key"; + } + + // + VeilidAPIExceptionKeyNotFound(this.key); +} + +class VeilidAPIExceptionInternal implements VeilidAPIException { + final String message; + + @override + String toString() { + return "VeilidAPIException: Internal ($message)"; + } + + @override + String toDisplayError() { + return "Internal error: $message"; + } + + // + VeilidAPIExceptionInternal(this.message); +} + +class VeilidAPIExceptionUnimplemented implements VeilidAPIException { + final String message; + + @override + String toString() { + return "VeilidAPIException: Unimplemented ($message)"; + } + + @override + String toDisplayError() { + return "Unimplemented: $message"; + } + + // + VeilidAPIExceptionUnimplemented(this.message); +} + +class VeilidAPIExceptionParseError implements VeilidAPIException { + final String message; + final String value; + + @override + String toString() { + return "VeilidAPIException: ParseError ($message)\n value: $value"; + } + + @override + String toDisplayError() { + return "Parse error: $message"; + } + + // + VeilidAPIExceptionParseError(this.message, this.value); +} + +class VeilidAPIExceptionInvalidArgument implements VeilidAPIException { + final String context; + final String argument; + final String value; + + @override + String toString() { + return "VeilidAPIException: InvalidArgument ($context:$argument)\n value: $value"; + } + + @override + String toDisplayError() { + return "Invalid argument for $context: $argument"; + } + + // + VeilidAPIExceptionInvalidArgument(this.context, this.argument, this.value); +} + +class VeilidAPIExceptionMissingArgument implements VeilidAPIException { + final String context; + final String argument; + + @override + String toString() { + return "VeilidAPIException: MissingArgument ($context:$argument)"; + } + + @override + String toDisplayError() { + return "Missing argument for $context: $argument"; + } + + // + VeilidAPIExceptionMissingArgument(this.context, this.argument); +} + +class VeilidAPIExceptionGeneric implements VeilidAPIException { + final String message; + + @override + String toString() { + return "VeilidAPIException: Generic (message: $message)"; + } + + @override + String toDisplayError() { + return message; + } + + // + 
VeilidAPIExceptionGeneric(this.message); +} diff --git a/veilid-flutter/lib/veilid_config.dart b/veilid-flutter/lib/veilid_config.dart new file mode 100644 index 00000000..7a2134a8 --- /dev/null +++ b/veilid-flutter/lib/veilid_config.dart @@ -0,0 +1,947 @@ +import 'package:change_case/change_case.dart'; + +import 'veilid.dart'; + +////////////////////////////////////////////////////////// +// FFI Platform-specific config + +class VeilidFFIConfigLoggingTerminal { + bool enabled; + VeilidConfigLogLevel level; + + VeilidFFIConfigLoggingTerminal({ + required this.enabled, + required this.level, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'level': level.toJson(), + }; + } + + VeilidFFIConfigLoggingTerminal.fromJson(dynamic json) + : enabled = json['enabled'], + level = VeilidConfigLogLevel.fromJson(json['level']); +} + +class VeilidFFIConfigLoggingOtlp { + bool enabled; + VeilidConfigLogLevel level; + String grpcEndpoint; + String serviceName; + + VeilidFFIConfigLoggingOtlp({ + required this.enabled, + required this.level, + required this.grpcEndpoint, + required this.serviceName, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'level': level.toJson(), + 'grpc_endpoint': grpcEndpoint, + 'service_name': serviceName, + }; + } + + VeilidFFIConfigLoggingOtlp.fromJson(dynamic json) + : enabled = json['enabled'], + level = VeilidConfigLogLevel.fromJson(json['level']), + grpcEndpoint = json['grpc_endpoint'], + serviceName = json['service_name']; +} + +class VeilidFFIConfigLoggingApi { + bool enabled; + VeilidConfigLogLevel level; + + VeilidFFIConfigLoggingApi({ + required this.enabled, + required this.level, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'level': level.toJson(), + }; + } + + VeilidFFIConfigLoggingApi.fromJson(dynamic json) + : enabled = json['enabled'], + level = VeilidConfigLogLevel.fromJson(json['level']); +} + +class VeilidFFIConfigLogging { + VeilidFFIConfigLoggingTerminal terminal; + VeilidFFIConfigLoggingOtlp otlp; + VeilidFFIConfigLoggingApi api; + + VeilidFFIConfigLogging( + {required this.terminal, required this.otlp, required this.api}); + + Map toJson() { + return { + 'terminal': terminal.toJson(), + 'otlp': otlp.toJson(), + 'api': api.toJson(), + }; + } + + VeilidFFIConfigLogging.fromJson(dynamic json) + : terminal = VeilidFFIConfigLoggingTerminal.fromJson(json['terminal']), + otlp = VeilidFFIConfigLoggingOtlp.fromJson(json['otlp']), + api = VeilidFFIConfigLoggingApi.fromJson(json['api']); +} + +class VeilidFFIConfig { + VeilidFFIConfigLogging logging; + + VeilidFFIConfig({ + required this.logging, + }); + + Map toJson() { + return { + 'logging': logging.toJson(), + }; + } + + VeilidFFIConfig.fromJson(Map json) + : logging = VeilidFFIConfigLogging.fromJson(json['logging']); +} + +////////////////////////////////////// +/// VeilidConfigLogLevel + +enum VeilidConfigLogLevel { + off, + error, + warn, + info, + debug, + trace; + + String toJson() { + return name.toPascalCase(); + } + + factory VeilidConfigLogLevel.fromJson(dynamic j) { + return VeilidConfigLogLevel.values.byName((j as String).toCamelCase()); + } +} + +////////////////////////////////////////////////////////// +// WASM Platform-specific config + +class VeilidWASMConfigLoggingPerformance { + bool enabled; + VeilidConfigLogLevel level; + bool logsInTimings; + bool logsInConsole; + + VeilidWASMConfigLoggingPerformance({ + required this.enabled, + required this.level, + required this.logsInTimings, + required this.logsInConsole, + }); + + Map toJson() { + return { + 
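// Illustrative sketch (not from the Veilid sources): assembling the FFI
// platform logging config defined above and handing it to
// Veilid.instance.initializeVeilidCore as a decoded JSON map. The endpoint
// and service name values are hypothetical.
void initializeWithLoggingExample() {
  final config = VeilidFFIConfig(
    logging: VeilidFFIConfigLogging(
      terminal: VeilidFFIConfigLoggingTerminal(
        enabled: true,
        level: VeilidConfigLogLevel.info,
      ),
      otlp: VeilidFFIConfigLoggingOtlp(
        enabled: false,
        level: VeilidConfigLogLevel.trace,
        grpcEndpoint: 'localhost:4317',
        serviceName: 'example-app',
      ),
      api: VeilidFFIConfigLoggingApi(
        enabled: true,
        level: VeilidConfigLogLevel.info,
      ),
    ),
  );
  Veilid.instance.initializeVeilidCore(config.toJson());
}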
'enabled': enabled, + 'level': level.toJson(), + 'logs_in_timings': logsInTimings, + 'logs_in_console': logsInConsole, + }; + } + + VeilidWASMConfigLoggingPerformance.fromJson(dynamic json) + : enabled = json['enabled'], + level = VeilidConfigLogLevel.fromJson(json['level']), + logsInTimings = json['logs_in_timings'], + logsInConsole = json['logs_in_console']; +} + +class VeilidWASMConfigLoggingApi { + bool enabled; + VeilidConfigLogLevel level; + + VeilidWASMConfigLoggingApi({ + required this.enabled, + required this.level, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'level': level.toJson(), + }; + } + + VeilidWASMConfigLoggingApi.fromJson(dynamic json) + : enabled = json['enabled'], + level = VeilidConfigLogLevel.fromJson(json['level']); +} + +class VeilidWASMConfigLogging { + VeilidWASMConfigLoggingPerformance performance; + VeilidWASMConfigLoggingApi api; + + VeilidWASMConfigLogging({required this.performance, required this.api}); + + Map toJson() { + return { + 'performance': performance.toJson(), + 'api': api.toJson(), + }; + } + + VeilidWASMConfigLogging.fromJson(dynamic json) + : performance = + VeilidWASMConfigLoggingPerformance.fromJson(json['performance']), + api = VeilidWASMConfigLoggingApi.fromJson(json['api']); +} + +class VeilidWASMConfig { + VeilidWASMConfigLogging logging; + + VeilidWASMConfig({ + required this.logging, + }); + + Map toJson() { + return { + 'logging': logging.toJson(), + }; + } + + VeilidWASMConfig.fromJson(dynamic json) + : logging = VeilidWASMConfigLogging.fromJson(json['logging']); +} + +////////////////////////////////////// +/// VeilidConfig + +class VeilidConfigHTTPS { + bool enabled; + String listenAddress; + String path; + String? url; + + VeilidConfigHTTPS({ + required this.enabled, + required this.listenAddress, + required this.path, + this.url, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'listen_address': listenAddress, + 'path': path, + 'url': url + }; + } + + VeilidConfigHTTPS.fromJson(dynamic json) + : enabled = json['enabled'], + listenAddress = json['listen_address'], + path = json['path'], + url = json['url']; +} + +//////////// + +class VeilidConfigHTTP { + bool enabled; + String listenAddress; + String path; + String? url; + + VeilidConfigHTTP({ + required this.enabled, + required this.listenAddress, + required this.path, + this.url, + }); + + Map toJson() { + return { + 'enabled': enabled, + 'listen_address': listenAddress, + 'path': path, + 'url': url + }; + } + + VeilidConfigHTTP.fromJson(dynamic json) + : enabled = json['enabled'], + listenAddress = json['listen_address'], + path = json['path'], + url = json['url']; +} + +//////////// + +class VeilidConfigApplication { + VeilidConfigHTTPS https; + VeilidConfigHTTP http; + + VeilidConfigApplication({ + required this.https, + required this.http, + }); + + Map toJson() { + return { + 'https': https.toJson(), + 'http': http.toJson(), + }; + } + + VeilidConfigApplication.fromJson(dynamic json) + : https = VeilidConfigHTTPS.fromJson(json['https']), + http = VeilidConfigHTTP.fromJson(json['http']); +} + +//////////// + +class VeilidConfigUDP { + bool enabled; + int socketPoolSize; + String listenAddress; + String? 
publicAddress; + + VeilidConfigUDP( + {required this.enabled, + required this.socketPoolSize, + required this.listenAddress, + this.publicAddress}); + + Map toJson() { + return { + 'enabled': enabled, + 'socket_pool_size': socketPoolSize, + 'listen_address': listenAddress, + 'public_address': publicAddress, + }; + } + + VeilidConfigUDP.fromJson(dynamic json) + : enabled = json['enabled'], + socketPoolSize = json['socket_pool_size'], + listenAddress = json['listen_address'], + publicAddress = json['publicAddress']; +} + +//////////// + +class VeilidConfigTCP { + bool connect; + bool listen; + int maxConnections; + String listenAddress; + String? publicAddress; + + VeilidConfigTCP( + {required this.connect, + required this.listen, + required this.maxConnections, + required this.listenAddress, + this.publicAddress}); + + Map toJson() { + return { + 'connect': connect, + 'listen': listen, + 'max_connections': maxConnections, + 'listen_address': listenAddress, + 'public_address': publicAddress, + }; + } + + VeilidConfigTCP.fromJson(dynamic json) + : connect = json['connect'], + listen = json['listen'], + maxConnections = json['max_connections'], + listenAddress = json['listen_address'], + publicAddress = json['publicAddress']; +} + +//////////// + +class VeilidConfigWS { + bool connect; + bool listen; + int maxConnections; + String listenAddress; + String path; + String? url; + + VeilidConfigWS( + {required this.connect, + required this.listen, + required this.maxConnections, + required this.listenAddress, + required this.path, + this.url}); + + Map toJson() { + return { + 'connect': connect, + 'listen': listen, + 'max_connections': maxConnections, + 'listen_address': listenAddress, + 'path': path, + 'url': url, + }; + } + + VeilidConfigWS.fromJson(dynamic json) + : connect = json['connect'], + listen = json['listen'], + maxConnections = json['max_connections'], + listenAddress = json['listen_address'], + path = json['path'], + url = json['url']; +} + +//////////// + +class VeilidConfigWSS { + bool connect; + bool listen; + int maxConnections; + String listenAddress; + String path; + String? 
url; + + VeilidConfigWSS( + {required this.connect, + required this.listen, + required this.maxConnections, + required this.listenAddress, + required this.path, + this.url}); + + Map toJson() { + return { + 'connect': connect, + 'listen': listen, + 'max_connections': maxConnections, + 'listen_address': listenAddress, + 'path': path, + 'url': url, + }; + } + + VeilidConfigWSS.fromJson(dynamic json) + : connect = json['connect'], + listen = json['listen'], + maxConnections = json['max_connections'], + listenAddress = json['listen_address'], + path = json['path'], + url = json['url']; +} + +//////////// + +class VeilidConfigProtocol { + VeilidConfigUDP udp; + VeilidConfigTCP tcp; + VeilidConfigWS ws; + VeilidConfigWSS wss; + + VeilidConfigProtocol({ + required this.udp, + required this.tcp, + required this.ws, + required this.wss, + }); + + Map toJson() { + return { + 'udp': udp.toJson(), + 'tcp': tcp.toJson(), + 'ws': ws.toJson(), + 'wss': wss.toJson(), + }; + } + + VeilidConfigProtocol.fromJson(dynamic json) + : udp = VeilidConfigUDP.fromJson(json['udp']), + tcp = VeilidConfigTCP.fromJson(json['tcp']), + ws = VeilidConfigWS.fromJson(json['ws']), + wss = VeilidConfigWSS.fromJson(json['wss']); +} + +//////////// + +class VeilidConfigTLS { + String certificatePath; + String privateKeyPath; + int connectionInitialTimeoutMs; + + VeilidConfigTLS({ + required this.certificatePath, + required this.privateKeyPath, + required this.connectionInitialTimeoutMs, + }); + + Map toJson() { + return { + 'certificate_path': certificatePath, + 'private_key_path': privateKeyPath, + 'connection_initial_timeout_ms': connectionInitialTimeoutMs, + }; + } + + VeilidConfigTLS.fromJson(dynamic json) + : certificatePath = json['certificate_path'], + privateKeyPath = json['private_key_path'], + connectionInitialTimeoutMs = json['connection_initial_timeout_ms']; +} + +//////////// + +class VeilidConfigDHT { + int resolveNodeTimeoutMs; + int resolveNodeCount; + int resolveNodeFanout; + int maxFindNodeCount; + int getValueTimeoutMs; + int getValueCount; + int getValueFanout; + int setValueTimeoutMs; + int setValueCount; + int setValueFanout; + int minPeerCount; + int minPeerRefreshTimeMs; + int validateDialInfoReceiptTimeMs; + int localSubkeyCacheSize; + int localMaxSubkeyCacheMemoryMb; + int remoteSubkeyCacheSize; + int remoteMaxRecords; + int remoteMaxSubkeyCacheMemoryMb; + int remoteMaxStorageSpaceMb; + + VeilidConfigDHT( + {required this.resolveNodeTimeoutMs, + required this.resolveNodeCount, + required this.resolveNodeFanout, + required this.maxFindNodeCount, + required this.getValueTimeoutMs, + required this.getValueCount, + required this.getValueFanout, + required this.setValueTimeoutMs, + required this.setValueCount, + required this.setValueFanout, + required this.minPeerCount, + required this.minPeerRefreshTimeMs, + required this.validateDialInfoReceiptTimeMs, + required this.localSubkeyCacheSize, + required this.localMaxSubkeyCacheMemoryMb, + required this.remoteSubkeyCacheSize, + required this.remoteMaxRecords, + required this.remoteMaxSubkeyCacheMemoryMb, + required this.remoteMaxStorageSpaceMb}); + + Map toJson() { + return { + 'max_find_node_count': maxFindNodeCount, + 'resolve_node_timeout_ms': resolveNodeTimeoutMs, + 'resolve_node_count': resolveNodeCount, + 'resolve_node_fanout': resolveNodeFanout, + 'get_value_timeout_ms': getValueTimeoutMs, + 'get_value_count': getValueCount, + 'get_value_fanout': getValueFanout, + 'set_value_timeout_ms': setValueTimeoutMs, + 'set_value_count': setValueCount, + 
'set_value_fanout': setValueFanout, + 'min_peer_count': minPeerCount, + 'min_peer_refresh_time_ms': minPeerRefreshTimeMs, + 'validate_dial_info_receipt_time_ms': validateDialInfoReceiptTimeMs, + 'local_subkey_cache_size': localSubkeyCacheSize, + 'local_max_subkey_cache_memory_mb': localMaxSubkeyCacheMemoryMb, + 'remote_subkey_cache_size': remoteSubkeyCacheSize, + 'remote_max_records': remoteMaxRecords, + 'remote_max_subkey_cache_memory_mb': remoteMaxSubkeyCacheMemoryMb, + 'remote_max_storage_space_mb': remoteMaxStorageSpaceMb, + }; + } + + VeilidConfigDHT.fromJson(dynamic json) + : resolveNodeTimeoutMs = json['resolve_node_timeout_ms'], + resolveNodeCount = json['resolve_node_count'], + resolveNodeFanout = json['resolve_node_fanout'], + maxFindNodeCount = json['max_find_node_count'], + getValueTimeoutMs = json['get_value_timeout_ms'], + getValueCount = json['get_value_count'], + getValueFanout = json['get_value_fanout'], + setValueTimeoutMs = json['set_value_timeout_ms'], + setValueCount = json['set_value_count'], + setValueFanout = json['set_value_fanout'], + minPeerCount = json['min_peer_count'], + minPeerRefreshTimeMs = json['min_peer_refresh_time_ms'], + validateDialInfoReceiptTimeMs = + json['validate_dial_info_receipt_time_ms'], + localSubkeyCacheSize = json['local_subkey_cache_size'], + localMaxSubkeyCacheMemoryMb = json['local_max_subkey_cache_memory_mb'], + remoteSubkeyCacheSize = json['remote_subkey_cache_size'], + remoteMaxRecords = json['remote_max_records'], + remoteMaxSubkeyCacheMemoryMb = + json['remote_max_subkey_cache_memory_mb'], + remoteMaxStorageSpaceMb = json['remote_max_storage_space_mb']; +} + +//////////// + +class VeilidConfigRPC { + int concurrency; + int queueSize; + int? maxTimestampBehindMs; + int? maxTimestampAheadMs; + int timeoutMs; + int maxRouteHopCount; + int defaultRouteHopCount; + + VeilidConfigRPC( + {required this.concurrency, + required this.queueSize, + this.maxTimestampBehindMs, + this.maxTimestampAheadMs, + required this.timeoutMs, + required this.maxRouteHopCount, + required this.defaultRouteHopCount}); + + Map toJson() { + return { + 'concurrency': concurrency, + 'queue_size': queueSize, + 'max_timestamp_behind_ms': maxTimestampBehindMs, + 'max_timestamp_ahead_ms': maxTimestampAheadMs, + 'timeout_ms': timeoutMs, + 'max_route_hop_count': maxRouteHopCount, + 'default_route_hop_count': defaultRouteHopCount, + }; + } + + VeilidConfigRPC.fromJson(dynamic json) + : concurrency = json['concurrency'], + queueSize = json['queue_size'], + maxTimestampBehindMs = json['max_timestamp_behind_ms'], + maxTimestampAheadMs = json['max_timestamp_ahead_ms'], + timeoutMs = json['timeout_ms'], + maxRouteHopCount = json['max_route_hop_count'], + defaultRouteHopCount = json['default_route_hop_count']; +} + +//////////// + +class VeilidConfigRoutingTable { + List nodeId; + List nodeIdSecret; + List bootstrap; + int limitOverAttached; + int limitFullyAttached; + int limitAttachedStrong; + int limitAttachedGood; + int limitAttachedWeak; + + VeilidConfigRoutingTable({ + required this.nodeId, + required this.nodeIdSecret, + required this.bootstrap, + required this.limitOverAttached, + required this.limitFullyAttached, + required this.limitAttachedStrong, + required this.limitAttachedGood, + required this.limitAttachedWeak, + }); + + Map toJson() { + return { + 'node_id': nodeId.map((p) => p.toJson()).toList(), + 'node_id_secret': nodeIdSecret.map((p) => p.toJson()).toList(), + 'bootstrap': bootstrap.map((p) => p).toList(), + 'limit_over_attached': limitOverAttached, + 
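// Illustrative sketch (not from the Veilid sources): reading the DHT and RPC
// tuning knobs defined above from a decoded configuration. `dht` and `rpc`
// are hypothetical instances (e.g. taken from VeilidConfigNetwork further
// below).
String describeTuning(VeilidConfigDHT dht, VeilidConfigRPC rpc) {
  return 'get_value: count=${dht.getValueCount} fanout=${dht.getValueFanout} '
      'timeout=${dht.getValueTimeoutMs}ms; '
      'set_value: count=${dht.setValueCount} fanout=${dht.setValueFanout} '
      'timeout=${dht.setValueTimeoutMs}ms; '
      'rpc: timeout=${rpc.timeoutMs}ms '
      'hops=${rpc.defaultRouteHopCount}/${rpc.maxRouteHopCount}';
}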
'limit_fully_attached': limitFullyAttached, + 'limit_attached_strong': limitAttachedStrong, + 'limit_attached_good': limitAttachedGood, + 'limit_attached_weak': limitAttachedWeak, + }; + } + + VeilidConfigRoutingTable.fromJson(dynamic json) + : nodeId = List.from( + json['node_id'].map((j) => PublicKey.fromJson(j))), + nodeIdSecret = List.from( + json['node_id_secret'].map((j) => PublicKey.fromJson(j))), + bootstrap = List.from(json['bootstrap'].map((j) => j)), + limitOverAttached = json['limit_over_attached'], + limitFullyAttached = json['limit_fully_attached'], + limitAttachedStrong = json['limit_attached_strong'], + limitAttachedGood = json['limit_attached_good'], + limitAttachedWeak = json['limit_attached_weak']; +} + +//////////// + +class VeilidConfigNetwork { + int connectionInitialTimeoutMs; + int connectionInactivityTimeoutMs; + int maxConnectionsPerIp4; + int maxConnectionsPerIp6Prefix; + int maxConnectionsPerIp6PrefixSize; + int maxConnectionFrequencyPerMin; + int clientWhitelistTimeoutMs; + int reverseConnectionReceiptTimeMs; + int holePunchReceiptTimeMs; + VeilidConfigRoutingTable routingTable; + VeilidConfigRPC rpc; + VeilidConfigDHT dht; + bool upnp; + bool detectAddressChanges; + int restrictedNatRetries; + VeilidConfigTLS tls; + VeilidConfigApplication application; + VeilidConfigProtocol protocol; + + VeilidConfigNetwork({ + required this.connectionInitialTimeoutMs, + required this.connectionInactivityTimeoutMs, + required this.maxConnectionsPerIp4, + required this.maxConnectionsPerIp6Prefix, + required this.maxConnectionsPerIp6PrefixSize, + required this.maxConnectionFrequencyPerMin, + required this.clientWhitelistTimeoutMs, + required this.reverseConnectionReceiptTimeMs, + required this.holePunchReceiptTimeMs, + required this.routingTable, + required this.rpc, + required this.dht, + required this.upnp, + required this.detectAddressChanges, + required this.restrictedNatRetries, + required this.tls, + required this.application, + required this.protocol, + }); + + Map toJson() { + return { + 'connection_initial_timeout_ms': connectionInitialTimeoutMs, + 'connection_inactivity_timeout_ms': connectionInactivityTimeoutMs, + 'max_connections_per_ip4': maxConnectionsPerIp4, + 'max_connections_per_ip6_prefix': maxConnectionsPerIp6Prefix, + 'max_connections_per_ip6_prefix_size': maxConnectionsPerIp6PrefixSize, + 'max_connection_frequency_per_min': maxConnectionFrequencyPerMin, + 'client_whitelist_timeout_ms': clientWhitelistTimeoutMs, + 'reverse_connection_receipt_time_ms': reverseConnectionReceiptTimeMs, + 'hole_punch_receipt_time_ms': holePunchReceiptTimeMs, + 'routing_table': routingTable.toJson(), + 'rpc': rpc.toJson(), + 'dht': dht.toJson(), + 'upnp': upnp, + 'detect_address_changes': detectAddressChanges, + 'restricted_nat_retries': restrictedNatRetries, + 'tls': tls.toJson(), + 'application': application.toJson(), + 'protocol': protocol.toJson(), + }; + } + + VeilidConfigNetwork.fromJson(dynamic json) + : connectionInitialTimeoutMs = json['connection_initial_timeout_ms'], + connectionInactivityTimeoutMs = + json['connection_inactivity_timeout_ms'], + maxConnectionsPerIp4 = json['max_connections_per_ip4'], + maxConnectionsPerIp6Prefix = json['max_connections_per_ip6_prefix'], + maxConnectionsPerIp6PrefixSize = + json['max_connections_per_ip6_prefix_size'], + maxConnectionFrequencyPerMin = json['max_connection_frequency_per_min'], + clientWhitelistTimeoutMs = json['client_whitelist_timeout_ms'], + reverseConnectionReceiptTimeMs = + json['reverse_connection_receipt_time_ms'], 
+ holePunchReceiptTimeMs = json['hole_punch_receipt_time_ms'], + routingTable = VeilidConfigRoutingTable.fromJson(json['routing_table']), + rpc = VeilidConfigRPC.fromJson(json['rpc']), + dht = VeilidConfigDHT.fromJson(json['dht']), + upnp = json['upnp'], + detectAddressChanges = json['detect_address_changes'], + restrictedNatRetries = json['restricted_nat_retries'], + tls = VeilidConfigTLS.fromJson(json['tls']), + application = VeilidConfigApplication.fromJson(json['application']), + protocol = VeilidConfigProtocol.fromJson(json['protocol']); +} + +//////////// + +class VeilidConfigTableStore { + String directory; + bool delete; + + VeilidConfigTableStore({ + required this.directory, + required this.delete, + }); + + Map toJson() { + return {'directory': directory, 'delete': delete}; + } + + VeilidConfigTableStore.fromJson(dynamic json) + : directory = json['directory'], + delete = json['delete']; +} + +//////////// + +class VeilidConfigBlockStore { + String directory; + bool delete; + + VeilidConfigBlockStore({ + required this.directory, + required this.delete, + }); + + Map toJson() { + return {'directory': directory, 'delete': delete}; + } + + VeilidConfigBlockStore.fromJson(dynamic json) + : directory = json['directory'], + delete = json['delete']; +} + +//////////// + +class VeilidConfigProtectedStore { + bool allowInsecureFallback; + bool alwaysUseInsecureStorage; + String directory; + bool delete; + String deviceEncryptionKey; + String? newDeviceEncryptionKey; + + VeilidConfigProtectedStore( + {required this.allowInsecureFallback, + required this.alwaysUseInsecureStorage, + required this.directory, + required this.delete, + required this.deviceEncryptionKey, + String? newDeviceEncryptionKey}); + + Map toJson() { + return { + 'allow_insecure_fallback': allowInsecureFallback, + 'always_use_insecure_storage': alwaysUseInsecureStorage, + 'directory': directory, + 'delete': delete, + 'device_encryption_key': deviceEncryptionKey, + 'new_device_encryption_key': newDeviceEncryptionKey, + }; + } + + VeilidConfigProtectedStore.fromJson(dynamic json) + : allowInsecureFallback = json['allow_insecure_fallback'], + alwaysUseInsecureStorage = json['always_use_insecure_storage'], + directory = json['directory'], + delete = json['delete'], + deviceEncryptionKey = json['device_encryption_key'], + newDeviceEncryptionKey = json['new_device_encryption_key']; +} + +//////////// + +class VeilidConfigCapabilities { + bool protocolUDP; + bool protocolConnectTCP; + bool protocolAcceptTCP; + bool protocolConnectWS; + bool protocolAcceptWS; + bool protocolConnectWSS; + bool protocolAcceptWSS; + + VeilidConfigCapabilities({ + required this.protocolUDP, + required this.protocolConnectTCP, + required this.protocolAcceptTCP, + required this.protocolConnectWS, + required this.protocolAcceptWS, + required this.protocolConnectWSS, + required this.protocolAcceptWSS, + }); + + Map toJson() { + return { + 'protocol_udp': protocolUDP, + 'protocol_connect_tcp': protocolConnectTCP, + 'protocol_accept_tcp': protocolAcceptTCP, + 'protocol_connect_ws': protocolConnectWS, + 'protocol_accept_ws': protocolAcceptWS, + 'protocol_connect_wss': protocolConnectWSS, + 'protocol_accept_wss': protocolAcceptWSS, + }; + } + + VeilidConfigCapabilities.fromJson(dynamic json) + : protocolUDP = json['protocol_udp'], + protocolConnectTCP = json['protocol_connect_tcp'], + protocolAcceptTCP = json['protocol_accept_tcp'], + protocolConnectWS = json['protocol_connect_ws'], + protocolAcceptWS = json['protocol_accept_ws'], + protocolConnectWSS = 
json['protocol_connect_wss'], + protocolAcceptWSS = json['protocol_accept_wss']; +} + +//////////// + +class VeilidConfig { + String programName; + String namespace; + VeilidConfigCapabilities capabilities; + VeilidConfigProtectedStore protectedStore; + VeilidConfigTableStore tableStore; + VeilidConfigBlockStore blockStore; + VeilidConfigNetwork network; + + VeilidConfig({ + required this.programName, + required this.namespace, + required this.capabilities, + required this.protectedStore, + required this.tableStore, + required this.blockStore, + required this.network, + }); + + Map toJson() { + return { + 'program_name': programName, + 'namespace': namespace, + 'capabilities': capabilities.toJson(), + 'protected_store': protectedStore.toJson(), + 'table_store': tableStore.toJson(), + 'block_store': blockStore.toJson(), + 'network': network.toJson() + }; + } + + VeilidConfig.fromJson(dynamic json) + : programName = json['program_name'], + namespace = json['namespace'], + capabilities = VeilidConfigCapabilities.fromJson(json['capabilities']), + protectedStore = + VeilidConfigProtectedStore.fromJson(json['protected_store']), + tableStore = VeilidConfigTableStore.fromJson(json['table_store']), + blockStore = VeilidConfigBlockStore.fromJson(json['block_store']), + network = VeilidConfigNetwork.fromJson(json['network']); +} diff --git a/veilid-flutter/lib/veilid_crypto.dart b/veilid-flutter/lib/veilid_crypto.dart new file mode 100644 index 00000000..881f98db --- /dev/null +++ b/veilid-flutter/lib/veilid_crypto.dart @@ -0,0 +1,167 @@ +import 'dart:async'; +import 'dart:typed_data'; + +import 'package:charcode/charcode.dart'; + +import 'veilid_encoding.dart'; +import 'veilid.dart'; + +////////////////////////////////////// +/// CryptoKind + +typedef CryptoKind = int; +const CryptoKind cryptoKindVLD0 = + $V << 0 | $L << 8 | $D << 16 | $0 << 24; // "VLD0" +const CryptoKind cryptoKindNONE = + $N << 0 | $O << 8 | $N << 16 | $E << 24; // "NONE" + +String cryptoKindToString(CryptoKind kind) { + return "${String.fromCharCode(kind & 0xFF)}${String.fromCharCode((kind >> 8) & 0xFF)}${String.fromCharCode((kind >> 16) & 0xFF)}${String.fromCharCode((kind >> 24) & 0xFF)}"; +} + +CryptoKind cryptoKindFromString(String s) { + if (s.codeUnits.length != 4) { + throw const FormatException("malformed string"); + } + CryptoKind kind = s.codeUnits[0] | + s.codeUnits[1] << 8 | + s.codeUnits[2] << 16 | + s.codeUnits[3] << 24; + return kind; +} + +////////////////////////////////////// +/// Types + +class Typed { + late CryptoKind kind; + late V value; + Typed({required this.kind, required this.value}); + + @override + String toString() { + return "${cryptoKindToString(kind)}:$value"; + } + + Typed.fromString(String s) { + var parts = s.split(":"); + if (parts.length < 2 || parts[0].codeUnits.length != 4) { + throw const FormatException("malformed string"); + } + kind = parts[0].codeUnits[0] | + parts[0].codeUnits[1] << 8 | + parts[0].codeUnits[2] << 16 | + parts[0].codeUnits[3] << 24; + value = EncodedString.fromString(parts.sublist(1).join(":")); + } + + String toJson() { + return toString(); + } + + Typed.fromJson(dynamic json) : this.fromString(json as String); +} + +class KeyPair { + late PublicKey key; + late PublicKey secret; + KeyPair({required this.key, required this.secret}); + + @override + String toString() { + return "${key.toString()}:${secret.toString()}"; + } + + KeyPair.fromString(String s) { + var parts = s.split(":"); + if (parts.length != 2 || + parts[0].codeUnits.length != 43 || + 
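// Illustrative sketch (not from the Veilid sources): CryptoKind is a
// four-character code packed little-endian into a 32-bit int, and Typed
// values (TypedKey, TypedSignature, defined just below) stringify as
// "<KIND>:<base64url value>", e.g. "VLD0:...".
void cryptoKindExample() {
  assert(cryptoKindToString(cryptoKindVLD0) == 'VLD0');
  assert(cryptoKindToString(cryptoKindNONE) == 'NONE');
  assert(cryptoKindFromString('VLD0') == cryptoKindVLD0);
}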
parts[1].codeUnits.length != 43) { + throw const FormatException("malformed string"); + } + key = PublicKey(parts[0]); + secret = PublicKey(parts[1]); + } + + String toJson() { + return toString(); + } + + KeyPair.fromJson(dynamic json) : this.fromString(json as String); +} + +class TypedKeyPair { + late CryptoKind kind; + late PublicKey key; + late PublicKey secret; + TypedKeyPair({required this.kind, required this.key, required this.secret}); + + @override + String toString() { + return "${cryptoKindToString(kind)}:${key.toString()}:${secret.toString()}"; + } + + TypedKeyPair.fromString(String s) { + var parts = s.split(":"); + if (parts.length != 3 || + parts[0].codeUnits.length != 4 || + parts[1].codeUnits.length != 43 || + parts[2].codeUnits.length != 43) { + throw VeilidAPIExceptionInvalidArgument("malformed string", "s", s); + } + kind = cryptoKindFromString(parts[0]); + key = PublicKey(parts[1]); + secret = PublicKey(parts[2]); + } + + String toJson() { + return toString(); + } + + TypedKeyPair.fromJson(dynamic json) : this.fromString(json as String); +} + +typedef CryptoKey = FixedEncodedString43; +typedef Signature = FixedEncodedString86; +typedef Nonce = FixedEncodedString32; + +typedef PublicKey = CryptoKey; +typedef SecretKey = CryptoKey; +typedef HashDigest = CryptoKey; +typedef SharedSecret = CryptoKey; +typedef CryptoKeyDistance = CryptoKey; + +typedef TypedKey = Typed; +typedef TypedSignature = Typed; + +////////////////////////////////////// +/// VeilidCryptoSystem + +abstract class VeilidCryptoSystem { + CryptoKind kind(); + Future cachedDH(PublicKey key, SecretKey secret); + Future computeDH(PublicKey key, SecretKey secret); + Future randomBytes(int len); + Future defaultSaltLength(); + Future hashPassword(Uint8List password, Uint8List salt); + Future verifyPassword(Uint8List password, String passwordHash); + Future deriveSharedSecret(Uint8List password, Uint8List salt); + Future randomNonce(); + Future randomSharedSecret(); + Future generateKeyPair(); + Future generateHash(Uint8List data); + //Future generateHashReader(Stream> reader); + Future validateKeyPair(PublicKey key, SecretKey secret); + Future validateHash(Uint8List data, HashDigest hash); + //Future validateHashReader(Stream> reader, HashDigest hash); + Future distance(CryptoKey key1, CryptoKey key2); + Future sign(PublicKey key, SecretKey secret, Uint8List data); + Future verify(PublicKey key, Uint8List data, Signature signature); + Future aeadOverhead(); + Future decryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? associatedData); + Future encryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? 
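// Illustrative sketch (not from the Veilid sources): an AEAD round trip with
// the VeilidCryptoSystem interface above. Assumes encryptAead/decryptAead
// resolve to the raw ciphertext/plaintext bytes (type parameters are not
// visible here), and that `cs` was obtained via bestCryptoSystem(); the
// plaintext is hypothetical and no associated data is used.
Future<void> aeadRoundTrip(VeilidCryptoSystem cs) async {
  final nonce = await cs.randomNonce();
  final sharedSecret = await cs.randomSharedSecret();
  final plaintext = Uint8List.fromList('hello veilid'.codeUnits);

  final ciphertext = await cs.encryptAead(plaintext, nonce, sharedSecret, null);
  final roundTrip = await cs.decryptAead(ciphertext, nonce, sharedSecret, null);
  assert(roundTrip.length == plaintext.length);
}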
associatedData); + Future cryptNoAuth( + Uint8List body, Nonce nonce, SharedSecret sharedSecret); +} diff --git a/veilid-flutter/lib/veilid_encoding.dart b/veilid-flutter/lib/veilid_encoding.dart new file mode 100644 index 00000000..9ef54f14 --- /dev/null +++ b/veilid-flutter/lib/veilid_encoding.dart @@ -0,0 +1,122 @@ +import 'dart:convert'; +import 'dart:typed_data'; + +String base64UrlNoPadEncode(List bytes) { + var x = base64Url.encode(bytes); + while (x.endsWith('=')) { + x = x.substring(0, x.length - 1); + } + return x; +} + +Uint8List base64UrlNoPadDecode(String source) { + source = base64.normalize(source); + return base64.decode(source); +} + +Uint8List base64UrlNoPadDecodeDynamic(dynamic source) { + source = source as String; + source = base64.normalize(source); + return base64.decode(source); +} + +abstract class EncodedString { + late String contents; + EncodedString(String s) { + validate(s); + contents = s; + } + EncodedString.encode(List b) { + var s = base64UrlNoPadEncode(b); + validate(s); + contents = s; + } + + int encodedLength(); + int decodedLength(); + void validate(String s) { + var d = base64UrlNoPadDecode(s); + if (d.length != decodedLength()) { + throw Exception("length ${s.length} should be ${encodedLength()}"); + } + } + + Uint8List decode() { + return base64UrlNoPadDecode(contents); + } + + @override + String toString() { + return contents; + } + + static T fromString(String s) { + switch (T) { + case FixedEncodedString32: + return FixedEncodedString32(s) as T; + case FixedEncodedString43: + return FixedEncodedString43(s) as T; + case FixedEncodedString86: + return FixedEncodedString86(s) as T; + default: + throw UnimplementedError(); + } + } +} + +class FixedEncodedString32 extends EncodedString { + FixedEncodedString32(String s) : super(s); + @override + int encodedLength() { + return 32; + } + + @override + int decodedLength() { + return 24; + } + + String toJson() { + return toString(); + } + + FixedEncodedString32.fromJson(dynamic json) : this(json as String); +} + +class FixedEncodedString43 extends EncodedString { + FixedEncodedString43(String s) : super(s); + @override + int encodedLength() { + return 43; + } + + @override + int decodedLength() { + return 32; + } + + String toJson() { + return toString(); + } + + FixedEncodedString43.fromJson(dynamic json) : this(json as String); +} + +class FixedEncodedString86 extends EncodedString { + FixedEncodedString86(String s) : super(s); + @override + int encodedLength() { + return 86; + } + + @override + int decodedLength() { + return 64; + } + + String toJson() { + return toString(); + } + + FixedEncodedString86.fromJson(dynamic json) : this(json as String); +} diff --git a/veilid-flutter/lib/veilid_ffi.dart b/veilid-flutter/lib/veilid_ffi.dart index dd0b13a4..5f17e7ec 100644 --- a/veilid-flutter/lib/veilid_ffi.dart +++ b/veilid-flutter/lib/veilid_ffi.dart @@ -8,7 +8,7 @@ import 'dart:typed_data'; import 'package:ffi/ffi.dart'; import 'veilid.dart'; -import 'base64url_no_pad.dart'; +import 'veilid_encoding.dart'; ////////////////////////////////////////////////////////// @@ -76,6 +76,46 @@ typedef _RoutingContextAppMessageC = Void Function( Int64, Uint32, Pointer, Pointer); typedef _RoutingContextAppMessageDart = void Function( int, int, Pointer, Pointer); +// fn routing_context_create_dht_record(port: i64, id: u32, kind: u32, schema: FfiStr) +typedef _RoutingContextCreateDHTRecordC = Void Function( + Int64, Uint32, Uint32, Pointer); +typedef _RoutingContextCreateDHTRecordDart = void Function( + int, int, 
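// Illustrative sketch (not from the Veilid sources): the no-padding
// base64url helpers above strip '=' on encode and re-normalize on decode,
// and the FixedEncodedString subclasses enforce a fixed decoded length
// (32 bytes for FixedEncodedString43, which backs CryptoKey).
void encodingExample() {
  final bytes = Uint8List.fromList(List<int>.generate(32, (i) => i));
  final encoded = base64UrlNoPadEncode(bytes);
  assert(!encoded.endsWith('=')); // padding removed
  assert(base64UrlNoPadDecode(encoded).length == 32);

  // 43 base64url characters decode to exactly 32 bytes.
  final key = FixedEncodedString43.fromJson(encoded);
  assert(key.decode().length == 32);
}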
int, Pointer); +// fn routing_context_open_dht_record(port: i64, id: u32, key: FfiStr, writer: FfiStr) +typedef _RoutingContextOpenDHTRecordC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _RoutingContextOpenDHTRecordDart = void Function( + int, int, Pointer, Pointer); +// fn routing_context_close_dht_record(port: i64, id: u32, key: FfiStr) +typedef _RoutingContextCloseDHTRecordC = Void Function( + Int64, Uint32, Pointer); +typedef _RoutingContextCloseDHTRecordDart = void Function( + int, int, Pointer); +// fn routing_context_delete_dht_record(port: i64, id: u32, key: FfiStr) +typedef _RoutingContextDeleteDHTRecordC = Void Function( + Int64, Uint32, Pointer); +typedef _RoutingContextDeleteDHTRecordDart = void Function( + int, int, Pointer); +// fn routing_context_get_dht_value(port: i64, id: u32, key: FfiStr, subkey: u32, force_refresh: bool) +typedef _RoutingContextGetDHTValueC = Void Function( + Int64, Uint32, Pointer, Uint32, Bool); +typedef _RoutingContextGetDHTValueDart = void Function( + int, int, Pointer, int, bool); +// fn routing_context_set_dht_value(port: i64, id: u32, key: FfiStr, subkey: u32, data: FfiStr) +typedef _RoutingContextSetDHTValueC = Void Function( + Int64, Uint32, Pointer, Uint32, Pointer); +typedef _RoutingContextSetDHTValueDart = void Function( + int, int, Pointer, int, Pointer); +// fn routing_context_watch_dht_values(port: i64, id: u32, key: FfiStr, subkeys: FfiStr, expiration: FfiStr, count: u32) +typedef _RoutingContextWatchDHTValuesC = Void Function( + Int64, Uint32, Pointer, Pointer, Uint64, Uint32); +typedef _RoutingContextWatchDHTValuesDart = void Function( + int, int, Pointer, Pointer, int, int); +// fn routing_context_cancel_dht_watch(port: i64, id: u32, key: FfiStr, subkeys: FfiStr) +typedef _RoutingContextCancelDHTWatchC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _RoutingContextCancelDHTWatchDart = void Function( + int, int, Pointer, Pointer); // fn new_private_route(port: i64) typedef _NewPrivateRouteC = Void Function(Int64); @@ -108,9 +148,9 @@ typedef _DeleteTableDbDart = void Function(int, Pointer); // fn table_db_get_column_count(id: u32) -> u32 typedef _TableDbGetColumnCountC = Uint32 Function(Uint32); typedef _TableDbGetColumnCountDart = int Function(int); -// fn table_db_get_keys(id: u32, col: u32) -> *mut c_char -typedef _TableDbGetKeysC = Pointer Function(Uint32, Uint32); -typedef _TableDbGetKeysDart = Pointer Function(int, int); +// fn table_db_get_keys(port: i64, id: u32, col: u32) +typedef _TableDbGetKeysC = Pointer Function(Uint64, Uint32, Uint32); +typedef _TableDbGetKeysDart = Pointer Function(int, int, int); // fn table_db_store(port: i64, id: u32, col: u32, key: FfiStr, value: FfiStr) typedef _TableDbStoreC = Void Function( Int64, Uint32, Uint32, Pointer, Pointer); @@ -144,7 +184,116 @@ typedef _TableDbTransactionDeleteC = Void Function( Int64, Uint32, Uint32, Pointer); typedef _TableDbTransactionDeleteDart = void Function( int, int, int, Pointer); +// fn valid_crypto_kinds() -> *mut c_char +typedef _ValidCryptoKindsC = Pointer Function(); +typedef _ValidCryptoKindsDart = Pointer Function(); +// fn best_crypto_kind() -> u32 +typedef _BestCryptoKindC = Uint32 Function(); +typedef _BestCryptoKindDart = int Function(); +// fn verify_signatures(port: i64, node_ids: FfiStr, data: FfiStr, signatures: FfiStr) +typedef _VerifySignaturesC = Void Function( + Int64, Pointer, Pointer, Pointer); +typedef _VerifySignaturesDart = void Function( + int, Pointer, Pointer, Pointer); +// fn generate_signatures(port: 
i64, data: FfiStr, key_pairs: FfiStr) +typedef _GenerateSignaturesC = Void Function( + Int64, Pointer, Pointer); +typedef _GenerateSignaturesDart = void Function( + int, Pointer, Pointer); +// fn generate_key_pair(port: i64, kind: u32) { +typedef _GenerateKeyPairC = Void Function(Int64, Uint32); +typedef _GenerateKeyPairDart = void Function(int, int); +// fn crypto_cached_dh(port: i64, kind: u32, key: FfiStr, secret: FfiStr) +typedef _CryptoCachedDHC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoCachedDHDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_compute_dh(port: i64, kind: u32, key: FfiStr, secret: FfiStr) +typedef _CryptoComputeDHC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoComputeDHDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_random_bytes(port: i64, kind: u32, len: u32) +typedef _CryptoRandomBytesC = Void Function(Int64, Uint32, Uint32); +typedef _CryptoRandomBytesDart = void Function(int, int, int); +// fn crypto_default_salt_length(port: i64, kind: u32) +typedef _CryptoDefaultSaltLengthC = Void Function(Int64, Uint32); +typedef _CryptoDefaultSaltLengthDart = void Function(int, int); +// fn crypto_hash_password(port: i64, kind: u32, password: FfiStr, salt: FfiStr ) +typedef _CryptoHashPasswordC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoHashPasswordDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_verify_password(port: i64, kind: u32, password: FfiStr, password_hash: FfiStr ) +typedef _CryptoVerifyPasswordC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoVerifyPasswordDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_derive_shared_secret(port: i64, kind: u32, password: FfiStr, salt: FfiStr ) +typedef _CryptoDeriveSharedSecretC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoDeriveSharedSecretDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_random_nonce(port: i64, kind: u32) +typedef _CryptoRandomNonceC = Void Function(Int64, Uint32); +typedef _CryptoRandomNonceDart = void Function(int, int); +// fn crypto_random_shared_secret(port: i64, kind: u32) +typedef _CryptoRandomSharedSecretC = Void Function(Int64, Uint32); +typedef _CryptoRandomSharedSecretDart = void Function(int, int); +// fn crypto_generate_key_pair(port: i64, kind: u32) +typedef _CryptoGenerateKeyPairC = Void Function(Int64, Uint32); +typedef _CryptoGenerateKeyPairDart = void Function(int, int); +// fn crypto_generate_hash(port: i64, kind: u32, data: FfiStr) +typedef _CryptoGenerateHashC = Void Function(Int64, Uint32, Pointer); +typedef _CryptoGenerateHashDart = void Function(int, int, Pointer); +// fn crypto_validate_key_pair(port: i64, kind: u32, key: FfiStr, secret: FfiStr) +typedef _CryptoValidateKeyPairC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoValidateKeyPairDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_validate_hash(port: i64, kind: u32, data: FfiStr, hash: FfiStr) +typedef _CryptoValidateHashC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoValidateHashDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_distance(port: i64, kind: u32, key1: FfiStr, key2: FfiStr) +typedef _CryptoDistanceC = Void Function( + Int64, Uint32, Pointer, Pointer); +typedef _CryptoDistanceDart = void Function( + int, int, Pointer, Pointer); +// fn crypto_sign(port: i64, kind: u32, key: FfiStr, secret: FfiStr, data: FfiStr) +typedef _CryptoSignC = 
Void Function( + Int64, Uint32, Pointer, Pointer, Pointer); +typedef _CryptoSignDart = void Function( + int, int, Pointer, Pointer, Pointer); +// fn crypto_verify(port: i64, kind: u32, key: FfiStr, data: FfiStr, signature: FfiStr) +typedef _CryptoVerifyC = Void Function( + Int64, Uint32, Pointer, Pointer, Pointer); +typedef _CryptoVerifyDart = void Function( + int, int, Pointer, Pointer, Pointer); +// fn crypto_aead_overhead(port: i64, kind: u32) +typedef _CryptoAeadOverheadC = Void Function(Int64, Uint32); +typedef _CryptoAeadOverheadDart = void Function(int, int); +// fn crypto_decrypt_aead(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr, associated_data: FfiStr) +typedef _CryptoDecryptAeadC = Void Function( + Int64, Uint32, Pointer, Pointer, Pointer, Pointer); +typedef _CryptoDecryptAeadDart = void Function( + int, int, Pointer, Pointer, Pointer, Pointer); +// fn crypto_encrypt_aead(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr, associated_data: FfiStr) +typedef _CryptoEncryptAeadC = Void Function( + Int64, Uint32, Pointer, Pointer, Pointer, Pointer); +typedef _CryptoEncryptAeadDart = void Function( + int, int, Pointer, Pointer, Pointer, Pointer); +// fn crypto_crypt_no_auth(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr) +typedef _CryptoCryptNoAuthC = Void Function( + Int64, Uint32, Pointer, Pointer, Pointer); +typedef _CryptoCryptNoAuthDart = void Function( + int, int, Pointer, Pointer, Pointer); + +// fn now() -> u64 +typedef _NowC = Uint64 Function(); +typedef _NowDart = int Function(); // fn debug(port: i64, log_level: FfiStr) typedef _DebugC = Void Function(Int64, Pointer); typedef _DebugDart = void Function(int, Pointer); @@ -156,7 +305,7 @@ typedef _VeilidVersionStringC = Pointer Function(); typedef _VeilidVersionStringDart = Pointer Function(); // fn veilid_version() -> VeilidVersion -class VeilidVersionFFI extends Struct { +final class VeilidVersionFFI extends Struct { @Uint32() external int major; @Uint32() @@ -253,6 +402,42 @@ Future processFutureJson( }); } +Future processFutureOptJson( + T Function(dynamic) jsonConstructor, Future future) { + return future.then((value) { + final list = value as List; + switch (list[0] as int) { + case messageErr: + { + throw VeilidAPIExceptionInternal("Internal API Error: ${list[1]}"); + } + case messageOkJson: + { + if (list[1] == null) { + return null; + } + var ret = jsonDecode(list[1] as String); + return jsonConstructor(ret); + } + case messageErrJson: + { + throw VeilidAPIException.fromJson(jsonDecode(list[1])); + } + default: + { + throw VeilidAPIExceptionInternal( + "Unexpected async return message type: ${list[0]}"); + } + } + }).catchError((e) { + // Wrap all other errors in VeilidAPIExceptionInternal + throw VeilidAPIExceptionInternal(e.toString()); + }, test: (e) { + // Pass errors that are already VeilidAPIException through without wrapping + return e is! 
VeilidAPIException; + }); +} + Future processFutureVoid(Future future) { return future.then((value) { final list = value as List; @@ -397,7 +582,7 @@ class _Ctx { class VeilidRoutingContextFFI implements VeilidRoutingContext { final _Ctx _ctx; static final Finalizer<_Ctx> _finalizer = - Finalizer((ctx) => {ctx.ffi._releaseRoutingContext(ctx.id)}); + Finalizer((ctx) => ctx.ffi._releaseRoutingContext(ctx.id)); VeilidRoutingContextFFI._(this._ctx) { _finalizer.attach(this, _ctx, detach: this); @@ -412,14 +597,14 @@ class VeilidRoutingContextFFI implements VeilidRoutingContext { @override VeilidRoutingContextFFI withCustomPrivacy(Stability stability) { final newId = _ctx.ffi._routingContextWithCustomPrivacy( - _ctx.id, stability.json.toNativeUtf8()); + _ctx.id, jsonEncode(stability).toNativeUtf8()); return VeilidRoutingContextFFI._(_Ctx(newId, _ctx.ffi)); } @override VeilidRoutingContextFFI withSequencing(Sequencing sequencing) { - final newId = _ctx.ffi - ._routingContextWithSequencing(_ctx.id, sequencing.json.toNativeUtf8()); + final newId = _ctx.ffi._routingContextWithSequencing( + _ctx.id, jsonEncode(sequencing).toNativeUtf8()); return VeilidRoutingContextFFI._(_Ctx(newId, _ctx.ffi)); } @@ -437,7 +622,7 @@ class VeilidRoutingContextFFI implements VeilidRoutingContext { } @override - Future appMessage(String target, Uint8List message) async { + Future appMessage(String target, Uint8List message) { final nativeEncodedTarget = target.toNativeUtf8(); final nativeEncodedMessage = base64UrlNoPadEncode(message).toNativeUtf8(); @@ -447,6 +632,111 @@ class VeilidRoutingContextFFI implements VeilidRoutingContext { nativeEncodedTarget, nativeEncodedMessage); return processFutureVoid(recvPort.first); } + + @override + Future createDHTRecord( + CryptoKind kind, DHTSchema schema) async { + final nativeSchema = jsonEncode(schema).toNativeUtf8(); + final recvPort = ReceivePort("routing_context_create_dht_record"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextCreateDHTRecord( + sendPort.nativePort, _ctx.id, kind, nativeSchema); + final dhtRecordDescriptor = + await processFutureJson(DHTRecordDescriptor.fromJson, recvPort.first); + return dhtRecordDescriptor; + } + + @override + Future openDHTRecord( + TypedKey key, KeyPair? writer) async { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeWriter = + writer != null ? 
jsonEncode(writer).toNativeUtf8() : nullptr; + final recvPort = ReceivePort("routing_context_open_dht_record"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextOpenDHTRecord( + sendPort.nativePort, _ctx.id, nativeKey, nativeWriter); + final dhtRecordDescriptor = + await processFutureJson(DHTRecordDescriptor.fromJson, recvPort.first); + return dhtRecordDescriptor; + } + + @override + Future closeDHTRecord(TypedKey key) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final recvPort = ReceivePort("routing_context_close_dht_record"); + final sendPort = recvPort.sendPort; + _ctx.ffi + ._routingContextCloseDHTRecord(sendPort.nativePort, _ctx.id, nativeKey); + return processFutureVoid(recvPort.first); + } + + @override + Future deleteDHTRecord(TypedKey key) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final recvPort = ReceivePort("routing_context_delete_dht_record"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextDeleteDHTRecord( + sendPort.nativePort, _ctx.id, nativeKey); + return processFutureVoid(recvPort.first); + } + + @override + Future getDHTValue( + TypedKey key, int subkey, bool forceRefresh) async { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final recvPort = ReceivePort("routing_context_get_dht_value"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextGetDHTValue( + sendPort.nativePort, _ctx.id, nativeKey, subkey, forceRefresh); + final valueData = await processFutureJson( + optFromJson(ValueData.fromJson), recvPort.first); + return valueData; + } + + @override + Future setDHTValue( + TypedKey key, int subkey, Uint8List data) async { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeData = base64UrlNoPadEncode(data).toNativeUtf8(); + + final recvPort = ReceivePort("routing_context_set_dht_value"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextSetDHTValue( + sendPort.nativePort, _ctx.id, nativeKey, subkey, nativeData); + final valueData = await processFutureJson( + optFromJson(ValueData.fromJson), recvPort.first); + return valueData; + } + + @override + Future watchDHTValues(TypedKey key, ValueSubkeyRange subkeys, + Timestamp expiration, int count) async { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSubkeys = jsonEncode(subkeys).toNativeUtf8(); + final nativeExpiration = expiration.value.toInt(); + + final recvPort = ReceivePort("routing_context_watch_dht_values"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextWatchDHTValues(sendPort.nativePort, _ctx.id, + nativeKey, nativeSubkeys, nativeExpiration, count); + final actualExpiration = Timestamp( + value: BigInt.from(await processFuturePlain(recvPort.first))); + return actualExpiration; + } + + @override + Future cancelDHTWatch(TypedKey key, ValueSubkeyRange subkeys) async { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSubkeys = jsonEncode(subkeys).toNativeUtf8(); + + final recvPort = ReceivePort("routing_context_cancel_dht_watch"); + final sendPort = recvPort.sendPort; + _ctx.ffi._routingContextCancelDHTWatch( + sendPort.nativePort, _ctx.id, nativeKey, nativeSubkeys); + final cancelled = await processFuturePlain(recvPort.first); + return cancelled; + } } class _TDBT { @@ -461,7 +751,7 @@ class _TDBT { class VeilidTableDBTransactionFFI extends VeilidTableDBTransaction { final _TDBT _tdbt; static final Finalizer<_TDBT> _finalizer = - Finalizer((tdbt) => {tdbt.ffi._releaseTableDbTransaction(tdbt.id)}); + Finalizer((tdbt) => tdbt.ffi._releaseTableDbTransaction(tdbt.id));
VeilidTableDBTransactionFFI._(this._tdbt) { _finalizer.attach(this, _tdbt, detach: this); @@ -532,7 +822,7 @@ class _TDB { class VeilidTableDBFFI extends VeilidTableDB { final _TDB _tdb; static final Finalizer<_TDB> _finalizer = - Finalizer((tdb) => {tdb.ffi._releaseTableDb(tdb.id)}); + Finalizer((tdb) => tdb.ffi._releaseTableDb(tdb.id)); VeilidTableDBFFI._(this._tdb) { _finalizer.attach(this, _tdb, detach: this); @@ -544,15 +834,15 @@ class VeilidTableDBFFI extends VeilidTableDB { } @override - List getKeys(int col) { - final s = _tdb.ffi._tableDbGetKeys(_tdb.id, col); - if (s.address == nullptr.address) { - throw VeilidAPIExceptionInternal("No db for id"); - } - String ja = s.toDartString(); - _tdb.ffi._freeString(s); - List jarr = jsonDecode(ja); - return jarr.map((e) => base64UrlNoPadDecode(e)).toList(); + Future> getKeys(int col) { + final recvPort = ReceivePort("veilid_table_db_get_keys"); + final sendPort = recvPort.sendPort; + + _tdb.ffi._tableDbGetKeys(sendPort.nativePort, _tdb.id, col); + + return processFutureJson( + jsonListConstructor(base64UrlNoPadDecodeDynamic), + recvPort.first); } @override @@ -598,12 +888,12 @@ class VeilidTableDBFFI extends VeilidTableDB { } @override - Future delete(int col, Uint8List key) { + Future delete(int col, Uint8List key) { final nativeEncodedKey = base64UrlNoPadEncode(key).toNativeUtf8(); final recvPort = ReceivePort("veilid_table_db_delete"); final sendPort = recvPort.sendPort; - _tdb.ffi._tableDbLoad( + _tdb.ffi._tableDbDelete( sendPort.nativePort, _tdb.id, col, @@ -613,6 +903,251 @@ class VeilidTableDBFFI extends VeilidTableDB { } } +// FFI implementation of VeilidCryptoSystem +class VeilidCryptoSystemFFI implements VeilidCryptoSystem { + final CryptoKind _kind; + final VeilidFFI _ffi; + + VeilidCryptoSystemFFI._(this._ffi, this._kind); + + @override + CryptoKind kind() { + return _kind; + } + + @override + Future cachedDH(PublicKey key, SecretKey secret) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSecret = jsonEncode(secret).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_cached_dh"); + final sendPort = recvPort.sendPort; + _ffi._cryptoCachedDH(sendPort.nativePort, _kind, nativeKey, nativeSecret); + return processFutureJson(SharedSecret.fromJson, recvPort.first); + } + + @override + Future computeDH(PublicKey key, SecretKey secret) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSecret = jsonEncode(secret).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_compute_dh"); + final sendPort = recvPort.sendPort; + _ffi._cryptoComputeDH(sendPort.nativePort, _kind, nativeKey, nativeSecret); + return processFutureJson(SharedSecret.fromJson, recvPort.first); + } + + @override + Future randomBytes(int len) async { + final recvPort = ReceivePort("crypto_random_bytes"); + final sendPort = recvPort.sendPort; + _ffi._cryptoRandomBytes(sendPort.nativePort, _kind, len); + final out = await processFuturePlain(recvPort.first); + return base64UrlNoPadDecode(out); + } + + @override + Future defaultSaltLength() { + final recvPort = ReceivePort("crypto_default_salt_length"); + final sendPort = recvPort.sendPort; + _ffi._cryptoDefaultSaltLength(sendPort.nativePort, _kind); + return processFuturePlain(recvPort.first); + } + + @override + Future hashPassword(Uint8List password, Uint8List salt) { + final nativeEncodedPassword = base64UrlNoPadEncode(password).toNativeUtf8(); + final nativeEncodedSalt = base64UrlNoPadEncode(salt).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_hash_password"); + 
final sendPort = recvPort.sendPort; + _ffi._cryptoHashPassword( + sendPort.nativePort, _kind, nativeEncodedPassword, nativeEncodedSalt); + return processFuturePlain(recvPort.first); + } + + @override + Future verifyPassword(Uint8List password, String passwordHash) { + final nativeEncodedPassword = base64UrlNoPadEncode(password).toNativeUtf8(); + final nativeEncodedPasswordHash = passwordHash.toNativeUtf8(); + + final recvPort = ReceivePort("crypto_verify_password"); + final sendPort = recvPort.sendPort; + _ffi._cryptoVerifyPassword(sendPort.nativePort, _kind, + nativeEncodedPassword, nativeEncodedPasswordHash); + return processFuturePlain(recvPort.first); + } + + @override + Future deriveSharedSecret(Uint8List password, Uint8List salt) { + final nativeEncodedPassword = base64UrlNoPadEncode(password).toNativeUtf8(); + final nativeEncodedSalt = base64UrlNoPadEncode(salt).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_derive_shared_secret"); + final sendPort = recvPort.sendPort; + _ffi._cryptoDeriveSharedSecret( + sendPort.nativePort, _kind, nativeEncodedPassword, nativeEncodedSalt); + return processFutureJson(SharedSecret.fromJson, recvPort.first); + } + + @override + Future randomNonce() { + final recvPort = ReceivePort("crypto_random_nonce"); + final sendPort = recvPort.sendPort; + _ffi._cryptoRandomNonce(sendPort.nativePort, _kind); + return processFutureJson(Nonce.fromJson, recvPort.first); + } + + @override + Future randomSharedSecret() { + final recvPort = ReceivePort("crypto_random_shared_secret"); + final sendPort = recvPort.sendPort; + _ffi._cryptoRandomSharedSecret(sendPort.nativePort, _kind); + return processFutureJson(SharedSecret.fromJson, recvPort.first); + } + + @override + Future generateKeyPair() { + final recvPort = ReceivePort("crypto_generate_key_pair"); + final sendPort = recvPort.sendPort; + _ffi._cryptoGenerateKeyPair(sendPort.nativePort, _kind); + return processFutureJson(KeyPair.fromJson, recvPort.first); + } + + @override + Future generateHash(Uint8List data) { + final nativeEncodedData = base64UrlNoPadEncode(data).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_generate_hash"); + final sendPort = recvPort.sendPort; + _ffi._cryptoGenerateHash(sendPort.nativePort, _kind, nativeEncodedData); + return processFutureJson(HashDigest.fromJson, recvPort.first); + } + + @override + Future validateKeyPair(PublicKey key, SecretKey secret) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSecret = jsonEncode(secret).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_validate_key_pair"); + final sendPort = recvPort.sendPort; + _ffi._cryptoValidateKeyPair( + sendPort.nativePort, _kind, nativeKey, nativeSecret); + return processFuturePlain(recvPort.first); + } + + @override + Future validateHash(Uint8List data, HashDigest hash) { + final nativeEncodedData = base64UrlNoPadEncode(data).toNativeUtf8(); + final nativeHash = jsonEncode(hash).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_validate_hash"); + final sendPort = recvPort.sendPort; + _ffi._cryptoValidateHash( + sendPort.nativePort, _kind, nativeEncodedData, nativeHash); + return processFuturePlain(recvPort.first); + } + + @override + Future distance(CryptoKey key1, CryptoKey key2) { + final nativeKey1 = jsonEncode(key1).toNativeUtf8(); + final nativeKey2 = jsonEncode(key2).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_distance"); + final sendPort = recvPort.sendPort; + _ffi._cryptoDistance(sendPort.nativePort, _kind, nativeKey1, nativeKey2); + return 
processFutureJson(CryptoKeyDistance.fromJson, recvPort.first); + } + + @override + Future sign(PublicKey key, SecretKey secret, Uint8List data) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeSecret = jsonEncode(secret).toNativeUtf8(); + final nativeEncodedData = base64UrlNoPadEncode(data).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_sign"); + final sendPort = recvPort.sendPort; + _ffi._cryptoSign( + sendPort.nativePort, _kind, nativeKey, nativeSecret, nativeEncodedData); + return processFutureJson(Signature.fromJson, recvPort.first); + } + + @override + Future verify(PublicKey key, Uint8List data, Signature signature) { + final nativeKey = jsonEncode(key).toNativeUtf8(); + final nativeEncodedData = base64UrlNoPadEncode(data).toNativeUtf8(); + final nativeSignature = jsonEncode(signature).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_verify"); + final sendPort = recvPort.sendPort; + _ffi._cryptoVerify(sendPort.nativePort, _kind, nativeKey, nativeEncodedData, + nativeSignature); + return processFutureVoid(recvPort.first); + } + + @override + Future aeadOverhead() { + final recvPort = ReceivePort("crypto_aead_overhead"); + final sendPort = recvPort.sendPort; + _ffi._cryptoAeadOverhead( + sendPort.nativePort, + _kind, + ); + return processFuturePlain(recvPort.first); + } + + @override + Future decryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? associatedData) async { + final nativeEncodedBody = base64UrlNoPadEncode(body).toNativeUtf8(); + final nativeNonce = jsonEncode(nonce).toNativeUtf8(); + final nativeSharedSecret = jsonEncode(sharedSecret).toNativeUtf8(); + final nativeSignature = (associatedData != null) + ? jsonEncode(associatedData).toNativeUtf8() + : nullptr; + + final recvPort = ReceivePort("crypto_decrypt_aead"); + final sendPort = recvPort.sendPort; + _ffi._cryptoDecryptAead(sendPort.nativePort, _kind, nativeEncodedBody, + nativeNonce, nativeSharedSecret, nativeSignature); + final out = await processFuturePlain(recvPort.first); + return base64UrlNoPadDecode(out); + } + + @override + Future encryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? associatedData) async { + final nativeEncodedBody = base64UrlNoPadEncode(body).toNativeUtf8(); + final nativeNonce = jsonEncode(nonce).toNativeUtf8(); + final nativeSharedSecret = jsonEncode(sharedSecret).toNativeUtf8(); + final nativeSignature = (associatedData != null) + ? 
jsonEncode(associatedData).toNativeUtf8() + : nullptr; + + final recvPort = ReceivePort("crypto_encrypt_aead"); + final sendPort = recvPort.sendPort; + _ffi._cryptoEncryptAead(sendPort.nativePort, _kind, nativeEncodedBody, + nativeNonce, nativeSharedSecret, nativeSignature); + final out = await processFuturePlain(recvPort.first); + return base64UrlNoPadDecode(out); + } + + @override + Future cryptNoAuth( + Uint8List body, Nonce nonce, SharedSecret sharedSecret) async { + final nativeEncodedBody = base64UrlNoPadEncode(body).toNativeUtf8(); + final nativeNonce = jsonEncode(nonce).toNativeUtf8(); + final nativeSharedSecret = jsonEncode(sharedSecret).toNativeUtf8(); + + final recvPort = ReceivePort("crypto_crypt_no_auth"); + final sendPort = recvPort.sendPort; + _ffi._cryptoCryptNoAuth(sendPort.nativePort, _kind, nativeEncodedBody, + nativeNonce, nativeSharedSecret); + final out = await processFuturePlain(recvPort.first); + return base64UrlNoPadDecode(out); + } +} + // FFI implementation of high level Veilid API class VeilidFFI implements Veilid { // veilid_core shared library @@ -635,6 +1170,14 @@ class VeilidFFI implements Veilid { final _RoutingContextWithSequencingDart _routingContextWithSequencing; final _RoutingContextAppCallDart _routingContextAppCall; final _RoutingContextAppMessageDart _routingContextAppMessage; + final _RoutingContextCreateDHTRecordDart _routingContextCreateDHTRecord; + final _RoutingContextOpenDHTRecordDart _routingContextOpenDHTRecord; + final _RoutingContextCloseDHTRecordDart _routingContextCloseDHTRecord; + final _RoutingContextDeleteDHTRecordDart _routingContextDeleteDHTRecord; + final _RoutingContextGetDHTValueDart _routingContextGetDHTValue; + final _RoutingContextSetDHTValueDart _routingContextSetDHTValue; + final _RoutingContextWatchDHTValuesDart _routingContextWatchDHTValues; + final _RoutingContextCancelDHTWatchDart _routingContextCancelDHTWatch; final _NewPrivateRouteDart _newPrivateRoute; final _NewCustomPrivateRouteDart _newCustomPrivateRoute; @@ -658,6 +1201,36 @@ class VeilidFFI implements Veilid { final _TableDbTransactionStoreDart _tableDbTransactionStore; final _TableDbTransactionDeleteDart _tableDbTransactionDelete; + final _ValidCryptoKindsDart _validCryptoKinds; + final _BestCryptoKindDart _bestCryptoKind; + final _VerifySignaturesDart _verifySignatures; + final _GenerateSignaturesDart _generateSignatures; + final _GenerateKeyPairDart _generateKeyPair; + + final _CryptoCachedDHDart _cryptoCachedDH; + final _CryptoComputeDHDart _cryptoComputeDH; + + final _CryptoRandomBytesDart _cryptoRandomBytes; + final _CryptoDefaultSaltLengthDart _cryptoDefaultSaltLength; + final _CryptoHashPasswordDart _cryptoHashPassword; + final _CryptoVerifyPasswordDart _cryptoVerifyPassword; + final _CryptoDeriveSharedSecretDart _cryptoDeriveSharedSecret; + + final _CryptoRandomNonceDart _cryptoRandomNonce; + final _CryptoRandomSharedSecretDart _cryptoRandomSharedSecret; + final _CryptoGenerateKeyPairDart _cryptoGenerateKeyPair; + final _CryptoGenerateHashDart _cryptoGenerateHash; + final _CryptoValidateKeyPairDart _cryptoValidateKeyPair; + final _CryptoValidateHashDart _cryptoValidateHash; + final _CryptoDistanceDart _cryptoDistance; + final _CryptoSignDart _cryptoSign; + final _CryptoVerifyDart _cryptoVerify; + final _CryptoAeadOverheadDart _cryptoAeadOverhead; + final _CryptoDecryptAeadDart _cryptoDecryptAead; + final _CryptoEncryptAeadDart _cryptoEncryptAead; + final _CryptoCryptNoAuthDart _cryptoCryptNoAuth; + + final _NowDart _now; final _DebugDart _debug; final 
_VeilidVersionStringDart _veilidVersionString; final _VeilidVersionDart _veilidVersion; @@ -703,6 +1276,36 @@ class VeilidFFI implements Veilid { _routingContextAppMessage = dylib.lookupFunction< _RoutingContextAppMessageC, _RoutingContextAppMessageDart>('routing_context_app_message'), + _routingContextCreateDHTRecord = dylib.lookupFunction< + _RoutingContextCreateDHTRecordC, + _RoutingContextCreateDHTRecordDart>( + 'routing_context_create_dht_record'), + _routingContextOpenDHTRecord = dylib.lookupFunction< + _RoutingContextOpenDHTRecordC, + _RoutingContextOpenDHTRecordDart>( + 'routing_context_open_dht_record'), + _routingContextCloseDHTRecord = dylib.lookupFunction< + _RoutingContextCloseDHTRecordC, + _RoutingContextCloseDHTRecordDart>( + 'routing_context_close_dht_record'), + _routingContextDeleteDHTRecord = dylib.lookupFunction< + _RoutingContextDeleteDHTRecordC, + _RoutingContextDeleteDHTRecordDart>( + 'routing_context_delete_dht_record'), + _routingContextGetDHTValue = dylib.lookupFunction< + _RoutingContextGetDHTValueC, + _RoutingContextGetDHTValueDart>('routing_context_get_dht_value'), + _routingContextSetDHTValue = dylib.lookupFunction< + _RoutingContextSetDHTValueC, + _RoutingContextSetDHTValueDart>('routing_context_set_dht_value'), + _routingContextWatchDHTValues = dylib.lookupFunction< + _RoutingContextWatchDHTValuesC, + _RoutingContextWatchDHTValuesDart>( + 'routing_context_watch_dht_values'), + _routingContextCancelDHTWatch = dylib.lookupFunction< + _RoutingContextCancelDHTWatchC, + _RoutingContextCancelDHTWatchDart>( + 'routing_context_cancel_dht_watch'), _newPrivateRoute = dylib.lookupFunction<_NewPrivateRouteC, _NewPrivateRouteDart>( 'new_private_route'), @@ -753,6 +1356,77 @@ class VeilidFFI implements Veilid { _tableDbTransactionDelete = dylib.lookupFunction< _TableDbTransactionDeleteC, _TableDbTransactionDeleteDart>('table_db_transaction_delete'), + _validCryptoKinds = + dylib.lookupFunction<_ValidCryptoKindsC, _ValidCryptoKindsDart>( + 'valid_crypto_kinds'), + _bestCryptoKind = + dylib.lookupFunction<_BestCryptoKindC, _BestCryptoKindDart>( + 'best_crypto_kind'), + _verifySignatures = + dylib.lookupFunction<_VerifySignaturesC, _VerifySignaturesDart>( + 'verify_signatures'), + _generateSignatures = + dylib.lookupFunction<_GenerateSignaturesC, _GenerateSignaturesDart>( + 'generate_signatures'), + _generateKeyPair = + dylib.lookupFunction<_GenerateKeyPairC, _GenerateKeyPairDart>( + 'generate_key_pair'), + _cryptoCachedDH = + dylib.lookupFunction<_CryptoCachedDHC, _CryptoCachedDHDart>( + 'crypto_cached_dh'), + _cryptoComputeDH = + dylib.lookupFunction<_CryptoComputeDHC, _CryptoComputeDHDart>( + 'crypto_compute_dh'), + _cryptoRandomBytes = + dylib.lookupFunction<_CryptoRandomBytesC, _CryptoRandomBytesDart>( + 'crypto_random_bytes'), + _cryptoDefaultSaltLength = dylib.lookupFunction< + _CryptoDefaultSaltLengthC, + _CryptoDefaultSaltLengthDart>('crypto_default_salt_length'), + _cryptoHashPassword = + dylib.lookupFunction<_CryptoHashPasswordC, _CryptoHashPasswordDart>( + 'crypto_hash_password'), + _cryptoVerifyPassword = dylib.lookupFunction<_CryptoVerifyPasswordC, + _CryptoVerifyPasswordDart>('crypto_verify_password'), + _cryptoDeriveSharedSecret = dylib.lookupFunction< + _CryptoDeriveSharedSecretC, + _CryptoVerifyPasswordDart>('crypto_derive_shared_secret'), + _cryptoRandomNonce = + dylib.lookupFunction<_CryptoRandomNonceC, _CryptoRandomNonceDart>( + 'crypto_random_nonce'), + _cryptoRandomSharedSecret = dylib.lookupFunction< + _CryptoRandomSharedSecretC, + 
_CryptoRandomSharedSecretDart>('crypto_random_shared_secret'), + _cryptoGenerateKeyPair = dylib.lookupFunction<_CryptoGenerateKeyPairC, + _CryptoGenerateKeyPairDart>('crypto_generate_key_pair'), + _cryptoGenerateHash = + dylib.lookupFunction<_CryptoGenerateHashC, _CryptoGenerateHashDart>( + 'crypto_generate_hash'), + _cryptoValidateKeyPair = dylib.lookupFunction<_CryptoValidateKeyPairC, + _CryptoValidateKeyPairDart>('crypto_validate_key_pair'), + _cryptoValidateHash = + dylib.lookupFunction<_CryptoValidateHashC, _CryptoValidateHashDart>( + 'crypto_validate_hash'), + _cryptoDistance = + dylib.lookupFunction<_CryptoDistanceC, _CryptoDistanceDart>( + 'crypto_distance'), + _cryptoSign = + dylib.lookupFunction<_CryptoSignC, _CryptoSignDart>('crypto_sign'), + _cryptoVerify = dylib + .lookupFunction<_CryptoVerifyC, _CryptoVerifyDart>('crypto_verify'), + _cryptoAeadOverhead = + dylib.lookupFunction<_CryptoAeadOverheadC, _CryptoAeadOverheadDart>( + 'crypto_aead_overhead'), + _cryptoDecryptAead = + dylib.lookupFunction<_CryptoDecryptAeadC, _CryptoDecryptAeadDart>( + 'crypto_decrypt_aead'), + _cryptoEncryptAead = + dylib.lookupFunction<_CryptoEncryptAeadC, _CryptoEncryptAeadDart>( + 'crypto_encrypt_aead'), + _cryptoCryptNoAuth = + dylib.lookupFunction<_CryptoCryptNoAuthC, _CryptoCryptNoAuthDart>( + 'crypto_crypt_no_auth'), + _now = dylib.lookupFunction<_NowC, _NowDart>('now'), _debug = dylib.lookupFunction<_DebugC, _DebugDart>('debug'), _veilidVersionString = dylib.lookupFunction<_VeilidVersionStringC, _VeilidVersionStringDart>('veilid_version_string'), @@ -768,9 +1442,7 @@ class VeilidFFI implements Veilid { @override void initializeVeilidCore(Map platformConfigJson) { - var nativePlatformConfig = - jsonEncode(platformConfigJson, toEncodable: veilidApiToEncodable) - .toNativeUtf8(); + var nativePlatformConfig = jsonEncode(platformConfigJson).toNativeUtf8(); _initializeVeilidCore(nativePlatformConfig); @@ -779,9 +1451,7 @@ class VeilidFFI implements Veilid { @override void changeLogLevel(String layer, VeilidConfigLogLevel logLevel) { - var nativeLogLevel = - jsonEncode(logLevel.json, toEncodable: veilidApiToEncodable) - .toNativeUtf8(); + var nativeLogLevel = jsonEncode(logLevel).toNativeUtf8(); var nativeLayer = layer.toNativeUtf8(); _changeLogLevel(nativeLayer, nativeLogLevel); malloc.free(nativeLayer); @@ -790,9 +1460,7 @@ class VeilidFFI implements Veilid { @override Future> startupVeilidCore(VeilidConfig config) { - var nativeConfig = - jsonEncode(config.json, toEncodable: veilidApiToEncodable) - .toNativeUtf8(); + var nativeConfig = jsonEncode(config).toNativeUtf8(); final recvStreamPort = ReceivePort("veilid_api_stream"); final sendStreamPort = recvStreamPort.sendPort; final recvPort = ReceivePort("startup_veilid_core"); @@ -856,14 +1524,15 @@ class VeilidFFI implements Veilid { @override Future newCustomPrivateRoute( - Stability stability, Sequencing sequencing) async { + Stability stability, Sequencing sequencing) { final recvPort = ReceivePort("new_custom_private_route"); final sendPort = recvPort.sendPort; - _newCustomPrivateRoute(sendPort.nativePort, stability.json.toNativeUtf8(), - sequencing.json.toNativeUtf8()); - final routeBlob = - await processFutureJson(RouteBlob.fromJson, recvPort.first); - return routeBlob; + _newCustomPrivateRoute( + sendPort.nativePort, + jsonEncode(stability).toNativeUtf8(), + jsonEncode(sequencing).toNativeUtf8()); + + return processFutureJson(RouteBlob.fromJson, recvPort.first); } @override @@ -914,6 +1583,70 @@ class VeilidFFI implements Veilid { return 
deleted; } + @override + List validCryptoKinds() { + final vckString = _validCryptoKinds(); + final vck = jsonDecode(vckString.toDartString()); + _freeString(vckString); + return vck; + } + + @override + Future getCryptoSystem(CryptoKind kind) async { + if (!validCryptoKinds().contains(kind)) { + throw VeilidAPIExceptionGeneric("unsupported cryptosystem"); + } + return VeilidCryptoSystemFFI._(this, kind); + } + + @override + Future bestCryptoSystem() async { + return VeilidCryptoSystemFFI._(this, _bestCryptoKind()); + } + + @override + Future> verifySignatures( + List nodeIds, Uint8List data, List signatures) { + final nativeNodeIds = jsonEncode(nodeIds).toNativeUtf8(); + final nativeData = base64UrlNoPadEncode(data).toNativeUtf8(); + final nativeSignatures = jsonEncode(signatures).toNativeUtf8(); + + final recvPort = ReceivePort("verify_signatures"); + final sendPort = recvPort.sendPort; + _verifySignatures( + sendPort.nativePort, nativeNodeIds, nativeData, nativeSignatures); + return processFutureJson( + jsonListConstructor(TypedKey.fromJson), recvPort.first); + } + + @override + Future> generateSignatures( + Uint8List data, List keyPairs) { + final nativeData = base64UrlNoPadEncode(data).toNativeUtf8(); + final nativeKeyPairs = jsonEncode(keyPairs).toNativeUtf8(); + + final recvPort = ReceivePort("generate_signatures"); + final sendPort = recvPort.sendPort; + _generateSignatures(sendPort.nativePort, nativeData, nativeKeyPairs); + return processFutureJson( + jsonListConstructor(TypedSignature.fromJson), + recvPort.first); + } + + @override + Timestamp now() { + final ts = _now(); + return Timestamp(value: BigInt.from(ts)); + } + + @override + Future generateKeyPair(CryptoKind kind) { + final recvPort = ReceivePort("generate_key_pair"); + final sendPort = recvPort.sendPort; + _generateKeyPair(sendPort.nativePort, kind); + return processFutureJson(TypedKeyPair.fromJson, recvPort.first); + } + @override Future debug(String command) async { var nativeCommand = command.toNativeUtf8(); diff --git a/veilid-flutter/lib/veilid_js.dart b/veilid-flutter/lib/veilid_js.dart index 963a5fca..8a323516 100644 --- a/veilid-flutter/lib/veilid_js.dart +++ b/veilid-flutter/lib/veilid_js.dart @@ -7,7 +7,7 @@ import 'dart:async'; import 'dart:convert'; import 'dart:typed_data'; -import 'base64url_no_pad.dart'; +import 'veilid_encoding.dart'; ////////////////////////////////////////////////////////// @@ -30,9 +30,8 @@ class _Ctx { // JS implementation of VeilidRoutingContext class VeilidRoutingContextJS implements VeilidRoutingContext { final _Ctx _ctx; - static final Finalizer<_Ctx> _finalizer = Finalizer((ctx) => { - js_util.callMethod(wasm, "release_routing_context", [ctx.id]) - }); + static final Finalizer<_Ctx> _finalizer = Finalizer( + (ctx) => js_util.callMethod(wasm, "release_routing_context", [ctx.id])); VeilidRoutingContextJS._(this._ctx) { _finalizer.attach(this, _ctx, detach: this); @@ -48,15 +47,17 @@ class VeilidRoutingContextJS implements VeilidRoutingContext { @override VeilidRoutingContextJS withCustomPrivacy(Stability stability) { final newId = js_util.callMethod( - wasm, "routing_context_with_custom_privacy", [_ctx.id, stability.json]); + wasm, + "routing_context_with_custom_privacy", + [_ctx.id, jsonEncode(stability)]); return VeilidRoutingContextJS._(_Ctx(newId, _ctx.js)); } @override VeilidRoutingContextJS withSequencing(Sequencing sequencing) { - final newId = js_util.callMethod( - wasm, "routing_context_with_sequencing", [_ctx.id, sequencing.json]); + final newId = 
js_util.callMethod(wasm, "routing_context_with_sequencing", + [_ctx.id, jsonEncode(sequencing)]); return VeilidRoutingContextJS._(_Ctx(newId, _ctx.js)); } @@ -75,6 +76,254 @@ class VeilidRoutingContextJS implements VeilidRoutingContext { return _wrapApiPromise(js_util.callMethod(wasm, "routing_context_app_message", [_ctx.id, target, encodedMessage])); } + + @override + Future createDHTRecord( + CryptoKind kind, DHTSchema schema) async { + return DHTRecordDescriptor.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "routing_context_create_dht_record", + [_ctx.id, kind, jsonEncode(schema)])))); + } + + @override + Future openDHTRecord( + TypedKey key, KeyPair? writer) async { + return DHTRecordDescriptor.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "routing_context_open_dht_record", [ + _ctx.id, + jsonEncode(key), + writer != null ? jsonEncode(writer) : null + ])))); + } + + @override + Future closeDHTRecord(TypedKey key) { + return _wrapApiPromise(js_util.callMethod( + wasm, "routing_context_close_dht_record", [_ctx.id, jsonEncode(key)])); + } + + @override + Future deleteDHTRecord(TypedKey key) { + return _wrapApiPromise(js_util.callMethod( + wasm, "routing_context_delete_dht_record", [_ctx.id, jsonEncode(key)])); + } + + @override + Future getDHTValue( + TypedKey key, int subkey, bool forceRefresh) async { + final opt = await _wrapApiPromise(js_util.callMethod( + wasm, + "routing_context_get_dht_value", + [_ctx.id, jsonEncode(key), subkey, forceRefresh])); + return opt == null ? null : ValueData.fromJson(jsonDecode(opt)); + } + + @override + Future setDHTValue( + TypedKey key, int subkey, Uint8List data) async { + final opt = await _wrapApiPromise(js_util.callMethod( + wasm, + "routing_context_set_dht_value", + [_ctx.id, jsonEncode(key), subkey, base64UrlNoPadEncode(data)])); + return opt == null ? 
null : ValueData.fromJson(jsonDecode(opt)); + } + + @override + Future watchDHTValues(TypedKey key, ValueSubkeyRange subkeys, + Timestamp expiration, int count) async { + final ts = await _wrapApiPromise(js_util.callMethod( + wasm, "routing_context_watch_dht_values", [ + _ctx.id, + jsonEncode(key), + jsonEncode(subkeys), + expiration.toString(), + count + ])); + return Timestamp.fromString(ts); + } + + @override + Future cancelDHTWatch(TypedKey key, ValueSubkeyRange subkeys) { + return _wrapApiPromise(js_util.callMethod( + wasm, + "routing_context_cancel_dht_watch", + [_ctx.id, jsonEncode(key), jsonEncode(subkeys)])); + } +} + +// JS implementation of VeilidCryptoSystem +class VeilidCryptoSystemJS implements VeilidCryptoSystem { + final CryptoKind _kind; + final VeilidJS _js; + + VeilidCryptoSystemJS._(this._js, this._kind) { + // Keep the reference + _js; + } + + @override + CryptoKind kind() { + return _kind; + } + + @override + Future cachedDH(PublicKey key, SecretKey secret) async { + return SharedSecret.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_cached_dh", + [_kind, jsonEncode(key), jsonEncode(secret)])))); + } + + @override + Future computeDH(PublicKey key, SecretKey secret) async { + return SharedSecret.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_compute_dh", + [_kind, jsonEncode(key), jsonEncode(secret)])))); + } + + @override + Future randomBytes(int len) async { + return base64UrlNoPadDecode(await _wrapApiPromise( + js_util.callMethod(wasm, "crypto_random_bytes", [_kind, len]))); + } + + @override + Future defaultSaltLength() { + return _wrapApiPromise( + js_util.callMethod(wasm, "crypto_default_salt_length", [_kind])); + } + + @override + Future hashPassword(Uint8List password, Uint8List salt) { + return _wrapApiPromise(js_util.callMethod(wasm, "crypto_hash_password", + [_kind, base64UrlNoPadEncode(password), base64UrlNoPadEncode(salt)])); + } + + @override + Future verifyPassword(Uint8List password, String passwordHash) { + return _wrapApiPromise(js_util.callMethod(wasm, "crypto_verify_password", + [_kind, base64UrlNoPadEncode(password), passwordHash])); + } + + @override + Future deriveSharedSecret( + Uint8List password, Uint8List salt) async { + return SharedSecret.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_derive_shared_secret", [ + _kind, + base64UrlNoPadEncode(password), + base64UrlNoPadEncode(salt) + ])))); + } + + @override + Future randomNonce() async { + return Nonce.fromJson(jsonDecode(await _wrapApiPromise( + js_util.callMethod(wasm, "crypto_random_nonce", [_kind])))); + } + + @override + Future randomSharedSecret() async { + return SharedSecret.fromJson(jsonDecode(await _wrapApiPromise( + js_util.callMethod(wasm, "crypto_random_shared_secret", [_kind])))); + } + + @override + Future generateKeyPair() async { + return KeyPair.fromJson(jsonDecode(await _wrapApiPromise( + js_util.callMethod(wasm, "crypto_generate_key_pair", [_kind])))); + } + + @override + Future generateHash(Uint8List data) async { + return HashDigest.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_generate_hash", + [_kind, base64UrlNoPadEncode(data)])))); + } + + @override + Future validateKeyPair(PublicKey key, SecretKey secret) { + return _wrapApiPromise(js_util.callMethod(wasm, "crypto_validate_key_pair", + [_kind, jsonEncode(key), jsonEncode(secret)])); + } + + @override + Future validateHash(Uint8List data, HashDigest hash) { + return 
_wrapApiPromise(js_util.callMethod(wasm, "crypto_validate_hash", + [_kind, base64UrlNoPadEncode(data), jsonEncode(hash)])); + } + + @override + Future distance(CryptoKey key1, CryptoKey key2) async { + return CryptoKeyDistance.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_distance", + [_kind, jsonEncode(key1), jsonEncode(key2)])))); + } + + @override + Future sign( + PublicKey key, SecretKey secret, Uint8List data) async { + return Signature.fromJson(jsonDecode(await _wrapApiPromise(js_util + .callMethod(wasm, "crypto_sign", [ + _kind, + jsonEncode(key), + jsonEncode(secret), + base64UrlNoPadEncode(data) + ])))); + } + + @override + Future verify(PublicKey key, Uint8List data, Signature signature) { + return _wrapApiPromise(js_util.callMethod(wasm, "crypto_verify", [ + _kind, + jsonEncode(key), + base64UrlNoPadEncode(data), + jsonEncode(signature), + ])); + } + + @override + Future aeadOverhead() { + return _wrapApiPromise( + js_util.callMethod(wasm, "crypto_aead_overhead", [_kind])); + } + + @override + Future decryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? associatedData) async { + return base64UrlNoPadDecode( + await _wrapApiPromise(js_util.callMethod(wasm, "crypto_decrypt_aead", [ + _kind, + base64UrlNoPadEncode(body), + jsonEncode(nonce), + jsonEncode(sharedSecret), + associatedData != null ? base64UrlNoPadEncode(associatedData) : null + ]))); + } + + @override + Future encryptAead(Uint8List body, Nonce nonce, + SharedSecret sharedSecret, Uint8List? associatedData) async { + return base64UrlNoPadDecode( + await _wrapApiPromise(js_util.callMethod(wasm, "crypto_encrypt_aead", [ + _kind, + base64UrlNoPadEncode(body), + jsonEncode(nonce), + jsonEncode(sharedSecret), + associatedData != null ? base64UrlNoPadEncode(associatedData) : null + ]))); + } + + @override + Future cryptNoAuth( + Uint8List body, Nonce nonce, SharedSecret sharedSecret) async { + return base64UrlNoPadDecode(await _wrapApiPromise(js_util.callMethod( + wasm, "crypto_crypt_no_auth", [ + _kind, + base64UrlNoPadEncode(body), + jsonEncode(nonce), + jsonEncode(sharedSecret) + ]))); + } } class _TDBT { @@ -88,9 +337,8 @@ class _TDBT { // JS implementation of VeilidTableDBTransaction class VeilidTableDBTransactionJS extends VeilidTableDBTransaction { final _TDBT _tdbt; - static final Finalizer<_TDBT> _finalizer = Finalizer((tdbt) => { - js_util.callMethod(wasm, "release_table_db_transaction", [tdbt.id]) - }); + static final Finalizer<_TDBT> _finalizer = Finalizer((tdbt) => + js_util.callMethod(wasm, "release_table_db_transaction", [tdbt.id])); VeilidTableDBTransactionJS._(this._tdbt) { _finalizer.attach(this, _tdbt, detach: this); @@ -138,9 +386,8 @@ class _TDB { // JS implementation of VeilidTableDB class VeilidTableDBJS extends VeilidTableDB { final _TDB _tdb; - static final Finalizer<_TDB> _finalizer = Finalizer((tdb) => { - js_util.callMethod(wasm, "release_table_db", [tdb.id]) - }); + static final Finalizer<_TDB> _finalizer = Finalizer( + (tdb) => js_util.callMethod(wasm, "release_table_db", [tdb.id])); VeilidTableDBJS._(this._tdb) { _finalizer.attach(this, _tdb, detach: this); @@ -152,13 +399,9 @@ class VeilidTableDBJS extends VeilidTableDB { } @override - List getKeys(int col) { - String? 
s = js_util.callMethod(wasm, "table_db_get_keys", [_tdb.id, col]); - if (s == null) { - throw VeilidAPIExceptionInternal("No db for id"); - } - List jarr = jsonDecode(s); - return jarr.map((e) => base64UrlNoPadDecode(e)).toList(); + Future> getKeys(int col) async { + return jsonListConstructor(base64UrlNoPadDecodeDynamic)(jsonDecode( + await js_util.callMethod(wasm, "table_db_get_keys", [_tdb.id, col]))); } @override @@ -190,7 +433,7 @@ class VeilidTableDBJS extends VeilidTableDB { } @override - Future delete(int col, Uint8List key) { + Future delete(int col, Uint8List key) { final encodedKey = base64UrlNoPadEncode(key); return _wrapApiPromise(js_util @@ -203,16 +446,14 @@ class VeilidTableDBJS extends VeilidTableDB { class VeilidJS implements Veilid { @override void initializeVeilidCore(Map platformConfigJson) { - var platformConfigJsonString = - jsonEncode(platformConfigJson, toEncodable: veilidApiToEncodable); + var platformConfigJsonString = jsonEncode(platformConfigJson); js_util .callMethod(wasm, "initialize_veilid_core", [platformConfigJsonString]); } @override void changeLogLevel(String layer, VeilidConfigLogLevel logLevel) { - var logLevelJsonString = - jsonEncode(logLevel.json, toEncodable: veilidApiToEncodable); + var logLevelJsonString = jsonEncode(logLevel); js_util.callMethod(wasm, "change_log_level", [layer, logLevelJsonString]); } @@ -229,10 +470,8 @@ class VeilidJS implements Veilid { } } - await _wrapApiPromise(js_util.callMethod(wasm, "startup_veilid_core", [ - js.allowInterop(updateCallback), - jsonEncode(config.json, toEncodable: veilidApiToEncodable) - ])); + await _wrapApiPromise(js_util.callMethod(wasm, "startup_veilid_core", + [js.allowInterop(updateCallback), jsonEncode(config)])); return streamController.stream; } @@ -259,6 +498,50 @@ class VeilidJS implements Veilid { js_util.callMethod(wasm, "shutdown_veilid_core", [])); } + @override + List validCryptoKinds() { + return jsonDecode(js_util.callMethod(wasm, "valid_crypto_kinds", [])); + } + + @override + Future getCryptoSystem(CryptoKind kind) async { + if (!validCryptoKinds().contains(kind)) { + throw VeilidAPIExceptionGeneric("unsupported cryptosystem"); + } + return VeilidCryptoSystemJS._(this, kind); + } + + @override + Future bestCryptoSystem() async { + return VeilidCryptoSystemJS._( + this, js_util.callMethod(wasm, "best_crypto_kind", [])); + } + + @override + Future> verifySignatures(List nodeIds, + Uint8List data, List signatures) async { + return jsonListConstructor(TypedKey.fromJson)(jsonDecode( + await _wrapApiPromise(js_util.callMethod(wasm, "verify_signatures", [ + jsonEncode(nodeIds), + base64UrlNoPadEncode(data), + jsonEncode(signatures) + ])))); + } + + @override + Future> generateSignatures( + Uint8List data, List keyPairs) async { + return jsonListConstructor(TypedSignature.fromJson)(jsonDecode( + await _wrapApiPromise(js_util.callMethod(wasm, "generate_signatures", + [base64UrlNoPadEncode(data), jsonEncode(keyPairs)])))); + } + + @override + Future generateKeyPair(CryptoKind kind) async { + return TypedKeyPair.fromJson(jsonDecode(await _wrapApiPromise( + js_util.callMethod(wasm, "generate_key_pair", [kind])))); + } + @override Future routingContext() async { int id = @@ -268,23 +551,19 @@ class VeilidJS implements Veilid { @override Future newPrivateRoute() async { - Map blobJson = jsonDecode(await _wrapApiPromise( - js_util.callMethod(wasm, "new_private_route", []))); - return RouteBlob.fromJson(blobJson); + return RouteBlob.fromJson(jsonDecode(await _wrapApiPromise( + 
js_util.callMethod(wasm, "new_private_route", [])))); } @override Future newCustomPrivateRoute( Stability stability, Sequencing sequencing) async { - var stabilityString = - jsonEncode(stability, toEncodable: veilidApiToEncodable); - var sequencingString = - jsonEncode(sequencing, toEncodable: veilidApiToEncodable); + var stabilityString = jsonEncode(stability); + var sequencingString = jsonEncode(sequencing); - Map blobJson = jsonDecode(await _wrapApiPromise(js_util + return RouteBlob.fromJson(jsonDecode(await _wrapApiPromise(js_util .callMethod( - wasm, "new_private_route", [stabilityString, sequencingString]))); - return RouteBlob.fromJson(blobJson); + wasm, "new_private_route", [stabilityString, sequencingString])))); } @override @@ -319,6 +598,11 @@ class VeilidJS implements Veilid { return _wrapApiPromise(js_util.callMethod(wasm, "delete_table_db", [name])); } + @override + Timestamp now() { + return Timestamp.fromString(js_util.callMethod(wasm, "now", [])); + } + @override Future debug(String command) async { return await _wrapApiPromise(js_util.callMethod(wasm, "debug", [command])); diff --git a/veilid-flutter/lib/veilid_state.dart b/veilid-flutter/lib/veilid_state.dart new file mode 100644 index 00000000..fea12cdf --- /dev/null +++ b/veilid-flutter/lib/veilid_state.dart @@ -0,0 +1,547 @@ +import 'dart:typed_data'; + +import 'package:change_case/change_case.dart'; + +import 'veilid_encoding.dart'; +import 'veilid.dart'; + +////////////////////////////////////// +/// AttachmentState + +enum AttachmentState { + detached, + attaching, + attachedWeak, + attachedGood, + attachedStrong, + fullyAttached, + overAttached, + detaching; + + String toJson() { + return name.toPascalCase(); + } + + factory AttachmentState.fromJson(String j) { + return AttachmentState.values.byName(j.toCamelCase()); + } +} + +////////////////////////////////////// +/// VeilidLogLevel + +enum VeilidLogLevel { + error, + warn, + info, + debug, + trace; + + String toJson() { + return name.toPascalCase(); + } + + factory VeilidLogLevel.fromJson(String j) { + return VeilidLogLevel.values.byName(j.toCamelCase()); + } +} + +//////////// + +class LatencyStats { + TimestampDuration fastest; + TimestampDuration average; + TimestampDuration slowest; + + LatencyStats({ + required this.fastest, + required this.average, + required this.slowest, + }); + + Map toJson() { + return { + 'fastest': fastest.toJson(), + 'average': average.toJson(), + 'slowest': slowest.toJson(), + }; + } + + LatencyStats.fromJson(dynamic json) + : fastest = TimestampDuration.fromJson(json['fastest']), + average = TimestampDuration.fromJson(json['average']), + slowest = TimestampDuration.fromJson(json['slowest']); +} + +//////////// + +class TransferStats { + BigInt total; + BigInt maximum; + BigInt average; + BigInt minimum; + + TransferStats({ + required this.total, + required this.maximum, + required this.average, + required this.minimum, + }); + + Map toJson() { + return { + 'total': total.toString(), + 'maximum': maximum.toString(), + 'average': average.toString(), + 'minimum': minimum.toString(), + }; + } + + TransferStats.fromJson(dynamic json) + : total = BigInt.parse(json['total']), + maximum = BigInt.parse(json['maximum']), + average = BigInt.parse(json['average']), + minimum = BigInt.parse(json['minimum']); +} + +//////////// + +class TransferStatsDownUp { + TransferStats down; + TransferStats up; + + TransferStatsDownUp({ + required this.down, + required this.up, + }); + + Map toJson() { + return { + 'down': down.toJson(), + 'up': 
up.toJson(), + }; + } + + TransferStatsDownUp.fromJson(dynamic json) + : down = TransferStats.fromJson(json['down']), + up = TransferStats.fromJson(json['up']); +} + +//////////// + +class RPCStats { + int messagesSent; + int messagesRcvd; + int questionsInFlight; + Timestamp? lastQuestion; + Timestamp? lastSeenTs; + Timestamp? firstConsecutiveSeenTs; + int recentLostAnswers; + int failedToSend; + + RPCStats({ + required this.messagesSent, + required this.messagesRcvd, + required this.questionsInFlight, + required this.lastQuestion, + required this.lastSeenTs, + required this.firstConsecutiveSeenTs, + required this.recentLostAnswers, + required this.failedToSend, + }); + + Map toJson() { + return { + 'messages_sent': messagesSent, + 'messages_rcvd': messagesRcvd, + 'questions_in_flight': questionsInFlight, + 'last_question': lastQuestion?.toJson(), + 'last_seen_ts': lastSeenTs?.toJson(), + 'first_consecutive_seen_ts': firstConsecutiveSeenTs?.toJson(), + 'recent_lost_answers': recentLostAnswers, + 'failed_to_send': failedToSend, + }; + } + + RPCStats.fromJson(dynamic json) + : messagesSent = json['messages_sent'], + messagesRcvd = json['messages_rcvd'], + questionsInFlight = json['questions_in_flight'], + lastQuestion = json['last_question'] != null + ? Timestamp.fromJson(json['last_question']) + : null, + lastSeenTs = json['last_seen_ts'] != null + ? Timestamp.fromJson(json['last_seen_ts']) + : null, + firstConsecutiveSeenTs = json['first_consecutive_seen_ts'] != null + ? Timestamp.fromJson(json['first_consecutive_seen_ts']) + : null, + recentLostAnswers = json['recent_lost_answers'], + failedToSend = json['failed_to_send']; +} + +//////////// + +class PeerStats { + Timestamp timeAdded; + RPCStats rpcStats; + LatencyStats? latency; + TransferStatsDownUp transfer; + + PeerStats({ + required this.timeAdded, + required this.rpcStats, + required this.latency, + required this.transfer, + }); + + Map toJson() { + return { + 'time_added': timeAdded.toJson(), + 'rpc_stats': rpcStats.toJson(), + 'latency': latency?.toJson(), + 'transfer': transfer.toJson(), + }; + } + + PeerStats.fromJson(dynamic json) + : timeAdded = Timestamp.fromJson(json['time_added']), + rpcStats = RPCStats.fromJson(json['rpc_stats']), + latency = json['latency'] != null + ? 
LatencyStats.fromJson(json['latency']) + : null, + transfer = TransferStatsDownUp.fromJson(json['transfer']); +} + +//////////// + +class PeerTableData { + List nodeIds; + String peerAddress; + PeerStats peerStats; + + PeerTableData({ + required this.nodeIds, + required this.peerAddress, + required this.peerStats, + }); + + Map toJson() { + return { + 'node_ids': nodeIds.map((p) => p.toJson()).toList(), + 'peer_address': peerAddress, + 'peer_stats': peerStats.toJson(), + }; + } + + PeerTableData.fromJson(dynamic json) + : nodeIds = List.from( + json['node_ids'].map((j) => TypedKey.fromJson(j))), + peerAddress = json['peer_address'], + peerStats = PeerStats.fromJson(json['peer_stats']); +} + +////////////////////////////////////// +/// VeilidUpdate + +abstract class VeilidUpdate { + factory VeilidUpdate.fromJson(dynamic json) { + switch (json["kind"]) { + case "Log": + { + return VeilidLog( + logLevel: VeilidLogLevel.fromJson(json["log_level"]), + message: json["message"], + backtrace: json["backtrace"]); + } + case "AppMessage": + { + return VeilidAppMessage( + sender: json["sender"], message: json["message"]); + } + case "AppCall": + { + return VeilidAppCall( + sender: json["sender"], message: json["message"], id: json["id"]); + } + case "Attachment": + { + return VeilidUpdateAttachment( + state: VeilidStateAttachment.fromJson(json)); + } + case "Network": + { + return VeilidUpdateNetwork(state: VeilidStateNetwork.fromJson(json)); + } + case "Config": + { + return VeilidUpdateConfig(state: VeilidStateConfig.fromJson(json)); + } + case "RouteChange": + { + return VeilidUpdateRouteChange( + deadRoutes: List.from(json['dead_routes'].map((j) => j)), + deadRemoteRoutes: + List.from(json['dead_remote_routes'].map((j) => j))); + } + case "ValueChange": + { + return VeilidUpdateValueChange( + key: TypedKey.fromJson(json['key']), + subkeys: List.from( + json['subkeys'].map((j) => ValueSubkeyRange.fromJson(j))), + count: json['count'], + valueData: ValueData.fromJson(json['value_data'])); + } + default: + { + throw VeilidAPIExceptionInternal( + "Invalid VeilidAPIException type: ${json['kind']}"); + } + } + } + Map toJson(); +} + +class VeilidLog implements VeilidUpdate { + final VeilidLogLevel logLevel; + final String message; + final String? backtrace; + // + VeilidLog({ + required this.logLevel, + required this.message, + required this.backtrace, + }); + + @override + Map toJson() { + return { + 'kind': "Log", + 'log_level': logLevel.toJson(), + 'message': message, + 'backtrace': backtrace + }; + } +} + +class VeilidAppMessage implements VeilidUpdate { + final TypedKey? sender; + final Uint8List message; + + // + VeilidAppMessage({ + required this.sender, + required this.message, + }); + + @override + Map toJson() { + return { + 'kind': "AppMessage", + 'sender': sender, + 'message': base64UrlNoPadEncode(message) + }; + } +} + +class VeilidAppCall implements VeilidUpdate { + final String? 
sender; + final Uint8List message; + final String id; + + // + VeilidAppCall({ + required this.sender, + required this.message, + required this.id, + }); + + @override + Map<String, dynamic> toJson() { + return { + 'kind': "AppCall", + 'sender': sender, + 'message': base64UrlNoPadEncode(message), + 'id': id, + }; + } +} + +class VeilidUpdateAttachment implements VeilidUpdate { + final VeilidStateAttachment state; + // + VeilidUpdateAttachment({required this.state}); + + @override + Map<String, dynamic> toJson() { + var jsonRep = state.toJson(); + jsonRep['kind'] = "Attachment"; + return jsonRep; + } +} + +class VeilidUpdateNetwork implements VeilidUpdate { + final VeilidStateNetwork state; + // + VeilidUpdateNetwork({required this.state}); + + @override + Map<String, dynamic> toJson() { + var jsonRep = state.toJson(); + jsonRep['kind'] = "Network"; + return jsonRep; + } +} + +class VeilidUpdateConfig implements VeilidUpdate { + final VeilidStateConfig state; + // + VeilidUpdateConfig({required this.state}); + + @override + Map<String, dynamic> toJson() { + var jsonRep = state.toJson(); + jsonRep['kind'] = "Config"; + return jsonRep; + } +} + +class VeilidUpdateRouteChange implements VeilidUpdate { + final List<String> deadRoutes; + final List<String> deadRemoteRoutes; + // + VeilidUpdateRouteChange({ + required this.deadRoutes, + required this.deadRemoteRoutes, + }); + + @override + Map<String, dynamic> toJson() { + return { + 'dead_routes': deadRoutes.map((p) => p).toList(), + 'dead_remote_routes': deadRemoteRoutes.map((p) => p).toList() + }; + } +} + +class VeilidUpdateValueChange implements VeilidUpdate { + final TypedKey key; + final List<ValueSubkeyRange> subkeys; + final int count; + final ValueData valueData; + + // + VeilidUpdateValueChange({ + required this.key, + required this.subkeys, + required this.count, + required this.valueData, + }); + + @override + Map<String, dynamic> toJson() { + return { + 'key': key.toJson(), + 'subkeys': subkeys.map((p) => p.toJson()).toList(), + 'count': count, + 'value_data': valueData.toJson(), + }; + } +} + +////////////////////////////////////// +/// VeilidStateAttachment + +class VeilidStateAttachment { + final AttachmentState state; + final bool publicInternetReady; + final bool localNetworkReady; + + VeilidStateAttachment( + this.state, this.publicInternetReady, this.localNetworkReady); + + VeilidStateAttachment.fromJson(dynamic json) + : state = AttachmentState.fromJson(json['state']), + publicInternetReady = json['public_internet_ready'], + localNetworkReady = json['local_network_ready']; + + Map<String, dynamic> toJson() { + return { + 'state': state.toJson(), + 'public_internet_ready': publicInternetReady, + 'local_network_ready': localNetworkReady, + }; + } +} + +////////////////////////////////////// +/// VeilidStateNetwork + +class VeilidStateNetwork { + final bool started; + final BigInt bpsDown; + final BigInt bpsUp; + final List<PeerTableData> peers; + + VeilidStateNetwork( + {required this.started, + required this.bpsDown, + required this.bpsUp, + required this.peers}); + + VeilidStateNetwork.fromJson(dynamic json) + : started = json['started'], + bpsDown = BigInt.parse(json['bps_down']), + bpsUp = BigInt.parse(json['bps_up']), + peers = List<PeerTableData>.from( + json['peers'].map((j) => PeerTableData.fromJson(j))); + + Map<String, dynamic> toJson() { + return { + 'started': started, + 'bps_down': bpsDown.toString(), + 'bps_up': bpsUp.toString(), + 'peers': peers.map((p) => p.toJson()).toList(), + }; + } +} + +////////////////////////////////////// +/// VeilidStateConfig + +class VeilidStateConfig { + final Map<String, dynamic> config; + + VeilidStateConfig({ + required this.config, + }); + + VeilidStateConfig.fromJson(dynamic json) :
config = json['config']; + + Map toJson() { + return {'config': config}; + } +} + +////////////////////////////////////// +/// VeilidState + +class VeilidState { + final VeilidStateAttachment attachment; + final VeilidStateNetwork network; + final VeilidStateConfig config; + + VeilidState.fromJson(dynamic json) + : attachment = VeilidStateAttachment.fromJson(json['attachment']), + network = VeilidStateNetwork.fromJson(json['network']), + config = VeilidStateConfig.fromJson(json['config']); + + Map toJson() { + return { + 'attachment': attachment.toJson(), + 'network': network.toJson(), + 'config': config.toJson() + }; + } +} diff --git a/veilid-flutter/lib/veilid_table_db.dart b/veilid-flutter/lib/veilid_table_db.dart new file mode 100644 index 00000000..84923fd0 --- /dev/null +++ b/veilid-flutter/lib/veilid_table_db.dart @@ -0,0 +1,73 @@ +import 'dart:async'; +import 'dart:typed_data'; +import 'dart:convert'; + +///////////////////////////////////// +/// VeilidTableDB +abstract class VeilidTableDBTransaction { + Future commit(); + Future rollback(); + Future store(int col, Uint8List key, Uint8List value); + Future delete(int col, Uint8List key); + + Future storeJson(int col, Uint8List key, Object? object, + {Object? Function(Object? nonEncodable)? toEncodable}) async { + return store(col, key, + utf8.encoder.convert(jsonEncode(object, toEncodable: toEncodable))); + } + + Future storeStringJson(int col, String key, Object? object, + {Object? Function(Object? nonEncodable)? toEncodable}) { + return storeJson(col, utf8.encoder.convert(key), object, + toEncodable: toEncodable); + } +} + +abstract class VeilidTableDB { + int getColumnCount(); + Future> getKeys(int col); + VeilidTableDBTransaction transact(); + Future store(int col, Uint8List key, Uint8List value); + Future load(int col, Uint8List key); + Future delete(int col, Uint8List key); + + Future storeJson(int col, Uint8List key, Object? object, + {Object? Function(Object? nonEncodable)? toEncodable}) { + return store(col, key, + utf8.encoder.convert(jsonEncode(object, toEncodable: toEncodable))); + } + + Future storeStringJson(int col, String key, Object? object, + {Object? Function(Object? nonEncodable)? toEncodable}) { + return storeJson(col, utf8.encoder.convert(key), object, + toEncodable: toEncodable); + } + + Future loadJson(int col, Uint8List key, + {Object? Function(Object? key, Object? value)? reviver}) async { + var s = await load(col, key); + if (s == null) { + return null; + } + return jsonDecode(utf8.decode(s, allowMalformed: false), reviver: reviver); + } + + Future loadStringJson(int col, String key, + {Object? Function(Object? key, Object? value)? reviver}) { + return loadJson(col, utf8.encoder.convert(key), reviver: reviver); + } + + Future deleteJson(int col, Uint8List key, + {Object? Function(Object? key, Object? value)? reviver}) async { + var s = await delete(col, key); + if (s == null) { + return null; + } + return jsonDecode(utf8.decode(s, allowMalformed: false), reviver: reviver); + } + + Future deleteStringJson(int col, String key, + {Object? Function(Object? key, Object? value)? 
reviver}) { + return deleteJson(col, utf8.encoder.convert(key), reviver: reviver); + } +} diff --git a/veilid-flutter/macos/veilid.podspec b/veilid-flutter/macos/veilid.podspec index 3646e82e..fd6ced3f 100644 --- a/veilid-flutter/macos/veilid.podspec +++ b/veilid-flutter/macos/veilid.podspec @@ -16,8 +16,8 @@ Veilid Network Plugin s.source_files = 'Classes/**/*' s.dependency 'FlutterMacOS' - s.platform = :osx, '10.12.2' - s.osx.deployment_target = '10.12.2' + s.platform = :osx, '10.14.6' + s.osx.deployment_target = '10.14.6' s.pod_target_xcconfig = { 'DEFINES_MODULE' => 'YES' } s.swift_version = '5.0' diff --git a/veilid-flutter/pubspec.yaml b/veilid-flutter/pubspec.yaml index 9830207c..04dbdad0 100644 --- a/veilid-flutter/pubspec.yaml +++ b/veilid-flutter/pubspec.yaml @@ -5,8 +5,7 @@ homepage: https://veilid.com publish_to: "none" # Remove this line if you wish to publish to pub.dev environment: - sdk: ">=2.16.1 <3.0.0" - flutter: ">=2.5.0" + sdk: '>=3.0.0 <4.0.0' dependencies: flutter: @@ -17,6 +16,8 @@ dependencies: change_case: ^1.0.1 path_provider: ^2.0.9 path: ^1.8.0 + system_info2: ^3.0.2 + charcode: ^1.3.1 dev_dependencies: flutter_test: diff --git a/veilid-flutter/rust/Cargo.toml b/veilid-flutter/rust/Cargo.toml index 8b8b0773..002db7da 100644 --- a/veilid-flutter/rust/Cargo.toml +++ b/veilid-flutter/rust/Cargo.toml @@ -27,10 +27,10 @@ data-encoding = { version = "^2" } # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -tracing-opentelemetry = "^0" -opentelemetry = { version = "^0" } -opentelemetry-otlp = { version = "^0" } -opentelemetry-semantic-conventions = "^0" +tracing-opentelemetry = "0.18" +opentelemetry = { version = "0.18" } +opentelemetry-otlp = { version = "0.11" } +opentelemetry-semantic-conventions = "0.10" async-std = { version = "^1", features = ["unstable"], optional = true } tokio = { version = "^1", features = ["full"], optional = true } tokio-stream = { version = "^0", features = ["net"], optional = true } diff --git a/veilid-flutter/rust/src/dart_ffi.rs b/veilid-flutter/rust/src/dart_ffi.rs index 4650844d..d18d0e30 100644 --- a/veilid-flutter/rust/src/dart_ffi.rs +++ b/veilid-flutter/rust/src/dart_ffi.rs @@ -33,7 +33,7 @@ lazy_static! { Mutex::new(BTreeMap::new()); } -async fn get_veilid_api() -> Result { +async fn get_veilid_api() -> veilid_core::VeilidAPIResult { let api_lock = VEILID_API.lock().await; api_lock .as_ref() @@ -41,7 +41,7 @@ async fn get_veilid_api() -> Result Result { +async fn take_veilid_api() -> veilid_core::VeilidAPIResult { let mut api_lock = VEILID_API.lock().await; api_lock .take() @@ -55,7 +55,7 @@ async fn take_veilid_api() -> Result = Result; +type APIResult = veilid_core::VeilidAPIResult; const APIRESULT_VOID: APIResult<()> = APIResult::Ok(()); // Parse target @@ -74,7 +74,7 @@ async fn parse_target(s: String) -> APIResult { } // Is this a node id? 
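+    // Node IDs are typed keys (a crypto-kind FourCC paired with the key), so the string is parsed as a TypedKey here rather than a bare PublicKey.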
- if let Ok(nid) = veilid_core::PublicKey::from_str(&s) { + if let Ok(nid) = veilid_core::TypedKey::from_str(&s) { return Ok(veilid_core::Target::NodeId(nid)); } @@ -481,7 +481,7 @@ pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr let routing_context = { let rc = ROUTING_CONTEXTS.lock(); let Some(routing_context) = rc.get(&id) else { - return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_app_call", "id", id)); + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_app_message", "id", id)); }; routing_context.clone() }; @@ -492,6 +492,158 @@ pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr }); } +#[no_mangle] +pub extern "C" fn routing_context_create_dht_record(port: i64, id: u32, kind: u32, schema: FfiStr) { + let crypto_kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let schema: veilid_core::DHTSchema = veilid_core::deserialize_opt_json(schema.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_create_dht_record", "id", id)); + }; + routing_context.clone() + }; + + let dht_record_descriptor = routing_context.create_dht_record(crypto_kind, schema).await?; + APIResult::Ok(dht_record_descriptor) + }); +} + +#[no_mangle] +pub extern "C" fn routing_context_open_dht_record(port: i64, id: u32, key: FfiStr, writer: FfiStr) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let writer: Option = writer.into_opt_string().map(|s| veilid_core::deserialize_json(&s).unwrap()); + DartIsolateWrapper::new(port).spawn_result_json(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_open_dht_record", "id", id)); + }; + routing_context.clone() + }; + let dht_record_descriptor = routing_context.open_dht_record(key, writer).await?; + APIResult::Ok(dht_record_descriptor) + }); +} + + +#[no_mangle] +pub extern "C" fn routing_context_close_dht_record(port: i64, id: u32, key: FfiStr) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + DartIsolateWrapper::new(port).spawn_result(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_close_dht_record", "id", id)); + }; + routing_context.clone() + }; + routing_context.close_dht_record(key).await?; + APIRESULT_VOID + }); +} + + +#[no_mangle] +pub extern "C" fn routing_context_delete_dht_record(port: i64, id: u32, key: FfiStr) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + DartIsolateWrapper::new(port).spawn_result(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_delete_dht_record", "id", id)); + }; + routing_context.clone() + }; + routing_context.delete_dht_record(key).await?; + APIRESULT_VOID + }); +} + + +#[no_mangle] +pub extern "C" fn 
routing_context_get_dht_value(port: i64, id: u32, key: FfiStr, subkey: u32, force_refresh: bool) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + DartIsolateWrapper::new(port).spawn_result_opt_json(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_get_dht_value", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.get_dht_value(key, subkey, force_refresh).await?; + APIResult::Ok(res) + }); +} + + +#[no_mangle] +pub extern "C" fn routing_context_set_dht_value(port: i64, id: u32, key: FfiStr, subkey: u32, data: FfiStr) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let data: Vec<u8> = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + DartIsolateWrapper::new(port).spawn_result_opt_json(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_set_dht_value", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.set_dht_value(key, subkey, data).await?; + APIResult::Ok(res) + }); +} + + +#[no_mangle] +pub extern "C" fn routing_context_watch_dht_values(port: i64, id: u32, key: FfiStr, subkeys: FfiStr, expiration: u64, count: u32) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let subkeys: veilid_core::ValueSubkeyRangeSet = veilid_core::deserialize_opt_json(subkeys.into_opt_string()).unwrap(); + let expiration = veilid_core::Timestamp::from(expiration); + + DartIsolateWrapper::new(port).spawn_result(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_watch_dht_values", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.watch_dht_values(key, subkeys, expiration, count).await?; + APIResult::Ok(res.as_u64()) + }); +} + + +#[no_mangle] +pub extern "C" fn routing_context_cancel_dht_watch(port: i64, id: u32, key: FfiStr, subkeys: FfiStr) { + let key: veilid_core::TypedKey = veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let subkeys: veilid_core::ValueSubkeyRangeSet = veilid_core::deserialize_opt_json(subkeys.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let routing_context = { + let rc = ROUTING_CONTEXTS.lock(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_cancel_dht_watch", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.cancel_dht_watch(key, subkeys).await?; + APIResult::Ok(res) + }); +} + + #[no_mangle] pub extern "C" fn new_private_route(port: i64) { DartIsolateWrapper::new(port).spawn_result_json(async move { @@ -629,17 +781,20 @@ pub extern "C" fn table_db_get_column_count(id: u32) -> u32 { } #[no_mangle] -pub extern "C" fn table_db_get_keys(id: u32, col: u32) -> *mut c_char { - let table_dbs = TABLE_DBS.lock(); - let Some(table_db) = table_dbs.get(&id) else { - return std::ptr::null_mut(); - }; - let Ok(keys) = table_db.clone().get_keys(col) else { -
return std::ptr::null_mut(); - }; - let keys: Vec = keys.into_iter().map(|k| BASE64URL_NOPAD.encode(&k)).collect(); - let out = veilid_core::serialize_json(keys); - out.into_ffi_value() +pub extern "C" fn table_db_get_keys(port: i64, id: u32, col: u32) { + DartIsolateWrapper::new(port).spawn_result_json(async move { + let table_db = { + let table_dbs = TABLE_DBS.lock(); + let Some(table_db) = table_dbs.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("table_db_get_keys", "id", id)); + }; + table_db.clone() + }; + + let keys = table_db.get_keys(col).await?; + let out: Vec = keys.into_iter().map(|k| BASE64URL_NOPAD.encode(&k)).collect(); + APIResult::Ok(out) + }); } fn add_table_db_transaction(tdbt: veilid_core::TableDBTransaction) -> u32 { @@ -684,7 +839,7 @@ pub extern "C" fn table_db_transaction_commit(port: i64, id: u32) { tdbt.clone() }; - tdbt.commit().await.map_err(veilid_core::VeilidAPIError::generic)?; + tdbt.commit().await?; APIRESULT_VOID }); } @@ -783,7 +938,7 @@ pub extern "C" fn table_db_store(port: i64, id: u32, col: u32, key: FfiStr, valu table_db.clone() }; - table_db.store(col, &key, &value).await.map_err(veilid_core::VeilidAPIError::generic)?; + table_db.store(col, &key, &value).await?; APIRESULT_VOID }); } @@ -805,7 +960,7 @@ pub extern "C" fn table_db_load(port: i64, id: u32, col: u32, key: FfiStr) { table_db.clone() }; - let out = table_db.load(col, &key).map_err(veilid_core::VeilidAPIError::generic)?; + let out = table_db.load(col, &key).await?; let out = out.map(|x| data_encoding::BASE64URL_NOPAD.encode(&x)); APIResult::Ok(out) }); @@ -829,11 +984,501 @@ pub extern "C" fn table_db_delete(port: i64, id: u32, col: u32, key: FfiStr) { table_db.clone() }; - let out = table_db.delete(col, &key).await.map_err(veilid_core::VeilidAPIError::generic)?; + let out = table_db.delete(col, &key).await?; + let out = out.map(|x| data_encoding::BASE64URL_NOPAD.encode(&x)); APIResult::Ok(out) }); } + +#[no_mangle] +pub extern "C" fn valid_crypto_kinds() -> *mut c_char { + veilid_core::serialize_json(veilid_core::VALID_CRYPTO_KINDS.iter().map(|k| (*k).into()).collect::>()).into_ffi_value() +} + +#[no_mangle] +pub extern "C" fn best_crypto_kind() -> u32 { + veilid_core::best_crypto_kind().into() +} + +#[no_mangle] +pub extern "C" fn verify_signatures(port: i64, node_ids: FfiStr, data: FfiStr, signatures: FfiStr) { + let node_ids: Vec = + veilid_core::deserialize_opt_json(node_ids.into_opt_string()).unwrap(); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let typed_signatures: Vec = + veilid_core::deserialize_opt_json(signatures.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let out = crypto.verify_signatures(&node_ids, &data, &typed_signatures)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn generate_signatures(port: i64, data: FfiStr, key_pairs: FfiStr) { + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let key_pairs: Vec = + veilid_core::deserialize_opt_json(key_pairs.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let out = crypto.generate_signatures(&data, &key_pairs, |k, s| { + 
veilid_core::TypedSignature::new(k.kind, s) + })?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn generate_key_pair(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let out = veilid_core::Crypto::generate_keypair(kind)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_cached_dh(port: i64, kind: u32, key: FfiStr, secret: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let secret: veilid_core::SecretKey = + veilid_core::deserialize_opt_json(secret.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_cached_dh", "kind", kind.to_string()))?; + let out = csv.cached_dh(&key, &secret)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_compute_dh(port: i64, kind: u32, key: FfiStr, secret: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let secret: veilid_core::SecretKey = + veilid_core::deserialize_opt_json(secret.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_compute_dh", "kind", kind.to_string()))?; + let out = csv.compute_dh(&key, &secret)?; + APIResult::Ok(out) + }); +} + + +#[no_mangle] +pub extern "C" fn crypto_random_bytes(port: i64, kind: u32, len: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_random_bytes", "kind", kind.to_string()))?; + let out = csv.random_bytes(len); + let out = data_encoding::BASE64URL_NOPAD.encode(&out); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_default_salt_length(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_default_salt_length", "kind", kind.to_string()))?; + let out = csv.default_salt_length(); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_hash_password(port: i64, kind: u32, password: FfiStr, salt: FfiStr ) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode( + password.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode( + salt.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = 
veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_hash_password", "kind", kind.to_string()))?; + let out = csv.hash_password(&password, &salt)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_verify_password(port: i64, kind: u32, password: FfiStr, password_hash: FfiStr ) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode( + password.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + let password_hash = password_hash.into_opt_string().unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_verify_password", "kind", kind.to_string()))?; + let out = csv.verify_password(&password, &password_hash)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_derive_shared_secret(port: i64, kind: u32, password: FfiStr, salt: FfiStr ) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode( + password.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode( + salt.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_derive_shared_secret", "kind", kind.to_string()))?; + let out = csv.derive_shared_secret(&password, &salt)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_random_nonce(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_random_nonce", "kind", kind.to_string()))?; + let out = csv.random_nonce(); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_random_shared_secret(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_random_shared_secret", "kind", kind.to_string()))?; + let out = csv.random_shared_secret(); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_generate_key_pair(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_generate_key_pair", "kind", kind.to_string()))?; + let out = csv.generate_keypair(); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_generate_hash(port: i64, kind: u32, data: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); 
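+    // As with the other crypto_* FFI entry points in this file, 'kind' is a FourCC crypto-kind code and 'data' arrives as a base64url string without padding; it is decoded below before hashing.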
+ + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_generate_hash", "kind", kind.to_string()))?; + let out = csv.generate_hash(&data); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_validate_key_pair(port: i64, kind: u32, key: FfiStr, secret: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let secret: veilid_core::SecretKey = + veilid_core::deserialize_opt_json(secret.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_validate_key_pair", "kind", kind.to_string()))?; + let out = csv.validate_keypair(&key, &secret); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_validate_hash(port: i64, kind: u32, data: FfiStr, hash: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let hash: veilid_core::HashDigest = + veilid_core::deserialize_opt_json(hash.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_validate_hash", "kind", kind.to_string()))?; + let out = csv.validate_hash(&data, &hash); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_distance(port: i64, kind: u32, key1: FfiStr, key2: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key1: veilid_core::CryptoKey = + veilid_core::deserialize_opt_json(key1.into_opt_string()).unwrap(); + let key2: veilid_core::CryptoKey = + veilid_core::deserialize_opt_json(key2.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_distance", "kind", kind.to_string()))?; + let out = csv.distance(&key1, &key2); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_sign(port: i64, kind: u32, key: FfiStr, secret: FfiStr, data: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = + veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let secret: veilid_core::CryptoKey = + veilid_core::deserialize_opt_json(secret.into_opt_string()).unwrap(); + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| 
veilid_core::VeilidAPIError::invalid_argument("crypto_sign", "kind", kind.to_string()))?; + let out = csv.sign(&key, &secret, &data)?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_verify(port: i64, kind: u32, key: FfiStr, data: FfiStr, signature: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = + veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap(); + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode( + data.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + let signature: veilid_core::Signature = + veilid_core::deserialize_opt_json(signature.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_verify", "kind", kind.to_string()))?; + csv.verify(&key, &data, &signature)?; + APIRESULT_VOID + }); +} + +#[no_mangle] +pub extern "C" fn crypto_aead_overhead(port: i64, kind: u32) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + DartIsolateWrapper::new(port).spawn_result(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_aead_overhead", "kind", kind.to_string()))?; + let out = csv.aead_overhead(); + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_decrypt_aead(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr, associated_data: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode( + body.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_opt_json(nonce.into_opt_string()).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_opt_json(shared_secret.into_opt_string()).unwrap(); + + let associated_data: Option> = associated_data.into_opt_string().map(|s| data_encoding::BASE64URL_NOPAD.decode(s.as_bytes()).unwrap()); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_decrypt_aead", "kind", kind.to_string()))?; + let out = csv.decrypt_aead(&body, &nonce, &shared_secret, match &associated_data { + Some(ad) => Some(ad.as_slice()), + None => None + })?; + APIResult::Ok(out) + }); +} + +#[no_mangle] +pub extern "C" fn crypto_encrypt_aead(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr, associated_data: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode( + body.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_opt_json(nonce.into_opt_string()).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_opt_json(shared_secret.into_opt_string()).unwrap(); + + let associated_data: Option> = associated_data.into_opt_string().map(|s| data_encoding::BASE64URL_NOPAD.decode(s.as_bytes()).unwrap()); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let 
veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_encrypt_aead", "kind", kind.to_string()))?; + let out = csv.encrypt_aead(&body, &nonce, &shared_secret, match &associated_data { + Some(ad) => Some(ad.as_slice()), + None => None + })?; + APIResult::Ok(out) + }); +} + + + +#[no_mangle] +pub extern "C" fn crypto_crypt_no_auth(port: i64, kind: u32, body: FfiStr, nonce: FfiStr, shared_secret: FfiStr) { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let mut body: Vec = data_encoding::BASE64URL_NOPAD + .decode( + body.into_opt_string() + .unwrap() + .as_bytes(), + ) + .unwrap(); + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_opt_json(nonce.into_opt_string()).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_opt_json(shared_secret.into_opt_string()).unwrap(); + + DartIsolateWrapper::new(port).spawn_result_json(async move { + let veilid_api = get_veilid_api().await?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| veilid_core::VeilidAPIError::invalid_argument("crypto_crypt_no_auth", "kind", kind.to_string()))?; + csv.crypt_in_place_no_auth(&mut body, &nonce, &shared_secret); + APIResult::Ok(body) + }); +} + +#[no_mangle] +pub extern "C" fn now() -> u64 { + veilid_core::get_aligned_timestamp().as_u64() +} + #[no_mangle] pub extern "C" fn debug(port: i64, command: FfiStr) { let command = command.into_opt_string().unwrap_or_default(); diff --git a/veilid-flutter/rust/src/dart_isolate_wrapper.rs b/veilid-flutter/rust/src/dart_isolate_wrapper.rs index 2c0bb371..3adc7d50 100644 --- a/veilid-flutter/rust/src/dart_isolate_wrapper.rs +++ b/veilid-flutter/rust/src/dart_isolate_wrapper.rs @@ -52,6 +52,17 @@ impl DartIsolateWrapper { }); } + pub fn spawn_result_opt_json(self, future: F) + where + F: Future, E>> + Send + 'static, + T: Serialize + Debug, + E: Serialize + Debug, + { + spawn(async move { + self.result_opt_json(future.await); + }); + } + pub fn result(self, result: Result) -> bool { match result { Ok(v) => self.ok(v), @@ -67,6 +78,16 @@ impl DartIsolateWrapper { Err(e) => self.err_json(e), } } + pub fn result_opt_json( + self, + result: Result, E>, + ) -> bool { + match result { + Ok(Some(v)) => self.ok_json(v), + Ok(None) => self.ok(()), + Err(e) => self.err_json(e), + } + } pub fn ok(self, value: T) -> bool { self.isolate .post(vec![MESSAGE_OK.into_dart(), value.into_dart()]) diff --git a/veilid-flutter/test/veilid_test.dart b/veilid-flutter/test/veilid_test.dart index 659ebb1d..82a21982 100644 --- a/veilid-flutter/test/veilid_test.dart +++ b/veilid-flutter/test/veilid_test.dart @@ -11,6 +11,6 @@ void main() { tearDown(() {}); test('veilidVersionString', () async { - expect(Veilid.instance.veilidVersionString(), '0.1.0'); + expect(api.veilidVersionString(), '0.1.0'); }); } diff --git a/veilid-server/Cargo.toml b/veilid-server/Cargo.toml index 67f85e66..977e55a1 100644 --- a/veilid-server/Cargo.toml +++ b/veilid-server/Cargo.toml @@ -24,11 +24,11 @@ veilid-core = { path = "../veilid-core", default-features = false } tracing = { version = "^0", features = ["log", "attributes"] } tracing-subscriber = { version = "^0", features = ["env-filter"] } tracing-appender = "^0" -tracing-opentelemetry = "^0" +tracing-opentelemetry = "0.18" # Buggy: tracing-error = "^0" -opentelemetry = { version = "^0" } -opentelemetry-otlp = { version = "^0" } 
-opentelemetry-semantic-conventions = "^0" +opentelemetry = { version = "0.18" } +opentelemetry-otlp = { version = "0.11" } +opentelemetry-semantic-conventions = "0.10" async-std = { version = "^1", features = ["unstable"], optional = true } tokio = { version = "^1", features = ["full", "tracing"], optional = true } console-subscriber = { version = "^0", optional = true } @@ -52,11 +52,12 @@ futures-util = { version = "^0", default_features = false, features = ["alloc"] url = "^2" ctrlc = "^3" lazy_static = "^1" -bugsalot = "^0" +bugsalot = { git = "https://github.com/crioux/bugsalot.git" } flume = { version = "^0", features = ["async"] } rpassword = "^6" hostname = "^0" stop-token = { version = "^0", default-features = false } +sysinfo = { version = "^0.28.4", default-features = false } [target.'cfg(windows)'.dependencies] windows-service = "^0" diff --git a/veilid-server/src/client_api.rs b/veilid-server/src/client_api.rs index fac14b9a..269dce3a 100644 --- a/veilid-server/src/client_api.rs +++ b/veilid-server/src/client_api.rs @@ -19,7 +19,7 @@ use veilid_core::*; // Encoding for ApiResult fn encode_api_result( - result: &Result, + result: &VeilidAPIResult, builder: &mut api_result::Builder, ) { match result { diff --git a/veilid-server/src/cmdline.rs b/veilid-server/src/cmdline.rs index 9f37de05..3d4fe163 100644 --- a/veilid-server/src/cmdline.rs +++ b/veilid-server/src/cmdline.rs @@ -42,6 +42,19 @@ fn do_clap_matches(default_config_path: &OsStr) -> Result Result EyreResult<(Settings, ArgMatches)> { if matches.occurrences_of("delete-table-store") != 0 { settingsrw.core.table_store.delete = true; } + if matches.occurrences_of("password") != 0 { + settingsrw.core.protected_store.device_encryption_key_password = matches.value_of("password").unwrap().to_owned(); + } + if matches.occurrences_of("new-password") != 0 { + settingsrw.core.protected_store.new_device_encryption_key_password = Some(matches.value_of("new-password").unwrap().to_owned()); + } + if matches.occurrences_of("dump-txt-record") != 0 { // Turn off terminal logging so we can be interactive settingsrw.logging.terminal.enabled = false; diff --git a/veilid-server/src/main.rs b/veilid-server/src/main.rs index 19b08b71..63bdf1b3 100644 --- a/veilid-server/src/main.rs +++ b/veilid-server/src/main.rs @@ -45,10 +45,27 @@ fn main() -> EyreResult<()> { // --- Generate DHT Key --- if matches.occurrences_of("generate-key-pair") != 0 { if let Some(ckstr) = matches.get_one::("generate-key-pair") { - let ck: veilid_core::CryptoKind = - veilid_core::FourCC::from_str(ckstr).wrap_err("couldn't parse crypto kind")?; - let tkp = veilid_core::Crypto::generate_keypair(ck).wrap_err("invalid crypto kind")?; - println!("{}", tkp.to_string()); + if ckstr == "" { + let mut tks = veilid_core::TypedKeySet::new(); + let mut tss = veilid_core::TypedSecretSet::new(); + for ck in veilid_core::VALID_CRYPTO_KINDS { + let tkp = veilid_core::Crypto::generate_keypair(ck) + .wrap_err("invalid crypto kind")?; + tks.add(veilid_core::TypedKey::new(tkp.kind, tkp.value.key)); + tss.add(veilid_core::TypedSecret::new(tkp.kind, tkp.value.secret)); + } + println!( + "Public Keys:\n{}\nSecret Keys:\n{}\n", + tks.to_string(), + tss.to_string() + ); + } else { + let ck: veilid_core::CryptoKind = + veilid_core::FourCC::from_str(ckstr).wrap_err("couldn't parse crypto kind")?; + let tkp = + veilid_core::Crypto::generate_keypair(ck).wrap_err("invalid crypto kind")?; + println!("{}", tkp.to_string()); + } return Ok(()); } else { bail!("missing crypto kind"); diff --git 
a/veilid-server/src/server.rs b/veilid-server/src/server.rs index 45385203..1ffa64f1 100644 --- a/veilid-server/src/server.rs +++ b/veilid-server/src/server.rs @@ -59,8 +59,10 @@ pub async fn run_veilid_server_internal( // Create VeilidCore setup let update_callback = Arc::new(move |change: veilid_core::VeilidUpdate| { - if sender.send(change).is_err() { - error!("error sending veilid update callback"); + if let Err(e) = sender.send(change) { + // Don't log here, as that loops the update callback in some cases and will deadlock + let change = e.into_inner(); + eprintln!("error sending veilid update callback: {:?}", change); } }); let config_callback = settings.get_core_config_callback(); @@ -88,12 +90,8 @@ pub async fn run_veilid_server_internal( // Process all updates let capi2 = capi.clone(); - let mut shutdown_switch = { - let shutdown_switch_locked = SHUTDOWN_SWITCH.lock(); - (*shutdown_switch_locked).as_ref().map(|ss| ss.instance()) - } - .unwrap() - .fuse(); + let update_receiver_shutdown = SingleShotEventual::new(Some(())); + let mut update_receiver_shutdown_instance = update_receiver_shutdown.instance().fuse(); let update_receiver_jh = spawn_local(async move { loop { select! { @@ -107,7 +105,7 @@ pub async fn run_veilid_server_internal( break; } } - _ = shutdown_switch => { + _ = update_receiver_shutdown_instance => { break; } }; @@ -177,6 +175,9 @@ pub async fn run_veilid_server_internal( // Shut down Veilid API to release state change sender veilid_api.shutdown().await; + // Shut down update receiver now that there are no more updates + update_receiver_shutdown.resolve(()).await; + // Wait for update receiver to exit let _ = update_receiver_jh.await; diff --git a/veilid-server/src/settings.rs b/veilid-server/src/settings.rs index 3867edbb..ab7e36ef 100644 --- a/veilid-server/src/settings.rs +++ b/veilid-server/src/settings.rs @@ -6,12 +6,14 @@ use serde_derive::*; use std::ffi::OsStr; use std::net::SocketAddr; use std::path::{Path, PathBuf}; +use std::sync::Arc; +use sysinfo::{DiskExt, SystemExt}; use url::Url; use veilid_core::tools::*; use veilid_core::*; pub fn load_default_config() -> EyreResult { - let default_config = String::from( + let mut default_config = String::from( r#"--- daemon: enabled: false @@ -46,8 +48,10 @@ core: protected_store: allow_insecure_fallback: true always_use_insecure_storage: true - insecure_fallback_directory: '%INSECURE_FALLBACK_DIRECTORY%' + directory: '%DIRECTORY%' delete: false + device_encryption_key_password: '%DEVICE_ENCRYPTION_KEY_PASSWORD%' + new_device_encryption_key_password: %NEW_DEVICE_ENCRYPTION_KEY_PASSWORD% table_store: directory: '%TABLE_STORE_DIRECTORY%' delete: false @@ -78,23 +82,29 @@ core: queue_size: 1024 max_timestamp_behind_ms: 10000 max_timestamp_ahead_ms: 10000 - timeout_ms: 10000 + timeout_ms: 5000 max_route_hop_count: 4 default_route_hop_count: 1 dht: - resolve_node_timeout: - resolve_node_count: 20 - resolve_node_fanout: 3 max_find_node_count: 20 - get_value_timeout: - get_value_count: 20 - get_value_fanout: 3 - set_value_timeout: - set_value_count: 20 - set_value_fanout: 5 + resolve_node_timeout_ms: 10000 + resolve_node_count: 1 + resolve_node_fanout: 4 + get_value_timeout_ms: 10000 + get_value_count: 3 + get_value_fanout: 4 + set_value_timeout_ms: 10000 + set_value_count: 5 + set_value_fanout: 4 min_peer_count: 20 min_peer_refresh_time_ms: 2000 validate_dial_info_receipt_time_ms: 2000 + local_subkey_cache_size: 128 + local_max_subkey_cache_memory_mb: 256 + remote_subkey_cache_size: 1024 + remote_max_records: 65536 + 
remote_max_subkey_cache_memory_mb: %REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB% + remote_max_storage_space_mb: 0 upnp: true detect_address_changes: true restricted_nat_retries: 0 @@ -150,8 +160,8 @@ core: &Settings::get_default_block_store_path().to_string_lossy(), ) .replace( - "%INSECURE_FALLBACK_DIRECTORY%", - &Settings::get_default_protected_store_insecure_fallback_directory().to_string_lossy(), + "%DIRECTORY%", + &Settings::get_default_protected_store_directory().to_string_lossy(), ) .replace( "%CERTIFICATE_PATH%", @@ -164,7 +174,35 @@ core: &Settings::get_default_private_key_directory() .join("server.key") .to_string_lossy(), + ) + .replace( + "%REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%", + &Settings::get_default_remote_max_subkey_cache_memory_mb().to_string(), ); + + let dek_password = if let Some(dek_password) = std::env::var_os("DEK_PASSWORD") { + dek_password + .to_str() + .ok_or_else(|| eyre!("DEK_PASSWORD is not valid unicode"))? + .to_owned() + } else { + "".to_owned() + }; + default_config = default_config.replace("%DEVICE_ENCRYPTION_KEY_PASSWORD%", &dek_password); + + let new_dek_password = if let Some(new_dek_password) = std::env::var_os("NEW_DEK_PASSWORD") { + format!( + "'{}'", + new_dek_password + .to_str() + .ok_or_else(|| eyre!("NEW_DEK_PASSWORD is not valid unicode"))? + ) + } else { + "null".to_owned() + }; + default_config = + default_config.replace("%NEW_DEVICE_ENCRYPTION_KEY_PASSWORD%", &new_dek_password); + config::Config::builder() .add_source(config::File::from_str( &default_config, @@ -499,19 +537,25 @@ pub struct Rpc { #[derive(Debug, Deserialize, Serialize)] pub struct Dht { - pub resolve_node_timeout_ms: Option, + pub max_find_node_count: u32, + pub resolve_node_timeout_ms: u32, pub resolve_node_count: u32, pub resolve_node_fanout: u32, - pub max_find_node_count: u32, - pub get_value_timeout_ms: Option, + pub get_value_timeout_ms: u32, pub get_value_count: u32, pub get_value_fanout: u32, - pub set_value_timeout_ms: Option, + pub set_value_timeout_ms: u32, pub set_value_count: u32, pub set_value_fanout: u32, pub min_peer_count: u32, pub min_peer_refresh_time_ms: u32, pub validate_dial_info_receipt_time_ms: u32, + pub local_subkey_cache_size: u32, + pub local_max_subkey_cache_memory_mb: u32, + pub remote_subkey_cache_size: u32, + pub remote_max_records: u32, + pub remote_max_subkey_cache_memory_mb: u32, + pub remote_max_storage_space_mb: u32, } #[derive(Debug, Deserialize, Serialize)] @@ -569,8 +613,10 @@ pub struct BlockStore { pub struct ProtectedStore { pub allow_insecure_fallback: bool, pub always_use_insecure_storage: bool, - pub insecure_fallback_directory: PathBuf, + pub directory: PathBuf, pub delete: bool, + pub device_encryption_key_password: String, + pub new_device_encryption_key_password: Option, } #[derive(Debug, Deserialize, Serialize)] @@ -621,7 +667,13 @@ impl Settings { } // Generate config - let inner: SettingsInner = cfg.try_deserialize()?; + let mut inner: SettingsInner = cfg.try_deserialize()?; + + // Fill in missing defaults + if inner.core.network.dht.remote_max_storage_space_mb == 0 { + inner.core.network.dht.remote_max_storage_space_mb = + Self::get_default_remote_max_storage_space_mb(&inner); + } // Ok(Self { @@ -772,7 +824,7 @@ impl Settings { bs_path } - pub fn get_default_protected_store_insecure_fallback_directory() -> PathBuf { + pub fn get_default_protected_store_directory() -> PathBuf { #[cfg(unix)] { let globalpath = PathBuf::from("/var/db/veilid-server/protected_store"); @@ -833,6 +885,36 @@ impl Settings { pk_path } + pub fn 
get_default_remote_max_subkey_cache_memory_mb() -> u32 { + let sys = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory()); + ((sys.free_memory() / (1024u64 * 1024u64)) / 16) as u32 + } + + pub fn get_default_remote_max_storage_space_mb(inner: &SettingsInner) -> u32 { + let mut sys = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_disks()); + let dht_storage_path = inner.core.table_store.directory.clone(); + // Sort longer mount point paths first since we want the mount point closest to our table store directory + sys.sort_disks_by(|a, b| { + b.mount_point() + .to_string_lossy() + .len() + .cmp(&a.mount_point().to_string_lossy().len()) + }); + for disk in sys.disks() { + if dht_storage_path.starts_with(disk.mount_point()) { + let available_mb = disk.available_space() / 1_000_000u64; + if available_mb > 40_000 { + // Default to 10GB if more than 40GB is available + return 10_000; + } + // Default to 1/4 of the available space, if less than 40GB is available + return available_mb as u32; + } + } + // If we can't figure out our storage path go with 1GB of space and pray + 1_000 + } + pub fn set(&self, key: &str, value: &str) -> EyreResult<()> { let mut inner = self.inner.write(); @@ -882,11 +964,19 @@ impl Settings { inner.core.protected_store.always_use_insecure_storage, value ); + set_config_value!(inner.core.protected_store.directory, value); + set_config_value!(inner.core.protected_store.delete, value); set_config_value!( - inner.core.protected_store.insecure_fallback_directory, + inner.core.protected_store.device_encryption_key_password, + value + ); + set_config_value!( + inner + .core + .protected_store + .new_device_encryption_key_password, value ); - set_config_value!(inner.core.protected_store.delete, value); set_config_value!(inner.core.table_store.directory, value); set_config_value!(inner.core.table_store.delete, value); set_config_value!(inner.core.block_store.directory, value); @@ -921,10 +1011,10 @@ impl Settings { set_config_value!(inner.core.network.rpc.timeout_ms, value); set_config_value!(inner.core.network.rpc.max_route_hop_count, value); set_config_value!(inner.core.network.rpc.default_route_hop_count, value); + set_config_value!(inner.core.network.dht.max_find_node_count, value); set_config_value!(inner.core.network.dht.resolve_node_timeout_ms, value); set_config_value!(inner.core.network.dht.resolve_node_count, value); set_config_value!(inner.core.network.dht.resolve_node_fanout, value); - set_config_value!(inner.core.network.dht.max_find_node_count, value); set_config_value!(inner.core.network.dht.get_value_timeout_ms, value); set_config_value!(inner.core.network.dht.get_value_count, value); set_config_value!(inner.core.network.dht.get_value_fanout, value); @@ -937,6 +1027,18 @@ impl Settings { inner.core.network.dht.validate_dial_info_receipt_time_ms, value ); + set_config_value!(inner.core.network.dht.local_subkey_cache_size, value); + set_config_value!( + inner.core.network.dht.local_max_subkey_cache_memory_mb, + value + ); + set_config_value!(inner.core.network.dht.remote_subkey_cache_size, value); + set_config_value!(inner.core.network.dht.remote_max_records, value); + set_config_value!( + inner.core.network.dht.remote_max_subkey_cache_memory_mb, + value + ); + set_config_value!(inner.core.network.dht.remote_max_storage_space_mb, value); set_config_value!(inner.core.network.upnp, value); set_config_value!(inner.core.network.detect_address_changes, value); set_config_value!(inner.core.network.restricted_nat_retries, 
value); @@ -1000,15 +1102,29 @@ impl Settings { "protected_store.always_use_insecure_storage" => Ok(Box::new( inner.core.protected_store.always_use_insecure_storage, )), - "protected_store.insecure_fallback_directory" => Ok(Box::new( + "protected_store.directory" => Ok(Box::new( inner .core .protected_store - .insecure_fallback_directory + .directory .to_string_lossy() .to_string(), )), "protected_store.delete" => Ok(Box::new(inner.core.protected_store.delete)), + "protected_store.device_encryption_key_password" => Ok(Box::new( + inner + .core + .protected_store + .device_encryption_key_password + .clone(), + )), + "protected_store.new_device_encryption_key_password" => Ok(Box::new( + inner + .core + .protected_store + .new_device_encryption_key_password + .clone(), + )), "table_store.directory" => Ok(Box::new( inner @@ -1108,6 +1224,9 @@ impl Settings { "network.rpc.default_route_hop_count" => { Ok(Box::new(inner.core.network.rpc.default_route_hop_count)) } + "network.dht.max_find_node_count" => { + Ok(Box::new(inner.core.network.dht.max_find_node_count)) + } "network.dht.resolve_node_timeout_ms" => { Ok(Box::new(inner.core.network.dht.resolve_node_timeout_ms)) } @@ -1117,9 +1236,6 @@ impl Settings { "network.dht.resolve_node_fanout" => { Ok(Box::new(inner.core.network.dht.resolve_node_fanout)) } - "network.dht.max_find_node_count" => { - Ok(Box::new(inner.core.network.dht.max_find_node_count)) - } "network.dht.get_value_timeout_ms" => { Ok(Box::new(inner.core.network.dht.get_value_timeout_ms)) } @@ -1145,6 +1261,25 @@ impl Settings { "network.dht.validate_dial_info_receipt_time_ms" => Ok(Box::new( inner.core.network.dht.validate_dial_info_receipt_time_ms, )), + "network.dht.local_subkey_cache_size" => { + Ok(Box::new(inner.core.network.dht.local_subkey_cache_size)) + } + "network.dht.local_max_subkey_cache_memory_mb" => Ok(Box::new( + inner.core.network.dht.local_max_subkey_cache_memory_mb, + )), + "network.dht.remote_subkey_cache_size" => { + Ok(Box::new(inner.core.network.dht.remote_subkey_cache_size)) + } + "network.dht.remote_max_records" => { + Ok(Box::new(inner.core.network.dht.remote_max_records)) + } + "network.dht.remote_max_subkey_cache_memory_mb" => Ok(Box::new( + inner.core.network.dht.remote_max_subkey_cache_memory_mb, + )), + "network.dht.remote_max_storage_space_mb" => { + Ok(Box::new(inner.core.network.dht.remote_max_storage_space_mb)) + } + "network.upnp" => Ok(Box::new(inner.core.network.upnp)), "network.detect_address_changes" => { Ok(Box::new(inner.core.network.detect_address_changes)) @@ -1420,10 +1555,15 @@ mod tests { assert_eq!(s.core.protected_store.allow_insecure_fallback, true); assert_eq!(s.core.protected_store.always_use_insecure_storage, true); assert_eq!( - s.core.protected_store.insecure_fallback_directory, - Settings::get_default_protected_store_insecure_fallback_directory() + s.core.protected_store.directory, + Settings::get_default_protected_store_directory() ); assert_eq!(s.core.protected_store.delete, false); + assert_eq!(s.core.protected_store.device_encryption_key_password, ""); + assert_eq!( + s.core.protected_store.new_device_encryption_key_password, + None + ); assert_eq!(s.core.network.connection_initial_timeout_ms, 2_000u32); assert_eq!(s.core.network.connection_inactivity_timeout_ms, 60_000u32); @@ -1446,20 +1586,20 @@ mod tests { assert_eq!(s.core.network.rpc.queue_size, 1024); assert_eq!(s.core.network.rpc.max_timestamp_behind_ms, Some(10_000u32)); assert_eq!(s.core.network.rpc.max_timestamp_ahead_ms, Some(10_000u32)); - 
assert_eq!(s.core.network.rpc.timeout_ms, 10_000u32); + assert_eq!(s.core.network.rpc.timeout_ms, 5_000u32); assert_eq!(s.core.network.rpc.max_route_hop_count, 4); assert_eq!(s.core.network.rpc.default_route_hop_count, 1); // - assert_eq!(s.core.network.dht.resolve_node_timeout_ms, None); - assert_eq!(s.core.network.dht.resolve_node_count, 20u32); - assert_eq!(s.core.network.dht.resolve_node_fanout, 3u32); assert_eq!(s.core.network.dht.max_find_node_count, 20u32); - assert_eq!(s.core.network.dht.get_value_timeout_ms, None); - assert_eq!(s.core.network.dht.get_value_count, 20u32); - assert_eq!(s.core.network.dht.get_value_fanout, 3u32); - assert_eq!(s.core.network.dht.set_value_timeout_ms, None); - assert_eq!(s.core.network.dht.set_value_count, 20u32); - assert_eq!(s.core.network.dht.set_value_fanout, 5u32); + assert_eq!(s.core.network.dht.resolve_node_timeout_ms, 10_000u32); + assert_eq!(s.core.network.dht.resolve_node_count, 1u32); + assert_eq!(s.core.network.dht.resolve_node_fanout, 4u32); + assert_eq!(s.core.network.dht.get_value_timeout_ms, 10_000u32); + assert_eq!(s.core.network.dht.get_value_count, 3u32); + assert_eq!(s.core.network.dht.get_value_fanout, 4u32); + assert_eq!(s.core.network.dht.set_value_timeout_ms, 10_000u32); + assert_eq!(s.core.network.dht.set_value_count, 5u32); + assert_eq!(s.core.network.dht.set_value_fanout, 4u32); assert_eq!(s.core.network.dht.min_peer_count, 20u32); assert_eq!(s.core.network.dht.min_peer_refresh_time_ms, 2_000u32); assert_eq!( diff --git a/veilid-server/src/unix.rs b/veilid-server/src/unix.rs index fe66e943..1d2aa389 100644 --- a/veilid-server/src/unix.rs +++ b/veilid-server/src/unix.rs @@ -7,7 +7,7 @@ use clap::ArgMatches; use futures_util::StreamExt; use signal_hook::consts::signal::*; use signal_hook_async_std::Signals; -use std::io::Read; +//use std::io::Read; use tracing::*; #[instrument(skip(signals))] @@ -34,23 +34,23 @@ pub fn run_daemon(settings: Settings, _matches: ArgMatches) -> EyreResult<()> { let s = settings.read(); if let Some(pid_file) = s.daemon.pid_file.clone() { daemon = daemon.pid_file(pid_file.clone()); //.chown_pid_file(true); - daemon = daemon.exit_action(move || { - // wait for pid file to exist before exiting parent - let pid_path = std::path::Path::new(&pid_file); - loop { - if let Ok(mut f) = std::fs::File::open(pid_path) { - let mut s = String::new(); - if f.read_to_string(&mut s).is_ok() - && !s.is_empty() - && s.parse::().is_ok() - { - println!("pidfile found"); - break; - } - } - std::thread::sleep(std::time::Duration::from_millis(100)); - } - }) + // daemon = daemon.exit_action(move || { + // // wait for pid file to exist before exiting parent + // let pid_path = std::path::Path::new(&pid_file); + // loop { + // if let Ok(mut f) = std::fs::File::open(pid_path) { + // let mut s = String::new(); + // if f.read_to_string(&mut s).is_ok() + // && !s.is_empty() + // && s.parse::().is_ok() + // { + // println!("pidfile found"); + // break; + // } + // } + // std::thread::sleep(std::time::Duration::from_millis(100)); + // } + // }) } if let Some(chroot) = &s.daemon.chroot { daemon = daemon.chroot(chroot); diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml index e7f976b6..7e3f3d12 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -11,8 +11,9 @@ crate-type = [ "cdylib", "staticlib", "rlib" ] [features] default = [] -rt-async-std = [ "async-std", "async_executors/async_std", ] -rt-tokio = [ "tokio", "tokio-util", "async_executors/tokio_tp", "async_executors/tokio_io", 
"async_executors/tokio_timer", ] +rt-async-std = [ "async-std", "async_executors/async_std" ] +rt-tokio = [ "tokio", "tokio-util", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer" ] +rt-wasm-bindgen = [ "async_executors/bindgen", "async_executors/timer"] veilid_tools_android_tests = [ "dep:paranoid-android" ] veilid_tools_ios_tests = [ "dep:oslog", "dep:tracing-oslog" ] @@ -52,7 +53,7 @@ nix = "^0" wasm-bindgen = "^0" js-sys = "^0" wasm-bindgen-futures = "^0" -async_executors = { version = "^0", default-features = false, features = [ "bindgen", "timer" ]} +async_executors = { version = "^0", default-features = false} async-lock = "^2" send_wrapper = { version = "^0.6", features = ["futures"] } diff --git a/veilid-tools/run_tests.sh b/veilid-tools/run_tests.sh index de450f39..816b36cb 100755 --- a/veilid-tools/run_tests.sh +++ b/veilid-tools/run_tests.sh @@ -3,7 +3,7 @@ SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" pushd $SCRIPTDIR 2>/dev/null if [[ "$1" == "wasm" ]]; then - WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless + WASM_BINDGEN_TEST_TIMEOUT=120 wasm-pack test --firefox --headless --features=rt-wasm-bindgen elif [[ "$1" == "ios" ]]; then SYMROOT=/tmp/testout APPNAME=veilidtools-tests diff --git a/veilid-tools/src/log_thru.rs b/veilid-tools/src/log_thru.rs index 587a8393..89ef8233 100644 --- a/veilid-tools/src/log_thru.rs +++ b/veilid-tools/src/log_thru.rs @@ -1,6 +1,6 @@ // LogThru // Pass errors through and log them simultaneously via map_err() -// Also contains common log facilities (net, rpc, rtab, pstore, crypto, etc ) +// Also contains common log facilities (net, rpc, rtab, stor, pstore, crypto, etc ) use super::*; @@ -123,6 +123,42 @@ macro_rules! log_rtab { } } +#[macro_export] +macro_rules! log_stor { + (error $text:expr) => { error!( + target: "stor", + "{}", + $text, + )}; + (error $fmt:literal, $($arg:expr),+) => { + error!(target:"stor", $fmt, $($arg),+); + }; + (warn $text:expr) => { warn!( + target: "stor", + "{}", + $text, + )}; + (warn $fmt:literal, $($arg:expr),+) => { + warn!(target:"stor", $fmt, $($arg),+); + }; + (debug $text:expr) => { debug!( + target: "stor", + "{}", + $text, + )}; + (debug $fmt:literal, $($arg:expr),+) => { + debug!(target:"stor", $fmt, $($arg),+); + }; + ($text:expr) => {trace!( + target: "stor", + "{}", + $text, + )}; + ($fmt:literal, $($arg:expr),+) => { + trace!(target:"stor", $fmt, $($arg),+); + } +} + #[macro_export] macro_rules! log_pstore { (error $text:expr) => { error!( @@ -216,6 +252,18 @@ macro_rules! logthru_rtab { } } #[macro_export] +macro_rules! logthru_stor { + ($($level:ident)?) => { + logthru!($($level)? "stor") + }; + ($($level:ident)? $text:literal) => { + logthru!($($level)? "stor", $text) + }; + ($($level:ident)? $fmt:literal, $($arg:expr),+) => { + logthru!($($level)? "stor", $fmt, $($arg),+) + } +} +#[macro_export] macro_rules! logthru_pstore { ($($level:ident)?) => { logthru!($($level)? "pstore") diff --git a/veilid-tools/src/random.rs b/veilid-tools/src/random.rs index 9c3d9fa7..0ac52be3 100644 --- a/veilid-tools/src/random.rs +++ b/veilid-tools/src/random.rs @@ -16,13 +16,12 @@ impl RngCore for VeilidRng { } fn fill_bytes(&mut self, dest: &mut [u8]) { - if let Err(e) = self.try_fill_bytes(dest) { - panic!("Error: {}", e); - } + random_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { - random_bytes(dest).map_err(rand::Error::new) + random_bytes(dest); + Ok(()) } } @@ -30,7 +29,7 @@ cfg_if! 
{ if #[cfg(target_arch = "wasm32")] { use js_sys::Math; - pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> { + pub fn random_bytes(dest: &mut [u8]) { let len = dest.len(); let u32len = len / 4; let remlen = len % 4; @@ -49,8 +48,6 @@ cfg_if! { dest[u32len * 4 + n] = ((r >> (n * 8)) & 0xFF) as u8; } } - - Ok(()) } pub fn get_random_u32() -> u32 { @@ -65,9 +62,9 @@ cfg_if! { } else { - pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> { + pub fn random_bytes(dest: &mut [u8]) { let mut rng = rand::thread_rng(); - rng.try_fill_bytes(dest).wrap_err("failed to fill bytes") + rng.fill_bytes(dest); } pub fn get_random_u32() -> u32 { diff --git a/veilid-tools/src/tools.rs b/veilid-tools/src/tools.rs index a58a1fd6..4aba1f00 100644 --- a/veilid-tools/src/tools.rs +++ b/veilid-tools/src/tools.rs @@ -32,6 +32,37 @@ macro_rules! bail_io_error_other { }; } +cfg_if::cfg_if! { + if #[cfg(feature="rt-tokio")] { + #[macro_export] + macro_rules! asyncmutex_try_lock { + ($x:expr) => { + $x.try_lock().ok() + }; + } + + #[macro_export] + macro_rules! asyncmutex_lock_arc { + ($x:expr) => { + $x.clone().lock_owned().await + }; + } + } else { + #[macro_export] + macro_rules! asyncmutex_try_lock { + ($x:expr) => { + $x.try_lock() + }; + } + #[macro_export] + macro_rules! asyncmutex_lock_arc { + ($x:expr) => { + $x.lock_arc().await + }; + } + } +} + ////////////////////////////////////////////////////////////////////////////////////////////////////////////// pub fn system_boxed<'a, Out>( @@ -99,6 +130,10 @@ pub fn ms_to_us(ms: u32) -> u64 { (ms as u64) * 1000u64 } +pub fn us_to_ms(us: u64) -> EyreResult { + u32::try_from(us / 1000u64).wrap_err("could not convert microseconds") +} + // Calculate retry attempt with logarhythmic falloff pub fn retry_falloff_log( last_us: u64, diff --git a/veilid-wasm/Cargo.toml b/veilid-wasm/Cargo.toml index b253fbf1..dfe6820c 100644 --- a/veilid-wasm/Cargo.toml +++ b/veilid-wasm/Cargo.toml @@ -10,8 +10,8 @@ crate-type = ["cdylib", "rlib"] [features] -default = [ "veilid-core/rt-tokio", "veilid-core/default" ] -crypto-test = [ "veilid-core/rt-tokio", "veilid-core/crypto-test"] +default = [ "veilid-core/rt-wasm-bindgen", "veilid-core/default" ] +crypto-test = [ "veilid-core/rt-wasm-bindgen", "veilid-core/crypto-test"] [dependencies] veilid-core = { path = "../veilid-core", default-features = false } diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 26da17bf..a43f7ce5 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -59,6 +59,12 @@ fn take_veilid_api() -> Result(val: T) -> JsValue { JsValue::from_str(&serialize_json(val)) } +pub fn to_opt_json(val: Option) -> JsValue { + match val { + Some(v) => JsValue::from_str(&serialize_json(v)), + None => JsValue::UNDEFINED, + } +} pub fn to_jsvalue(val: T) -> JsValue where @@ -94,7 +100,7 @@ fn parse_target(s: String) -> APIResult { } // Is this a node id? 
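The `to_opt_json` helper added to veilid-wasm/src/lib.rs above lets bindings that return an `Option<T>` surface `undefined` to JavaScript when there is no value, instead of the JSON string "null". The sketch below is a minimal standalone illustration of that pattern only, not the crate's actual code: it assumes `serde_json` in place of `veilid_core::serialize_json`, drops the crate's `Debug` bound, and the `example` function is purely hypothetical.

use serde::Serialize;
use wasm_bindgen::JsValue;

// Some(v) becomes a JSON string; None becomes JavaScript `undefined`.
pub fn to_opt_json<T: Serialize>(val: Option<T>) -> JsValue {
    match val {
        Some(v) => JsValue::from_str(&serde_json::to_string(&v).expect("serializable value")),
        None => JsValue::UNDEFINED,
    }
}

// Hypothetical usage: a get-style call that found nothing resolves to
// `undefined` on the JS side rather than the string "null".
fn example() -> (JsValue, JsValue) {
    (to_opt_json(Some(vec![1u8, 2, 3])), to_opt_json::<Vec<u8>>(None))
}

Keeping "no value" distinct from an encoded value is what makes the `wrap_api_future_opt_json` wrapper above useful for the DHT get/set bindings that may legitimately return nothing.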
- if let Ok(nid) = veilid_core::PublicKey::from_str(&s) { + if let Ok(nid) = veilid_core::TypedKey::from_str(&s) { return Ok(veilid_core::Target::NodeId(nid)); } @@ -113,6 +119,14 @@ where future_to_promise(future.map(|res| res.map(|v| to_json(v)).map_err(|e| to_json(e)))) } +pub fn wrap_api_future_opt_json(future: F) -> Promise +where + F: Future>> + 'static, + T: Serialize + Debug + 'static, +{ + future_to_promise(future.map(|res| res.map(|v| to_opt_json(v)).map_err(|e| to_json(e)))) +} + pub fn wrap_api_future_plain(future: F) -> Promise where F: Future> + 'static, @@ -375,9 +389,6 @@ pub fn routing_context_app_call(id: u32, target: String, request: String) -> Pro .decode(request.as_bytes()) .unwrap(); wrap_api_future_plain(async move { - let veilid_api = get_veilid_api()?; - let routing_table = veilid_api.routing_table()?; - let routing_context = { let rc = (*ROUTING_CONTEXTS).borrow(); let Some(routing_context) = rc.get(&id) else { @@ -399,9 +410,6 @@ pub fn routing_context_app_message(id: u32, target: String, message: String) -> .decode(message.as_bytes()) .unwrap(); wrap_api_future_void(async move { - let veilid_api = get_veilid_api()?; - let routing_table = veilid_api.routing_table()?; - let routing_context = { let rc = (*ROUTING_CONTEXTS).borrow(); let Some(routing_context) = rc.get(&id) else { @@ -416,6 +424,167 @@ pub fn routing_context_app_message(id: u32, target: String, message: String) -> }) } +#[wasm_bindgen()] +pub fn routing_context_create_dht_record(id: u32, kind: u32, schema: String) -> Promise { + let crypto_kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let schema: veilid_core::DHTSchema = veilid_core::deserialize_json(&schema).unwrap(); + + wrap_api_future_json(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_create_dht_record", "id", id)); + }; + routing_context.clone() + }; + + let dht_record_descriptor = routing_context + .create_dht_record(crypto_kind, schema) + .await?; + APIResult::Ok(dht_record_descriptor) + }) +} + +#[wasm_bindgen()] +pub fn routing_context_open_dht_record(id: u32, key: String, writer: Option) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + let writer: Option = + writer.map(|s| veilid_core::deserialize_json(&s).unwrap()); + wrap_api_future_json(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_open_dht_record", "id", id)); + }; + routing_context.clone() + }; + let dht_record_descriptor = routing_context.open_dht_record(key, writer).await?; + APIResult::Ok(dht_record_descriptor) + }) +} + +#[wasm_bindgen()] +pub fn routing_context_close_dht_record(id: u32, key: String) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + wrap_api_future_void(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_close_dht_record", "id", id)); + }; + routing_context.clone() + }; + routing_context.close_dht_record(key).await?; + APIRESULT_UNDEFINED + }) +} + +#[wasm_bindgen()] +pub fn routing_context_delete_dht_record(id: u32, key: String) -> Promise { + let key: 
veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + wrap_api_future_void(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_delete_dht_record", "id", id)); + }; + routing_context.clone() + }; + routing_context.delete_dht_record(key).await?; + APIRESULT_UNDEFINED + }) +} + +#[wasm_bindgen()] +pub fn routing_context_get_dht_value( + id: u32, + key: String, + subkey: u32, + force_refresh: bool, +) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + wrap_api_future_opt_json(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_get_dht_value", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context + .get_dht_value(key, subkey, force_refresh) + .await?; + APIResult::Ok(res) + }) +} + +#[wasm_bindgen()] +pub fn routing_context_set_dht_value(id: u32, key: String, subkey: u32, data: String) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(&data.as_bytes()) + .unwrap(); + + wrap_api_future_opt_json(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_set_dht_value", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.set_dht_value(key, subkey, data).await?; + APIResult::Ok(res) + }) +} + +#[wasm_bindgen()] +pub fn routing_context_watch_dht_values( + id: u32, + key: String, + subkeys: String, + expiration: String, + count: u32, +) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + let subkeys: veilid_core::ValueSubkeyRangeSet = + veilid_core::deserialize_json(&subkeys).unwrap(); + let expiration = veilid_core::Timestamp::from_str(&expiration).unwrap(); + + wrap_api_future_plain(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_watch_dht_values", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context + .watch_dht_values(key, subkeys, expiration, count) + .await?; + APIResult::Ok(res.to_string()) + }) +} + +#[wasm_bindgen()] +pub fn routing_context_cancel_dht_watch(id: u32, key: String, subkeys: String) -> Promise { + let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); + let subkeys: veilid_core::ValueSubkeyRangeSet = + veilid_core::deserialize_json(&subkeys).unwrap(); + + wrap_api_future_plain(async move { + let routing_context = { + let rc = (*ROUTING_CONTEXTS).borrow(); + let Some(routing_context) = rc.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("routing_context_cancel_dht_watch", "id", id)); + }; + routing_context.clone() + }; + let res = routing_context.cancel_dht_watch(key, subkeys).await?; + APIResult::Ok(res) + }) +} + #[wasm_bindgen()] pub fn new_private_route() -> Promise { wrap_api_future_json(async move { @@ -548,20 +717,23 @@ pub fn table_db_get_column_count(id: u32) -> u32 { } #[wasm_bindgen()] -pub fn 
table_db_get_keys(id: u32, col: u32) -> Option { - let table_dbs = (*TABLE_DBS).borrow(); - let Some(table_db) = table_dbs.get(&id) else { - return None; - }; - let Ok(keys) = table_db.clone().get_keys(col) else { - return None; - }; - let keys: Vec = keys - .into_iter() - .map(|k| data_encoding::BASE64URL_NOPAD.encode(&k)) - .collect(); - let out = veilid_core::serialize_json(keys); - Some(out) +pub fn table_db_get_keys(id: u32, col: u32) -> Promise { + wrap_api_future_json(async move { + let table_db = { + let table_dbs = (*TABLE_DBS).borrow(); + let Some(table_db) = table_dbs.get(&id) else { + return APIResult::Err(veilid_core::VeilidAPIError::invalid_argument("table_db_store", "id", id)); + }; + table_db.clone() + }; + + let keys = table_db.clone().get_keys(col).await?; + let out: Vec = keys + .into_iter() + .map(|k| data_encoding::BASE64URL_NOPAD.encode(&k)) + .collect(); + APIResult::Ok(out) + }) } fn add_table_db_transaction(tdbt: veilid_core::TableDBTransaction) -> u32 { @@ -605,9 +777,7 @@ pub fn table_db_transaction_commit(id: u32) -> Promise { tdbt.clone() }; - tdbt.commit() - .await - .map_err(veilid_core::VeilidAPIError::generic)?; + tdbt.commit().await?; APIRESULT_UNDEFINED }) } @@ -686,10 +856,7 @@ pub fn table_db_store(id: u32, col: u32, key: String, value: String) -> Promise table_db.clone() }; - table_db - .store(col, &key, &value) - .await - .map_err(veilid_core::VeilidAPIError::generic)?; + table_db.store(col, &key, &value).await?; APIRESULT_UNDEFINED }) } @@ -708,9 +875,7 @@ pub fn table_db_load(id: u32, col: u32, key: String) -> Promise { table_db.clone() }; - let out = table_db - .load(col, &key) - .map_err(veilid_core::VeilidAPIError::generic)?; + let out = table_db.load(col, &key).await?; let out = out.map(|x| data_encoding::BASE64URL_NOPAD.encode(&x)); APIResult::Ok(out) }) @@ -730,14 +895,575 @@ pub fn table_db_delete(id: u32, col: u32, key: String) -> Promise { table_db.clone() }; - let out = table_db - .delete(col, &key) - .await - .map_err(veilid_core::VeilidAPIError::generic)?; + let out = table_db.delete(col, &key).await?; + let out = out.map(|x| data_encoding::BASE64URL_NOPAD.encode(&x)); APIResult::Ok(out) }) } +#[wasm_bindgen()] +pub fn valid_crypto_kinds() -> String { + veilid_core::serialize_json( + veilid_core::VALID_CRYPTO_KINDS + .iter() + .map(|k| (*k).into()) + .collect::>(), + ) +} + +#[wasm_bindgen()] +pub fn best_crypto_kind() -> u32 { + veilid_core::best_crypto_kind().into() +} + +#[wasm_bindgen()] +pub fn verify_signatures(node_ids: String, data: String, signatures: String) -> Promise { + let node_ids: Vec = veilid_core::deserialize_json(&node_ids).unwrap(); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + + let typed_signatures: Vec = + veilid_core::deserialize_json(&signatures).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let out = crypto.verify_signatures(&node_ids, &data, &typed_signatures)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn generate_signatures(data: String, key_pairs: String) -> Promise { + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + + let key_pairs: Vec = + veilid_core::deserialize_json(&key_pairs).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let out = crypto.generate_signatures(&data, &key_pairs, |k, s| { + veilid_core::TypedSignature::new(k.kind, s) + })?; + 
APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn generate_key_pair(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_json(async move { + let out = veilid_core::Crypto::generate_keypair(kind)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_cached_dh(kind: u32, key: String, secret: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); + let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_cached_dh", + "kind", + kind.to_string(), + ) + })?; + let out = csv.cached_dh(&key, &secret)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_compute_dh(kind: u32, key: String, secret: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); + let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_compute_dh", + "kind", + kind.to_string(), + ) + })?; + let out = csv.compute_dh(&key, &secret)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_random_bytes(kind: u32, len: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_random_bytes", + "kind", + kind.to_string(), + ) + })?; + let out = csv.random_bytes(len); + let out = data_encoding::BASE64URL_NOPAD.encode(&out); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_default_salt_length(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_default_salt_length", + "kind", + kind.to_string(), + ) + })?; + let out = csv.default_salt_length(); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_hash_password(kind: u32, password: String, salt: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .unwrap(); + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode(salt.as_bytes()) + .unwrap(); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_hash_password", + "kind", + kind.to_string(), + ) + })?; + let out = csv.hash_password(&password, &salt)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_verify_password(kind: u32, password: String, password_hash: String) -> Promise { + let 
kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .unwrap(); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_verify_password", + "kind", + kind.to_string(), + ) + })?; + let out = csv.verify_password(&password, &password_hash)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_derive_shared_secret(kind: u32, password: String, salt: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .unwrap(); + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode(salt.as_bytes()) + .unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_derive_shared_secret", + "kind", + kind.to_string(), + ) + })?; + let out = csv.derive_shared_secret(&password, &salt)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_random_nonce(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_random_nonce", + "kind", + kind.to_string(), + ) + })?; + let out = csv.random_nonce(); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_random_shared_secret(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_random_shared_secret", + "kind", + kind.to_string(), + ) + })?; + let out = csv.random_shared_secret(); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_generate_key_pair(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_generate_key_pair", + "kind", + kind.to_string(), + ) + })?; + let out = csv.generate_keypair(); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_generate_hash(kind: u32, data: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_generate_hash", + "kind", + kind.to_string(), + ) + })?; + let out = csv.generate_hash(&data); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_validate_key_pair(kind: u32, key: String, secret: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = 
veilid_core::deserialize_json(&key).unwrap(); + let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_validate_key_pair", + "kind", + kind.to_string(), + ) + })?; + let out = csv.validate_keypair(&key, &secret); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_validate_hash(kind: u32, data: String, hash: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + + let hash: veilid_core::HashDigest = veilid_core::deserialize_json(&hash).unwrap(); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_validate_hash", + "kind", + kind.to_string(), + ) + })?; + let out = csv.validate_hash(&data, &hash); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_distance(kind: u32, key1: String, key2: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key1: veilid_core::CryptoKey = veilid_core::deserialize_json(&key1).unwrap(); + let key2: veilid_core::CryptoKey = veilid_core::deserialize_json(&key2).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_distance", + "kind", + kind.to_string(), + ) + })?; + let out = csv.distance(&key1, &key2); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_sign(kind: u32, key: String, secret: String, data: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap(); + let secret: veilid_core::CryptoKey = veilid_core::deserialize_json(&secret).unwrap(); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument("crypto_sign", "kind", kind.to_string()) + })?; + let out = csv.sign(&key, &secret, &data)?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap(); + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .unwrap(); + let signature: veilid_core::Signature = veilid_core::deserialize_json(&signature).unwrap(); + + wrap_api_future_void(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument("crypto_verify", "kind", kind.to_string()) + })?; + csv.verify(&key, &data, &signature)?; + APIRESULT_UNDEFINED + }) +} + +#[wasm_bindgen()] +pub fn crypto_aead_overhead(kind: u32) -> Promise { + let kind: veilid_core::CryptoKind = 
veilid_core::FourCC::from(kind); + + wrap_api_future_plain(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_aead_overhead", + "kind", + kind.to_string(), + ) + })?; + let out = csv.aead_overhead(); + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_decrypt_aead( + kind: u32, + body: String, + nonce: String, + shared_secret: String, + associated_data: Option, +) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + .unwrap(); + + let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).unwrap(); + + let associated_data: Option> = associated_data.map(|ad| { + data_encoding::BASE64URL_NOPAD + .decode(ad.as_bytes()) + .unwrap() + }); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_decrypt_aead", + "kind", + kind.to_string(), + ) + })?; + let out = csv.decrypt_aead( + &body, + &nonce, + &shared_secret, + match &associated_data { + Some(ad) => Some(ad.as_slice()), + None => None, + }, + )?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_encrypt_aead( + kind: u32, + body: String, + nonce: String, + shared_secret: String, + associated_data: Option, +) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + .unwrap(); + + let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).unwrap(); + + let associated_data: Option> = associated_data.map(|ad| { + data_encoding::BASE64URL_NOPAD + .decode(ad.as_bytes()) + .unwrap() + }); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_encrypt_aead", + "kind", + kind.to_string(), + ) + })?; + let out = csv.encrypt_aead( + &body, + &nonce, + &shared_secret, + match &associated_data { + Some(ad) => Some(ad.as_slice()), + None => None, + }, + )?; + APIResult::Ok(out) + }) +} + +#[wasm_bindgen()] +pub fn crypto_crypt_no_auth( + kind: u32, + body: String, + nonce: String, + shared_secret: String, +) -> Promise { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let mut body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + .unwrap(); + + let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).unwrap(); + + wrap_api_future_json(async move { + let veilid_api = get_veilid_api()?; + let crypto = veilid_api.crypto()?; + let csv = crypto.get(kind).ok_or_else(|| { + veilid_core::VeilidAPIError::invalid_argument( + "crypto_crypt_no_auth", + "kind", + kind.to_string(), + ) + })?; + csv.crypt_in_place_no_auth(&mut body, &nonce, &shared_secret); + APIResult::Ok(body) + }) +} + +#[wasm_bindgen()] +pub fn now() -> u64 { + 
+    veilid_core::get_aligned_timestamp().as_u64()
+}
+
 #[wasm_bindgen()]
 pub fn debug(command: String) -> Promise {
     wrap_api_future_plain(async move {
diff --git a/veilid-wasm/tests/web.rs b/veilid-wasm/tests/web.rs
index 2ae8ca70..5cfbfd26 100644
--- a/veilid-wasm/tests/web.rs
+++ b/veilid-wasm/tests/web.rs
@@ -1,4 +1,8 @@
 //! Test suite for the Web and headless browsers.
+
+//XXXXXXXXXXXXXXX
+//XXX DOES NOT WORK.
+
 #![cfg(target_arch = "wasm32")]
 
 extern crate alloc;
@@ -52,16 +56,16 @@ fn init_callbacks() {
        case "network.rpc.timeout": return 10000000;
        case "network.rpc.max_route_hop_count": return 4;
        case "network.rpc.default_route_hop_count": return 1;
-       case "network.dht.resolve_node_timeout": return null;
-       case "network.dht.resolve_node_count": return 20;
-       case "network.dht.resolve_node_fanout": return 3;
        case "network.dht.max_find_node_count": return 20;
-       case "network.dht.get_value_timeout": return null;
-       case "network.dht.get_value_count": return 20;
-       case "network.dht.get_value_fanout": return 3;
-       case "network.dht.set_value_timeout": return null;
-       case "network.dht.set_value_count": return 20;
-       case "network.dht.set_value_fanout": return 5;
+       case "network.dht.resolve_node_timeout": return 10000;
+       case "network.dht.resolve_node_count": return 1;
+       case "network.dht.resolve_node_fanout": return 4;
+       case "network.dht.get_value_timeout": return 10000;
+       case "network.dht.get_value_count": return 3;
+       case "network.dht.get_value_fanout": return 4;
+       case "network.dht.set_value_timeout": return 10000;
+       case "network.dht.set_value_count": return 5;
+       case "network.dht.set_value_fanout": return 4;
        case "network.dht.min_peer_count": return 20;
        case "network.dht.min_peer_refresh_time": return 2000000;
        case "network.dht.validate_dial_info_receipt_time": return 5000000;