Simplify edge case handling in hash_layer

- When retrieving last chunks, set next_start_child_chunk_index
so the caller knows the correct start index without needing to
modify the offset (see the sketch below)
- Other smaller cleanup
j-berman 2024-05-24 18:12:21 -07:00
parent 988c4eae40
commit ab7c74136b
7 changed files with 176 additions and 170 deletions
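
As a minimal standalone sketch of the new convention (illustrative free functions, not code from this commit): the next start index in a parent layer is derived directly from whether the last parent hash will be re-hashed, and the child offset then falls out of a plain modulo with no +1/-1 adjustment.

    #include <cstddef>

    // If the last parent's chunk isn't full, the next extension starts AT
    // that parent's index (it gets re-hashed); otherwise it starts one
    // past the end of the layer.
    std::size_t next_start_child_chunk_index(std::size_t parent_layer_size,
        bool update_last_parent)
    {
        return update_last_parent ? (parent_layer_size - 1) : parent_layer_size;
    }

    // The offset into the last chunk then needs no adjustment:
    std::size_t child_offset(std::size_t child_start_idx, std::size_t chunk_width)
    {
        return child_start_idx % chunk_width;
    }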

View File

@ -1301,7 +1301,8 @@ void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image)
}
template<typename C>
void BlockchainLMDB::grow_layer(const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
void BlockchainLMDB::grow_layer(const C &curve,
const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
const std::size_t ext_idx,
const std::size_t layer_idx,
const fcmp::curve_trees::LastChunkData<C> *last_chunk_ptr)
@ -1317,26 +1318,17 @@ void BlockchainLMDB::grow_layer(const std::vector<fcmp::curve_trees::LayerExtens
CHECK_AND_ASSERT_THROW_MES(!ext.hashes.empty(), "empty layer extension");
// TODO: make sure last_chunk_ptr->parent_layer_size is correct
// Check if we started at 0 if fresh layer, or if started 1 after the last element in the layer
const bool started_after_tip = (ext.start_idx == 0 && last_chunk_ptr == nullptr)
|| (last_chunk_ptr != nullptr && ext.start_idx == last_chunk_ptr->parent_layer_size);
// Check if we updated the last element in the layer
const bool started_at_tip = (last_chunk_ptr != nullptr
&& (ext.start_idx + 1) == last_chunk_ptr->parent_layer_size);
CHECK_AND_ASSERT_THROW_MES(started_after_tip || started_at_tip, "unexpected layer start");
// TODO: make sure last_chunk_ptr->next_start_child_chunk_index lines up
MDB_val_copy<uint64_t> k(layer_idx);
if (started_at_tip)
const bool update_last_parent = last_chunk_ptr != nullptr && last_chunk_ptr->update_last_parent;
if (update_last_parent)
{
// We updated the last hash, so update it
layer_val lv;
lv.child_chunk_idx = ext.start_idx;
lv.child_chunk_hash = std::array<uint8_t, 32UL>(); // ext.hashes.front(); // TODO
lv.child_chunk_hash = curve.to_bytes(ext.hashes.front());
MDB_val_set(v, lv);
// We expect to overwrite the existing hash
@ -1347,11 +1339,11 @@ void BlockchainLMDB::grow_layer(const std::vector<fcmp::curve_trees::LayerExtens
}
// Now add all the new hashes found in the extension
for (std::size_t i = started_at_tip ? 1 : 0; i < ext.hashes.size(); ++i)
for (std::size_t i = update_last_parent ? 1 : 0; i < ext.hashes.size(); ++i)
{
layer_val lv;
lv.child_chunk_idx = i + ext.start_idx;
lv.child_chunk_hash = std::array<uint8_t, 32UL>(); // ext.hashes[i]; // TODO
lv.child_chunk_hash = curve.to_bytes(ext.hashes[i]);
MDB_val_set(v, lv);
// TODO: according to the docs, MDB_APPENDDUP isn't supposed to perform any key comparisons to maximize efficiency.
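
As context for the TODO above, a self-contained sketch of MDB_APPENDDUP usage (illustrative key/value layout, not this codebase's schema): per the LMDB docs the flag appends the value at the end of the key's duplicate list without comparisons, returning MDB_KEYEXIST if the value would not sort last.

    #include <lmdb.h>
    #include <stdint.h>

    // Append duplicate values under one key, in already-sorted order.
    int append_dups_example(MDB_env *env)
    {
        MDB_txn *txn; MDB_dbi dbi; MDB_cursor *cur;
        int rc = mdb_txn_begin(env, NULL, 0, &txn);
        if (rc) return rc;
        rc = mdb_dbi_open(txn, NULL, MDB_DUPSORT, &dbi);
        if (rc) { mdb_txn_abort(txn); return rc; }
        rc = mdb_cursor_open(txn, dbi, &cur);
        if (rc) { mdb_txn_abort(txn); return rc; }

        uint64_t key = 0;
        uint64_t vals[2] = {1, 2};  // must already be in dupsort order
        for (int i = 0; i < 2 && !rc; ++i)
        {
            MDB_val k = {sizeof(key), &key};
            MDB_val v = {sizeof(vals[i]), &vals[i]};
            rc = mdb_cursor_put(cur, &k, &v, MDB_APPENDDUP);
        }
        mdb_cursor_close(cur);
        if (rc) { mdb_txn_abort(txn); return rc; }
        return mdb_txn_commit(txn);
    }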
@ -1410,7 +1402,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
? nullptr
: &last_chunks.c2_last_chunks[c2_idx];
this->grow_layer<fcmp::curve_trees::Selene>(c2_extensions, c2_idx, layer_idx, c2_last_chunk_ptr);
this->grow_layer<fcmp::curve_trees::Selene>(curve_trees.m_c2,
c2_extensions,
c2_idx,
layer_idx,
c2_last_chunk_ptr);
++c2_idx;
}
@ -1420,7 +1416,11 @@ void BlockchainLMDB::grow_tree(const fcmp::curve_trees::CurveTreesV1 &curve_tree
? nullptr
: &last_chunks.c1_last_chunks[c1_idx];
this->grow_layer<fcmp::curve_trees::Helios>(c1_extensions, c1_idx, layer_idx, c1_last_chunk_ptr);
this->grow_layer<fcmp::curve_trees::Helios>(curve_trees.m_c1,
c1_extensions,
c1_idx,
layer_idx,
c1_last_chunk_ptr);
++c1_idx;
}

View File

@ -411,7 +411,8 @@ private:
virtual void remove_spent_key(const crypto::key_image& k_image);
template<typename C>
void grow_layer(const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
void grow_layer(const C &curve,
const std::vector<fcmp::curve_trees::LayerExtension<C>> &layer_extensions,
const std::size_t c_idx,
const std::size_t layer_idx,
const fcmp::curve_trees::LastChunkData<C> *last_chunk_data);

View File

@ -62,7 +62,6 @@ template<typename C>
static typename C::Point get_first_parent(const C &curve,
const typename C::Chunk &new_children,
const std::size_t chunk_width,
const bool child_layer_last_hash_updated,
const LastChunkData<C> *last_chunk_ptr,
const std::size_t offset)
{
@ -70,28 +69,31 @@ static typename C::Point get_first_parent(const C &curve,
if (last_chunk_ptr == nullptr)
return get_new_parent<C>(curve, new_children);
typename C::Scalar first_child_after_offset = curve.zero_scalar();
if (child_layer_last_hash_updated)
typename C::Scalar prior_child_after_offset;
if (last_chunk_ptr->update_last_parent)
{
// If the last chunk has updated children in it, then we need to get the delta to the old children
first_child_after_offset = last_chunk_ptr->last_child;
// If the last parent has an updated child in it, then we need to get the delta to the old child
prior_child_after_offset = last_chunk_ptr->last_child;
}
else if (offset > 0)
{
// If we're updating the parent hash and no children were updated, then we're just adding new children
// to the existing last chunk and can leave first_child_after_offset as zero
// If we're not updating the last parent hash and offset is non-zero, then we must be adding new children
// to the existing last chunk. New children means no prior child after offset exists, use zero scalar
prior_child_after_offset = curve.zero_scalar();
}
else
{
// If the last chunk is already full and isn't updated in any way, then we just get a new parent
// If we're not updating the last parent and the last chunk is already full, we can get a new parent
return get_new_parent<C>(curve, new_children);
}
MDEBUG("Updating existing hash: " << curve.to_string(last_chunk_ptr->last_parent) << " , offset: " << offset
<< ", prior_child_after_offset: " << curve.to_string(prior_child_after_offset));
return curve.hash_grow(
last_chunk_ptr->last_parent,
offset,
first_child_after_offset,
prior_child_after_offset,
new_children
);
};
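
A toy model of the hash_grow semantics the three branches above rely on (plain integers standing in for curve points; the real hash_grow lives behind the Rust FFI): a Pedersen-style chunk hash can be grown in place, and only the prior value of the child sitting at the offset slot is needed.

    #include <cstddef>
    #include <vector>

    // Toy Pedersen-style chunk hash: H = sum(child[i] * G[i]).
    static const long G[4] = {3, 5, 7, 11};  // fixed per-slot "generators"

    long hash_new(const std::vector<long> &children)
    {
        long h = 0;
        for (std::size_t i = 0; i < children.size(); ++i)
            h += children[i] * G[i];
        return h;
    }

    // Grow an existing hash starting at `offset`: remove the prior child
    // occupying the first grown slot (zero if the slot was empty), then
    // add the new children into their slots.
    long hash_grow(long existing, std::size_t offset,
        long prior_child_after_offset, const std::vector<long> &new_children)
    {
        long h = existing - prior_child_after_offset * G[offset];
        for (std::size_t i = 0; i < new_children.size(); ++i)
            h += new_children[i] * G[offset + i];
        return h;
    }

    // Updating last child 2 -> 9 and appending 4 to the chunk {1,2}:
    // hash_grow(hash_new({1,2}), 1, 2, {9,4}) == hash_new({1,9,4})  (== 76)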
@ -99,77 +101,55 @@ static typename C::Point get_first_parent(const C &curve,
// After hashing a layer of children points, convert those children x-coordinates into their respective cycle
// scalars, and prepare them to be hashed for the next layer
template<typename C_CHILD, typename C_PARENT>
static std::vector<typename C_PARENT::Scalar> next_child_scalars_from_children(const C_CHILD &c_child,
static std::size_t next_child_scalars_from_children(const C_CHILD &c_child,
const bool updating_root_layer,
const LastChunkData<C_CHILD> *last_child_chunk_ptr,
const LastChunkData<C_PARENT> *last_parent_chunk_ptr,
const LayerExtension<C_CHILD> &children)
const LayerExtension<C_CHILD> &children,
std::vector<typename C_PARENT::Scalar> &child_scalars_out)
{
std::vector<typename C_PARENT::Scalar> child_scalars;
child_scalars_out.clear();
child_scalars_out.reserve(1 + children.hashes.size());
// The existing root would have a size of 1
const bool updating_root_layer = last_child_chunk_ptr != nullptr
&& last_child_chunk_ptr->parent_layer_size == 1;
std::uint64_t next_child_start_index = children.start_idx;
// If we're creating a *new* root at the existing root layer, we may need to include the *existing* root when
// hashing the *existing* root layer
if (updating_root_layer)
{
// We should be updating the existing root, there shouldn't be a last parent chunk
CHECK_AND_ASSERT_THROW_MES(last_parent_chunk_ptr == nullptr, "last parent chunk exists at root");
CHECK_AND_ASSERT_THROW_MES(last_child_chunk_ptr != nullptr, "last child chunk does not exist at root");
// If the children don't already include the existing root, then we need to include it to be hashed
// - the children would include the existing root already if the existing root was updated in the child
// layer (the start_idx would be 0)
if (children.start_idx > 0)
child_scalars.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent));
if (next_child_start_index > 0)
{
MDEBUG("Updating root layer and including the existing root in next children");
child_scalars_out.emplace_back(c_child.point_to_cycle_scalar(last_child_chunk_ptr->last_parent));
--next_child_start_index;
}
}
// Convert child points to scalars
tower_cycle::extend_scalars_from_cycle_points<C_CHILD, C_PARENT>(c_child, children.hashes, child_scalars);
tower_cycle::extend_scalars_from_cycle_points<C_CHILD, C_PARENT>(c_child, children.hashes, child_scalars_out);
return child_scalars;
return next_child_start_index;
};
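
A toy walkthrough of the root-inclusion branch above (longs standing in for converted cycle scalars; illustrative only): the existing root occupies child index 0 of the next layer, so if the extension starts past it, the root is prepended and the start index shifted back so the new root hashes over it too.

    #include <cstddef>
    #include <vector>

    std::size_t collect_next_child_scalars(long existing_root,
        std::size_t children_start_idx,         // e.g. 1: root not re-derived
        const std::vector<long> &new_children,  // extension hashes as scalars
        std::vector<long> &child_scalars_out)
    {
        child_scalars_out.clear();
        std::size_t next_start = children_start_idx;
        if (next_start > 0)
        {
            child_scalars_out.push_back(existing_root);
            --next_start;  // the root fills the slot just before the extension
        }
        child_scalars_out.insert(child_scalars_out.end(),
            new_children.begin(), new_children.end());
        return next_start;  // becomes child_start_idx for hash_layer
    }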
//----------------------------------------------------------------------------------------------------------------------
// Hash chunks of a layer of new children, outputting the next layer's parents
template<typename C>
static void hash_layer(const C &curve,
const LastChunkData<C> *last_parent_chunk_ptr,
const LastChunkData<C> *last_chunk_ptr,
const std::vector<typename C::Scalar> &child_scalars,
const std::size_t children_start_idx,
const std::size_t child_start_idx,
const std::size_t chunk_width,
LayerExtension<C> &parents_out)
{
parents_out.start_idx = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->parent_layer_size;
parents_out.start_idx = (last_chunk_ptr == nullptr) ? 0 : last_chunk_ptr->next_start_child_chunk_index;
parents_out.hashes.clear();
CHECK_AND_ASSERT_THROW_MES(!child_scalars.empty(), "empty child scalars");
// If the child layer had its existing last hash updated (if the new children include the last element in
// the child layer), then we'll need to use the last hash's prior version in order to update the existing
// last parent hash in this layer
// - Note: the leaf layer is strictly append-only, so this cannot be true for the leaf layer
const bool child_layer_last_hash_updated = (last_parent_chunk_ptr == nullptr)
? false
: last_parent_chunk_ptr->child_layer_size == (children_start_idx + 1);
std::size_t offset = (last_parent_chunk_ptr == nullptr) ? 0 : last_parent_chunk_ptr->child_offset;
// The offset needs to be brought back because we're going to start with the prior hash, and so the chunk
// will start from there and may need 1 more to fill
CHECK_AND_ASSERT_THROW_MES(chunk_width > offset, "unexpected offset");
if (child_layer_last_hash_updated)
{
MDEBUG("child_layer_last_hash_updated, updating offset: " << offset);
offset = offset > 0 ? (offset - 1) : (chunk_width - 1);
}
// If we're adding new children to an existing last chunk, then we need to pull the parent start idx back 1
// since we'll be updating the existing parent hash of the last chunk
if (offset > 0 || child_layer_last_hash_updated)
{
CHECK_AND_ASSERT_THROW_MES(parents_out.start_idx > 0, "parent start idx should be > 0");
--parents_out.start_idx;
}
const std::size_t offset = child_start_idx % chunk_width;
// See how many children we need to fill up the existing last chunk
std::size_t chunk_size = std::min(child_scalars.size(), chunk_width - offset);
@ -184,21 +164,19 @@ static void hash_layer(const C &curve,
const auto chunk_start = child_scalars.data() + chunk_start_idx;
const typename C::Chunk chunk{chunk_start, chunk_size};
for (uint c = 0; c < chunk_size; ++c) {
MDEBUG("Hashing " << curve.to_string(chunk_start[c]));
}
for (std::size_t i = 0; i < chunk_size; ++i)
MDEBUG("Hashing child " << curve.to_string(chunk_start[i]));
// Hash the chunk of children
typename C::Point chunk_hash = chunk_start_idx == 0
? get_first_parent<C>(curve,
chunk,
chunk_width,
child_layer_last_hash_updated,
last_parent_chunk_ptr,
last_chunk_ptr,
offset)
: get_new_parent<C>(curve, chunk);
MDEBUG("Hash chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash)
MDEBUG("Child chunk_start_idx " << chunk_start_idx << " result: " << curve.to_string(chunk_hash)
<< " , chunk_size: " << chunk_size);
// We've got our hash
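
The chunking arithmetic above, worked through as a standalone sketch (illustrative values): only the first chunk can be partially filled, since the offset is zero for every subsequent chunk.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> chunk_sizes(std::size_t n_new_children,
        std::size_t child_start_idx, std::size_t chunk_width)
    {
        std::vector<std::size_t> sizes;
        std::size_t offset = child_start_idx % chunk_width;
        std::size_t remaining = n_new_children;
        while (remaining > 0)
        {
            const std::size_t sz = std::min(remaining, chunk_width - offset);
            sizes.push_back(sz);
            remaining -= sz;
            offset = 0;  // later chunks start fresh at full width
        }
        return sizes;
    }

    // With chunk_width = 4: chunk_sizes(7, 6, 4) == {2, 4, 1}
    // (the last chunk already holds 6 % 4 = 2 children, so the first new
    //  chunk takes 2, the next takes a full 4, and 1 spills into a new chunk)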
@ -248,10 +226,7 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
const auto &c1_last_chunks = existing_last_chunks.c1_last_chunks;
const auto &c2_last_chunks = existing_last_chunks.c2_last_chunks;
// Set the leaf start idx
tree_extension.leaves.start_idx = c2_last_chunks.empty()
? 0
: c2_last_chunks[0].child_layer_size;
tree_extension.leaves.start_idx = existing_last_chunks.next_start_leaf_index;
// Copy the leaves
// TODO: don't copy here
@ -285,6 +260,8 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
if (c2_layer_extensions_out.back().hashes.size() == 1 && c2_layer_extensions_out.back().start_idx == 0)
return tree_extension;
const std::size_t next_root_layer_idx = c1_last_chunks.size() + c2_last_chunks.size();
// Alternate between hashing c2 children, c1 children, c2, c1, ...
bool parent_is_c1 = true;
@ -293,11 +270,14 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
// TODO: calculate max number of layers it should take to add all leaves (existing leaves + new leaves)
while (true)
{
const LastChunkData<C1> *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size())
const std::size_t updating_layer_idx = 1 + c1_last_idx + c2_last_idx;
const bool updating_root_layer = updating_layer_idx == next_root_layer_idx;
const auto *c1_last_chunk_ptr = (c1_last_idx >= c1_last_chunks.size())
? nullptr
: &c1_last_chunks[c1_last_idx];
const LastChunkData<C2> *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size())
const auto *c2_last_chunk_ptr = (c2_last_idx >= c2_last_chunks.size())
? nullptr
: &c2_last_chunks[c2_last_idx];
@ -308,16 +288,18 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
const auto &c2_child_extension = c2_layer_extensions_out[c2_last_idx];
const auto c1_child_scalars = next_child_scalars_from_children<C2, C1>(m_c2,
std::vector<typename C1::Scalar> c1_child_scalars;
const std::size_t next_child_start_idx = next_child_scalars_from_children<C2, C1>(m_c2,
updating_root_layer,
c2_last_chunk_ptr,
c1_last_chunk_ptr,
c2_child_extension);
c2_child_extension,
c1_child_scalars);
LayerExtension<C1> c1_layer_extension;
hash_layer<C1>(m_c1,
c1_last_chunk_ptr,
c1_child_scalars,
c2_child_extension.start_idx,
next_child_start_idx,
m_c1_width,
c1_layer_extension);
@ -335,16 +317,18 @@ typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extensio
const auto &c1_child_extension = c1_layer_extensions_out[c1_last_idx];
const auto c2_child_scalars = next_child_scalars_from_children<C1, C2>(m_c1,
std::vector<typename C2::Scalar> c2_child_scalars;
const std::size_t next_child_start_idx = next_child_scalars_from_children<C1, C2>(m_c1,
updating_root_layer,
c1_last_chunk_ptr,
c2_last_chunk_ptr,
c1_child_extension);
c1_child_extension,
c2_child_scalars);
LayerExtension<C2> c2_layer_extension;
hash_layer<C2>(m_c2,
c2_last_chunk_ptr,
c2_child_scalars,
c1_child_extension.start_idx,
next_child_start_idx,
m_c2_width,
c2_layer_extension);
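
A sketch of the alternating tower shape this loop walks (illustrative helper, assuming leaves hash under the c2 curve first and layers then alternate c1/c2 as the comments above describe): each layer holds ceil(previous / width) hashes until a single root remains.

    #include <cstddef>
    #include <vector>

    static std::size_t div_ceil(std::size_t a, std::size_t b)
    { return (a + b - 1) / b; }

    std::vector<std::size_t> layer_sizes(std::size_t n_leaf_scalars,
        std::size_t c2_width, std::size_t c1_width)
    {
        std::vector<std::size_t> sizes;
        std::size_t n = div_ceil(n_leaf_scalars, c2_width);  // layer after leaves
        sizes.push_back(n);
        bool parent_is_c1 = true;  // each layer flips to the other curve
        while (n > 1)
        {
            n = div_ceil(n, parent_is_c1 ? c1_width : c2_width);
            sizes.push_back(n);
            parent_is_c1 = !parent_is_c1;
        }
        return sizes;  // final entry is 1: the root
    }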

View File

@ -60,16 +60,23 @@ struct LayerExtension final
template<typename C>
struct LastChunkData final
{
// The total number of children % child layer chunk width
const std::size_t child_offset;
// The last child in the chunk (and therefore the last child in the child layer)
/* TODO: const */ typename C::Scalar last_child;
// The hash of the last chunk of child scalars
/* TODO: const */ typename C::Point last_parent;
// Total number of children in the child layer
const std::size_t child_layer_size;
// Total number of hashes in the parent layer
const std::size_t parent_layer_size;
// The next starting index in the layer (referencing the "next" child chunk)
const std::size_t next_start_child_chunk_index;
// The existing hash of the last chunk of child scalars
// - Used to grow the existing last chunk in the layer
// - Only must be set if the existing last chunk isn't full
const typename C::Point last_parent;
// Whether or not the existing last parent in the layer needs to be updated
// - True if the last leaf layer chunk is not yet full
// - If true, next_start_child_chunk_index == existing layer size
// - If false, next_start_child_chunk_index == (existing layer size - 1), since updating existing last parent
const bool update_last_parent;
// The last child in the last chunk (and therefore the last child in the child layer)
// - Used to get the delta from the existing last child to the new last child
// - Only needs to be set if update_last_parent is true
// - Since the leaf layer is append-only, the layer above the leaf layer does not actually need this value, since
// the last leaf will never change (and therefore we'll never need the delta to a prior leaf)
const typename C::Scalar last_child;
};
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
@ -111,7 +118,7 @@ public:
struct Leaves final
{
// Starting index in the leaf layer
std::size_t start_idx;
std::size_t start_idx{0};
// Contiguous leaves in a tree that start at the start_idx
std::vector<LeafTuple> tuples;
};
@ -131,6 +138,7 @@ public:
// - c2_last_chunks[0] is first layer after leaves, then c1_last_chunks[0], then c2_last_chunks[1], etc
struct LastChunks final
{
std::size_t next_start_leaf_index{0};
std::vector<LastChunkData<C1>> c1_last_chunks;
std::vector<LastChunkData<C2>> c2_last_chunks;
};
@ -150,12 +158,14 @@ private:
// Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...]
std::vector<typename C2::Scalar> flatten_leaves(const std::vector<LeafTuple> &leaves) const;
//member variables
private:
// The curves
//public member variables
public:
// The curve interfaces
const C1 &m_c1;
const C2 &m_c2;
//member variables
private:
// The chunk widths of the layers in the tree tied to each curve
const std::size_t m_c1_width;
const std::size_t m_c2_width;

View File

@ -150,6 +150,8 @@ std::string Selene::to_string(const typename Selene::Point &point) const
//----------------------------------------------------------------------------------------------------------------------
SeleneScalar ed_25519_point_to_scalar(const crypto::ec_point &point)
{
static_assert(sizeof(SeleneScalar) == sizeof(point), "size of selene scalar != size of ed25519 point");
// If this function receives the ec_point, this is fine
// If this function can receive a decompressed point, it'd be notably faster
// to extract the Wei25519 x coordinate from the C side of things and then

View File

@ -41,8 +41,6 @@ namespace tower_cycle
//----------------------------------------------------------------------------------------------------------------------
// Rust types
//----------------------------------------------------------------------------------------------------------------------
using RustEd25519Point = std::array<uint8_t, 32UL>;
// Need to forward declare Scalar types for point_to_cycle_scalar below
using SeleneScalar = fcmp_rust::SeleneScalar;
using HeliosScalar = fcmp_rust::HeliosScalar;
@ -70,9 +68,7 @@ class Curve
{
//constructor
public:
// This doesn't have a reference as doing so delays initialization and borks
// it
Curve(const typename C::Point hash_init_point):
Curve(const typename C::Point &hash_init_point):
m_hash_init_point{hash_init_point}
{};
@ -97,6 +93,7 @@ public:
//member variables
public:
// kayabaNerve: this doesn't have a reference as doing so delays initialization and borks it
const typename C::Point m_hash_init_point;
};
//----------------------------------------------------------------------------------------------------------------------

View File

@ -35,31 +35,31 @@
// CurveTreesUnitTest helpers
//----------------------------------------------------------------------------------------------------------------------
template<typename C>
static fcmp::curve_trees::LastChunkData<C> get_last_child_layer_chunk(const C &curve,
const std::size_t child_layer_size,
static fcmp::curve_trees::LastChunkData<C> get_last_child_layer_chunk(const bool update_last_parent,
const std::size_t parent_layer_size,
const std::size_t chunk_width,
const typename C::Scalar &last_child,
const typename C::Point &last_parent)
const typename C::Point &last_parent,
const typename C::Scalar &last_child)
{
CHECK_AND_ASSERT_THROW_MES(child_layer_size > 0, "empty child layer");
CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer");
if (update_last_parent)
CHECK_AND_ASSERT_THROW_MES(parent_layer_size > 0, "empty parent layer");
const std::size_t child_offset = child_layer_size % chunk_width;
// If updating last parent, the next start will be the last parent's index, else we start at the tip
const std::size_t next_start_child_chunk_index = update_last_parent
? (parent_layer_size - 1)
: parent_layer_size;
return fcmp::curve_trees::LastChunkData<C>{
.child_offset = child_offset,
.last_child = last_child,
.last_parent = last_parent,
.child_layer_size = child_layer_size,
.parent_layer_size = parent_layer_size
.next_start_child_chunk_index = next_start_child_chunk_index,
.last_parent = last_parent,
.update_last_parent = update_last_parent,
.last_child = last_child
};
}
//----------------------------------------------------------------------------------------------------------------------
template<typename C_PARENT>
static bool validate_layer(const C_PARENT &c_parent,
const CurveTreesUnitTest::Layer<C_PARENT> &parents,
const std::vector<typename C_PARENT::Scalar> &child_scalars,
template<typename C>
static bool validate_layer(const C &curve,
const CurveTreesUnitTest::Layer<C> &parents,
const std::vector<typename C::Scalar> &child_scalars,
const std::size_t max_chunk_size)
{
// Hash chunk of children scalars, then see if the hash matches up to respective parent
@ -70,15 +70,20 @@ static bool validate_layer(const C_PARENT &c_parent,
const std::size_t chunk_size = std::min(child_scalars.size() - chunk_start_idx, max_chunk_size);
CHECK_AND_ASSERT_MES(child_scalars.size() >= (chunk_start_idx + chunk_size), false, "chunk size too large");
const typename C_PARENT::Point &parent = parents[i];
const typename C::Point &parent = parents[i];
const auto chunk_start = child_scalars.data() + chunk_start_idx;
const typename C_PARENT::Chunk chunk{chunk_start, chunk_size};
const typename C::Chunk chunk{chunk_start, chunk_size};
const typename C_PARENT::Point chunk_hash = fcmp::curve_trees::get_new_parent(c_parent, chunk);
for (std::size_t i = 0; i < chunk_size; ++i)
MDEBUG("Hashing " << curve.to_string(chunk_start[i]));
const auto actual_bytes = c_parent.to_bytes(parent);
const auto expected_bytes = c_parent.to_bytes(chunk_hash);
const typename C::Point chunk_hash = fcmp::curve_trees::get_new_parent(curve, chunk);
MDEBUG("chunk_start_idx: " << chunk_start_idx << " , chunk_size: " << chunk_size << " , chunk_hash: " << curve.to_string(chunk_hash));
const auto actual_bytes = curve.to_bytes(parent);
const auto expected_bytes = curve.to_bytes(chunk_hash);
CHECK_AND_ASSERT_MES(actual_bytes == expected_bytes, false, "unexpected hash");
chunk_start_idx += chunk_size;
@ -104,6 +109,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni
CurveTreesV1::LastChunks last_chunks;
// Since the leaf layer is append-only, we know the next start will be right after all existing leaf tuples
const std::size_t num_leaf_tuples = leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE;
last_chunks.next_start_leaf_index = num_leaf_tuples;
if (c2_layers.empty())
return last_chunks;
@ -114,12 +123,13 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni
c2_last_chunks_out.reserve(c2_layers.size());
// First push the last leaf chunk data into c2 chunks
auto last_leaf_chunk = get_last_child_layer_chunk<Selene>(m_curve_trees.m_c2,
/*child_layer_size */ leaves.size() * CurveTreesV1::LEAF_TUPLE_SIZE,
/*parent_layer_size*/ c2_layers[0].size(),
/*chunk_width */ m_curve_trees.m_leaf_layer_chunk_width,
/*last_child */ leaves.back().C_x,
/*last_parent */ c2_layers[0].back());
const bool update_last_parent = (num_leaf_tuples % m_curve_trees.m_leaf_layer_chunk_width) > 0;
auto last_leaf_chunk = get_last_child_layer_chunk<Selene>(
/*update_last_parent*/ update_last_parent,
/*parent_layer_size */ c2_layers[0].size(),
/*last_parent */ c2_layers[0].back(),
// Since the leaf layer is append-only, we'll never need access to the last child
/*last_child */ m_curve_trees.m_c2.zero_scalar());
c2_last_chunks_out.push_back(std::move(last_leaf_chunk));
@ -149,12 +159,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni
const auto &last_child = m_curve_trees.m_c2.point_to_cycle_scalar(child_layer.back());
auto last_parent_chunk = get_last_child_layer_chunk<Helios>(m_curve_trees.m_c1,
child_layer.size(),
auto last_parent_chunk = get_last_child_layer_chunk<Helios>(update_last_parent,
parent_layer.size(),
m_curve_trees.m_c1_width,
last_child,
parent_layer.back());
parent_layer.back(),
last_child);
c1_last_chunks_out.push_back(std::move(last_parent_chunk));
@ -170,12 +178,10 @@ CurveTreesV1::LastChunks CurveTreesUnitTest::get_last_chunks(const CurveTreesUni
const auto &last_child = m_curve_trees.m_c1.point_to_cycle_scalar(child_layer.back());
auto last_parent_chunk = get_last_child_layer_chunk<Selene>(m_curve_trees.m_c2,
child_layer.size(),
auto last_parent_chunk = get_last_child_layer_chunk<Selene>(update_last_parent,
parent_layer.size(),
m_curve_trees.m_c2_width,
last_child,
parent_layer.back());
parent_layer.back(),
last_child);
c2_last_chunks_out.push_back(std::move(last_parent_chunk));
@ -303,6 +309,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree)
// TODO: implement templated function for below if statement
if (parent_is_c2)
{
MDEBUG("Validating parent c2 layer " << c2_idx << " , child c1 layer " << c1_idx);
CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx");
CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx");
@ -328,6 +336,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree)
}
else
{
MDEBUG("Validating parent c1 layer " << c1_idx << " , child c2 layer " << c2_idx);
CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers.size(), "unexpected c1_idx");
CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers.size(), "unexpected c2_idx");
@ -356,6 +366,8 @@ bool CurveTreesUnitTest::validate_tree(const CurveTreesUnitTest::Tree &tree)
parent_is_c2 = !parent_is_c2;
}
MDEBUG("Validating leaves");
// Now validate leaves
return validate_layer<Selene>(m_curve_trees.m_c2,
c2_layers[0],
@ -384,11 +396,10 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch
const fcmp::curve_trees::LastChunkData<Selene> &last_chunk = c2_last_chunks[c2_idx];
MDEBUG("child_offset: " << last_chunk.child_offset
<< " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child)
<< " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent)
<< " , child_layer_size: " << last_chunk.child_layer_size
<< " , parent_layer_size: " << last_chunk.parent_layer_size);
MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index
<< " , last_parent: " << m_curve_trees.m_c2.to_string(last_chunk.last_parent)
<< " , update_last_parent: " << last_chunk.update_last_parent
<< " , last_child: " << m_curve_trees.m_c2.to_string(last_chunk.last_child));
++c2_idx;
}
@ -398,11 +409,10 @@ void CurveTreesUnitTest::log_last_chunks(const CurveTreesV1::LastChunks &last_ch
const fcmp::curve_trees::LastChunkData<Helios> &last_chunk = c1_last_chunks[c1_idx];
MDEBUG("child_offset: " << last_chunk.child_offset
<< " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child)
<< " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent)
<< " , child_layer_size: " << last_chunk.child_layer_size
<< " , parent_layer_size: " << last_chunk.parent_layer_size);
MDEBUG("next_start_child_chunk_index: " << last_chunk.next_start_child_chunk_index
<< " , last_parent: " << m_curve_trees.m_c1.to_string(last_chunk.last_parent)
<< " , update_last_parent: " << last_chunk.update_last_parent
<< " , last_child: " << m_curve_trees.m_c1.to_string(last_chunk.last_child));
++c1_idx;
}
@ -445,7 +455,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t
MDEBUG("Selene tree extension start idx: " << c2_layer.start_idx);
for (std::size_t j = 0; j < c2_layer.hashes.size(); ++j)
MDEBUG("Hash idx: " << (j + c2_layer.start_idx) << " , hash: "
MDEBUG("Child chunk start idx: " << (j + c2_layer.start_idx) << " , hash: "
<< m_curve_trees.m_c2.to_string(c2_layer.hashes[j]));
++c2_idx;
@ -458,7 +468,7 @@ void CurveTreesUnitTest::log_tree_extension(const CurveTreesV1::TreeExtension &t
MDEBUG("Helios tree extension start idx: " << c1_layer.start_idx);
for (std::size_t j = 0; j < c1_layer.hashes.size(); ++j)
MDEBUG("Hash idx: " << (j + c1_layer.start_idx) << " , hash: "
MDEBUG("Child chunk start idx: " << (j + c1_layer.start_idx) << " , hash: "
<< m_curve_trees.m_c1.to_string(c1_layer.hashes[j]));
++c1_idx;
@ -497,7 +507,7 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree)
MDEBUG("Selene layer size: " << c2_layer.size() << " , tree layer: " << i);
for (std::size_t j = 0; j < c2_layer.size(); ++j)
MDEBUG("Hash idx: " << j << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer[j]));
MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c2.to_string(c2_layer[j]));
++c2_idx;
}
@ -509,7 +519,7 @@ void CurveTreesUnitTest::log_tree(const CurveTreesUnitTest::Tree &tree)
MDEBUG("Helios layer size: " << c1_layer.size() << " , tree layer: " << i);
for (std::size_t j = 0; j < c1_layer.size(); ++j)
MDEBUG("Hash idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j]));
MDEBUG("Child chunk start idx: " << j << " , hash: " << m_curve_trees.m_c1.to_string(c1_layer[j]));
++c1_idx;
}
@ -543,7 +553,7 @@ const std::vector<CurveTreesV1::LeafTuple> generate_random_leaves(const CurveTre
return tuples;
}
//----------------------------------------------------------------------------------------------------------------------
static void grow_tree(CurveTreesV1 &curve_trees,
static bool grow_tree(CurveTreesV1 &curve_trees,
CurveTreesUnitTest &curve_trees_accessor,
const std::size_t num_leaves,
CurveTreesUnitTest::Tree &tree_inout)
@ -566,7 +576,7 @@ static void grow_tree(CurveTreesV1 &curve_trees,
curve_trees_accessor.log_tree(tree_inout);
// Validate tree structure and all hashes
ASSERT_TRUE(curve_trees_accessor.validate_tree(tree_inout));
return curve_trees_accessor.validate_tree(tree_inout);
}
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
@ -617,13 +627,11 @@ TEST(curve_trees, grow_tree)
{
for (const std::size_t ext_leaves : N_LEAVES)
{
// Tested reverse order already
if (ext_leaves < init_leaves)
continue;
// Only test 3rd layer once because it's a huge test
if (init_leaves > 1 && ext_leaves == NEED_3_LAYERS)
continue;
if (ext_leaves > 1 && init_leaves == NEED_3_LAYERS)
continue;
LOG_PRINT_L1("Adding " << init_leaves << " leaves to tree, then extending by " << ext_leaves << " leaves");
@ -632,21 +640,25 @@ TEST(curve_trees, grow_tree)
// Initialize global tree with `init_leaves`
MDEBUG("Adding " << init_leaves << " leaves to tree");
grow_tree(curve_trees,
bool res = grow_tree(curve_trees,
curve_trees_accessor,
init_leaves,
global_tree);
ASSERT_TRUE(res);
MDEBUG("Successfully added initial " << init_leaves << " leaves to tree");
// Then extend the global tree by `ext_leaves`
MDEBUG("Extending tree by " << ext_leaves << " leaves");
grow_tree(curve_trees,
res = grow_tree(curve_trees,
curve_trees_accessor,
ext_leaves,
global_tree);
ASSERT_TRUE(res);
MDEBUG("Successfully extended by " << ext_leaves << " leaves");
}
}