Implement and test trim_tree algo in memory

j-berman 2024-07-08 20:01:14 -07:00
parent 36f1e1965f
commit 5ddca0ce11
9 changed files with 916 additions and 742 deletions

@@ -177,7 +177,7 @@ static LayerExtension<C> hash_children_chunks(const C &curve,
return parents_out;
};
//----------------------------------------------------------------------------------------------------------------------
static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total_children,
static GrowLayerInstructions get_grow_layer_instructions(const std::size_t old_total_children,
const std::size_t new_total_children,
const std::size_t parent_chunk_width,
const bool last_child_will_change)
@@ -265,7 +265,7 @@ static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total
<< " , start_offset: " << offset
<< " , next_parent_start_index: " << next_parent_start_index);
return UpdateLayerMetadata{
return GrowLayerInstructions{
.parent_chunk_width = parent_chunk_width,
.old_total_children = old_total_children,
.new_total_children = new_total_children,
@@ -280,7 +280,7 @@ static UpdateLayerMetadata get_update_layer_metadata(const std::size_t old_total
};
//----------------------------------------------------------------------------------------------------------------------
static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_n_leaf_tuples,
static GrowLayerInstructions get_leaf_layer_grow_instructions(const std::size_t old_n_leaf_tuples,
const std::size_t new_n_leaf_tuples,
const std::size_t leaf_tuple_size,
const std::size_t leaf_layer_chunk_width)
@@ -332,7 +332,7 @@ static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_
<< " , start_offset: " << offset
<< " , next_parent_start_index: " << next_parent_start_index);
return UpdateLayerMetadata{
return GrowLayerInstructions{
.parent_chunk_width = leaf_layer_chunk_width,
.old_total_children = old_total_children,
.new_total_children = new_total_children,
@@ -352,7 +352,7 @@ static UpdateLayerMetadata get_update_leaf_layer_metadata(const std::size_t old_
template<typename C_CHILD, typename C_PARENT>
static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
const C_PARENT &c_parent,
const UpdateLayerMetadata &update_layer_metadata,
const GrowLayerInstructions &grow_layer_instructions,
const std::vector<typename C_CHILD::Point> &child_last_hashes,
const std::vector<typename C_PARENT::Point> &parent_last_hashes,
const std::vector<LayerExtension<C_CHILD>> child_layer_extensions,
@@ -372,7 +372,7 @@ static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
CHECK_AND_ASSERT_THROW_MES(last_updated_child_idx < child_layer_extensions.size(), "missing child layer");
const auto &child_extension = child_layer_extensions[last_updated_child_idx];
if (update_layer_metadata.setting_next_layer_after_old_root)
if (grow_layer_instructions.setting_next_layer_after_old_root)
{
CHECK_AND_ASSERT_THROW_MES((last_updated_child_idx + 1) == child_last_hashes.size(),
"unexpected last updated child idx");
@@ -380,14 +380,14 @@ static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
}
const auto child_scalars = next_child_scalars_from_children<C_CHILD, C_PARENT>(c_child,
update_layer_metadata.setting_next_layer_after_old_root ? child_last_hash : nullptr,
grow_layer_instructions.setting_next_layer_after_old_root ? child_last_hash : nullptr,
child_extension);
if (update_layer_metadata.need_old_last_parent)
if (grow_layer_instructions.need_old_last_parent)
CHECK_AND_ASSERT_THROW_MES(parent_last_hash != nullptr, "missing last parent");
typename C_PARENT::Scalar last_child_scalar;
if (update_layer_metadata.need_old_last_child)
if (grow_layer_instructions.need_old_last_child)
{
CHECK_AND_ASSERT_THROW_MES(child_last_hash != nullptr, "missing last child");
last_child_scalar = c_child.point_to_cycle_scalar(*child_last_hash);
@@ -396,21 +396,208 @@ static LayerExtension<C_PARENT> get_next_layer_extension(const C_CHILD &c_child,
// Do the hashing
LayerExtension<C_PARENT> layer_extension = hash_children_chunks(
c_parent,
update_layer_metadata.need_old_last_child ? &last_child_scalar : nullptr,
update_layer_metadata.need_old_last_parent ? parent_last_hash : nullptr,
update_layer_metadata.start_offset,
update_layer_metadata.next_parent_start_index,
grow_layer_instructions.need_old_last_child ? &last_child_scalar : nullptr,
grow_layer_instructions.need_old_last_parent ? parent_last_hash : nullptr,
grow_layer_instructions.start_offset,
grow_layer_instructions.next_parent_start_index,
child_scalars,
update_layer_metadata.parent_chunk_width
grow_layer_instructions.parent_chunk_width
);
CHECK_AND_ASSERT_THROW_MES((layer_extension.start_idx + layer_extension.hashes.size()) ==
update_layer_metadata.new_total_parents,
grow_layer_instructions.new_total_parents,
"unexpected num parents extended");
return layer_extension;
}
//----------------------------------------------------------------------------------------------------------------------
static TrimLayerInstructions get_trim_layer_instructions(
const std::size_t old_total_children,
const std::size_t new_total_children,
const std::size_t parent_chunk_width,
const bool last_child_will_change)
{
CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "new total children must be > 0");
CHECK_AND_ASSERT_THROW_MES(old_total_children >= new_total_children,
"old_total_children must be >= new_total_children");
// Calculate old and new total number of parents using totals for children
const std::size_t old_total_parents = 1 + ((old_total_children - 1) / parent_chunk_width);
const std::size_t new_total_parents = 1 + ((new_total_children - 1) / parent_chunk_width);
CHECK_AND_ASSERT_THROW_MES(old_total_parents >= new_total_parents,
"old_total_parents must be >= new_total_parents");
CHECK_AND_ASSERT_THROW_MES(new_total_children > new_total_parents,
"new_total_children must be > new_total_parents");
const std::size_t old_offset = old_total_children % parent_chunk_width;
std::size_t new_offset = new_total_children % parent_chunk_width;
// Get the number of existing children in what will become the new last chunk after trimming
const std::size_t new_last_chunk_old_num_children = (old_total_parents > new_total_parents || old_offset == 0)
? parent_chunk_width
: old_offset;
MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children << ", new_offset: " << new_offset);
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children >= new_offset,
"unexpected new_last_chunk_old_num_children");
// Get the number of children we'll be trimming from the new last chunk
const std::size_t trim_n_children = new_offset == 0
? 0 // The last chunk will remain full when the new_offset == 0
: new_last_chunk_old_num_children - new_offset;
// We use hash_trim if we're trimming no more elems from the last chunk than the number of elems remaining
const bool need_last_chunk_children_to_trim = trim_n_children > 0 && trim_n_children <= new_offset;
// Otherwise we use hash_grow
const bool need_last_chunk_remaining_children = trim_n_children > 0 && trim_n_children > new_offset;
CHECK_AND_ASSERT_THROW_MES(!(need_last_chunk_children_to_trim && need_last_chunk_remaining_children),
"cannot both need last children to trim and need the remaining children");
// TODO: cleaner conditional approach
// TODO: comments
const bool need_last_chunk_parent = !need_last_chunk_remaining_children &&
(need_last_chunk_children_to_trim || last_child_will_change);
const bool update_existing_last_hash = need_last_chunk_remaining_children || need_last_chunk_parent;
std::size_t hash_offset = new_offset;
if (last_child_will_change)
{
hash_offset = hash_offset == 0 ? (parent_chunk_width - 1) : (hash_offset - 1);
if (need_last_chunk_children_to_trim || need_last_chunk_remaining_children)
--new_offset;
}
if (need_last_chunk_remaining_children)
{
hash_offset = 0;
}
MDEBUG("parent_chunk_width: " << parent_chunk_width
<< " , old_total_children: " << old_total_children
<< " , new_total_children: " << new_total_children
<< " , old_total_parents: " << old_total_parents
<< " , new_total_parents: " << new_total_parents
<< " , need_last_chunk_children_to_trim: " << need_last_chunk_children_to_trim
<< " , need_last_chunk_remaining_children: " << need_last_chunk_remaining_children
<< " , need_last_chunk_parent: " << need_last_chunk_parent
<< " , need_new_last_child: " << last_child_will_change
<< " , update_existing_last_hash: " << update_existing_last_hash
<< " , new_offset: " << new_offset
<< " , hash_offset: " << hash_offset);
return TrimLayerInstructions{
.parent_chunk_width = parent_chunk_width,
.old_total_children = old_total_children,
.new_total_children = new_total_children,
.old_total_parents = old_total_parents,
.new_total_parents = new_total_parents,
.need_last_chunk_children_to_trim = need_last_chunk_children_to_trim,
.need_last_chunk_remaining_children = need_last_chunk_remaining_children,
.need_last_chunk_parent = need_last_chunk_parent,
.need_new_last_child = last_child_will_change,
.update_existing_last_hash = update_existing_last_hash,
.new_offset = new_offset,
.hash_offset = hash_offset,
};
}
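As a worked illustration of the arithmetic above (not part of this commit; the chunk width and child counts are made up), the snippet below recomputes the branch selection for a parent chunk width of 4: trimming an 11-child layer to 7 children removes fewer children from the new last chunk than remain in it, so the hash_trim path applies; trimming to 5 children removes more than remain, so the last chunk is instead re-grown from scratch with hash_grow.
#include <cstddef>
#include <iostream>
// Standalone recomputation of the quantities used by get_trim_layer_instructions
// (illustration only; the real logic lives in curve_trees.cpp above)
static void trim_math(const std::size_t old_children, const std::size_t new_children, const std::size_t width)
{
    const std::size_t old_parents = 1 + ((old_children - 1) / width);
    const std::size_t new_parents = 1 + ((new_children - 1) / width);
    const std::size_t old_offset  = old_children % width;
    const std::size_t new_offset  = new_children % width;
    // Children already present in what will become the new last chunk
    const std::size_t last_chunk_old_children = (old_parents > new_parents || old_offset == 0)
        ? width
        : old_offset;
    // Children removed from that chunk (0 means the chunk stays full)
    const std::size_t trim_n = new_offset == 0 ? 0 : (last_chunk_old_children - new_offset);
    const bool use_hash_trim = trim_n > 0 && trim_n <= new_offset; // trim no more than remain
    const bool use_hash_grow = trim_n > 0 && trim_n >  new_offset; // otherwise re-grow the chunk
    std::cout << old_children << " -> " << new_children
        << " : trim_n = " << trim_n
        << " , hash_trim = " << use_hash_trim
        << " , hash_grow = " << use_hash_grow << '\n';
}
int main()
{
    trim_math(11, 7, 4); // 1 child trimmed, 3 remain     -> hash_trim path
    trim_math(11, 5, 4); // 3 children trimmed, 1 remains -> hash_grow path
    return 0;
}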
//----------------------------------------------------------------------------------------------------------------------
template<typename C_CHILD, typename C_PARENT>
static typename fcmp::curve_trees::LayerReduction<C_PARENT> get_next_layer_reduction(
const C_CHILD &c_child,
const C_PARENT &c_parent,
const TrimLayerInstructions &trim_layer_instructions,
const std::vector<typename C_PARENT::Point> &parent_last_hashes,
const std::vector<std::vector<typename C_PARENT::Scalar>> &children_to_trim,
const std::vector<typename C_CHILD::Point> &child_last_hashes,
const std::size_t parent_layer_idx,
const std::size_t child_layer_idx,
const std::vector<LayerReduction<C_CHILD>> &child_reductions)
{
LayerReduction<C_PARENT> layer_reduction_out;
layer_reduction_out.new_total_parents = trim_layer_instructions.new_total_parents;
layer_reduction_out.update_existing_last_hash = trim_layer_instructions.update_existing_last_hash;
typename C_PARENT::Point existing_hash = c_parent.m_hash_init_point;
if (trim_layer_instructions.need_last_chunk_parent)
{
CHECK_AND_ASSERT_THROW_MES(parent_last_hashes.size() > parent_layer_idx, "missing last parent hash");
existing_hash = parent_last_hashes[parent_layer_idx];
}
std::vector<typename C_PARENT::Scalar> child_scalars;
if (trim_layer_instructions.need_last_chunk_children_to_trim
|| trim_layer_instructions.need_last_chunk_remaining_children)
{
// TODO: a clean way to do this without copying
CHECK_AND_ASSERT_THROW_MES(children_to_trim.size() > parent_layer_idx, "missing children to trim");
child_scalars = children_to_trim[parent_layer_idx];
}
typename C_PARENT::Scalar new_last_child_scalar = c_parent.zero_scalar();
if (trim_layer_instructions.need_new_last_child)
{
CHECK_AND_ASSERT_THROW_MES(child_layer_idx > 0, "child index cannot be 0 here");
CHECK_AND_ASSERT_THROW_MES(child_reductions.size() == child_layer_idx, "unexpected child layer idx");
const std::size_t last_child_layer_idx = child_layer_idx - 1;
const typename C_CHILD::Point &new_last_child = child_reductions.back().new_last_hash;
new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child);
if (trim_layer_instructions.need_last_chunk_remaining_children)
{
child_scalars.emplace_back(std::move(new_last_child_scalar));
}
else if (!trim_layer_instructions.need_last_chunk_children_to_trim)
{
// TODO: cleaner conditional for this case
CHECK_AND_ASSERT_THROW_MES(child_last_hashes.size() > last_child_layer_idx, "missing last child hash");
const typename C_CHILD::Point &old_last_child = child_last_hashes[last_child_layer_idx];
auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child);
child_scalars.emplace_back(std::move(old_last_child_scalar));
}
}
for (std::size_t i = 0; i < child_scalars.size(); ++i)
MDEBUG("Hashing child " << c_parent.to_string(child_scalars[i]));
if (trim_layer_instructions.need_last_chunk_remaining_children)
{
MDEBUG("hash_grow: existing_hash: " << c_parent.to_string(existing_hash)
<< " , hash_offset: " << trim_layer_instructions.hash_offset);
layer_reduction_out.new_last_hash = c_parent.hash_grow(
existing_hash,
trim_layer_instructions.hash_offset,
c_parent.zero_scalar(),
typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()});
}
else
{
MDEBUG("hash_trim: existing_hash: " << c_parent.to_string(existing_hash)
<< " , hash_offset: " << trim_layer_instructions.hash_offset
<< " , new_last_child_scalar: " << c_parent.to_string(new_last_child_scalar));
layer_reduction_out.new_last_hash = c_parent.hash_trim(
existing_hash,
trim_layer_instructions.hash_offset,
typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()},
new_last_child_scalar);
}
MDEBUG("Result hash: " << c_parent.to_string(layer_reduction_out.new_last_hash));
return layer_reduction_out;
}
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
// CurveTrees public member functions
//----------------------------------------------------------------------------------------------------------------------
@@ -430,149 +617,6 @@ CurveTrees<Helios, Selene>::LeafTuple CurveTrees<Helios, Selene>::output_to_leaf
};
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
UpdateLayerMetadata CurveTrees<C1, C2>::set_next_layer_extension(
const UpdateLayerMetadata &prev_layer_metadata,
const bool parent_is_c1,
const LastHashes &last_hashes,
std::size_t &c1_last_idx_inout,
std::size_t &c2_last_idx_inout,
TreeExtension &tree_extension_inout) const
{
const auto &c1_last_hashes = last_hashes.c1_last_hashes;
const auto &c2_last_hashes = last_hashes.c2_last_hashes;
auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions;
auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions;
const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width;
const auto update_layer_metadata = get_update_layer_metadata(
prev_layer_metadata.old_total_parents,
prev_layer_metadata.new_total_parents,
parent_chunk_width,
prev_layer_metadata.need_old_last_parent
);
if (parent_is_c1)
{
auto c1_layer_extension = get_next_layer_extension<C2, C1>(
m_c2,
m_c1,
update_layer_metadata,
c2_last_hashes,
c1_last_hashes,
c2_layer_extensions_out,
c2_last_idx_inout,
c1_last_idx_inout
);
c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension));
++c2_last_idx_inout;
}
else
{
auto c2_layer_extension = get_next_layer_extension<C1, C2>(
m_c1,
m_c2,
update_layer_metadata,
c1_last_hashes,
c2_last_hashes,
c1_layer_extensions_out,
c1_last_idx_inout,
c2_last_idx_inout
);
c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension));
++c1_last_idx_inout;
}
return update_layer_metadata;
};
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension(
const std::size_t old_n_leaf_tuples,
const LastHashes &existing_last_hashes,
const std::vector<LeafTuple> &new_leaf_tuples) const
{
TreeExtension tree_extension;
if (new_leaf_tuples.empty())
return tree_extension;
auto update_layer_metadata = get_update_leaf_layer_metadata(
old_n_leaf_tuples,
new_leaf_tuples.size(),
LEAF_TUPLE_SIZE,
m_leaf_layer_chunk_width);
tree_extension.leaves.start_idx = update_layer_metadata.old_total_children;
// Copy the leaves
// TODO: don't copy here
tree_extension.leaves.tuples.reserve(new_leaf_tuples.size());
for (const auto &leaf : new_leaf_tuples)
{
tree_extension.leaves.tuples.emplace_back(LeafTuple{
.O_x = leaf.O_x,
.I_x = leaf.I_x,
.C_x = leaf.C_x
});
}
if (update_layer_metadata.need_old_last_parent)
CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent");
// Hash the leaf layer
auto leaf_parents = hash_children_chunks(m_c2,
nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only
update_layer_metadata.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr,
update_layer_metadata.start_offset,
update_layer_metadata.next_parent_start_index,
this->flatten_leaves(new_leaf_tuples),
m_leaf_layer_chunk_width
);
CHECK_AND_ASSERT_THROW_MES(
(leaf_parents.start_idx + leaf_parents.hashes.size()) == update_layer_metadata.new_total_parents,
"unexpected num leaf parents extended");
tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents));
// Alternate between hashing c2 children, c1 children, c2, c1, ...
bool parent_is_c1 = true;
std::size_t c1_last_idx = 0;
std::size_t c2_last_idx = 0;
while (update_layer_metadata.new_total_parents > 1)
{
MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1));
const std::size_t new_total_children = update_layer_metadata.new_total_parents;
update_layer_metadata = this->set_next_layer_extension(
update_layer_metadata,
parent_is_c1,
existing_last_hashes,
c1_last_idx,
c2_last_idx,
tree_extension
);
// Sanity check to make sure we're making progress to exit the while loop
CHECK_AND_ASSERT_THROW_MES(update_layer_metadata.new_total_parents < new_total_children,
"expect fewer parents than children in every layer");
parent_is_c1 = !parent_is_c1;
}
return tree_extension;
};
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
// CurveTrees private member functions
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
std::vector<typename C2::Scalar> CurveTrees<C1, C2>::flatten_leaves(const std::vector<LeafTuple> &leaves) const
{
std::vector<typename C2::Scalar> flattened_leaves;
@@ -589,6 +633,253 @@ std::vector<typename C2::Scalar> CurveTrees<C1, C2>::flatten_leaves(const std::v
return flattened_leaves;
};
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
typename CurveTrees<C1, C2>::TreeExtension CurveTrees<C1, C2>::get_tree_extension(
const std::size_t old_n_leaf_tuples,
const LastHashes &existing_last_hashes,
const std::vector<LeafTuple> &new_leaf_tuples) const
{
TreeExtension tree_extension;
if (new_leaf_tuples.empty())
return tree_extension;
auto grow_layer_instructions = get_leaf_layer_grow_instructions(
old_n_leaf_tuples,
new_leaf_tuples.size(),
LEAF_TUPLE_SIZE,
m_leaf_layer_chunk_width);
tree_extension.leaves.start_idx = grow_layer_instructions.old_total_children;
// Copy the leaves
// TODO: don't copy here
tree_extension.leaves.tuples.reserve(new_leaf_tuples.size());
for (const auto &leaf : new_leaf_tuples)
{
tree_extension.leaves.tuples.emplace_back(LeafTuple{
.O_x = leaf.O_x,
.I_x = leaf.I_x,
.C_x = leaf.C_x
});
}
if (grow_layer_instructions.need_old_last_parent)
CHECK_AND_ASSERT_THROW_MES(!existing_last_hashes.c2_last_hashes.empty(), "missing last c2 parent");
// Hash the leaf layer
auto leaf_parents = hash_children_chunks(m_c2,
nullptr, // We never need the old last child from leaf layer because the leaf layer is always append-only
grow_layer_instructions.need_old_last_parent ? &existing_last_hashes.c2_last_hashes[0] : nullptr,
grow_layer_instructions.start_offset,
grow_layer_instructions.next_parent_start_index,
this->flatten_leaves(new_leaf_tuples),
m_leaf_layer_chunk_width
);
CHECK_AND_ASSERT_THROW_MES(
(leaf_parents.start_idx + leaf_parents.hashes.size()) == grow_layer_instructions.new_total_parents,
"unexpected num leaf parents extended");
tree_extension.c2_layer_extensions.emplace_back(std::move(leaf_parents));
// Alternate between hashing c2 children, c1 children, c2, c1, ...
bool parent_is_c1 = true;
std::size_t c1_last_idx = 0;
std::size_t c2_last_idx = 0;
while (grow_layer_instructions.new_total_parents > 1)
{
MDEBUG("Getting extension for layer " << (c1_last_idx + c2_last_idx + 1));
const std::size_t new_total_children = grow_layer_instructions.new_total_parents;
grow_layer_instructions = this->set_next_layer_extension(
grow_layer_instructions,
parent_is_c1,
existing_last_hashes,
c1_last_idx,
c2_last_idx,
tree_extension
);
// Sanity check to make sure we're making progress to exit the while loop
CHECK_AND_ASSERT_THROW_MES(grow_layer_instructions.new_total_parents < new_total_children,
"expect fewer parents than children in every layer");
parent_is_c1 = !parent_is_c1;
}
return tree_extension;
};
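For context, a sketch (not part of this commit) of how a caller is expected to use get_tree_extension to grow a stored tree; `curve_trees` is assumed to be a constructed CurveTreesV1, and get_last_hashes_from_store / append_to_store are hypothetical storage helpers.
// Hypothetical storage helpers (illustration only)
CurveTreesV1::LastHashes get_last_hashes_from_store(const std::size_t old_n_leaf_tuples);
void append_to_store(const CurveTreesV1::TreeExtension &tree_extension);
// Illustrative grow flow against the CurveTrees API
void grow_tree_sketch(const CurveTreesV1 &curve_trees,
    const std::size_t old_n_leaf_tuples,
    const std::vector<CurveTreesV1::LeafTuple> &new_leaf_tuples)
{
    // 1. Read the existing last hash of every layer (c2_last_hashes[0] is the layer after leaves)
    const CurveTreesV1::LastHashes last_hashes = get_last_hashes_from_store(old_n_leaf_tuples);
    // 2. Hash only the chunks that change when the new leaves are appended
    const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples,
        last_hashes,
        new_leaf_tuples);
    // 3. Persist the new leaves and each layer's new/updated parent hashes
    append_to_store(tree_extension);
}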
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
std::vector<TrimLayerInstructions> CurveTrees<C1, C2>::get_trim_instructions(
const std::size_t old_n_leaf_tuples,
const std::size_t trim_n_leaf_tuples)
{
CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist");
CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
std::vector<TrimLayerInstructions> trim_instructions;
// Get trim instructions for the leaf layer
{
const std::size_t old_total_leaves = old_n_leaf_tuples * LEAF_TUPLE_SIZE;
const std::size_t new_total_leaves = (old_n_leaf_tuples - trim_n_leaf_tuples) * LEAF_TUPLE_SIZE;
const std::size_t parent_chunk_width = m_leaf_layer_chunk_width;
// Leaf layer's last child never changes since leaf layer is pop-/append-only
const bool last_child_will_change = false;
auto trim_leaf_layer_instructions = get_trim_layer_instructions(
old_total_leaves,
new_total_leaves,
parent_chunk_width,
last_child_will_change);
trim_instructions.emplace_back(std::move(trim_leaf_layer_instructions));
}
bool use_c2 = false;
while (trim_instructions.back().new_total_parents > 1)
{
auto trim_layer_instructions = get_trim_layer_instructions(
trim_instructions.back().old_total_parents,
trim_instructions.back().new_total_parents,
use_c2 ? m_c2_width : m_c1_width,
trim_instructions.back().update_existing_last_hash);
trim_instructions.emplace_back(std::move(trim_layer_instructions));
use_c2 = !use_c2;
}
return trim_instructions;
}
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
typename CurveTrees<C1, C2>::TreeReduction CurveTrees<C1, C2>::get_tree_reduction(
const std::vector<TrimLayerInstructions> &trim_instructions,
const LastChunkChildrenToTrim &children_to_trim,
const LastHashes &last_hashes) const
{
TreeReduction tree_reduction_out;
tree_reduction_out.new_total_leaves = trim_instructions[0].new_total_children;
bool use_c2 = true;
std::size_t c1_idx = 0;
std::size_t c2_idx = 0;
for (const auto &trim_layer_instructions : trim_instructions)
{
MDEBUG("Trimming layer " << (c1_idx + c2_idx) << " (c1_idx: " << c1_idx << " , c2_idx: " << c2_idx << ")");
if (use_c2)
{
auto c2_layer_reduction_out = get_next_layer_reduction(
m_c1,
m_c2,
trim_layer_instructions,
last_hashes.c2_last_hashes,
children_to_trim.c2_children,
last_hashes.c1_last_hashes,
c2_idx,
c1_idx,
tree_reduction_out.c1_layer_reductions
);
tree_reduction_out.c2_layer_reductions.emplace_back(std::move(c2_layer_reduction_out));
++c2_idx;
}
else
{
auto c1_layer_reduction_out = get_next_layer_reduction(
m_c2,
m_c1,
trim_layer_instructions,
last_hashes.c1_last_hashes,
children_to_trim.c1_children,
last_hashes.c2_last_hashes,
c1_idx,
c2_idx,
tree_reduction_out.c2_layer_reductions
);
tree_reduction_out.c1_layer_reductions.emplace_back(std::move(c1_layer_reduction_out));
++c1_idx;
}
use_c2 = !use_c2;
}
return tree_reduction_out;
};
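The corresponding trim flow, again as a sketch that is not in this commit: the trim instructions are pure arithmetic over leaf counts, so the caller derives them first, fetches from storage only the children and last hashes those instructions require, and then applies the resulting TreeReduction. The storage helpers named below are hypothetical.
// Hypothetical storage helpers (illustration only)
CurveTreesV1::LastChunkChildrenToTrim get_children_to_trim_from_store(
    const std::vector<TrimLayerInstructions> &trim_instructions);
CurveTreesV1::LastHashes get_last_hashes_for_trim_from_store(
    const std::vector<TrimLayerInstructions> &trim_instructions);
void apply_reduction_to_store(const CurveTreesV1::TreeReduction &tree_reduction);
// Illustrative trim flow against the CurveTrees API
void trim_tree_sketch(CurveTreesV1 &curve_trees,
    const std::size_t old_n_leaf_tuples,
    const std::size_t trim_n_leaf_tuples)
{
    // 1. Which chunks need their trimmed children, which need the remaining children, which need the old parent hash
    const auto trim_instructions = curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples);
    // 2. Fetch only what the instructions require
    const auto children_to_trim = get_children_to_trim_from_store(trim_instructions);
    const auto last_hashes      = get_last_hashes_for_trim_from_store(trim_instructions);
    // 3. Compute each layer's new last hash and the new totals
    const auto tree_reduction = curve_trees.get_tree_reduction(trim_instructions, children_to_trim, last_hashes);
    // 4. Drop the trimmed leaves/hashes and overwrite each layer's last hash where update_existing_last_hash is set
    apply_reduction_to_store(tree_reduction);
}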
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
// CurveTrees private member functions
//----------------------------------------------------------------------------------------------------------------------
template<typename C1, typename C2>
GrowLayerInstructions CurveTrees<C1, C2>::set_next_layer_extension(
const GrowLayerInstructions &prev_layer_instructions,
const bool parent_is_c1,
const LastHashes &last_hashes,
std::size_t &c1_last_idx_inout,
std::size_t &c2_last_idx_inout,
TreeExtension &tree_extension_inout) const
{
const auto &c1_last_hashes = last_hashes.c1_last_hashes;
const auto &c2_last_hashes = last_hashes.c2_last_hashes;
auto &c1_layer_extensions_out = tree_extension_inout.c1_layer_extensions;
auto &c2_layer_extensions_out = tree_extension_inout.c2_layer_extensions;
const std::size_t parent_chunk_width = parent_is_c1 ? m_c1_width : m_c2_width;
const auto grow_layer_instructions = get_grow_layer_instructions(
prev_layer_instructions.old_total_parents,
prev_layer_instructions.new_total_parents,
parent_chunk_width,
prev_layer_instructions.need_old_last_parent
);
if (parent_is_c1)
{
auto c1_layer_extension = get_next_layer_extension<C2, C1>(
m_c2,
m_c1,
grow_layer_instructions,
c2_last_hashes,
c1_last_hashes,
c2_layer_extensions_out,
c2_last_idx_inout,
c1_last_idx_inout
);
c1_layer_extensions_out.emplace_back(std::move(c1_layer_extension));
++c2_last_idx_inout;
}
else
{
auto c2_layer_extension = get_next_layer_extension<C1, C2>(
m_c1,
m_c2,
grow_layer_instructions,
c1_last_hashes,
c2_last_hashes,
c1_layer_extensions_out,
c1_last_idx_inout,
c2_last_idx_inout
);
c2_layer_extensions_out.emplace_back(std::move(c2_layer_extension));
++c1_last_idx_inout;
}
return grow_layer_instructions;
};
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
} //namespace curve_trees
} //namespace fcmp

@@ -55,8 +55,17 @@ struct LayerExtension final
std::vector<typename C::Point> hashes;
};
// Useful metadata for updating a layer
struct UpdateLayerMetadata final
// A struct useful to trim a layer and update its last hash if necessary
template<typename C>
struct LayerReduction final
{
std::size_t new_total_parents{0};
bool update_existing_last_hash;
typename C::Point new_last_hash;
};
// Useful metadata for growing a layer
struct GrowLayerInstructions final
{
// The max chunk width of children used to hash into a parent
std::size_t parent_chunk_width;
@@ -83,6 +92,32 @@ struct UpdateLayerMetadata final
// The parent's starting index in the layer
std::size_t next_parent_start_index;
};
// Useful metadata for trimming a layer
struct TrimLayerInstructions final
{
// The max chunk width of children used to hash into a parent
std::size_t parent_chunk_width;
// Total children refers to the total number of elements in a layer
std::size_t old_total_children;
std::size_t new_total_children;
// Total parents refers to the total number of hashes of chunks of children
std::size_t old_total_parents;
std::size_t new_total_parents;
bool need_last_chunk_children_to_trim;
bool need_last_chunk_remaining_children;
bool need_last_chunk_parent;
bool need_new_last_child;
bool update_existing_last_hash;
std::size_t new_offset;
std::size_t hash_offset;
};
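For a concrete sense of these fields, a hypothetical instance (not from the commit) for a layer of 11 children trimmed to 7 with a parent chunk width of 4, where the last remaining child does not change:
// Illustrative values only (see get_trim_layer_instructions for the derivation):
// TrimLayerInstructions{
//     .parent_chunk_width                 = 4,
//     .old_total_children                 = 11,
//     .new_total_children                 = 7,
//     .old_total_parents                  = 3,
//     .new_total_parents                  = 2,
//     .need_last_chunk_children_to_trim   = true,  // 1 child trimmed, 3 remain -> hash_trim
//     .need_last_chunk_remaining_children = false,
//     .need_last_chunk_parent             = true,  // existing last parent hash is trimmed in place
//     .need_new_last_child                = false,
//     .update_existing_last_hash          = true,
//     .new_offset                         = 3,
//     .hash_offset                        = 3,
// };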
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
// This class is useful to help update the curve trees tree without needing to keep the entire tree in memory
@@ -137,6 +172,16 @@ public:
std::vector<LayerExtension<C2>> c2_layer_extensions;
};
// A struct useful to reduce the number of leaves in an existing tree
// - layers alternate between C1 and C2
// - c2_layer_reductions[0] is first layer after leaves, then c1_layer_reductions[0], c2_layer_reductions[1], etc
struct TreeReduction final
{
std::size_t new_total_leaves;
std::vector<LayerReduction<C1>> c1_layer_reductions;
std::vector<LayerReduction<C2>> c2_layer_reductions;
};
// Last hashes from each layer in the tree
// - layers alternate between C1 and C2
// - c2_last_hashes[0] refers to the layer after leaves, then c1_last_hashes[0], then c2_last_hashes[1], etc
@@ -146,26 +191,48 @@ public:
std::vector<typename C2::Point> c2_last_hashes;
};
// The children we'll trim from each last chunk in the tree
// - layers alternate between C1 and C2
// - c2_children[0] refers to the layer after leaves, then c1_children[0], then c2_children[1], etc
struct LastChunkChildrenToTrim final
{
std::vector<std::vector<typename C1::Scalar>> c1_children;
std::vector<std::vector<typename C2::Scalar>> c2_children;
};
//member functions
public:
// Convert cryptonote output pub key and commitment to a leaf tuple for the curve trees tree
LeafTuple output_to_leaf_tuple(const crypto::public_key &O, const crypto::public_key &C) const;
// Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...]
std::vector<typename C2::Scalar> flatten_leaves(const std::vector<LeafTuple> &leaves) const;
// Take in the existing number of leaf tuples and the existing last hashes of each layer in the tree, as well as new
// leaves to add to the tree, and return a tree extension struct that can be used to extend a global tree
// leaves to add to the tree, and return a tree extension struct that can be used to extend a tree
TreeExtension get_tree_extension(const std::size_t old_n_leaf_tuples,
const LastHashes &existing_last_hashes,
const std::vector<LeafTuple> &new_leaf_tuples) const;
// Flatten leaves [(O.x, I.x, C.x),(O.x, I.x, C.x),...] -> [scalar,scalar,scalar,scalar,scalar,scalar,...]
std::vector<typename C2::Scalar> flatten_leaves(const std::vector<LeafTuple> &leaves) const;
// Get instructions useful for trimming all existing layers in the tree
std::vector<TrimLayerInstructions> get_trim_instructions(
const std::size_t old_n_leaf_tuples,
const std::size_t trim_n_leaf_tuples);
// Take in the instructions useful for trimming all existing layers in the tree, all children to be trimmed from
// each last chunk, and the existing last hashes in what will become the new last parent of each layer, and return
// a tree reduction struct that can be used to trim a tree
TreeReduction get_tree_reduction(
const std::vector<TrimLayerInstructions> &trim_instructions,
const LastChunkChildrenToTrim &children_to_trim,
const LastHashes &last_hashes) const;
private:
// Helper function used to set the next layer extension used to grow the next layer in the tree
// - for example, if we just grew the parent layer after the leaf layer, the "next layer" would be the grandparent
// layer of the leaf layer
UpdateLayerMetadata set_next_layer_extension(
const UpdateLayerMetadata &prev_layer_metadata,
GrowLayerInstructions set_next_layer_extension(
const GrowLayerInstructions &prev_layer_instructions,
const bool parent_is_c1,
const LastHashes &last_hashes,
std::size_t &c1_last_idx_inout,

@@ -71,7 +71,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciphersuite"
version = "0.4.1"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"blake2",
"dalek-ff-group",
@@ -130,9 +130,9 @@ dependencies = [
[[package]]
name = "curve25519-dalek"
version = "4.1.2"
version = "4.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348"
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -140,7 +140,6 @@ dependencies = [
"digest",
"fiat-crypto",
"group",
"platforms",
"rand_core",
"rustc_version",
"subtle",
@@ -161,7 +160,7 @@ dependencies = [
[[package]]
name = "dalek-ff-group"
version = "0.4.1"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"crypto-bigint",
"curve25519-dalek",
@@ -198,7 +197,7 @@ dependencies = [
[[package]]
name = "ec-divisors"
version = "0.1.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"dalek-ff-group",
"group",
@@ -272,7 +271,7 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "flexible-transcript"
version = "0.3.2"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"blake2",
"digest",
@@ -285,7 +284,7 @@ dependencies = [
[[package]]
name = "full-chain-membership-proofs"
version = "0.1.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"ciphersuite",
"ec-divisors",
@@ -305,7 +304,7 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "generalized-bulletproofs"
version = "0.1.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"ciphersuite",
"flexible-transcript",
@@ -327,9 +326,9 @@ dependencies = [
[[package]]
name = "generic-array"
version = "1.0.0"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe739944a5406424e080edccb6add95685130b9f160d5407c639c7df0c5836b0"
checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a"
dependencies = [
"typenum",
]
@@ -368,7 +367,7 @@ dependencies = [
[[package]]
name = "helioselene"
version = "0.1.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"crypto-bigint",
"dalek-ff-group",
@@ -429,11 +428,11 @@ dependencies = [
[[package]]
name = "minimal-ed448"
version = "0.4.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"crypto-bigint",
"ff",
"generic-array 1.0.0",
"generic-array 1.1.0",
"group",
"rand_core",
"rustversion",
@@ -444,7 +443,7 @@ dependencies = [
[[package]]
name = "multiexp"
version = "0.4.0"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"ff",
"group",
@@ -481,12 +480,6 @@ dependencies = [
"spki",
]
[[package]]
name = "platforms"
version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7"
[[package]]
name = "primeorder"
version = "0.13.6"
@@ -498,9 +491,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.83"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
"unicode-ident",
]
@@ -613,7 +606,7 @@ dependencies = [
[[package]]
name = "std-shims"
version = "0.1.1"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#a11db094aac0384b9e62930f6f9f0d062d436897"
source = "git+https://github.com/kayabaNerve/fcmp-plus-plus#506fe19c9ea0381a65a55eb846deacfdfcff5b2a"
dependencies = [
"hashbrown",
"spin",
@@ -621,15 +614,15 @@ dependencies = [
[[package]]
name = "subtle"
version = "2.5.0"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
version = "2.0.66"
version = "2.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16"
dependencies = [
"proc-macro2",
"quote",
@@ -677,18 +670,18 @@ dependencies = [
[[package]]
name = "zerocopy"
version = "0.7.34"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.34"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
@@ -697,9 +690,9 @@ dependencies = [
[[package]]
name = "zeroize"
version = "1.7.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d"
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
dependencies = [
"zeroize_derive",
]

@@ -113,21 +113,23 @@ SeleneScalar selene_zero_scalar();
CResult<HeliosPoint> hash_grow_helios(HeliosPoint existing_hash,
uintptr_t offset,
HeliosScalar first_child_after_offset,
HeliosScalar existing_child_at_offset,
HeliosScalarSlice new_children);
CResult<HeliosPoint> hash_trim_helios(HeliosPoint existing_hash,
uintptr_t offset,
HeliosScalarSlice children);
HeliosScalarSlice children,
HeliosScalar child_to_grow_back);
CResult<SelenePoint> hash_grow_selene(SelenePoint existing_hash,
uintptr_t offset,
SeleneScalar first_child_after_offset,
SeleneScalar existing_child_at_offset,
SeleneScalarSlice new_children);
CResult<SelenePoint> hash_trim_selene(SelenePoint existing_hash,
uintptr_t offset,
SeleneScalarSlice children);
SeleneScalarSlice children,
SeleneScalar child_to_grow_back);
} // extern "C"

@@ -152,14 +152,14 @@ impl<T, E> CResult<T, E> {
pub extern "C" fn hash_grow_helios(
existing_hash: HeliosPoint,
offset: usize,
first_child_after_offset: HeliosScalar,
existing_child_at_offset: HeliosScalar,
new_children: HeliosScalarSlice,
) -> CResult<HeliosPoint, io::Error> {
let hash = hash_grow(
helios_generators(),
existing_hash,
offset,
first_child_after_offset,
existing_child_at_offset,
new_children.into(),
);
@@ -178,12 +178,14 @@ pub extern "C" fn hash_trim_helios(
existing_hash: HeliosPoint,
offset: usize,
children: HeliosScalarSlice,
child_to_grow_back: HeliosScalar,
) -> CResult<HeliosPoint, io::Error> {
let hash = hash_trim(
helios_generators(),
existing_hash,
offset,
children.into(),
child_to_grow_back,
);
if let Some(hash) = hash {
@@ -200,14 +202,14 @@ pub extern "C" fn hash_trim_helios(
pub extern "C" fn hash_grow_selene(
existing_hash: SelenePoint,
offset: usize,
first_child_after_offset: SeleneScalar,
existing_child_at_offset: SeleneScalar,
new_children: SeleneScalarSlice,
) -> CResult<SelenePoint, io::Error> {
let hash = hash_grow(
selene_generators(),
existing_hash,
offset,
first_child_after_offset,
existing_child_at_offset,
new_children.into(),
);
@@ -226,12 +228,14 @@ pub extern "C" fn hash_trim_selene(
existing_hash: SelenePoint,
offset: usize,
children: SeleneScalarSlice,
child_to_grow_back: SeleneScalar,
) -> CResult<SelenePoint, io::Error> {
let hash = hash_trim(
selene_generators(),
existing_hash,
offset,
children.into(),
child_to_grow_back,
);
if let Some(hash) = hash {

@@ -48,13 +48,13 @@ Selene::CycleScalar Selene::point_to_cycle_scalar(const Selene::Point &point) co
Helios::Point Helios::hash_grow(
const Helios::Point &existing_hash,
const std::size_t offset,
const Helios::Scalar &first_child_after_offset,
const Helios::Scalar &existing_child_at_offset,
const Helios::Chunk &new_children) const
{
fcmp_rust::CResult<Helios::Point> res = fcmp_rust::hash_grow_helios(
existing_hash,
offset,
first_child_after_offset,
existing_child_at_offset,
new_children);
if (res.err != 0) {
throw std::runtime_error("failed to hash grow");
@@ -65,12 +65,14 @@ Helios::Point Helios::hash_grow(
Helios::Point Helios::hash_trim(
const Helios::Point &existing_hash,
const std::size_t offset,
const Helios::Chunk &children) const
const Helios::Chunk &children,
const Helios::Scalar &child_to_grow_back) const
{
fcmp_rust::CResult<Helios::Point> res = fcmp_rust::hash_trim_helios(
existing_hash,
offset,
children);
children,
child_to_grow_back);
if (res.err != 0) {
throw std::runtime_error("failed to hash trim");
}
@@ -80,13 +82,13 @@ Helios::Point Helios::hash_trim(
Selene::Point Selene::hash_grow(
const Selene::Point &existing_hash,
const std::size_t offset,
const Selene::Scalar &first_child_after_offset,
const Selene::Scalar &existing_child_at_offset,
const Selene::Chunk &new_children) const
{
fcmp_rust::CResult<Selene::Point> res = fcmp_rust::hash_grow_selene(
existing_hash,
offset,
first_child_after_offset,
existing_child_at_offset,
new_children);
if (res.err != 0) {
throw std::runtime_error("failed to hash grow");
@@ -97,12 +99,14 @@ Selene::Point Selene::hash_grow(
Selene::Point Selene::hash_trim(
const Selene::Point &existing_hash,
const std::size_t offset,
const Selene::Chunk &children) const
const Selene::Chunk &children,
const Selene::Scalar &child_to_grow_back) const
{
fcmp_rust::CResult<Selene::Point> res = fcmp_rust::hash_trim_selene(
existing_hash,
offset,
children);
children,
child_to_grow_back);
if (res.err != 0) {
throw std::runtime_error("failed to hash trim");
}
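To make the new parameters concrete, a sketch of how the updated interface behaves as used by the trim logic above (this reading is inferred from the callers in this commit and is not itself part of it): hash_trim removes the supplied children, which currently occupy the chunk starting at `offset`, and optionally grows `child_to_grow_back` back into that position, while hash_grow's `existing_child_at_offset` is the child currently sitting at `offset` whose contribution is replaced by the new children. Note the untouched children before `offset` are never needed.
// Illustration only: a chunk whose scalars [a, b, c] were hashed into `parent`
// (assumes the tower_cycle header is included; a itself is never needed)
#include <vector>
using Selene = fcmp::tower_cycle::Selene;
void last_chunk_hash_sketch(const Selene &selene,
    const Selene::Point &parent,
    const Selene::Scalar &b, const Selene::Scalar &c,
    const Selene::Scalar &b_new, const Selene::Scalar &c_new)
{
    // 1. Trim the last child c: remove children starting at offset 2, grow nothing back -> hash of [a, b]
    std::vector<Selene::Scalar> trim_c{c};
    const auto hash_ab = selene.hash_trim(parent, 2,
        Selene::Chunk{trim_c.data(), trim_c.size()}, selene.zero_scalar());
    // 2. Trim b and c, then grow back a changed last child b' -> hash of [a, b']
    std::vector<Selene::Scalar> trim_bc{b, c};
    const auto hash_ab_new = selene.hash_trim(parent, 1,
        Selene::Chunk{trim_bc.data(), trim_bc.size()}, b_new);
    // 3. Only the last child changes: replace c with c' via hash_grow -> hash of [a, b, c']
    std::vector<Selene::Scalar> grow_c{c_new};
    const auto hash_abc_new = selene.hash_grow(parent, 2,
        /*existing_child_at_offset*/ c, Selene::Chunk{grow_c.data(), grow_c.size()});
    (void)hash_ab; (void)hash_ab_new; (void)hash_abc_new;
}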

@@ -80,13 +80,14 @@ public:
virtual typename C::Point hash_grow(
const typename C::Point &existing_hash,
const std::size_t offset,
const typename C::Scalar &first_child_after_offset,
const typename C::Scalar &existing_child_at_offset,
const typename C::Chunk &new_children) const = 0;
virtual typename C::Point hash_trim(
const typename C::Point &existing_hash,
const std::size_t offset,
const typename C::Chunk &children) const = 0;
const typename C::Chunk &children,
const typename C::Scalar &child_to_grow_back) const = 0;
virtual typename C::Scalar zero_scalar() const = 0;
@@ -124,13 +125,14 @@ public:
Point hash_grow(
const Point &existing_hash,
const std::size_t offset,
const Scalar &first_child_after_offset,
const Scalar &existing_child_at_offset,
const Chunk &new_children) const override;
Point hash_trim(
const Point &existing_hash,
const std::size_t offset,
const Chunk &children) const override;
const Chunk &children,
const Scalar &child_to_grow_back) const override;
Scalar zero_scalar() const override;
@@ -163,13 +165,14 @@ public:
Point hash_grow(
const Point &existing_hash,
const std::size_t offset,
const Scalar &first_child_after_offset,
const Scalar &existing_child_at_offset,
const Chunk &new_children) const override;
Point hash_trim(
const Point &existing_hash,
const std::size_t offset,
const Chunk &children) const override;
const Chunk &children,
const Scalar &child_to_grow_back) const override;
Scalar zero_scalar() const override;

@@ -230,520 +230,311 @@ void CurveTreesGlobalTree::extend_tree(const CurveTreesV1::TreeExtension &tree_e
}
}
//----------------------------------------------------------------------------------------------------------------------
// If we reached the new root, then clear all remaining elements in the tree above the root. Otherwise continue
template <typename C>
static bool handle_root_after_trim(const std::size_t num_parents,
const std::size_t c1_expected_n_layers,
const std::size_t c2_expected_n_layers,
CurveTreesGlobalTree::Layer<C> &parents_inout,
std::vector<CurveTreesGlobalTree::Layer<Helios>> &c1_layers_inout,
std::vector<CurveTreesGlobalTree::Layer<Selene>> &c2_layers_inout)
void CurveTreesGlobalTree::reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction)
{
// We're at the root if there should only be 1 element in the layer
if (num_parents > 1)
return false;
MDEBUG("We have encountered the root, clearing remaining elements in the tree");
// Clear all parents after root
while (parents_inout.size() > 1)
parents_inout.pop_back();
// Clear all remaining layers, if any
while (c1_layers_inout.size() > c1_expected_n_layers)
c1_layers_inout.pop_back();
while (c2_layers_inout.size() > c2_expected_n_layers)
c2_layers_inout.pop_back();
return true;
}
//----------------------------------------------------------------------------------------------------------------------
// Trims the child layer and caches values needed to update and trim the child's parent layer
// TODO: work on consolidating this function with the leaf layer logic and simplifying edge case handling
template <typename C_CHILD, typename C_PARENT>
static typename C_PARENT::Point trim_children(const C_CHILD &c_child,
const C_PARENT &c_parent,
const std::size_t parent_width,
const CurveTreesGlobalTree::Layer<C_PARENT> &parents,
const typename C_CHILD::Point &old_last_child_hash,
CurveTreesGlobalTree::Layer<C_CHILD> &children_inout,
std::size_t &last_parent_idx_inout,
typename C_PARENT::Point &old_last_parent_hash_out)
{
const std::size_t old_num_children = children_inout.size();
const std::size_t old_last_parent_idx = (old_num_children - 1) / parent_width;
const std::size_t old_last_offset = old_num_children % parent_width;
const std::size_t new_num_children = last_parent_idx_inout + 1;
const std::size_t new_last_parent_idx = (new_num_children - 1) / parent_width;
const std::size_t new_last_offset = new_num_children % parent_width;
CHECK_AND_ASSERT_THROW_MES(old_num_children >= new_num_children, "unexpected new_num_children");
last_parent_idx_inout = new_last_parent_idx;
old_last_parent_hash_out = parents[new_last_parent_idx];
MDEBUG("old_num_children: " << old_num_children <<
" , old_last_parent_idx: " << old_last_parent_idx <<
" , old_last_offset: " << old_last_offset <<
" , old_last_parent_hash_out: " << c_parent.to_string(old_last_parent_hash_out) <<
" , new_num_children: " << new_num_children <<
" , new_last_parent_idx: " << new_last_parent_idx <<
" , new_last_offset: " << new_last_offset);
// TODO: consolidate logic handling this function with the edge case at the end of this function
if (old_num_children == new_num_children)
{
// No new children means we only updated the last child, so use it to get the new last parent
const auto new_last_child = c_child.point_to_cycle_scalar(children_inout.back());
std::vector<typename C_PARENT::Scalar> new_child_v{new_last_child};
const auto &chunk = typename C_PARENT::Chunk{new_child_v.data(), new_child_v.size()};
const auto new_last_parent = c_parent.hash_grow(
/*existing_hash*/ old_last_parent_hash_out,
/*offset*/ (new_num_children - 1) % parent_width,
/*first_child_after_offset*/ c_child.point_to_cycle_scalar(old_last_child_hash),
/*children*/ chunk);
MDEBUG("New last parent using updated last child " << c_parent.to_string(new_last_parent));
return new_last_parent;
}
// Get the number of existing children in what will become the new last chunk after trimming
const std::size_t new_last_chunk_old_num_children = (old_last_parent_idx > new_last_parent_idx
|| old_last_offset == 0)
? parent_width
: old_last_offset;
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_children > new_last_offset,
"unexpected new_last_chunk_old_num_children");
// Get the number of children we'll be trimming from the new last chunk
const std::size_t trim_n_children_from_new_last_chunk = new_last_offset == 0
? 0 // it will remain full
: new_last_chunk_old_num_children - new_last_offset;
// We use hash trim if we're removing fewer elems in the last chunk than the number of elems remaining
const bool last_chunk_use_hash_trim = trim_n_children_from_new_last_chunk > 0
&& trim_n_children_from_new_last_chunk < new_last_offset;
MDEBUG("new_last_chunk_old_num_children: " << new_last_chunk_old_num_children <<
" , trim_n_children_from_new_last_chunk: " << trim_n_children_from_new_last_chunk <<
" , last_chunk_use_hash_trim: " << last_chunk_use_hash_trim);
// If we're using hash_trim for the last chunk, we'll need to collect the children we're removing
// TODO: use a separate function to handle last_chunk_use_hash_trim case
std::vector<typename C_CHILD::Point> new_last_chunk_children_to_trim;
if (last_chunk_use_hash_trim)
new_last_chunk_children_to_trim.reserve(trim_n_children_from_new_last_chunk);
// Trim the children starting at the back of the child layer
MDEBUG("Trimming " << (old_num_children - new_num_children) << " children");
while (children_inout.size() > new_num_children)
{
// If we're using hash_trim for the last chunk, collect children from the last chunk
if (last_chunk_use_hash_trim)
{
const std::size_t cur_last_parent_idx = (children_inout.size() - 1) / parent_width;
if (cur_last_parent_idx == new_last_parent_idx)
new_last_chunk_children_to_trim.emplace_back(std::move(children_inout.back()));
}
children_inout.pop_back();
}
CHECK_AND_ASSERT_THROW_MES(children_inout.size() == new_num_children, "unexpected new children");
// We're done trimming the children
// If we're not using hash_trim for the last chunk, and we will be trimming from the new last chunk, then
// we'll need to collect the new last chunk's remaining children for hash_grow
// TODO: use a separate function to handle last_chunk_remaining_children case
std::vector<typename C_CHILD::Point> last_chunk_remaining_children;
if (!last_chunk_use_hash_trim && new_last_offset > 0)
{
last_chunk_remaining_children.reserve(new_last_offset);
const std::size_t start_child_idx = new_last_parent_idx * parent_width;
CHECK_AND_ASSERT_THROW_MES((start_child_idx + new_last_offset) == children_inout.size(),
"unexpected start_child_idx");
for (std::size_t i = start_child_idx; i < children_inout.size(); ++i)
{
CHECK_AND_ASSERT_THROW_MES(i < children_inout.size(), "unexpected child idx");
last_chunk_remaining_children.push_back(children_inout[i]);
}
}
CHECK_AND_ASSERT_THROW_MES(!parents.empty(), "empty parent layer");
CHECK_AND_ASSERT_THROW_MES(new_last_parent_idx < parents.size(), "unexpected new_last_parent_idx");
// Set the new last chunk's parent hash
if (last_chunk_use_hash_trim)
{
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_children_to_trim.size() == trim_n_children_from_new_last_chunk,
"unexpected size of last child chunk");
// We need to reverse the order so it matches the order the children were initially inserted into the tree
std::reverse(new_last_chunk_children_to_trim.begin(), new_last_chunk_children_to_trim.end());
// Check if the last child changed
const auto &old_last_child = old_last_child_hash;
const auto &new_last_child = children_inout.back();
if (c_child.to_bytes(old_last_child) == c_child.to_bytes(new_last_child))
{
// If the last child didn't change, then simply trim the collected children
std::vector<typename C_PARENT::Scalar> child_scalars;
fcmp::tower_cycle::extend_scalars_from_cycle_points<C_CHILD, C_PARENT>(c_child,
new_last_chunk_children_to_trim,
child_scalars);
for (std::size_t i = 0; i < child_scalars.size(); ++i)
MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i]));
const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()};
const auto new_last_parent = c_parent.hash_trim(
old_last_parent_hash_out,
new_last_offset,
chunk);
MDEBUG("New last parent using simple hash_trim " << c_parent.to_string(new_last_parent));
return new_last_parent;
}
// The last child changed, so trim the old child, then grow the chunk by 1 with the new child
// TODO: implement prior_child_at_offset in hash_trim
new_last_chunk_children_to_trim.insert(new_last_chunk_children_to_trim.begin(), old_last_child);
std::vector<typename C_PARENT::Scalar> child_scalars;
fcmp::tower_cycle::extend_scalars_from_cycle_points<C_CHILD, C_PARENT>(c_child,
new_last_chunk_children_to_trim,
child_scalars);
for (std::size_t i = 0; i < child_scalars.size(); ++i)
MDEBUG("Trimming child " << c_parent.to_string(child_scalars[i]));
const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()};
CHECK_AND_ASSERT_THROW_MES(new_last_offset > 0, "new_last_offset must be >0");
auto new_last_parent = c_parent.hash_trim(
old_last_parent_hash_out,
new_last_offset - 1,
chunk);
std::vector<typename C_PARENT::Scalar> new_last_child_scalar{c_child.point_to_cycle_scalar(new_last_child)};
const auto &new_last_child_chunk = typename C_PARENT::Chunk{
new_last_child_scalar.data(),
new_last_child_scalar.size()};
MDEBUG("Growing with new child: " << c_parent.to_string(new_last_child_scalar[0]));
new_last_parent = c_parent.hash_grow(
new_last_parent,
new_last_offset - 1,
c_parent.zero_scalar(),
new_last_child_chunk);
MDEBUG("New last parent using hash_trim AND updated last child " << c_parent.to_string(new_last_parent));
return new_last_parent;
}
else if (!last_chunk_remaining_children.empty())
{
// If we have remaining children in the new last chunk, and some children were trimmed from the chunk, then
// use hash_grow to calculate the new hash
std::vector<typename C_PARENT::Scalar> child_scalars;
fcmp::tower_cycle::extend_scalars_from_cycle_points<C_CHILD, C_PARENT>(c_child,
last_chunk_remaining_children,
child_scalars);
const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()};
auto new_last_parent = c_parent.hash_grow(
/*existing_hash*/ c_parent.m_hash_init_point,
/*offset*/ 0,
/*first_child_after_offset*/ c_parent.zero_scalar(),
/*children*/ chunk);
MDEBUG("New last parent from re-growing last chunk " << c_parent.to_string(new_last_parent));
return new_last_parent;
}
// Check if the last child updated
const auto &old_last_child = old_last_child_hash;
const auto &new_last_child = children_inout.back();
const auto old_last_child_bytes = c_child.to_bytes(old_last_child);
const auto new_last_child_bytes = c_child.to_bytes(new_last_child);
if (old_last_child_bytes == new_last_child_bytes)
{
MDEBUG("The last child didn't update, nothing left to do");
return old_last_parent_hash_out;
}
// TODO: try to consolidate handling this edge case with the case of old_num_children == new_num_children
MDEBUG("The last child changed, updating last chunk parent hash");
CHECK_AND_ASSERT_THROW_MES(new_last_offset == 0, "unexpected new last offset");
const auto old_last_child_scalar = c_child.point_to_cycle_scalar(old_last_child);
auto new_last_child_scalar = c_child.point_to_cycle_scalar(new_last_child);
std::vector<typename C_PARENT::Scalar> child_scalars{std::move(new_last_child_scalar)};
const auto &chunk = typename C_PARENT::Chunk{child_scalars.data(), child_scalars.size()};
auto new_last_parent = c_parent.hash_grow(
/*existing_hash*/ old_last_parent_hash_out,
/*offset*/ parent_width - 1,
/*first_child_after_offset*/ old_last_child_scalar,
/*children*/ chunk);
MDEBUG("New last parent from updated last child " << c_parent.to_string(new_last_parent));
return new_last_parent;
}
//----------------------------------------------------------------------------------------------------------------------
void CurveTreesGlobalTree::trim_tree(const std::size_t new_num_leaves)
{
// TODO: consolidate below logic with trim_children above
CHECK_AND_ASSERT_THROW_MES(new_num_leaves >= CurveTreesV1::LEAF_TUPLE_SIZE,
"tree must have at least 1 leaf tuple in it");
CHECK_AND_ASSERT_THROW_MES(new_num_leaves % CurveTreesV1::LEAF_TUPLE_SIZE == 0,
"num leaves must be divisible by leaf tuple size");
auto &leaves_out = m_tree.leaves;
auto &c1_layers_out = m_tree.c1_layers;
auto &c2_layers_out = m_tree.c2_layers;
const std::size_t old_num_leaves = leaves_out.size() * CurveTreesV1::LEAF_TUPLE_SIZE;
CHECK_AND_ASSERT_THROW_MES(old_num_leaves > new_num_leaves, "unexpected new num leaves");
const std::size_t old_last_leaf_parent_idx = (old_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE)
/ m_curve_trees.m_leaf_layer_chunk_width;
const std::size_t old_last_leaf_offset = old_num_leaves % m_curve_trees.m_leaf_layer_chunk_width;
const std::size_t new_last_leaf_parent_idx = (new_num_leaves - CurveTreesV1::LEAF_TUPLE_SIZE)
/ m_curve_trees.m_leaf_layer_chunk_width;
const std::size_t new_last_leaf_offset = new_num_leaves % m_curve_trees.m_leaf_layer_chunk_width;
MDEBUG("old_num_leaves: " << old_num_leaves <<
", old_last_leaf_parent_idx: " << old_last_leaf_parent_idx <<
", old_last_leaf_offset: " << old_last_leaf_offset <<
", new_num_leaves: " << new_num_leaves <<
", new_last_leaf_parent_idx: " << new_last_leaf_parent_idx <<
", new_last_leaf_offset: " << new_last_leaf_offset);
// Get the number of existing leaves in what will become the new last chunk after trimming
const std::size_t new_last_chunk_old_num_leaves = (old_last_leaf_parent_idx > new_last_leaf_parent_idx
|| old_last_leaf_offset == 0)
? m_curve_trees.m_leaf_layer_chunk_width
: old_last_leaf_offset;
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_old_num_leaves > new_last_leaf_offset,
"unexpected last_chunk_old_num_leaves");
// Get the number of leaves we'll be trimming from the new last chunk
const std::size_t n_leaves_trim_from_new_last_chunk = new_last_leaf_offset == 0
? 0 // the last chunk will remain full
: new_last_chunk_old_num_leaves - new_last_leaf_offset;
// We use hash_trim if we're removing fewer elems from the last chunk than the number of elems remaining in it
const bool last_chunk_use_hash_trim = n_leaves_trim_from_new_last_chunk > 0
&& n_leaves_trim_from_new_last_chunk < new_last_leaf_offset;
MDEBUG("new_last_chunk_old_num_leaves: " << new_last_chunk_old_num_leaves <<
", n_leaves_trim_from_new_last_chunk: " << n_leaves_trim_from_new_last_chunk <<
", last_chunk_use_hash_trim: " << last_chunk_use_hash_trim);
// If we're using hash_trim for the last chunk, we'll need to collect the leaves we're trimming from that chunk
std::vector<Selene::Scalar> new_last_chunk_leaves_to_trim;
if (last_chunk_use_hash_trim)
new_last_chunk_leaves_to_trim.reserve(n_leaves_trim_from_new_last_chunk);
// Trim the leaves starting at the back of the leaf layer
const std::size_t new_num_leaf_tuples = new_num_leaves / CurveTreesV1::LEAF_TUPLE_SIZE;
while (leaves_out.size() > new_num_leaf_tuples)
{
// If we're using hash_trim for the last chunk, collect leaves from the last chunk to use later
if (last_chunk_use_hash_trim)
{
// Check if we're now trimming leaves from what will be the new last chunk
const std::size_t num_leaves_remaining = (leaves_out.size() - 1) * CurveTreesV1::LEAF_TUPLE_SIZE;
const std::size_t cur_last_leaf_parent_idx = num_leaves_remaining / m_curve_trees.m_leaf_layer_chunk_width;
if (cur_last_leaf_parent_idx == new_last_leaf_parent_idx)
{
// Add leaves in reverse order, because we're going to reverse the entire vector later on to get the
// correct trim order
new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().C_x));
new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().I_x));
new_last_chunk_leaves_to_trim.emplace_back(std::move(leaves_out.back().O_x));
}
}
leaves_out.pop_back();
}
CHECK_AND_ASSERT_THROW_MES(leaves_out.size() == new_num_leaf_tuples, "unexpected size of new leaves");
const std::size_t cur_last_leaf_parent_idx = ((leaves_out.size() - 1) * CurveTreesV1::LEAF_TUPLE_SIZE)
/ m_curve_trees.m_leaf_layer_chunk_width;
CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx == new_last_leaf_parent_idx, "unexpected last leaf parent idx");
// If we're not using hash_trim for the last chunk, and the new last chunk is not full already, we'll need to
// collect the existing leaves to get the hash using hash_grow
std::vector<Selene::Scalar> last_chunk_remaining_leaves;
if (!last_chunk_use_hash_trim && new_last_leaf_offset > 0)
{
last_chunk_remaining_leaves.reserve(new_last_leaf_offset);
const std::size_t start_leaf_idx = new_last_leaf_parent_idx * m_curve_trees.m_leaf_layer_chunk_width;
MDEBUG("start_leaf_idx: " << start_leaf_idx << ", leaves_out.size(): " << leaves_out.size());
CHECK_AND_ASSERT_THROW_MES((start_leaf_idx + new_last_leaf_offset) == new_num_leaves,
"unexpected start_leaf_idx");
for (std::size_t i = (start_leaf_idx / CurveTreesV1::LEAF_TUPLE_SIZE); i < leaves_out.size(); ++i)
{
CHECK_AND_ASSERT_THROW_MES(i < leaves_out.size(), "unexpected leaf idx");
last_chunk_remaining_leaves.push_back(leaves_out[i].O_x);
last_chunk_remaining_leaves.push_back(leaves_out[i].I_x);
last_chunk_remaining_leaves.push_back(leaves_out[i].C_x);
}
}
CHECK_AND_ASSERT_THROW_MES(!c2_layers_out.empty(), "empty leaf parent layer");
CHECK_AND_ASSERT_THROW_MES(cur_last_leaf_parent_idx < c2_layers_out[0].size(),
"unexpected cur_last_leaf_parent_idx");
// Set the new last leaf parent
Selene::Point old_last_c2_hash = std::move(c2_layers_out[0][cur_last_leaf_parent_idx]);
if (last_chunk_use_hash_trim)
{
CHECK_AND_ASSERT_THROW_MES(new_last_chunk_leaves_to_trim.size() == n_leaves_trim_from_new_last_chunk,
"unexpected size of last leaf chunk");
// Reverse the leaves so they're in the same order they were originally inserted into the tree
std::reverse(new_last_chunk_leaves_to_trim.begin(), new_last_chunk_leaves_to_trim.end());
const Selene::Chunk trim_leaves{new_last_chunk_leaves_to_trim.data(), new_last_chunk_leaves_to_trim.size()};
for (std::size_t i = 0; i < new_last_chunk_leaves_to_trim.size(); ++i)
MDEBUG("Trimming leaf " << m_curve_trees.m_c2.to_string(new_last_chunk_leaves_to_trim[i]));
auto new_last_leaf_parent = m_curve_trees.m_c2.hash_trim(
old_last_c2_hash,
new_last_leaf_offset,
trim_leaves);
MDEBUG("New hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent));
c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent);
}
else if (new_last_leaf_offset > 0)
{
for (std::size_t i = 0; i < last_chunk_remaining_leaves.size(); ++i)
MDEBUG("Hashing leaf " << m_curve_trees.m_c2.to_string(last_chunk_remaining_leaves[i]));
const auto &leaves = Selene::Chunk{last_chunk_remaining_leaves.data(), last_chunk_remaining_leaves.size()};
auto new_last_leaf_parent = m_curve_trees.m_c2.hash_grow(
/*existing_hash*/ m_curve_trees.m_c2.m_hash_init_point,
/*offset*/ 0,
/*first_child_after_offset*/ m_curve_trees.m_c2.zero_scalar(),
/*children*/ leaves);
MDEBUG("Result hash " << m_curve_trees.m_c2.to_string(new_last_leaf_parent));
c2_layers_out[0][cur_last_leaf_parent_idx] = std::move(new_last_leaf_parent);
}
if (handle_root_after_trim<Selene>(
/*num_parents*/ cur_last_leaf_parent_idx + 1,
/*c1_expected_n_layers*/ 0,
/*c2_expected_n_layers*/ 1,
/*parents_inout*/ c2_layers_out[0],
/*c1_layers_inout*/ c1_layers_out,
/*c2_layers_inout*/ c2_layers_out))
{
return;
}
// Go layer-by-layer starting by trimming the c2 layer we just set, and updating the parent layer hashes
bool trim_c1 = true;
std::size_t c1_idx = 0;
// Trim the leaves
const std::size_t init_num_leaves = m_tree.leaves.size() * m_curve_trees.LEAF_TUPLE_SIZE;
CHECK_AND_ASSERT_THROW_MES(init_num_leaves > tree_reduction.new_total_leaves, "expected fewer new total leaves");
CHECK_AND_ASSERT_THROW_MES((tree_reduction.new_total_leaves % m_curve_trees.LEAF_TUPLE_SIZE) == 0,
"unexpected new total leaves");
const std::size_t new_total_leaf_tuples = tree_reduction.new_total_leaves / m_curve_trees.LEAF_TUPLE_SIZE;
while (m_tree.leaves.size() > new_total_leaf_tuples)
m_tree.leaves.pop_back();
// Trim the layers
const auto &c2_layer_reductions = tree_reduction.c2_layer_reductions;
const auto &c1_layer_reductions = tree_reduction.c1_layer_reductions;
CHECK_AND_ASSERT_THROW_MES(!c2_layer_reductions.empty(), "empty c2 layer reductions");
bool use_c2 = true;
std::size_t c2_idx = 0;
std::size_t last_parent_idx = cur_last_leaf_parent_idx;
Helios::Point old_last_c1_hash;
for (std::size_t i = 0; i < (c1_layers_out.size() + c2_layers_out.size()); ++i)
std::size_t c1_idx = 0;
for (std::size_t i = 0; i < (c2_layer_reductions.size() + c1_layer_reductions.size()); ++i)
{
MDEBUG("Trimming layer " << i);
CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layers_out.size(), "unexpected c1 layer");
CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layers_out.size(), "unexpected c2 layer");
auto &c1_layer_out = c1_layers_out[c1_idx];
auto &c2_layer_out = c2_layers_out[c2_idx];
if (trim_c1)
// TODO: template below if statement
if (use_c2)
{
// TODO: fewer params
auto new_last_parent = trim_children(m_curve_trees.m_c2,
m_curve_trees.m_c1,
m_curve_trees.m_c1_width,
c1_layer_out,
old_last_c2_hash,
c2_layer_out,
last_parent_idx,
old_last_c1_hash);
CHECK_AND_ASSERT_THROW_MES(c2_idx < c2_layer_reductions.size(), "unexpected c2 layer reduction");
const auto &c2_reduction = c2_layer_reductions[c2_idx];
// Update the last parent
c1_layer_out[last_parent_idx] = std::move(new_last_parent);
CHECK_AND_ASSERT_THROW_MES(c2_idx < m_tree.c2_layers.size(), "missing c2 layer");
auto &c2_inout = m_tree.c2_layers[c2_idx];
if (handle_root_after_trim<Helios>(last_parent_idx + 1,
c1_idx + 1,
c2_idx + 1,
c1_layer_out,
c1_layers_out,
c2_layers_out))
CHECK_AND_ASSERT_THROW_MES(c2_reduction.new_total_parents <= c2_inout.size(),
"unexpected c2 new total parents");
c2_inout.resize(c2_reduction.new_total_parents);
c2_inout.shrink_to_fit();
// We updated the last hash
if (c2_reduction.update_existing_last_hash)
{
return;
c2_inout.back() = c2_reduction.new_last_hash;
}
++c2_idx;
}
else
{
// TODO: fewer params
auto new_last_parent = trim_children(m_curve_trees.m_c1,
m_curve_trees.m_c2,
m_curve_trees.m_c2_width,
c2_layer_out,
old_last_c1_hash,
c1_layer_out,
last_parent_idx,
old_last_c2_hash);
CHECK_AND_ASSERT_THROW_MES(c1_idx < c1_layer_reductions.size(), "unexpected c1 layer reduction");
const auto &c1_reduction = c1_layer_reductions[c1_idx];
// Update the last parent
c2_layer_out[last_parent_idx] = std::move(new_last_parent);
CHECK_AND_ASSERT_THROW_MES(c1_idx < m_tree.c1_layers.size(), "missing c1 layer");
auto &c1_inout = m_tree.c1_layers[c1_idx];
if (handle_root_after_trim<Selene>(last_parent_idx + 1,
c1_idx + 1,
c2_idx + 1,
c2_layer_out,
c1_layers_out,
c2_layers_out))
CHECK_AND_ASSERT_THROW_MES(c1_reduction.new_total_parents <= c1_inout.size(),
"unexpected c1 new total parents");
c1_inout.resize(c1_reduction.new_total_parents);
c1_inout.shrink_to_fit();
// We updated the last hash
if (c1_reduction.update_existing_last_hash)
{
return;
c1_inout.back() = c1_reduction.new_last_hash;
}
++c1_idx;
}
trim_c1 = !trim_c1;
use_c2 = !use_c2;
}
// Delete remaining layers
m_tree.c1_layers.resize(c1_layer_reductions.size());
m_tree.c2_layers.resize(c2_layer_reductions.size());
m_tree.c1_layers.shrink_to_fit();
m_tree.c2_layers.shrink_to_fit();
}
//----------------------------------------------------------------------------------------------------------------------
bool CurveTreesGlobalTree::audit_tree()
template<typename C_CHILD, typename C_PARENT>
static std::vector<typename C_PARENT::Scalar> get_last_chunk_children_to_trim(const C_CHILD &c_child,
const fcmp::curve_trees::TrimLayerInstructions &trim_instructions,
const CurveTreesGlobalTree::Layer<C_CHILD> &child_layer)
{
std::vector<typename C_PARENT::Scalar> children_to_trim_out;
const std::size_t new_total_children = trim_instructions.new_total_children;
const std::size_t old_total_children = trim_instructions.old_total_children;
const std::size_t new_total_parents = trim_instructions.new_total_parents;
const std::size_t parent_chunk_width = trim_instructions.parent_chunk_width;
const std::size_t new_offset = trim_instructions.new_offset;
CHECK_AND_ASSERT_THROW_MES(new_total_children > 0, "expected some new children");
CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset");
CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents");
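// Worked example (illustrative values, not from the surrounding code): suppose the trim instructions carry
// parent_chunk_width = 3, old_total_children = 8, new_total_children = 7, new_total_parents = 3, new_offset = 1.
// - need_last_chunk_children_to_trim: start idx = ((3 - 1) * 3) + 1 = 7, so only child 7 is collected; it's the
//   child being trimmed out of the last chunk via hash_trim.
// - need_last_chunk_remaining_children: start idx = 7 - 1 = 6, so only child 6 is collected; it's the child
//   remaining in the new last chunk, used to re-grow that chunk's parent via hash_grow.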
if (trim_instructions.need_last_chunk_children_to_trim)
{
std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset;
MDEBUG("Start trim from idx: " << idx);
do
{
// TODO: consolidate do while inner logic with below
CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high");
const auto &child_point = child_layer[idx];
auto child_scalar = c_child.point_to_cycle_scalar(child_point);
children_to_trim_out.push_back(std::move(child_scalar));
++idx;
}
while ((idx < old_total_children) && (idx % parent_chunk_width != 0));
}
else if (trim_instructions.need_last_chunk_remaining_children && new_offset > 0)
{
std::size_t idx = new_total_children - new_offset;
MDEBUG("Start grow remaining from idx: " << idx);
do
{
CHECK_AND_ASSERT_THROW_MES(child_layer.size() > idx, "idx too high");
const auto &child_point = child_layer[idx];
auto child_scalar = c_child.point_to_cycle_scalar(child_point);
children_to_trim_out.push_back(std::move(child_scalar));
++idx;
}
while ((idx < new_total_children) && (idx % parent_chunk_width != 0));
}
return children_to_trim_out;
}
//----------------------------------------------------------------------------------------------------------------------
// TODO: template
CurveTreesV1::LastChunkChildrenToTrim CurveTreesGlobalTree::get_all_last_chunk_children_to_trim(
const std::vector<fcmp::curve_trees::TrimLayerInstructions> &trim_instructions)
{
CurveTreesV1::LastChunkChildrenToTrim all_children_to_trim;
// Leaf layer
CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions");
const auto &trim_leaf_layer_instructions = trim_instructions[0];
const std::size_t new_total_children = trim_leaf_layer_instructions.new_total_children;
const std::size_t old_total_children = trim_leaf_layer_instructions.old_total_children;
const std::size_t new_total_parents = trim_leaf_layer_instructions.new_total_parents;
const std::size_t parent_chunk_width = trim_leaf_layer_instructions.parent_chunk_width;
const std::size_t new_offset = trim_leaf_layer_instructions.new_offset;
CHECK_AND_ASSERT_THROW_MES(new_total_children >= CurveTreesV1::LEAF_TUPLE_SIZE, "expected some new leaves");
CHECK_AND_ASSERT_THROW_MES(new_total_children >= new_offset, "expected more children than offset");
CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "expected some new parents");
std::vector<Selene::Scalar> leaves_to_trim;
// TODO: separate function
// TODO: calculate starting indexes in trim instructions, perhaps calculate end indexes also
if (trim_leaf_layer_instructions.need_last_chunk_children_to_trim)
{
std::size_t idx = ((new_total_parents - 1) * parent_chunk_width) + new_offset;
MDEBUG("Start trim from idx: " << idx);
do
{
CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisible by leaf tuple size");
const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE;
CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high");
const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx];
leaves_to_trim.push_back(leaf_tuple.O_x);
leaves_to_trim.push_back(leaf_tuple.I_x);
leaves_to_trim.push_back(leaf_tuple.C_x);
idx += CurveTreesV1::LEAF_TUPLE_SIZE;
}
while ((idx < old_total_children) && (idx % parent_chunk_width != 0));
}
else if (trim_leaf_layer_instructions.need_last_chunk_remaining_children && new_offset > 0)
{
std::size_t idx = new_total_children - new_offset;
do
{
CHECK_AND_ASSERT_THROW_MES(idx % CurveTreesV1::LEAF_TUPLE_SIZE == 0, "expected divisible by leaf tuple size");
const std::size_t leaf_tuple_idx = idx / CurveTreesV1::LEAF_TUPLE_SIZE;
CHECK_AND_ASSERT_THROW_MES(m_tree.leaves.size() > leaf_tuple_idx, "leaf_tuple_idx too high");
const auto &leaf_tuple = m_tree.leaves[leaf_tuple_idx];
leaves_to_trim.push_back(leaf_tuple.O_x);
leaves_to_trim.push_back(leaf_tuple.I_x);
leaves_to_trim.push_back(leaf_tuple.C_x);
idx += CurveTreesV1::LEAF_TUPLE_SIZE;
}
while ((idx < new_total_children) && (idx % parent_chunk_width != 0));
}
all_children_to_trim.c2_children.emplace_back(std::move(leaves_to_trim));
bool parent_is_c2 = false;
std::size_t c1_idx = 0;
std::size_t c2_idx = 0;
for (std::size_t i = 1; i < trim_instructions.size(); ++i)
{
const auto &trim_layer_instructions = trim_instructions[i];
if (parent_is_c2)
{
CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high");
auto children_to_trim = get_last_chunk_children_to_trim<Helios, Selene>(
m_curve_trees.m_c1,
trim_layer_instructions,
m_tree.c1_layers[c1_idx]);
all_children_to_trim.c2_children.emplace_back(std::move(children_to_trim));
++c1_idx;
}
else
{
CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high");
auto children_to_trim = get_last_chunk_children_to_trim<Selene, Helios>(
m_curve_trees.m_c2,
trim_layer_instructions,
m_tree.c2_layers[c2_idx]);
all_children_to_trim.c1_children.emplace_back(std::move(children_to_trim));
++c2_idx;
}
parent_is_c2 = !parent_is_c2;
}
return all_children_to_trim;
}
//----------------------------------------------------------------------------------------------------------------------
CurveTreesV1::LastHashes CurveTreesGlobalTree::get_last_hashes_to_trim(
const std::vector<fcmp::curve_trees::TrimLayerInstructions> &trim_instructions) const
{
CurveTreesV1::LastHashes last_hashes;
CHECK_AND_ASSERT_THROW_MES(!trim_instructions.empty(), "no instructions");
bool parent_is_c2 = true;
std::size_t c1_idx = 0;
std::size_t c2_idx = 0;
for (const auto &trim_layer_instructions : trim_instructions)
{
const std::size_t new_total_parents = trim_layer_instructions.new_total_parents;
CHECK_AND_ASSERT_THROW_MES(new_total_parents > 0, "no new parents");
if (parent_is_c2)
{
CHECK_AND_ASSERT_THROW_MES(m_tree.c2_layers.size() > c2_idx, "c2_idx too high");
const auto &c2_layer = m_tree.c2_layers[c2_idx];
CHECK_AND_ASSERT_THROW_MES(c2_layer.size() >= new_total_parents, "not enough c2 parents");
last_hashes.c2_last_hashes.push_back(c2_layer[new_total_parents - 1]);
++c2_idx;
}
else
{
CHECK_AND_ASSERT_THROW_MES(m_tree.c1_layers.size() > c1_idx, "c1_idx too high");
const auto &c1_layer = m_tree.c1_layers[c1_idx];
CHECK_AND_ASSERT_THROW_MES(c1_layer.size() >= new_total_parents, "not enough c1 parents");
last_hashes.c1_last_hashes.push_back(c1_layer[new_total_parents - 1]);
++c1_idx;
}
parent_is_c2 = !parent_is_c2;
}
return last_hashes;
}
//----------------------------------------------------------------------------------------------------------------------
void CurveTreesGlobalTree::trim_tree(const std::size_t trim_n_leaf_tuples)
{
const std::size_t old_n_leaf_tuples = this->get_num_leaf_tuples();
MDEBUG(old_n_leaf_tuples << " leaves in the tree, trimming " << trim_n_leaf_tuples);
// Get trim instructions
const auto trim_instructions = m_curve_trees.get_trim_instructions(old_n_leaf_tuples, trim_n_leaf_tuples);
MDEBUG("Acquired trim instructions for " << trim_instructions.size() << " layers");
// Do initial tree reads
const auto last_chunk_children_to_trim = this->get_all_last_chunk_children_to_trim(trim_instructions);
const auto last_hashes_to_trim = this->get_last_hashes_to_trim(trim_instructions);
// Get the new hashes, wrapped in a simple struct we can use to trim the tree
const auto tree_reduction = m_curve_trees.get_tree_reduction(
trim_instructions,
last_chunk_children_to_trim,
last_hashes_to_trim);
// Use tree reduction to trim tree
this->reduce_tree(tree_reduction);
const std::size_t new_n_leaf_tuples = this->get_num_leaf_tuples();
CHECK_AND_ASSERT_THROW_MES((new_n_leaf_tuples + trim_n_leaf_tuples) == old_n_leaf_tuples,
"unexpected num leaves after trim");
}
//----------------------------------------------------------------------------------------------------------------------
bool CurveTreesGlobalTree::audit_tree(const std::size_t expected_n_leaf_tuples)
{
MDEBUG("Auditing global tree");
@ -752,6 +543,8 @@ bool CurveTreesGlobalTree::audit_tree()
const auto &c2_layers = m_tree.c2_layers;
CHECK_AND_ASSERT_MES(!leaves.empty(), false, "must have at least 1 leaf in tree");
CHECK_AND_ASSERT_MES(leaves.size() == expected_n_leaf_tuples, false, "unexpected num leaves");
CHECK_AND_ASSERT_MES(!c2_layers.empty(), false, "must have at least 1 c2 layer in tree");
CHECK_AND_ASSERT_MES(c2_layers.size() == c1_layers.size() || c2_layers.size() == (c1_layers.size() + 1),
false, "unexpected mismatch of c2 and c1 layers");
@ -983,7 +776,7 @@ void CurveTreesGlobalTree::log_tree()
//----------------------------------------------------------------------------------------------------------------------
// Test helpers
//----------------------------------------------------------------------------------------------------------------------
const std::vector<CurveTreesV1::LeafTuple> generate_random_leaves(const CurveTreesV1 &curve_trees,
static const std::vector<CurveTreesV1::LeafTuple> generate_random_leaves(const CurveTreesV1 &curve_trees,
const std::size_t num_leaves)
{
std::vector<CurveTreesV1::LeafTuple> tuples;
@ -1005,9 +798,18 @@ const std::vector<CurveTreesV1::LeafTuple> generate_random_leaves(const CurveTre
return tuples;
}
//----------------------------------------------------------------------------------------------------------------------
static const Selene::Scalar generate_random_selene_scalar()
{
crypto::secret_key s;
crypto::public_key S;
crypto::generate_keys(S, s, s, false);
return fcmp::tower_cycle::ed_25519_point_to_scalar(S);
}
//----------------------------------------------------------------------------------------------------------------------
static bool grow_tree(CurveTreesV1 &curve_trees,
CurveTreesGlobalTree &global_tree,
const std::size_t num_leaves)
const std::size_t new_n_leaf_tuples)
{
// Do initial tree reads
const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples();
@ -1019,7 +821,7 @@ static bool grow_tree(CurveTreesV1 &curve_trees,
// - The tree extension includes all elements we'll need to add to the existing tree when adding the new leaves
const auto tree_extension = curve_trees.get_tree_extension(old_n_leaf_tuples,
last_hashes,
generate_random_leaves(curve_trees, num_leaves));
generate_random_leaves(curve_trees, new_n_leaf_tuples));
global_tree.log_tree_extension(tree_extension);
@ -1029,7 +831,8 @@ static bool grow_tree(CurveTreesV1 &curve_trees,
global_tree.log_tree();
// Validate tree structure and all hashes
return global_tree.audit_tree();
const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples + new_n_leaf_tuples;
return global_tree.audit_tree(expected_n_leaf_tuples);
}
//----------------------------------------------------------------------------------------------------------------------
static bool grow_tree_in_memory(const std::size_t init_leaves,
@ -1059,25 +862,27 @@ static bool grow_tree_in_memory(const std::size_t init_leaves,
return true;
}
//----------------------------------------------------------------------------------------------------------------------
static bool trim_tree_in_memory(const std::size_t init_leaves,
const std::size_t trim_leaves,
static bool trim_tree_in_memory(const std::size_t trim_n_leaf_tuples,
CurveTreesGlobalTree &&global_tree)
{
// Trim the global tree by `trim_leaves`
LOG_PRINT_L1("Trimming " << trim_leaves << " leaves from tree");
const std::size_t old_n_leaf_tuples = global_tree.get_num_leaf_tuples();
CHECK_AND_ASSERT_THROW_MES(old_n_leaf_tuples > trim_n_leaf_tuples, "cannot trim more leaves than exist");
CHECK_AND_ASSERT_THROW_MES(trim_n_leaf_tuples > 0, "must be trimming some leaves");
CHECK_AND_ASSERT_MES(init_leaves > trim_leaves, false, "trimming too many leaves");
const std::size_t new_num_leaves = init_leaves - trim_leaves;
global_tree.trim_tree(new_num_leaves * CurveTreesV1::LEAF_TUPLE_SIZE);
// Trim the global tree by `trim_n_leaf_tuples`
LOG_PRINT_L1("Trimming " << trim_n_leaf_tuples << " leaf tuples from tree");
MDEBUG("Finished trimming " << trim_leaves << " leaves from tree");
global_tree.trim_tree(trim_n_leaf_tuples);
MDEBUG("Finished trimming " << trim_n_leaf_tuples << " leaf tuples from tree");
global_tree.log_tree();
bool res = global_tree.audit_tree();
const std::size_t expected_n_leaf_tuples = old_n_leaf_tuples - trim_n_leaf_tuples;
bool res = global_tree.audit_tree(expected_n_leaf_tuples);
CHECK_AND_ASSERT_MES(res, false, "failed to trim tree in memory");
MDEBUG("Successfully trimmed " << trim_leaves << " leaves in memory");
MDEBUG("Successfully trimmed " << trim_n_leaf_tuples << " leaves in memory");
return true;
}
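//----------------------------------------------------------------------------------------------------------------------
// Illustrative sketch of how the helpers above can be composed; it assumes CurveTreesGlobalTree is constructible
// from a CurveTreesV1 and copyable, so every trim starts from the same freshly grown tree
static bool grow_then_trim_in_memory_sketch(CurveTreesV1 &curve_trees, const std::size_t init_leaves)
{
// Grow the in-memory tree to init_leaves leaf tuples and audit it
CurveTreesGlobalTree global_tree(curve_trees);
CHECK_AND_ASSERT_MES(grow_tree(curve_trees, global_tree, init_leaves), false, "failed to grow tree");
// Trim every possible number of leaf tuples, each time from a fresh copy of the grown tree
for (std::size_t trim_n_leaf_tuples = 1; trim_n_leaf_tuples < init_leaves; ++trim_n_leaf_tuples)
{
CurveTreesGlobalTree tree_copy(global_tree);
CHECK_AND_ASSERT_MES(trim_tree_in_memory(trim_n_leaf_tuples, std::move(tree_copy)),
false, "failed to trim tree in memory");
}
return true;
}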
//----------------------------------------------------------------------------------------------------------------------
@ -1116,12 +921,9 @@ TEST(curve_trees, grow_tree)
Helios helios;
Selene selene;
// Constant for how deep we want the tree
const std::size_t TEST_N_LAYERS = 4;
// Use lower values for chunk width than prod so that we can quickly test a many-layer deep tree
const std::size_t helios_chunk_width = 3;
const std::size_t selene_chunk_width = 2;
static const std::size_t helios_chunk_width = 3;
static const std::size_t selene_chunk_width = 2;
static_assert(helios_chunk_width > 1, "helios width must be > 1");
static_assert(selene_chunk_width > 1, "selene width must be > 1");
@ -1129,6 +931,9 @@ TEST(curve_trees, grow_tree)
LOG_PRINT_L1("Test grow tree with helios chunk width " << helios_chunk_width
<< ", selene chunk width " << selene_chunk_width);
// Constant for how deep we want the tree
static const std::size_t TEST_N_LAYERS = 4;
// Number of leaves needed for the tree to reach TEST_N_LAYERS layers
std::size_t leaves_needed_for_n_layers = selene_chunk_width;
for (std::size_t i = 1; i < TEST_N_LAYERS; ++i)
@ -1153,7 +958,7 @@ TEST(curve_trees, grow_tree)
{
// TODO: init tree once, then extend a copy of that tree
// Then extend the tree with ext_leaves
for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) < leaves_needed_for_n_layers; ++ext_leaves)
for (std::size_t ext_leaves = 1; (init_leaves + ext_leaves) <= leaves_needed_for_n_layers; ++ext_leaves)
{
ASSERT_TRUE(grow_tree_in_memory(init_leaves, ext_leaves, curve_trees));
ASSERT_TRUE(grow_tree_db(init_leaves, ext_leaves, curve_trees, test_db));

@ -35,14 +35,6 @@ using Helios = fcmp::curve_trees::Helios;
using Selene = fcmp::curve_trees::Selene;
using CurveTreesV1 = fcmp::curve_trees::CurveTreesV1;
const std::vector<CurveTreesV1::LeafTuple> generate_random_leaves(const CurveTreesV1 &curve_trees,
const std::size_t num_leaves);
// https://github.com/kayabaNerve/fcmp-plus-plus/blob
// /b2742e86f3d18155fd34dd1ed69cb8f79b900fce/crypto/fcmps/src/tests.rs#L81-L82
const std::size_t HELIOS_CHUNK_WIDTH = 38;
const std::size_t SELENE_CHUNK_WIDTH = 18;
// Helper class to read/write a global tree in memory. It's only used in testing because normally the tree isn't kept
// in memory (it's stored in the db)
class CurveTreesGlobalTree
@ -74,17 +66,30 @@ public:
// Use the tree extension to extend the in-memory tree
void extend_tree(const CurveTreesV1::TreeExtension &tree_extension);
// Trim tree to the provided number of leaves
void trim_tree(const std::size_t new_num_leaves);
// Use the tree reduction to reduce the in-memory tree
void reduce_tree(const CurveTreesV1::TreeReduction &tree_reduction);
// Trim the provided number of leaf tuples from the tree
void trim_tree(const std::size_t trim_n_leaf_tuples);
// Validate the in-memory tree by re-hashing every layer, starting from root and working down to leaf layer
bool audit_tree();
bool audit_tree(const std::size_t expected_n_leaf_tuples);
// logging helpers
void log_last_hashes(const CurveTreesV1::LastHashes &last_hashes);
void log_tree_extension(const CurveTreesV1::TreeExtension &tree_extension);
void log_tree();
// Read the in-memory tree and get data from what will be the last chunks after trimming the tree to the provided
// number of leaves
// - This function is useful to collect all tree data necessary to perform the actual trim operation
// - This function can return elems from each last chunk that will need to be trimmed
CurveTreesV1::LastHashes get_last_hashes_to_trim(
const std::vector<fcmp::curve_trees::TrimLayerInstructions> &trim_instructions) const;
CurveTreesV1::LastChunkChildrenToTrim get_all_last_chunk_children_to_trim(
const std::vector<fcmp::curve_trees::TrimLayerInstructions> &trim_instructions);
private:
CurveTreesV1 &m_curve_trees;
Tree m_tree = Tree{};