mirror of https://codeberg.org/shufflecake/shufflecake-c.git (synced 2026-01-06 02:55:28 -05:00)

Make it more generic

parent 2e96ec2374, commit 32c1b16e12
1 changed file with 104 additions and 50 deletions
@@ -29,6 +29,12 @@
 #include "sflc_lite.h"
 
+
+/*
+ *------------------
+ * Helpers
+ *------------------
+ */
 
 /**
  * Encrypt/decrypt exactly one block, already encoded in the scatterlist.
  * The IV is constructed as the right-0-padded LE representation of the
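Note: the IV comment above is cut off in this view. Assuming the quantity encoded is the 64-bit physical block number handed to crypt_block_sg, a right-0-padded little-endian IV can be sketched in plain C as below; the 16-byte IV width and the build_iv helper are illustrative assumptions, not code from this commit.

	#include <stdint.h>
	#include <string.h>

	#define IV_SIZE 16   /* assumed IV width */

	/* Sketch: 64-bit value in LE byte order, zero-padded on the right to IV_SIZE. */
	static void build_iv(uint8_t iv[IV_SIZE], uint64_t pblk_num)
	{
		memset(iv, 0, IV_SIZE);
		for (int i = 0; i < 8; i++)
			iv[i] = (uint8_t)(pblk_num >> (8 * i));
	}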
@@ -69,14 +75,15 @@ static int crypt_block_sg(struct crypto_sync_skcipher *tfm, struct scatterlist *
  */
 
 typedef unsigned (*count_sgs_fn) (void *b, sector_t nblocks);
-typedef struct scatterlist* (*map_sg_fn) (struct scatterlist *sg, void *b, void *iter);
+typedef void (*iter_get_curr_fn) (void *b, void *iter, struct page **page, unsigned *size, unsigned *offset);
+typedef void (*iter_advance_fn) (void *b, void *iter, unsigned size);
 
-static int sgtable_setup(struct sg_table *sgt, struct scatterlist *prealloc_sg, unsigned nents, gfp_t gfp_flags)
+static int sgtable_setup(struct sg_table *sgt, struct scatterlist *prealloc_sg, unsigned nents, gfp_t gfp)
 {
 	int err = 0;
 
 	if (nents > 1) {
-		err = sg_alloc_table(sgt, nents, gfp_flags);
+		err = sg_alloc_table(sgt, nents, gfp);
 		if (err)
 			return err;
 	} else {
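The three callback types above are the point of the "more generic" refactor: count_sgs_fn reports how many scatterlist entries a source or destination needs for a given number of blocks, iter_get_curr_fn describes the current contiguous segment as a (page, size, offset) triple, and iter_advance_fn moves the caller's iterator forward by the bytes just consumed. A hypothetical backer, a single page holding whole blocks, could satisfy this contract as in the sketch below; the struct and function names are invented for illustration and assume kernel context with SFLITE_BLOCK_SIZE <= PAGE_SIZE.

	/* Illustrative only: a single-page source for the generic crypt loop. */
	struct one_page_src {
		struct page *page;
		unsigned off;               /* current offset inside the page */
	};

	static unsigned one_page_count_sgs(void *b, sector_t nblocks)
	{
		return nblocks;             /* one contiguous segment per block */
	}

	static void one_page_get_curr(void *b, void *iter, struct page **page,
	                              unsigned *size, unsigned *offset)
	{
		struct one_page_src *s = b;

		*page = s->page;
		*offset = s->off;
		*size = PAGE_SIZE - s->off; /* the generic mapper clamps this to the block size */
	}

	static void one_page_advance(void *b, void *iter, unsigned size)
	{
		struct one_page_src *s = b;

		s->off += size;
	}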
@@ -94,10 +101,44 @@ static void sgtable_teardown(struct sg_table *sgt)
 	sg_free_table(sgt);
 }
 
+static struct scatterlist* map_to_sg_generic(struct scatterlist *sg, void *b, void *iter,
+		iter_get_curr_fn iter_get_curr, iter_advance_fn iter_advance)
+{
+	struct scatterlist *last_sg;
+	unsigned left;
+
+	left = SFLITE_BLOCK_SIZE;
+	while (left) {
+		struct page *page;
+		unsigned size, offset;
+
+		// New iteration -> new last_sg
+		last_sg = sg;
+
+		// Compute page,size,offset of current segment
+		iter_get_curr(b, iter, &page, &size, &offset);
+		size = min(left, size);
+
+		// Set current sg
+		sg_set_page(sg, page, size, offset);
+
+		// Advance loop (also iterator of the caller's loop)
+		sg = sg_next(sg);
+		iter_advance(b, iter, size);
+		left -= size;
+	}
+
+	// Not sure if this is needed, given that we provide a crypt_len anyway TODO investigate
+	sg_mark_end(last_sg);
+
+	return sg;
+}
+
 static int crypt_blocks_generic(struct crypto_sync_skcipher *tfm,
 		void *src, void *src_iter, void *dst, void *dst_iter,
 		sector_t num_blocks, sector_t first_pblk_num, int rw, gfp_t gfp,
-		count_sgs_fn count_sgs, map_sg_fn map_sg)
+		count_sgs_fn count_sgs, iter_get_curr_fn iter_get_curr,
+		iter_advance_fn iter_advance)
 {
 	struct sg_table sgt_dst, sgt_src;
 	struct scatterlist prealloc_sg_dst, prealloc_sg_src;
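As a worked example of map_to_sg_generic (with an assumed 4096-byte SFLITE_BLOCK_SIZE): if the current segment has only 1024 bytes left, the first loop iteration maps those 1024 bytes into one scatterlist entry and advances the iterator; the second iteration maps the remaining 3072 bytes from the start of the next segment into a second entry. left then reaches 0, the last entry written is marked as the end of the list, and the returned sg pointer is where the caller will place the next block's entries.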
@@ -116,31 +157,29 @@ static int crypt_blocks_generic(struct crypto_sync_skcipher *tfm,
 	src_nents = count_sgs(src, num_blocks);
 	err = sgtable_setup(&sgt_src, &prealloc_sg_src, src_nents, gfp);
 	if (err) {
 		DMERR("Could not setup sg_table src");
 		sgtable_teardown(&sgt_src);
 		return err;
 	}
 	if (!is_inplace) {
 		dst_nents = count_sgs(dst, num_blocks);
 		err = sgtable_setup(&sgt_dst, &prealloc_sg_dst, dst_nents, gfp);
-		if (err) {
-			DMERR("Could not setup sg_table dst");
+		if (err)
 			goto out;
-		}
 	}
 	/* Loop initialisation */
 	next_sg_dst = NULL;
 	next_sg_src = sgt_src.sgl;
 	if (!is_inplace)
 		next_sg_dst = sgt_dst.sgl;
 	/* Loop: encrypt every block separately, with its own IV */
 	for (pblk_num = first_pblk_num; pblk_num < last_pblk_num; pblk_num++) {
-		/* The function map_sg also advances the iter */
+		/* The function map_to_sg_generic also advances the iter */
 		sg_src = next_sg_src;
-		next_sg_src = map_sg(sg_src, src, src_iter);
+		next_sg_src = map_to_sg_generic(sg_src, src, src_iter, iter_get_curr, iter_advance);
 		if (!is_inplace) {
 			sg_dst = next_sg_dst;
-			next_sg_dst = map_sg(sg_dst, dst, dst_iter);
+			next_sg_dst = map_to_sg_generic(sg_dst, dst, dst_iter, iter_get_curr, iter_advance);
 		}
 
 		err = crypt_block_sg(tfm, sg_src, *p_sg_dst, pblk_num, rw);
@@ -148,6 +187,8 @@ static int crypt_blocks_generic(struct crypto_sync_skcipher *tfm,
 			goto out;
 		}
 	}
 
+	// We should have gotten precisely to the end of the sg_table
+	WARN_ON(next_sg_src || next_sg_dst);
 
 out:
 	if (!is_inplace)
@@ -182,9 +223,40 @@ int sflite_crypt_block_page(struct crypto_sync_skcipher *tfm, struct page *src_p
 
 
 /*
- *---------------------------------------
+ *------------------
+ * Public function: bio enc/dec
+ *------------------
+ */
+
+static unsigned count_sgs_from_bio(void *buf, sector_t /*nblocks*/)
+{
+	struct bio *bio = buf;
+	struct bio_vec bvl;
+	struct bvec_iter iter;
+	unsigned remainder;
+	unsigned nents;
+
+	nents = 0;
+	remainder = 0;
+	/* Count how many blocks "touch" each bvec, including leftover from previous bvec */
+	bio_for_each_bvec(bvl, bio, iter) {
+		// First block of this bvec starts @remainder bytes behind
+		nents += DIV_ROUND_UP(bvl.bv_len + remainder, SFLITE_BLOCK_SIZE);
+
+		// Last block is only partially filled, and completed at the next bvec
+		unsigned last_filled = bvl.bv_len & (SFLITE_BLOCK_SIZE - 1);
+		remainder = SFLITE_BLOCK_SIZE - last_filled;
+	}
+
+	BUG_ON(remainder);
+	return nents;
+}
+
+
+/*
+ *-------------------
  * Public function: memory buffer enc/dec
- *---------------------------------------
+ *-------------------
  */
 
 static unsigned count_sgs_from_buf(void *buf, sector_t nblocks)
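The counting logic above yields one scatterlist entry per (block, bvec) intersection, carrying the partially filled tail of a block over into the next bvec. The standalone userspace sketch below illustrates the same idea; the 4096-byte block size, the segment lengths and the helper are assumptions for illustration, and the sketch tracks the carried byte count explicitly rather than reproducing the committed remainder bookkeeping.

	#include <stdio.h>

	#define BLK 4096u

	/* One entry per (block, segment) intersection; "carry" is how many bytes of
	 * the current block earlier segments already filled, i.e. how far behind
	 * this segment the block starts. */
	static unsigned count_entries(const unsigned *seg_len, unsigned nsegs)
	{
		unsigned carry = 0, nents = 0;

		for (unsigned i = 0; i < nsegs; i++) {
			nents += (seg_len[i] + carry + BLK - 1) / BLK;
			carry = (seg_len[i] + carry) % BLK;
		}
		return nents;
	}

	int main(void)
	{
		/* One 4096-byte block split as 1024 + 3072 bytes over two segments:
		 * the block touches both, so two scatterlist entries are needed. */
		unsigned segs[] = { 1024, 3072 };

		printf("%u\n", count_entries(segs, 2));   /* prints 2 */
		return 0;
	}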
@@ -194,50 +266,32 @@ static unsigned count_sgs_from_buf(void *buf, sector_t nblocks)
 	 * This is true coming from the FLUSH path, where the offset is 0. */
 	BUG_ON(offset_in_page(buf) & (SFLITE_BLOCK_SIZE - 1));
 
 	// No detection of contiguous pages
 	return nblocks * DIV_ROUND_UP(SFLITE_BLOCK_SIZE, PAGE_SIZE);
 }
 
-static struct scatterlist* map_sg_from_buf(struct scatterlist *sg, void *buf, void *iter)
+static void iter_get_curr_from_buf(void *buf, void *iter, struct page **page, unsigned *size, unsigned *offset)
 {
 	unsigned long *iter_off = iter;
-	bool is_vmalloc = is_vmalloc_addr(buf);
-	void *cur_ptr;
-	struct page *cur_page;
-	unsigned cur_size, cur_off;
-	struct scatterlist *cur_sg, *last_sg;
-	unsigned left;
+	void *ptr = buf + *iter_off;
 
-	cur_sg = sg;
-	cur_ptr = buf + *iter_off;
-	cur_off = offset_in_page(cur_ptr);
-	left = SFLITE_BLOCK_SIZE;
-	while (left) {
-		// New iteration -> new last_sg
-		last_sg = cur_sg;
+	// Return values
+	*offset = offset_in_page(ptr);
+	*size = PAGE_SIZE - *offset;
+	if (is_vmalloc_addr(ptr))
+		*page = vmalloc_to_page(ptr);
+	else
+		*page = virt_to_page(ptr);
+	/* It's not ideal to call offset_in_page() and is_vmalloc_addr()
+	 * in a loop, but it's ok given that we're in the FLUSH path. */
 
-		// Compute current page,size,offset (offset is known)
-		if (is_vmalloc)
-			cur_page = vmalloc_to_page(cur_ptr);
-		else
-			cur_page = virt_to_page(cur_ptr);
-		cur_size = min(left, PAGE_SIZE - cur_off);
+	return;
+}
 
-		// Set current sg
-		sg_set_page(cur_sg, cur_page, cur_size, cur_off);
-
-		// Advance loop
-		cur_sg = sg_next(cur_sg);
-		cur_ptr += cur_size;
-		cur_off = 0;
-		left -= cur_size;
-	}
-
-	// Not sure if this is needed, given that we provide a crypt_len anyway TODO investigate
-	sg_mark_end(last_sg);
-
-	// Advance iterator of the "outer" loop
-	*iter_off += SFLITE_BLOCK_SIZE;
-	return cur_sg;
+static void iter_advance_from_buf(void *b, void *iter, unsigned size)
+{
+	unsigned long *iter_off = iter;
+	*iter_off += size;
 }
 
 /* Encrypt-decrypt consecutive blocks in a memory buffer (can be vmalloc'ed) */
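Concretely (assuming a page-aligned buffer and SFLITE_BLOCK_SIZE equal to PAGE_SIZE): starting from *iter_off == 0, iter_get_curr_from_buf reports the page backing buf with offset 0 and size PAGE_SIZE; map_to_sg_generic clamps that to the block size, fills a single scatterlist entry, and calls iter_advance_from_buf, which bumps *iter_off by the block size so that the next block is looked up on the following page.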
@@ -250,7 +304,7 @@ int sflite_crypt_blocks_buf(struct crypto_sync_skcipher *tfm, void *src_buf, voi
 	src_off = dst_off = 0;
 	return crypt_blocks_generic(tfm, src_buf, (void *)&src_off, dst_buf, (void *)&dst_off,
 			num_blocks, first_pblk_num, rw, gfp,
-			count_sgs_from_buf, map_sg_from_buf);
+			count_sgs_from_buf, iter_get_curr_from_buf, iter_advance_from_buf);
 }
 
 