Write generic crypt_blocks

This commit is contained in:
toninov 2025-10-26 11:49:39 +01:00
parent f6c15bf871
commit b399ca8c3d
No known key found for this signature in database

View file

@ -31,12 +31,11 @@
/**
* Encrypt/decrypt exactly one block, already encoded in the scatterlist.
* All other crypto functions reduce to this one.
* The IV is constructed as the right-0-padded LE representation of the
* physical block number, which is exactly what dm-crypt does when using the
* IV mode "plain64".
*/
static int crypt_sg(struct crypto_sync_skcipher *tfm, struct scatterlist *src,
static int crypt_block_sg(struct crypto_sync_skcipher *tfm, struct scatterlist *src,
struct scatterlist *dst, u64 pblk_num, int rw)
{
u8 iv[SFLITE_XTS_IVLEN];
@ -61,6 +60,109 @@ static int crypt_sg(struct crypto_sync_skcipher *tfm, struct scatterlist *src,
return err;
}
/**
 * "Generic" function for encrypting multiple blocks. Memory buffer can be
 * either a single pointer (vmalloc or virtual, both work), or a bio's bvec.
 * Depending on which of the two is needed, the appropriate function pointers
 * need to be passed.
 *
 * count_sgs_fn: given buffer @b, return how many scatterlist entries are
 * needed to cover it in @block_size-sized chunks (used to size the sg_table).
 *
 * map_sg_fn: fill @sg with the next @size bytes of buffer @b and advance
 * @iter past them; returns the scatterlist entry to use for the following
 * block. NOTE(review): presumably @iter's concrete type depends on the
 * buffer flavor (plain pointer vs. bvec iterator) — confirm against the
 * callers supplying these callbacks.
 */
typedef unsigned (*count_sgs_fn) (void *b, unsigned block_size);
typedef struct scatterlist* (*map_sg_fn) (struct scatterlist *sg, void *b, void *iter, unsigned size);
/*
 * Prepare @sgt to hold @nents entries. For the common single-entry case the
 * caller-provided @prealloc_sg backs the table so no allocation is needed;
 * multi-entry tables are allocated with sg_alloc_table().
 *
 * Returns 0 on success or the sg_alloc_table() error code.
 */
static int sgtable_setup(struct sg_table *sgt, struct scatterlist *prealloc_sg, unsigned nents, gfp_t gfp_flags)
{
	if (nents <= 1) {
		/* Allocation-free path: point the table at the on-stack entry. */
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = 1;
		sgt->orig_nents = 1;
		return 0;
	}
	return sg_alloc_table(sgt, nents, gfp_flags);
}
/*
 * Release a table set up by sgtable_setup(). Single-entry tables borrow the
 * caller's preallocated scatterlist and own no memory, so only tables with
 * more than one original entry are freed.
 */
static void sgtable_teardown(struct sg_table *sgt)
{
	if (sgt->orig_nents <= 1)
		return;
	sg_free_table(sgt);
}
/*
 * Encrypt/decrypt @num_blocks consecutive blocks starting at physical block
 * @first_pblk_num, one block at a time so each gets its own IV.
 *
 * @src/@dst plus @src_iter/@dst_iter describe the memory buffers; their
 * interpretation is delegated to the @count_sgs/@map_sg callbacks (plain
 * pointer vs. bio bvec — see the typedef comments). When @src == @dst the
 * operation is performed in place using a single scatterlist per block.
 *
 * Returns 0 on success or a negative error code from table setup or from
 * the per-block cipher call.
 *
 * Fixes vs. previous version:
 *  - call crypt_block_sg(): crypt_sg() was renamed in this commit, so the
 *    old name no longer exists;
 *  - each sg_table is torn down only if its own setup succeeded (the old
 *    error paths ran sgtable_teardown() on a table whose setup had just
 *    failed).
 */
static int crypt_blocks_generic(struct crypto_sync_skcipher *tfm,
		void *src, void *src_iter, void *dst, void *dst_iter,
		sector_t num_blocks, sector_t first_pblk_num, int rw, gfp_t gfp,
		count_sgs_fn count_sgs, map_sg_fn map_sg)
{
	struct sg_table sgt_dst, sgt_src;
	struct scatterlist prealloc_sg_dst, prealloc_sg_src;
	struct scatterlist *sg_src, *next_sg_src, *sg_dst, *next_sg_dst, **p_sg_dst;
	unsigned src_nents, dst_nents;
	sector_t last_pblk_num = first_pblk_num + num_blocks;
	u64 pblk_num;
	bool is_inplace;
	int err = 0;

	/* In-place: feed the same sg to both sides of the cipher. */
	is_inplace = (src == dst);
	p_sg_dst = is_inplace ? &sg_src : &sg_dst;

	/* Set up the scatterlist table(s). */
	src_nents = count_sgs(src, SFLITE_BLOCK_SIZE);
	err = sgtable_setup(&sgt_src, &prealloc_sg_src, src_nents, gfp);
	if (err) {
		DMERR("Could not setup sg_table src");
		return err;
	}
	if (!is_inplace) {
		dst_nents = count_sgs(dst, SFLITE_BLOCK_SIZE);
		err = sgtable_setup(&sgt_dst, &prealloc_sg_dst, dst_nents, gfp);
		if (err) {
			DMERR("Could not setup sg_table dst");
			goto out_src;
		}
	}

	/* Loop initialisation */
	next_sg_src = sgt_src.sgl;
	if (!is_inplace)
		next_sg_dst = sgt_dst.sgl;

	/* Encrypt every block separately, with its own IV. */
	for (pblk_num = first_pblk_num; pblk_num < last_pblk_num; pblk_num++) {
		/* map_sg() fills the entry and advances the iterator. */
		sg_src = next_sg_src;
		next_sg_src = map_sg(sg_src, src, src_iter, SFLITE_BLOCK_SIZE);
		if (!is_inplace) {
			sg_dst = next_sg_dst;
			next_sg_dst = map_sg(sg_dst, dst, dst_iter, SFLITE_BLOCK_SIZE);
		}
		err = crypt_block_sg(tfm, sg_src, *p_sg_dst, pblk_num, rw);
		if (err)
			break;
	}

	if (!is_inplace)
		sgtable_teardown(&sgt_dst);
out_src:
	sgtable_teardown(&sgt_src);
	return err;
}
/*
 * Encrypt/decrypt the blocks described by a bio pair.
 *
 * Not implemented yet. The previous body was empty, which makes a
 * value-returning function fall off its end (undefined behavior, and a
 * hard error under -Werror=return-type). Return -EOPNOTSUPP so any caller
 * fails loudly instead of consuming garbage.
 *
 * TODO(review): implement on top of crypt_blocks_generic() with
 * bio-specific count_sgs/map_sg callbacks.
 */
int sflite_crypt_bio(struct crypto_sync_skcipher *tfm, struct bio *src_bio,
		struct bio *dst_bio, u64 num_blocks, u64 first_pblk_num, int rw)
{
	return -EOPNOTSUPP;
}
/* Encrypt-decrypt a single block (memory buffer is a page) */
int sflite_crypt_block_page(struct crypto_sync_skcipher *tfm, struct page *src_page,
struct page *dst_page, u64 pblk_num, int rw)