Mirror of https://codeberg.org/shufflecake/shufflecake-c.git (last synced 2025-12-27)
Finish helpers for flush

parent 806c524c02
commit 050fc9a385

1 changed file with 58 additions and 72 deletions
@@ -54,7 +54,7 @@ static int __serialise_and_encrypt_posmap_blocks(struct sflite_volume *svol, u32
  * Stash into `crypt_entries` the dirty posmap blocks, while we hold the locks,
  * so that the CWBs can then be sent without holding any locks.
  */
-static int prepare_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
+static int prepare_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp)
 {
 	u32 i, j, nblocks;
 	int err;
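Note on the hunk above: the prepare/send split exists so that the dirty bitmap is only walked, and the blocks serialised and encrypted, while the posmap locks are held; the resulting CWBs are then submitted lock-free. The run-walking idiom over the bitmap looks like the following minimal sketch (the helper name `for_each_dirty_run` and its callback are illustrative, not part of the patch):

#include <linux/bitmap.h>

/* Visit each contiguous run [start, end) of set bits in `dirty`.
 * handle_run() stands in for the real per-run work (serialise +
 * encrypt into crypt_entries). */
static int for_each_dirty_run(const unsigned long *dirty, unsigned int nbits,
			      int (*handle_run)(u32 start, u32 end))
{
	u32 start = 0, end;
	int err = 0;

	while (start < nbits) {
		start = find_next_bit(dirty, nbits, start);
		if (start == nbits)
			break;			/* no more set bits */
		end = find_next_zero_bit(dirty, nbits, start);
		err = handle_run(start, end);	/* run is [start, end) */
		if (err)
			break;
		start = end;			/* resume after this run */
	}
	return err;
}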
@@ -70,7 +70,7 @@ static int prepare_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
 		j = find_next_zero_bit(svol->posmap.dirty, nblocks, i);
 
 		// Encrypt onto `crypt_entries` from block i (inclusive) to j (exclusive)
-		err = __serialise_and_encrypt_posmap_blocks(svol, i, j, gfp_flags);
+		err = __serialise_and_encrypt_posmap_blocks(svol, i, j, gfp);
 		if (err)
 			break;
@@ -82,15 +82,12 @@ static int prepare_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
 		// We need i==j here, for the next loop iteration
 	}
 
 	// If there was an encryption error, unmark flush_pending and give up the FLUSH altogether
 	if (err)
 		bitmap_clear(svol->posmap.flush_pending, 0, nblocks);
 
 	return err;
 }
 
-struct cwb_context {
+struct cwb_context
+{
 	struct sflite_volume *svol;
 	/* Region */
 	u32 i;
@@ -100,7 +97,7 @@ struct cwb_context {
 	struct completion *compl;
 };
 
-static void cwb_endio(unsigned long error, void *context)
+static void cwb_notify_io(unsigned long error, void *context)
 {
 	struct cwb_context *ctx = context;
 
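Review note on `cwb_context`: each CWB gets its own kmalloc'ed context because the notify callback can run long after the submit loop has moved on, so it cannot point into loop-local stack data; only the completion and pending counter live on the submitter's stack, which stays valid because the submitter waits before returning. A hedged sketch of the callback side, under the assumption that the struct also carries the region end and pointers to the shared pending/error state (only `svol`, `i` and `compl` are visible in these hunks):

/* Sketch only: the `j`, `pending` fields and the use of posmap.cwb_error
 * are inferred from the surrounding code, not shown in this hunk. */
static void cwb_notify_io_sketch(unsigned long error, void *context)
{
	struct cwb_context *ctx = context;

	/* Remember which blocks' CWBs failed, so the finaliser keeps
	 * them dirty (see mark_posmap_blocks_clean below). */
	if (error)
		bitmap_set(ctx->svol->posmap.cwb_error, ctx->i, ctx->j - ctx->i);

	/* The last outstanding CWB wakes the submitter */
	if (atomic_dec_and_test(ctx->pending))
		complete(ctx->compl);

	kfree(ctx);	/* context was kmalloc'ed per CWB */
}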
@@ -118,12 +115,12 @@ static void cwb_endio(unsigned long error, void *context)
  * Send all CWBs prepared before (marked by flush_pending), and
  * synchronously wait for them. No lock needs to be held.
  */
-static int sflite_send_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
+static int sflite_send_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp)
 {
 	u32 i, j, nblocks;
 	struct dm_io_region region;
 	struct dm_io_request req;
-	struct completion compl = COMPLETION_INITIALIZER_ONSTACK(compl);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t pending = ATOMIC_INIT(1);
 	struct cwb_context *ctx;
 	int err;
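Two things worth noting in the hunk above. `DECLARE_COMPLETION_ONSTACK(compl)` is the standard declare-and-initialise shorthand for the open-coded `COMPLETION_INITIALIZER_ONSTACK` form; the change is cosmetic. And `pending` starts at 1, not 0: the submitter holds its own reference so no callback can fire the completion while CWBs are still being issued. A minimal sketch of that biased-counter pattern (the function and names are illustrative):

#include <linux/atomic.h>
#include <linux/completion.h>

static void fan_out_and_wait_sketch(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	atomic_t pending = ATOMIC_INIT(1);	/* submitter's own reference */

	/* For each async request:
	 *	atomic_inc(&pending);
	 *	submit(...);
	 * and in each completion callback:
	 *	if (atomic_dec_and_test(&pending))
	 *		complete(&done);
	 */

	/* Drop the submitter's reference; wait only if I/O is still in flight */
	if (!atomic_dec_and_test(&pending))
		wait_for_completion_io(&done);
}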
@@ -138,7 +135,7 @@ static int sflite_send_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
 		j = find_next_zero_bit(svol->posmap.flush_pending, nblocks, i);
 
 		// Allocate context
-		ctx = kmalloc(sizeof(*ctx), gfp_flags);
+		ctx = kmalloc(sizeof(*ctx), gfp);
 		if (!ctx) {
 			err = -ENOMEM;
 			break;
@@ -160,7 +157,7 @@ static int sflite_send_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
 		req.mem.type = DM_IO_VMA;
 		req.mem.ptr.vma = svol->posmap.crypt_entries + ((unsigned long)i * SFLITE_BLOCK_SIZE);
 		req.client = svol->sdev->io_client;
-		req.notify.fn = cwb_endio;
+		req.notify.fn = cwb_notify_io;
 		req.notify.context = ctx;
 		atomic_inc(&pending);
 
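For reference, setting `notify.fn` is what makes dm-io asynchronous: with a non-NULL notify function, `dm_io()` queues the request and returns immediately, reporting completion through `fn(error_bits, context)`. A hedged sketch of the request setup used in this loop, assuming the classic four-argument `dm_io()` signature (this tree wraps it as `sflc_dm_io`) and a client obtained from `dm_io_client_create()`:

#include <linux/blk_types.h>
#include <linux/dm-io.h>

static void submit_async_write_sketch(struct dm_io_client *client,
				      struct block_device *bdev,
				      sector_t sector, sector_t nr_sectors,
				      void *vma_buf, io_notify_fn fn, void *ctx)
{
	struct dm_io_region region = {
		.bdev	= bdev,
		.sector	= sector,
		.count	= nr_sectors,
	};
	struct dm_io_request req = {
		.bi_opf		= REQ_OP_WRITE | REQ_SYNC,
		.mem.type	= DM_IO_VMA,	/* buffer is vmalloc'ed memory */
		.mem.ptr.vma	= vma_buf,
		.client		= client,
		.notify.fn	= fn,		/* non-NULL => asynchronous */
		.notify.context	= ctx,
	};

	/* Async dm-io does not return I/O errors here;
	 * they arrive via fn(error_bits, ctx). */
	(void) dm_io(&req, 1, &region, NULL);
}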
@@ -182,79 +179,68 @@ static int sflite_send_posmap_cwbs(struct sflite_volume *svol, gfp_t gfp_flags)
 }
 
 
-/**
- * Synchronously store (and flush) all the dirty posmap blocks.
- */
-int sflite_flush_posmap(struct sflite_volume *svol, gfp_t gfp_flags)
-{
-	struct sflite_device *sdev = svol->sdev;
-	struct dm_io_region region;
-	struct dm_io_request req;
-	struct sflite_flush_io_notify endio = {
-		.compl = COMPLETION_INITIALIZER_ONSTACK(endio.compl),
-		.pending = ATOMIC_INIT(1),
-		.errors = ATOMIC_INIT(0),
-	};
-	unsigned int bitmap_bits = sdev->posmap_size_blocks;
-	u32 i = 0;
-	int err = 0;
-
-	while (1) {
-		u32 j;
-
-		// Find next region of 1s
-		i = find_next_bit(svol->posmap.dirty, bitmap_bits, i);
-		if (unlikely(i == bitmap_bits))
-			break;
-		j = find_next_zero_bit(svol->posmap.dirty, bitmap_bits, i);
-
-		// Encrypt from block i (inclusive) to j (exclusive)
-		err = __serialise_and_encrypt_posmap_blocks(svol, i, j, gfp_flags);
-		if (unlikely(err))
-			break;
-
-		// Region on-disk
-		region.bdev = svol->dm_dev->bdev;
-		region.sector = SFLITE_POSMAP_START_SECTOR(svol) + (i << SFLITE_BLOCK_SHIFT);
-		region.count = (sector_t)(j - i) << SFLITE_BLOCK_SHIFT;
-
-		// Request to dm-io
-		req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
-		req.mem.type = DM_IO_VMA;
-		req.mem.ptr.vma = svol->posmap.crypt_entries + (i * SFLITE_BLOCK_SIZE);
-		req.client = sdev->io_client;
-		req.notify.fn = __flush_notify_io;
-		req.notify.context = &endio;
-		atomic_inc(&endio.pending);
-
-		/* Writing via async dm-io (implied by notify.fn above) won't return an error */
-		(void) sflc_dm_io(&req, 1, &region, NULL);
-		i = j;
-	}
-
-	// Wait for workers, even in case of error inside the loop
-	if (atomic_dec_and_test(&endio.pending))
-		complete(&endio.compl);
-	wait_for_completion_io(&endio.compl);
-	// Now check for error inside the loop
-	if (err)
-		return err;
-
-	// The WRITEs inside the loop didn't have REQ_PREFLUSH. Flush now
-	err = __send_empty_flush(svol);
-	if (err)
-		return err;
-
-	// Clear dirty bitmap if no workers failed
-	if (!atomic_read(&endio.errors))
-		bitmap_clear(svol->posmap.dirty, 0, bitmap_bits);
-	else
-		err = -EIO;
-
+static void flush_remap_endio(struct bio *clone)
+{
+	complete(clone->bi_private);
+}
+
+/**
+ * Issue a FLUSH to the underlying device, either brand-new or cloning
+ * an orig_bio coming from the upper layer.
+ * The status code of the cloned bio is copied onto the original one.
+ */
+int issue_lowlevel_flush(struct sflite_volume *svol, struct bio *orig_bio, gfp_t gfp)
+{
+	struct bio *clone;
+	DECLARE_COMPLETION_ONSTACK(compl);
+
+	// Can either issue a brand-new request
+	if (orig_bio == NULL)
+		return blkdev_issue_flush(svol->dm_dev->bdev);
+
+	// Or clone and remap one from the upper layer
+	clone = bio_alloc_clone(svol->dm_dev->bdev, orig_bio, gfp, svol->sdev->bioset);
+	if (!clone)
+		return -ENOMEM;
+	clone->bi_opf |= REQ_SYNC;
+	clone->bi_private = &compl;
+	clone->bi_end_io = flush_remap_endio;
+
+	// Submit and wait, then copy status code
+	dm_submit_bio_remap(orig_bio, clone);
+	wait_for_completion_io(&compl);
+	orig_bio->bi_status = clone->bi_status;
+
+	bio_put(clone);
+	return blk_status_to_errno(orig_bio->bi_status);
+}
+
+/**
+ * Finalise the FLUSH operation by marking clean the posmap blocks whose CWB
+ * did not fail, if their sequence number hasn't changed in the meantime (we
+ * detect cache re-dirtying).
+ */
+int mark_posmap_blocks_clean(struct sflite_volume *svol)
+{
+	u32 block;
+	int err = 0; // Set to non-zero if at least one CWB failed
+
+	for_each_set_bit(block, svol->posmap.flush_pending, svol->sdev->posmap_size_blocks) {
+		if (test_bit(block, svol->posmap.cwb_error))
+			err = -EIO;
+		else if (svol->posmap.snap_seqnum[block] == svol->posmap.seqnum[block])
+			__clear_bit(block, svol->posmap.dirty);
+		// Nothing to do otherwise: don't set global error and don't mark block clean,
+		// if the CWB succeeded but the seqnum changed in the meantime.
+	}
+
 	return err;
 }
 
 
 /* Landing here from ->map() always through the flush_queue */
 void sflite_flush_work_fn(struct work_struct *work)
 {
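Taken together, the helpers in this commit break the old monolithic sflite_flush_posmap into composable steps, and converting the cloned bio's status via blk_status_to_errno is what lets callers see a plain errno. A hedged sketch of how they presumably chain together in the flush worker (the actual call site, locking, and GFP choice are not part of this diff):

static int flush_posmap_sketch(struct sflite_volume *svol, struct bio *orig_bio)
{
	int err;

	/* 1. Under the posmap locks: serialise + encrypt dirty blocks
	 *    into crypt_entries, marking them flush_pending. */
	err = prepare_posmap_cwbs(svol, GFP_NOIO);
	if (err)
		return err;

	/* 2. Lock-free: submit the CWBs and wait for all of them. */
	err = sflite_send_posmap_cwbs(svol, GFP_NOIO);
	if (err)
		return err;

	/* 3. Make the writes durable (they were sent without REQ_PREFLUSH);
	 *    clones and remaps orig_bio when one is provided. */
	err = issue_lowlevel_flush(svol, orig_bio, GFP_NOIO);
	if (err)
		return err;

	/* 4. Clear dirty bits only for blocks whose CWB succeeded and
	 *    whose seqnum is unchanged (not re-dirtied meanwhile). */
	return mark_posmap_blocks_clean(svol);
}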