dm-crypt: enable DM_TARGET_ATOMIC_WRITES

Allow handling of bios with the REQ_ATOMIC flag set.

Don't split these bios; instead, fail them if they overrun the hard
limit of BIO_MAX_VECS << PAGE_SHIFT bytes.

In order to simplify the code, this commit joins the logic that avoids
splitting emulated zone append bios with the logic that avoids
splitting atomic write bios.

Signed-off-by: John Garry <john.g.garry@oracle.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Tested-by: John Garry <john.g.garry@oracle.com>
commit ce51c6963a
parent de67c139b3
Author: Mikulas Patocka
Date:   2025-11-05 16:02:36 +01:00

@@ -254,22 +254,15 @@ static unsigned int max_write_size = 0;
 module_param(max_write_size, uint, 0644);
 MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
 
-static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split)
 {
 	struct crypt_config *cc = ti->private;
 	unsigned val, sector_align;
 	bool wrt = op_is_write(bio_op(bio));
 
-	if (wrt) {
-		/*
-		 * For zoned devices, splitting write operations creates the
-		 * risk of deadlocking queue freeze operations with zone write
-		 * plugging BIO work when the remainder of a split BIO is
-		 * issued. So always allow the entire BIO to proceed.
-		 */
-		if (ti->emulate_zone_append)
-			return bio_sectors(bio);
-
+	if (no_split) {
+		val = -1;
+	} else if (wrt) {
 		val = min_not_zero(READ_ONCE(max_write_size),
 				   DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
 	} else {
@@ -3462,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	struct dm_crypt_io *io;
 	struct crypt_config *cc = ti->private;
 	unsigned max_sectors;
+	bool no_split;
 
 	/*
 	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3479,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
 	/*
 	 * Check if bio is too large, split as needed.
+	 *
+	 * For zoned devices, splitting write operations creates the
+	 * risk of deadlocking queue freeze operations with zone write
+	 * plugging BIO work when the remainder of a split BIO is
+	 * issued. So always allow the entire BIO to proceed.
 	 */
-	max_sectors = get_max_request_sectors(ti, bio);
-	if (unlikely(bio_sectors(bio) > max_sectors))
+	no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) ||
+		   (bio->bi_opf & REQ_ATOMIC);
+	max_sectors = get_max_request_sectors(ti, bio, no_split);
+	if (unlikely(bio_sectors(bio) > max_sectors)) {
+		if (unlikely(no_split))
+			return DM_MAPIO_KILL;
 		dm_accept_partial_bio(bio, max_sectors);
+	}
 
 	/*
 	 * Ensure that bio is a multiple of internal sector encryption size
@@ -3728,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (ti->emulate_zone_append)
limits->max_hw_sectors = min(limits->max_hw_sectors,
BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max,
BIO_MAX_VECS << PAGE_SHIFT);
limits->atomic_write_hw_max = min(limits->atomic_write_hw_max,
BIO_MAX_VECS << PAGE_SHIFT);
}
static struct target_type crypt_target = {
.name = "crypt",
.version = {1, 28, 0},
.version = {1, 29, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
.features = DM_TARGET_ZONED_HM,
.features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES,
.report_zones = crypt_report_zones,
.map = crypt_map,
.status = crypt_status,
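
[Editor's illustration, not part of the patch: the atomic_write_hw_unit_max /
atomic_write_hw_max limits clamped in crypt_io_hints() above surface to
userspace through statx(2). A sketch, assuming kernel headers >= 6.11 that
define STATX_WRITE_ATOMIC and the stx_atomic_write_unit_* fields; it uses the
raw syscall so it does not depend on the glibc wrapper exposing the new
fields, and the device path is hypothetical.]

/* Sketch: read back the advertised atomic write unit limits. */
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/stat.h>		/* struct statx, STATX_WRITE_ATOMIC */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct statx stx = { 0 };

	if (syscall(SYS_statx, AT_FDCWD, "/dev/mapper/crypt0", 0,	/* hypothetical device */
		    STATX_WRITE_ATOMIC, &stx) < 0) {
		perror("statx");
		return 1;
	}
	printf("atomic write unit min: %u bytes\n", stx.stx_atomic_write_unit_min);
	printf("atomic write unit max: %u bytes\n", stx.stx_atomic_write_unit_max);
	return 0;
}

[On this target the reported unit max should not exceed
BIO_MAX_VECS << PAGE_SHIFT, i.e. 1 MiB with 4 KiB pages.]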