author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:35:05 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:31 +0000
commit    | 85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      | 76267dbc9b9a130337be3640948fe397b04ac629 /drivers/md/dm.c
parent    | Adding upstream version 6.6.15. (diff)
download  | linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz, linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 141
1 file changed, 84 insertions(+), 57 deletions(-)
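Note: the core of this 6.6.15 to 6.7.7 delta in dm.c is threading a gfp_t through the dm_io/tio allocation paths so that REQ_NOWAIT bios are cloned with GFP_NOWAIT and bounced with BLK_STS_AGAIN instead of sleeping in GFP_NOIO. A minimal sketch of that shape, assuming kernel context; demo_alloc_for_bio() and its 64-byte payload are hypothetical illustrations, not DM code:

```c
#include <linux/bio.h>
#include <linux/blk_types.h>
#include <linux/slab.h>

/* Hypothetical helper mirroring the patch's REQ_NOWAIT handling. */
static blk_status_t demo_alloc_for_bio(struct bio *bio, void **payload)
{
	void *p;

	if (bio->bi_opf & REQ_NOWAIT) {
		/* One non-sleeping attempt; under memory pressure this may fail. */
		p = kmalloc(64, GFP_NOWAIT);
		if (unlikely(!p))
			return BLK_STS_AGAIN; /* caller would do bio_wouldblock_error() */
	} else {
		/* Blocking path: GFP_NOIO may sleep but won't recurse into the I/O stack. */
		p = kmalloc(64, GFP_NOIO);
		if (unlikely(!p))
			return BLK_STS_RESOURCE;
	}
	*payload = p;
	return BLK_STS_OK;
}
```

This matches the new __split_and_process_bio() flow in the diff below: the nowait support check moves next to the GFP_NOWAIT alloc_tio() call, and dm_split_and_process_bio() calls bio_wouldblock_error() when even the dm_io cannot be allocated.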
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 64a1f306c9..23c32cd1f1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -570,13 +570,15 @@ static void dm_end_io_acct(struct dm_io *io)
 	dm_io_acct(io, true);
 }
 
-static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
 {
 	struct dm_io *io;
 	struct dm_target_io *tio;
 	struct bio *clone;
 
-	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
+	clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
+	if (unlikely(!clone))
+		return NULL;
 	tio = clone_to_tio(clone);
 	tio->flags = 0;
 	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
@@ -724,7 +726,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
 		dev_t dev, blk_mode_t mode)
 {
 	struct table_device *td;
-	struct block_device *bdev;
+	struct bdev_handle *bdev_handle;
 	u64 part_off;
 	int r;
 
@@ -733,9 +735,9 @@ static struct table_device *open_table_device(struct mapped_device *md,
 		return ERR_PTR(-ENOMEM);
 	refcount_set(&td->count, 1);
 
-	bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
-	if (IS_ERR(bdev)) {
-		r = PTR_ERR(bdev);
+	bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+	if (IS_ERR(bdev_handle)) {
+		r = PTR_ERR(bdev_handle);
 		goto out_free_td;
 	}
 
@@ -745,20 +747,22 @@ static struct table_device *open_table_device(struct mapped_device *md,
 	 * called.
 	 */
 	if (md->disk->slave_dir) {
-		r = bd_link_disk_holder(bdev, md->disk);
+		r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
 		if (r)
 			goto out_blkdev_put;
 	}
 
 	td->dm_dev.mode = mode;
-	td->dm_dev.bdev = bdev;
-	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+	td->dm_dev.bdev = bdev_handle->bdev;
+	td->dm_dev.bdev_handle = bdev_handle;
+	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
+						NULL, NULL);
 	format_dev_t(td->dm_dev.name, dev);
 	list_add(&td->list, &md->table_devices);
 	return td;
 
 out_blkdev_put:
-	blkdev_put(bdev, _dm_claim_ptr);
+	bdev_release(bdev_handle);
 out_free_td:
 	kfree(td);
 	return ERR_PTR(r);
@@ -771,7 +775,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
 {
 	if (md->disk->slave_dir)
 		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
-	blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
+	bdev_release(td->dm_dev.bdev_handle);
 	put_dax(td->dm_dev.dax_dev);
 	list_del(&td->list);
 	kfree(td);
@@ -1424,9 +1428,16 @@ static void __map_bio(struct bio *clone)
 		if (unlikely(dm_emulate_zone_append(md)))
 			r = dm_zone_map_bio(tio);
 		else
+			goto do_map;
+	} else {
+do_map:
+		if (likely(ti->type->map == linear_map))
+			r = linear_map(ti, clone);
+		else if (ti->type->map == stripe_map)
+			r = stripe_map(ti, clone);
+		else
 			r = ti->type->map(ti, clone);
-	} else
-		r = ti->type->map(ti, clone);
+	}
 
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
@@ -1471,15 +1482,15 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 				struct dm_target *ti, unsigned int num_bios,
-				unsigned *len)
+				unsigned *len, gfp_t gfp_flag)
 {
 	struct bio *bio;
-	int try;
+	int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
 
-	for (try = 0; try < 2; try++) {
+	for (; try < 2; try++) {
 		int bio_nr;
 
-		if (try)
+		if (try && num_bios > 1)
 			mutex_lock(&ci->io->md->table_devices_lock);
 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
 			bio = alloc_tio(ci, ti, bio_nr, len,
@@ -1489,7 +1500,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 			bio_list_add(blist, bio);
 		}
 
-		if (try)
+		if (try && num_bios > 1)
 			mutex_unlock(&ci->io->md->table_devices_lock);
 		if (bio_nr == num_bios)
 			return;
@@ -1499,34 +1510,31 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 	}
 }
 
-static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				 unsigned int num_bios, unsigned int *len)
+static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+					  unsigned int num_bios, unsigned int *len,
+					  gfp_t gfp_flag)
 {
 	struct bio_list blist = BIO_EMPTY_LIST;
 	struct bio *clone;
 	unsigned int ret = 0;
 
-	switch (num_bios) {
-	case 0:
-		break;
-	case 1:
-		if (len)
-			setup_split_accounting(ci, *len);
-		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-		__map_bio(clone);
-		ret = 1;
-		break;
-	default:
-		if (len)
-			setup_split_accounting(ci, *len);
-		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
-		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
-		while ((clone = bio_list_pop(&blist))) {
+	if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
+		return 0;
+
+	/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+	if (len)
+		setup_split_accounting(ci, *len);
+
+	/*
+	 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
+	 * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
	 */
+	alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
+	while ((clone = bio_list_pop(&blist))) {
+		if (num_bios > 1)
 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
-			__map_bio(clone);
-			ret += 1;
-		}
-		break;
+		__map_bio(clone);
+		ret += 1;
 	}
 
 	return ret;
@@ -1553,8 +1561,12 @@ static void __send_empty_flush(struct clone_info *ci)
 			unsigned int bios;
 			struct dm_target *ti = dm_table_get_target(t, i);
 
+			if (unlikely(ti->num_flush_bios == 0))
+				continue;
+
 			atomic_add(ti->num_flush_bios, &ci->io->io_count);
-			bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+			bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
+						     NULL, GFP_NOWAIT);
 			atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
 		}
 
@@ -1567,10 +1579,9 @@ static void __send_empty_flush(struct clone_info *ci)
 	bio_uninit(ci->bio);
 }
 
-static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-					unsigned int num_bios,
-					unsigned int max_granularity,
-					unsigned int max_sectors)
+static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
+			       unsigned int num_bios, unsigned int max_granularity,
+			       unsigned int max_sectors)
 {
 	unsigned int len, bios;
 
@@ -1578,7 +1589,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 		       __max_io_len(ti, ci->sector, max_granularity, max_sectors));
 
 	atomic_add(num_bios, &ci->io->io_count);
-	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
+	bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
 	/*
 	 * alloc_io() takes one extra reference for submission, so the
 	 * reference won't reach 0 without the following (+1) subtraction
@@ -1647,8 +1658,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 	if (unlikely(!num_bios))
 		return BLK_STS_NOTSUPP;
 
-	__send_changing_extent_only(ci, ti, num_bios,
-				    max_granularity, max_sectors);
+	__send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
+
 	return BLK_STS_OK;
 }
 
@@ -1707,10 +1718,6 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	if (unlikely(!ti))
 		return BLK_STS_IOERR;
 
-	if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
-	    unlikely(!dm_target_supports_nowait(ti->type)))
-		return BLK_STS_NOTSUPP;
-
 	if (unlikely(ci->is_abnormal_io))
 		return __process_abnormal_io(ci, ti);
 
@@ -1722,7 +1729,17 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 
 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
 	setup_split_accounting(ci, len);
-	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+
+	if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
+		if (unlikely(!dm_target_supports_nowait(ti->type)))
+			return BLK_STS_NOTSUPP;
+
+		clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
+		if (unlikely(!clone))
+			return BLK_STS_AGAIN;
+	} else {
+		clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+	}
 	__map_bio(clone);
 
 	ci->sector += len;
@@ -1731,11 +1748,11 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	return BLK_STS_OK;
 }
 
-static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+static void init_clone_info(struct clone_info *ci, struct dm_io *io,
 			    struct dm_table *map, struct bio *bio, bool is_abnormal)
 {
 	ci->map = map;
-	ci->io = alloc_io(md, bio);
+	ci->io = io;
 	ci->bio = bio;
 	ci->is_abnormal_io = is_abnormal;
 	ci->submit_as_polled = false;
@@ -1770,8 +1787,18 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 		return;
 	}
 
-	init_clone_info(&ci, md, map, bio, is_abnormal);
-	io = ci.io;
+	/* Only support nowait for normal IO */
+	if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
+		io = alloc_io(md, bio, GFP_NOWAIT);
+		if (unlikely(!io)) {
+			/* Unable to do anything without dm_io. */
+			bio_wouldblock_error(bio);
+			return;
+		}
+	} else {
+		io = alloc_io(md, bio, GFP_NOIO);
+	}
+	init_clone_info(&ci, io, map, bio, is_abnormal);
 
 	if (bio->bi_opf & REQ_PREFLUSH) {
 		__send_empty_flush(&ci);
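Side note on the table-device hunks above: between 6.6 and 6.7 the block layer moved from blkdev_get_by_dev()/blkdev_put() to the handle-based bdev_open_by_dev()/bdev_release(), where the returned struct bdev_handle carries both the block_device and the holder claim. A rough sketch of that pairing as used here, assuming 6.7 kernel context; sketch_open()/sketch_close() are illustrative names, not DM functions:

```c
#include <linux/blkdev.h>
#include <linux/err.h>

/* Open: one call claims the device and returns a handle. */
static struct bdev_handle *sketch_open(dev_t dev, blk_mode_t mode,
				       void *claim_ptr)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, mode, claim_ptr, NULL);
	if (IS_ERR(handle))
		return handle;

	/* Callers reach the block device through the handle... */
	pr_debug("opened %pg\n", handle->bdev);
	return handle; /* ...and must keep the handle around for teardown. */
}

/* Close: replaces blkdev_put(bdev, claim_ptr); the handle owns the claim. */
static void sketch_close(struct bdev_handle *handle)
{
	bdev_release(handle);
}
```

This is why dm_dev gains a bdev_handle member (set in open_table_device() above): close_table_device() can no longer reconstruct the claim from the bare block_device and must release through the stored handle instead.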