author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit    34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree      62db60558cbf089714b48daeabca82bf2b20b20e /drivers/block/virtio_blk.c
parent    Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/block/virtio_blk.c')
-rw-r--r--  drivers/block/virtio_blk.c  303
1 file changed, 155 insertions(+), 148 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2bf14a0e28..42dea7601d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -720,25 +720,24 @@ fail_report:
return ret;
}
-static int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk,
- struct request_queue *q)
+static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
+ struct virtio_device *vdev = vblk->vdev;
u32 v, wg;
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- disk_set_zoned(vblk->disk);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ lim->zoned = true;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
- disk_set_max_open_zones(vblk->disk, v);
+ lim->max_open_zones = v;
dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
- disk_set_max_active_zones(vblk->disk, v);
+ lim->max_active_zones = v;
dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
@@ -747,8 +746,8 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
- blk_queue_physical_block_size(q, wg);
- blk_queue_io_min(q, wg);
+ lim->physical_block_size = wg;
+ lim->io_min = wg;
dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
@@ -764,13 +763,13 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
vblk->zone_sectors);
return -ENODEV;
}
- blk_queue_chunk_sectors(q, vblk->zone_sectors);
+ lim->chunk_sectors = vblk->zone_sectors;
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
dev_warn(&vblk->vdev->dev,
"ignoring negotiated F_DISCARD for zoned device\n");
- blk_queue_max_discard_sectors(q, 0);
+ lim->max_hw_discard_sectors = 0;
}
virtio_cread(vdev, struct virtio_blk_config,
@@ -785,25 +784,21 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
wg, v);
return -ENODEV;
}
- blk_queue_max_zone_append_sectors(q, v);
+ lim->max_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
- return blk_revalidate_disk_zones(vblk->disk, NULL);
+ return 0;
}
-
#else
-
/*
- * Zoned block device support is not configured in this kernel.
- * Host-managed zoned devices can't be supported, but others are
- * good to go as regular block devices.
+ * Zoned block device support is not configured in this kernel, host-managed
+ * zoned devices can't be supported.
*/
#define virtblk_report_zones NULL
-
-static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk, struct request_queue *q)
+static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- dev_err(&vdev->dev,
+ dev_err(&vblk->vdev->dev,
"virtio_blk: zoned devices are not supported");
return -EOPNOTSUPP;
}
@@ -1248,31 +1243,17 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
-static int virtblk_probe(struct virtio_device *vdev)
+static int virtblk_read_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- struct virtio_blk *vblk;
- struct request_queue *q;
- int err, index;
-
+ struct virtio_device *vdev = vblk->vdev;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
- unsigned int queue_depth;
size_t max_dma_size;
-
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
- err = ida_alloc_range(&vd_index_ida, 0,
- minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
- if (err < 0)
- goto out;
- index = err;
+ int err;
/* We need to know how many segments before we allocate. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -1286,78 +1267,11 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Prevent integer overflows and honor max vq size */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
- vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
- if (!vblk) {
- err = -ENOMEM;
- goto out_free_index;
- }
-
- mutex_init(&vblk->vdev_mutex);
-
- vblk->vdev = vdev;
-
- INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-
- err = init_vq(vblk);
- if (err)
- goto out_free_vblk;
-
- /* Default queue sizing is to fill the ring. */
- if (!virtblk_queue_depth) {
- queue_depth = vblk->vqs[0].vq->num_free;
- /* ... but without indirect descs, we use 2 descs per req */
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- queue_depth /= 2;
- } else {
- queue_depth = virtblk_queue_depth;
- }
-
- memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
- vblk->tag_set.ops = &virtio_mq_ops;
- vblk->tag_set.queue_depth = queue_depth;
- vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- vblk->tag_set.cmd_size =
- sizeof(struct virtblk_req) +
- sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
- vblk->tag_set.driver_data = vblk;
- vblk->tag_set.nr_hw_queues = vblk->num_vqs;
- vblk->tag_set.nr_maps = 1;
- if (vblk->io_queues[HCTX_TYPE_POLL])
- vblk->tag_set.nr_maps = 3;
-
- err = blk_mq_alloc_tag_set(&vblk->tag_set);
- if (err)
- goto out_free_vq;
-
- vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
- if (IS_ERR(vblk->disk)) {
- err = PTR_ERR(vblk->disk);
- goto out_free_tags;
- }
- q = vblk->disk->queue;
-
- virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
- vblk->disk->major = major;
- vblk->disk->first_minor = index_to_minor(index);
- vblk->disk->minors = 1 << PART_BITS;
- vblk->disk->private_data = vblk;
- vblk->disk->fops = &virtblk_fops;
- vblk->index = index;
-
- /* configure queue flush support */
- virtblk_update_cache_mode(vdev);
-
- /* If disk is read-only in the host, the guest should obey */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
- set_disk_ro(vblk->disk, 1);
-
/* We can handle whatever the host told us to handle. */
- blk_queue_max_segments(q, sg_elems);
+ lim->max_segments = sg_elems;
/* No real sector limit. */
- blk_queue_max_hw_sectors(q, UINT_MAX);
+ lim->max_hw_sectors = UINT_MAX;
max_dma_size = virtio_max_dma_size(vdev);
max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
@@ -1369,7 +1283,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!err)
max_size = min(max_size, v);
- blk_queue_max_segment_size(q, max_size);
+ lim->max_segment_size = max_size;
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
@@ -1381,38 +1295,37 @@ static int virtblk_probe(struct virtio_device *vdev)
dev_err(&vdev->dev,
"virtio_blk: invalid block size: 0x%x\n",
blk_size);
- goto out_cleanup_disk;
+ return err;
}
- blk_queue_logical_block_size(q, blk_size);
+ lim->logical_block_size = blk_size;
} else
- blk_size = queue_logical_block_size(q);
+ blk_size = lim->logical_block_size;
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
- blk_queue_physical_block_size(q,
- blk_size * (1 << physical_block_exp));
+ lim->physical_block_size = blk_size * (1 << physical_block_exp);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
- blk_queue_alignment_offset(q, blk_size * alignment_offset);
+ lim->alignment_offset = blk_size * alignment_offset;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
- blk_queue_io_min(q, blk_size * min_io_size);
+ lim->io_min = blk_size * min_io_size;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
- blk_queue_io_opt(q, blk_size * opt_io_size);
+ lim->io_opt = blk_size * opt_io_size;
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
@@ -1420,7 +1333,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
- blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
+ lim->max_hw_discard_sectors = v ? v : UINT_MAX;
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&max_discard_segs);
@@ -1429,7 +1342,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
virtio_cread(vdev, struct virtio_blk_config,
max_write_zeroes_sectors, &v);
- blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
+ lim->max_write_zeroes_sectors = v ? v : UINT_MAX;
}
/* The discard and secure erase limits are combined since the Linux
@@ -1455,8 +1368,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: secure_erase_sector_alignment can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
discard_granularity = min_not_zero(discard_granularity, v);
@@ -1470,11 +1382,10 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_sectors can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
- blk_queue_max_secure_erase_sectors(q, v);
+ lim->max_secure_erase_sectors = v;
virtio_cread(vdev, struct virtio_blk_config,
max_secure_erase_seg, &v);
@@ -1485,8 +1396,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_seg can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
max_discard_segs = min_not_zero(max_discard_segs, v);
@@ -1502,45 +1412,142 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!max_discard_segs)
max_discard_segs = sg_elems;
- blk_queue_max_discard_segments(q,
- min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+ lim->max_discard_segments =
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS);
if (discard_granularity)
- q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ lim->discard_granularity =
+ discard_granularity << SECTOR_SHIFT;
else
- q->limits.discard_granularity = blk_size;
+ lim->discard_granularity = blk_size;
}
- virtblk_update_capacity(vblk, false);
- virtio_device_ready(vdev);
-
- /*
- * All steps that follow use the VQs therefore they need to be
- * placed after the virtio_device_ready() call above.
- */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
u8 model;
- virtio_cread(vdev, struct virtio_blk_config, zoned.model,
- &model);
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
switch (model) {
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
- /* Present the host-aware device as non-zoned */
- break;
+ /* treat host-aware devices as non-zoned */
+ return 0;
case VIRTIO_BLK_Z_HM:
- err = virtblk_probe_zoned_device(vdev, vblk, q);
+ err = virtblk_read_zoned_limits(vblk, lim);
if (err)
- goto out_cleanup_disk;
+ return err;
break;
default:
- dev_err(&vdev->dev, "unsupported zone model %d\n",
- model);
- err = -EINVAL;
- goto out_cleanup_disk;
+ dev_err(&vdev->dev, "unsupported zone model %d\n", model);
+ return -EINVAL;
}
}
+ return 0;
+}
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk;
+ struct queue_limits lim = { };
+ int err, index;
+ unsigned int queue_depth;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = ida_alloc_range(&vd_index_ida, 0,
+ minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
+ if (err < 0)
+ goto out;
+ index = err;
+
+ vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+ if (!vblk) {
+ err = -ENOMEM;
+ goto out_free_index;
+ }
+
+ mutex_init(&vblk->vdev_mutex);
+
+ vblk->vdev = vdev;
+
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+
+ err = init_vq(vblk);
+ if (err)
+ goto out_free_vblk;
+
+ /* Default queue sizing is to fill the ring. */
+ if (!virtblk_queue_depth) {
+ queue_depth = vblk->vqs[0].vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ queue_depth /= 2;
+ } else {
+ queue_depth = virtblk_queue_depth;
+ }
+
+ memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+ vblk->tag_set.ops = &virtio_mq_ops;
+ vblk->tag_set.queue_depth = queue_depth;
+ vblk->tag_set.numa_node = NUMA_NO_NODE;
+ vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ vblk->tag_set.cmd_size =
+ sizeof(struct virtblk_req) +
+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
+ vblk->tag_set.driver_data = vblk;
+ vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
+
+ err = blk_mq_alloc_tag_set(&vblk->tag_set);
+ if (err)
+ goto out_free_vq;
+
+ err = virtblk_read_limits(vblk, &lim);
+ if (err)
+ goto out_free_tags;
+
+ vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
+ if (IS_ERR(vblk->disk)) {
+ err = PTR_ERR(vblk->disk);
+ goto out_free_tags;
+ }
+
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+
+ vblk->disk->major = major;
+ vblk->disk->first_minor = index_to_minor(index);
+ vblk->disk->minors = 1 << PART_BITS;
+ vblk->disk->private_data = vblk;
+ vblk->disk->fops = &virtblk_fops;
+ vblk->index = index;
+
+ /* configure queue flush support */
+ virtblk_update_cache_mode(vdev);
+
+ /* If disk is read-only in the host, the guest should obey */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
+ set_disk_ro(vblk->disk, 1);
+
+ virtblk_update_capacity(vblk, false);
+ virtio_device_ready(vdev);
+
+ /*
+ * All steps that follow use the VQs therefore they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
+ err = blk_revalidate_disk_zones(vblk->disk, NULL);
+ if (err)
+ goto out_cleanup_disk;
+ }
+
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;