From 83506c85f8d4332b3edfdc8f1fd07aa691415350 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 8 May 2024 19:45:29 +0200
Subject: Adding upstream version 5.10.216.

Signed-off-by: Daniel Baumann
---
 block/blk-settings.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

(limited to 'block/blk-settings.c')

diff --git a/block/blk-settings.c b/block/blk-settings.c
index c3aa7f8ee..ebd373469 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -60,6 +60,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->io_opt = 0;
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
+	lim->zone_write_granularity = 0;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -353,6 +354,28 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
 }
 EXPORT_SYMBOL(blk_queue_physical_block_size);
 
+/**
+ * blk_queue_zone_write_granularity - set zone write granularity for the queue
+ * @q: the request queue for the zoned device
+ * @size: the zone write granularity size, in bytes
+ *
+ * Description:
+ *   This should be set to the lowest possible size allowing to write in
+ *   sequential zones of a zoned block device.
+ */
+void blk_queue_zone_write_granularity(struct request_queue *q,
+				      unsigned int size)
+{
+	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+		return;
+
+	q->limits.zone_write_granularity = size;
+
+	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
+		q->limits.zone_write_granularity = q->limits.logical_block_size;
+}
+EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
+
 /**
  * blk_queue_alignment_offset - set physical block alignment offset
  * @q: the request queue for the device
@@ -630,7 +653,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			t->discard_granularity;
 	}
 
+	t->zone_write_granularity = max(t->zone_write_granularity,
+					b->zone_write_granularity);
 	t->zoned = max(t->zoned, b->zoned);
+	if (!t->zoned) {
+		t->zone_write_granularity = 0;
+		t->max_zone_append_sectors = 0;
+	}
 	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
@@ -846,6 +875,8 @@ EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
  */
 void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
+	struct request_queue *q = disk->queue;
+
 	switch (model) {
 	case BLK_ZONED_HM:
 		/*
@@ -874,7 +905,15 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 		break;
 	}
 
-	disk->queue->limits.zoned = model;
+	q->limits.zoned = model;
+	if (model != BLK_ZONED_NONE) {
+		/*
+		 * Set the zone write granularity to the device logical block
+		 * size by default. The driver can change this value if needed.
+		 */
+		blk_queue_zone_write_granularity(q,
+						 queue_logical_block_size(q));
+	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
-- 
cgit v1.2.3
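
Usage note (not part of the patch): a minimal sketch of how a zoned block driver might apply the new limit when configuring its request queue. The function name example_zoned_queue_setup() and the choice of the physical block size as the granularity are illustrative assumptions; only blk_queue_set_zoned(), blk_queue_zone_write_granularity() and queue_physical_block_size() come from the kernel tree.

	#include <linux/blkdev.h>

	/*
	 * Hypothetical driver helper, assuming the driver already allocated
	 * the gendisk and its request queue.
	 */
	static void example_zoned_queue_setup(struct gendisk *disk)
	{
		struct request_queue *q = disk->queue;

		/*
		 * Declare a host-managed zoned model. With the patch above,
		 * this also defaults the zone write granularity to the
		 * logical block size.
		 */
		blk_queue_set_zoned(disk, BLK_ZONED_HM);

		/*
		 * Override the default when the device requires writes to
		 * sequential zones to be aligned to a larger unit, here
		 * (as an assumption) its physical block size.
		 */
		blk_queue_zone_write_granularity(q, queue_physical_block_size(q));
	}

As the added helper shows, any value smaller than the logical block size is clamped up to the logical block size, so a driver passing a too-small granularity still ends up with a usable limit.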