author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/block/nbd.c
parent     Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/block/nbd.c')
-rw-r--r--  drivers/block/nbd.c  77
1 file changed, 51 insertions(+), 26 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b7c332528e..1ddd3e5497 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -316,9 +316,12 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
+ struct queue_limits lim;
+ int error;
+
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
@@ -334,10 +337,16 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (!nbd->pid)
return 0;
+ lim = queue_limits_start_update(nbd->disk->queue);
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
- blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
- blk_queue_logical_block_size(nbd->disk->queue, blksize);
- blk_queue_physical_block_size(nbd->disk->queue, blksize);
+ lim.max_hw_discard_sectors = UINT_MAX;
+ else
+ lim.max_hw_discard_sectors = 0;
+ lim.logical_block_size = blksize;
+ lim.physical_block_size = blksize;
+ error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ if (error)
+ return error;
if (max_part)
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
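
The hunk above moves nbd from the old blk_queue_*() setters to the block layer's atomic queue-limits update API. As a minimal sketch of that pattern for a hypothetical driver (my_dev_update_limits and new_lbs are illustrative names, not part of this patch):

    #include <linux/blkdev.h>

    static int my_dev_update_limits(struct request_queue *q,
                                    unsigned int new_lbs)
    {
            struct queue_limits lim;
            int error;

            /* Snapshot the current limits; this also takes q->limits_lock. */
            lim = queue_limits_start_update(q);
            lim.logical_block_size = new_lbs;
            lim.physical_block_size = new_lbs;
            /* Validate and publish atomically; drops q->limits_lock. */
            error = queue_limits_commit_update(q, &lim);
            return error;
    }

Compared with calling blk_queue_logical_block_size() and friends one at a time, every field is validated together and becomes visible to the I/O path in a single step, which is why the discard limit now also has an explicit else branch clearing it.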
@@ -346,6 +355,18 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ loff_t blksize)
+{
+ int error;
+
+ blk_mq_freeze_queue(nbd->disk->queue);
+ error = __nbd_set_size(nbd, bytesize, blksize);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
+
+ return error;
+}
+
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
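
The new nbd_set_size() wrapper freezes the queue around the whole update so the limits cannot change underneath in-flight requests, while __nbd_set_size() carries the actual logic. The general shape, as a hedged sketch building on the my_dev_update_limits() example above (my_dev_set_size_frozen is an illustrative name):

    #include <linux/blk-mq.h>

    static int my_dev_set_size_frozen(struct request_queue *q,
                                      unsigned int new_lbs)
    {
            int error;

            /* Drain and block all in-flight I/O ... */
            blk_mq_freeze_queue(q);
            error = my_dev_update_limits(q, new_lbs);
            /* ... then let I/O resume against the new limits. */
            blk_mq_unfreeze_queue(q);
            return error;
    }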
@@ -567,7 +588,10 @@ static inline int was_interrupted(int result)
return result == -ERESTARTSYS || result == -EINTR;
}
-/* always call with the tx_lock held */
+/*
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
+ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
@@ -584,6 +608,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 nbd_cmd_flags = 0;
int sent = nsock->sent, skip = 0;
+ lockdep_assert_held(&cmd->lock);
+ lockdep_assert_held(&nsock->tx_lock);
+
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
type = req_to_nbd_cmd_type(req);
@@ -648,7 +675,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ return (__force int)BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
@@ -689,7 +716,7 @@ send_pages:
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ return (__force int)BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -986,7 +1013,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
@@ -994,18 +1021,20 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock;
int ret;
+ lockdep_assert_held(&cmd->lock);
+
config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- return -EINVAL;
+ return BLK_STS_IOERR;
}
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- return -EINVAL;
+ return BLK_STS_IOERR;
}
cmd->status = BLK_STS_OK;
again:
@@ -1028,7 +1057,7 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- return -EIO;
+ return BLK_STS_IOERR;
}
goto again;
}
@@ -1041,7 +1070,7 @@ again:
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
nbd_requeue_cmd(cmd);
- ret = 0;
+ ret = BLK_STS_OK;
goto out;
}
/*
@@ -1060,19 +1089,19 @@ again:
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
nbd_requeue_cmd(cmd);
- ret = 0;
+ ret = BLK_STS_OK;
}
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
- return ret;
+ return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- int ret;
+ blk_status_t ret;
/*
* Since we look at the bio's to send the request over the network we
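
With nbd_handle_cmd() now returning blk_status_t natively, only the nbd_send_cmd() boundary still traffics in negative errnos, which the final return above collapses into BLK_STS_IOERR. A driver that wants a finer-grained mapping could use the block layer's generic helper instead; a minimal sketch (my_status is an illustrative name):

    #include <linux/blkdev.h>

    static blk_status_t my_status(int err)
    {
            /*
             * 0 maps to BLK_STS_OK, known negative errnos map to their
             * dedicated status codes, and anything else falls back to
             * BLK_STS_IOERR.
             */
            return errno_to_blk_status(err);
    }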
@@ -1092,10 +1121,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
- if (ret < 0)
- ret = BLK_STS_IOERR;
- else if (!ret)
- ret = BLK_STS_OK;
mutex_unlock(&cmd->lock);
return ret;
@@ -1351,7 +1376,6 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->config = NULL;
nbd->tag_set.timeout = 0;
- blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@@ -1783,6 +1807,12 @@ static const struct blk_mq_ops nbd_mq_ops = {
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = 65536,
+ .max_user_sectors = 256,
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
+ };
struct nbd_device *nbd;
struct gendisk *disk;
int err = -ENOMEM;
@@ -1823,7 +1853,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
if (err < 0)
goto out_free_tags;
- disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
+ disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_free_idr;
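
Passing the limits into blk_mq_alloc_disk() means the disk is created with its final limits in one go rather than being tuned by a series of blk_queue_*() calls afterwards, which is what the removals in the next hunk reflect. A hedged standalone sketch of the allocation pattern (my_dev_alloc_disk is an illustrative name, not from this patch):

    #include <linux/blk-mq.h>

    static struct gendisk *my_dev_alloc_disk(struct blk_mq_tag_set *set)
    {
            struct queue_limits lim = {
                    .max_hw_sectors         = 65536,
                    .max_segments           = USHRT_MAX,
                    .max_segment_size       = UINT_MAX,
            };

            /* Passing NULL instead of &lim would apply the defaults. */
            return blk_mq_alloc_disk(set, &lim, NULL);
    }

Note also that the old direct write to disk->queue->limits.max_sectors becomes the validated .max_user_sectors field in the new struct.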
@@ -1843,11 +1873,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_max_discard_sectors(disk->queue, 0);
- blk_queue_max_segment_size(disk->queue, UINT_MAX);
- blk_queue_max_segments(disk->queue, USHRT_MAX);
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);