| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:03 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:03 +0000 |
| commit | 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch) | |
| tree | b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/nvme | |
| parent | Adding upstream version 6.7.12. (diff) | |
| download | linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip | |
Adding upstream version 6.8.9. (upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/nvme')
30 files changed, 729 insertions, 530 deletions
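
The most mechanical change in this patch is the new `nvme_submit_flags_t` bitmask in host/nvme.h, which replaces the separate `at_head` and `blk_mq_req_flags_t` parameters of `__nvme_submit_sync_cmd()`. A standalone sketch of the flag translation, condensed from the core.c and fabrics.c hunks below; the kernel types are mocked so the logic compiles and runs in userspace:

```c
/*
 * Sketch of the nvme_submit_flags_t translation introduced by this patch
 * (see the __nvme_submit_sync_cmd() hunk in host/core.c). Kernel types are
 * mocked; this is an illustration, not the in-tree code.
 */
#include <stdio.h>

/* mirrors the new enum in host/nvme.h */
typedef unsigned int nvme_submit_flags_t;
enum {
	NVME_SUBMIT_AT_HEAD  = 1 << 0, /* insert request at the head of the queue */
	NVME_SUBMIT_NOWAIT   = 1 << 1, /* set BLK_MQ_REQ_NOWAIT when allocating */
	NVME_SUBMIT_RESERVED = 1 << 2, /* set BLK_MQ_REQ_RESERVED when allocating */
	NVME_SUBMIT_RETRY    = 1 << 3, /* retry when NVME_SC_DNR is not set */
};

/* mocked block-layer request flags */
typedef unsigned int blk_mq_req_flags_t;
#define BLK_MQ_REQ_NOWAIT   (1 << 0)
#define BLK_MQ_REQ_RESERVED (1 << 1)

/* the translation step __nvme_submit_sync_cmd() now performs internally */
static blk_mq_req_flags_t nvme_to_blk_flags(nvme_submit_flags_t flags)
{
	blk_mq_req_flags_t blk_flags = 0;

	if (flags & NVME_SUBMIT_NOWAIT)
		blk_flags |= BLK_MQ_REQ_NOWAIT;
	if (flags & NVME_SUBMIT_RESERVED)
		blk_flags |= BLK_MQ_REQ_RESERVED;
	return blk_flags;
}

int main(void)
{
	/* what nvmf_connect_io_queue() now passes instead of raw blk flags */
	nvme_submit_flags_t flags = NVME_SUBMIT_AT_HEAD | NVME_SUBMIT_NOWAIT |
				    NVME_SUBMIT_RESERVED;

	printf("blk flags: %#x, at_head: %d\n",
	       nvme_to_blk_flags(flags), !!(flags & NVME_SUBMIT_AT_HEAD));
	return 0;
}
```

Folding `at_head` into the same bitmask lets callers such as `nvmf_connect_admin_queue()` express "reserved, non-blocking, head-of-queue" in one argument instead of three, and gives auth.c a place to request retries (`NVME_SUBMIT_RETRY`) without reaching into block-layer flags.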
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index a23ab5c968..a3455f1d67 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) } EXPORT_SYMBOL_GPL(nvme_auth_generate_key); +MODULE_DESCRIPTION("NVMe Authentication framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c index ee341b83ee..6f7e7a8fa5 100644 --- a/drivers/nvme/common/keyring.c +++ b/drivers/nvme/common/keyring.c @@ -111,7 +111,7 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring, * should be preferred to 'generated' PSKs, * and SHA-384 should be preferred to SHA-256. */ -struct nvme_tls_psk_priority_list { +static struct nvme_tls_psk_priority_list { bool generated; enum nvme_tcp_tls_cipher cipher; } nvme_tls_psk_prio[] = { @@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void) MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); +MODULE_DESCRIPTION("NVMe Keyring implementation"); module_init(nvme_keyring_init); module_exit(nvme_keyring_exit); diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 596bb11eeb..c727cd1f26 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set, static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) { + enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl); u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); bool dead = false, freeze = false; unsigned long flags; @@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) if (csts & NVME_CSTS_CFS) dead = true; - if (anv->ctrl.state == NVME_CTRL_LIVE || - anv->ctrl.state == NVME_CTRL_RESETTING) { + if (state == NVME_CTRL_LIVE || + state == NVME_CTRL_RESETTING) { freeze = true; nvme_start_freeze(&anv->ctrl); } @@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req) unsigned long flags; u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); - if (anv->ctrl.state != NVME_CTRL_LIVE) { + if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) { /* * From rdma.c: * If we are resetting, connecting or deleting we should @@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work) u32 boot_status, aqa; struct apple_nvme *anv = container_of(work, struct apple_nvme, ctrl.reset_work); + enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl); - if (anv->ctrl.state != NVME_CTRL_RESETTING) { - dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", - anv->ctrl.state); + if (state != NVME_CTRL_RESETTING) { + dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state); ret = -ENODEV; goto out; } diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 72c0525c75..a264b3ae07 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context { static struct workqueue_struct *nvme_auth_wq; -#define nvme_auth_flags_from_qid(qid) \ - (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED -#define nvme_auth_queue_from_qid(ctrl, qid) \ - (qid == 0) ? 
(ctrl)->fabrics_q : (ctrl)->connect_q - static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl) { return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues + @@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, void *data, size_t data_len, bool auth_send) { struct nvme_command cmd = {}; - blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); - struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); + nvme_submit_flags_t flags = NVME_SUBMIT_RETRY; + struct request_queue *q = ctrl->fabrics_q; int ret; + if (qid != 0) { + flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED; + q = ctrl->connect_q; + } + cmd.auth_common.opcode = nvme_fabrics_command; cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; cmd.auth_common.spsp0 = 0x01; @@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, } ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, - qid == 0 ? NVME_QID_ANY : qid, - 0, flags); + qid == 0 ? NVME_QID_ANY : qid, flags); if (ret > 0) dev_warn(ctrl->device, "qid %d auth_send failed with status %d\n", qid, ret); @@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work) * If the ctrl is no connected, bail as reconnect will handle * authentication. */ - if (ctrl->state != NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) return; /* Authenticate admin queue first */ diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 20f46c2308..6f2ebb5fcd 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = { [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command", }; -const unsigned char *nvme_get_error_status_str(u16 status) +const char *nvme_get_error_status_str(u16 status) { status &= 0x7ff; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) - return nvme_statuses[status & 0x7ff]; + return nvme_statuses[status]; return "Unknown"; } -const unsigned char *nvme_get_opcode_str(u8 opcode) +const char *nvme_get_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode]) return nvme_ops[opcode]; @@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode) } EXPORT_SYMBOL_GPL(nvme_get_opcode_str); -const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +const char *nvme_get_admin_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode]) return nvme_admin_ops[opcode]; @@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode) } EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str); -const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) { +const char *nvme_get_fabrics_opcode_str(u8 opcode) { if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode]) return nvme_fabrics_ops[opcode]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 771eac1412..fe3627c5bd 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -20,6 +20,7 @@ #include <linux/ptrace.h> #include <linux/nvme_ioctl.h> #include <linux/pm_qos.h> +#include <linux/ratelimit.h> #include <asm/unaligned.h> #include "nvme.h" @@ -312,12 +313,12 @@ static void nvme_log_error(struct request *req) struct nvme_request *nr = nvme_req(req); if (ns) { - pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", + pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n", ns->disk ? 
ns->disk->disk_name : "?", nvme_get_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, - (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)), - (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift, + nvme_sect_to_lba(ns->head, blk_rq_pos(req)), + blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), nr->status >> 8 & 7, /* Status Code Type */ nr->status & 0xff, /* Status Code */ @@ -337,6 +338,30 @@ static void nvme_log_error(struct request *req) nr->status & NVME_SC_DNR ? "DNR " : ""); } +static void nvme_log_err_passthru(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + struct nvme_request *nr = nvme_req(req); + + pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s" + "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n", + ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), + ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : + nvme_get_admin_opcode_str(nr->cmd->common.opcode), + nr->cmd->common.opcode, + nvme_get_error_status_str(nr->status), + nr->status >> 8 & 7, /* Status Code Type */ + nr->status & 0xff, /* Status Code */ + nr->status & NVME_SC_MORE ? "MORE " : "", + nr->status & NVME_SC_DNR ? "DNR " : "", + nr->cmd->common.cdw10, + nr->cmd->common.cdw11, + nr->cmd->common.cdw12, + nr->cmd->common.cdw13, + nr->cmd->common.cdw14, + nr->cmd->common.cdw14); +} + enum nvme_disposition { COMPLETE, RETRY, @@ -372,17 +397,24 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) static inline void nvme_end_req_zoned(struct request *req) { if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && - req_op(req) == REQ_OP_ZONE_APPEND) - req->__sector = nvme_lba_to_sect(req->q->queuedata, + req_op(req) == REQ_OP_ZONE_APPEND) { + struct nvme_ns *ns = req->q->queuedata; + + req->__sector = nvme_lba_to_sect(ns->head, le64_to_cpu(nvme_req(req)->result.u64)); + } } static inline void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) - nvme_log_error(req); + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) @@ -675,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req) /* initialize a passthrough request */ void nvme_init_request(struct request *req, struct nvme_command *cmd) { - if (req->q->queuedata) + struct nvme_request *nr = nvme_req(req); + bool logging_enabled; + + if (req->q->queuedata) { + struct nvme_ns *ns = req->q->disk->private_data; + + logging_enabled = ns->head->passthru_err_log_enabled; req->timeout = NVME_IO_TIMEOUT; - else /* no queuedata implies admin queue */ + } else { /* no queuedata implies admin queue */ + logging_enabled = nr->ctrl->passthru_err_log_enabled; req->timeout = NVME_ADMIN_TIMEOUT; + } + + if (!logging_enabled) + req->rq_flags |= RQF_QUIET; /* passthru commands should let the driver set the SGL flags */ cmd->common.flags &= ~NVME_CMD_SGL_ALL; @@ -687,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->mq_hctx->type == HCTX_TYPE_POLL) req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); - req->rq_flags |= RQF_QUIET; - memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); + memcpy(nr->cmd, cmd, sizeof(*cmd)); } EXPORT_SYMBOL_GPL(nvme_init_request); @@ -717,7 +759,7 @@ 
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, - bool queue_live) + bool queue_live, enum nvme_ctrl_state state) { struct nvme_request *req = nvme_req(rq); @@ -738,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * command, which is require to set the queue live in the * appropinquate states. */ - switch (nvme_ctrl_state(ctrl)) { + switch (state) { case NVME_CTRL_CONNECTING: if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || @@ -793,8 +835,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, } if (queue_max_discard_segments(req->q) == 1) { - u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); - u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); + u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req)); + u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9); range[0].cattr = cpu_to_le32(0); range[0].nlb = cpu_to_le32(nlb); @@ -802,8 +844,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, n = 1; } else { __rq_for_each_bio(bio, req) { - u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); - u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + u64 slba = nvme_sect_to_lba(ns->head, + bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift; if (n < segments) { range[n].cattr = cpu_to_le32(0); @@ -841,7 +884,7 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, u64 ref48; /* both rw and write zeroes share the same reftag format */ - switch (ns->guard_type) { + switch (ns->head->guard_type) { case NVME_NVM_NS_16B_GUARD: cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); break; @@ -869,17 +912,18 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); cmnd->write_zeroes.slba = - cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); cmnd->write_zeroes.length = - cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); - if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC)) + if (!(req->cmd_flags & REQ_NOUNMAP) && + (ns->head->features & NVME_NS_DEAC)) cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); - if (nvme_ns_has_pi(ns)) { + if (nvme_ns_has_pi(ns->head)) { cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); - switch (ns->pi_type) { + switch (ns->head->pi_type) { case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: nvme_set_ref_tag(ns, cmnd, req); @@ -911,13 +955,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, cmnd->rw.cdw2 = 0; cmnd->rw.cdw3 = 0; cmnd->rw.metadata = 0; - cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); - cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cmnd->rw.slba = + cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); + cmnd->rw.length = + cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); cmnd->rw.reftag = 0; cmnd->rw.apptag = 0; cmnd->rw.appmask = 0; - if (ns->ms) { + if (ns->head->ms) { /* * If formated with metadata, the block layer always provides a * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. 
Else @@ -925,12 +971,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, * namespace capacity to zero to prevent any I/O. */ if (!blk_integrity_rq(req)) { - if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) + if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head))) return BLK_STS_NOTSUPP; control |= NVME_RW_PRINFO_PRACT; } - switch (ns->pi_type) { + switch (ns->head->pi_type) { case NVME_NS_DPS_PI_TYPE3: control |= NVME_RW_PRINFO_PRCHK_GUARD; break; @@ -1043,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); */ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, - int qid, int at_head, blk_mq_req_flags_t flags) + int qid, nvme_submit_flags_t flags) { struct request *req; int ret; + blk_mq_req_flags_t blk_flags = 0; + if (flags & NVME_SUBMIT_NOWAIT) + blk_flags |= BLK_MQ_REQ_NOWAIT; + if (flags & NVME_SUBMIT_RESERVED) + blk_flags |= BLK_MQ_REQ_RESERVED; if (qid == NVME_QID_ANY) - req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); + req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags); else - req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, + req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags, qid - 1); if (IS_ERR(req)) return PTR_ERR(req); nvme_init_request(req, cmd); + if (flags & NVME_SUBMIT_RETRY) + req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); @@ -1064,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; } - ret = nvme_execute_rq(req, at_head); + ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD); if (result && ret >= 0) *result = nvme_req(req)->result; out: @@ -1077,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buffer, unsigned bufflen) { return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); } EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); @@ -1100,6 +1153,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } else { effects = le32_to_cpu(ctrl->effects->acs[opcode]); + + /* Ignore execution restrictions if any relaxation bits are set */ + if (effects & NVME_CMD_EFFECTS_CSER_MASK) + effects &= ~NVME_CMD_EFFECTS_CSE_MASK; } return effects; @@ -1452,7 +1509,7 @@ free_data: return status; } -static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, +int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_id_ns **id) { struct nvme_command c = { }; @@ -1552,7 +1609,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, c.features.dword11 = cpu_to_le32(dword11); ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, - buffer, buflen, NVME_QID_ANY, 0, 0); + buffer, buflen, NVME_QID_ANY, 0); if (ret >= 0 && result) *result = le32_to_cpu(res.u32); return ret; @@ -1671,14 +1728,14 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) } #ifdef CONFIG_BLK_DEV_INTEGRITY -static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, - u32 max_integrity_segments) +static void nvme_init_integrity(struct gendisk *disk, + struct nvme_ns_head *head, u32 max_integrity_segments) { struct blk_integrity integrity = { }; - switch (ns->pi_type) { + switch (head->pi_type) { case NVME_NS_DPS_PI_TYPE3: - switch (ns->guard_type) { + switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: integrity.profile = 
&t10_pi_type3_crc; integrity.tag_size = sizeof(u16) + sizeof(u32); @@ -1696,7 +1753,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, break; case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE2: - switch (ns->guard_type) { + switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: integrity.profile = &t10_pi_type1_crc; integrity.tag_size = sizeof(u16); @@ -1717,27 +1774,28 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, break; } - integrity.tuple_size = ns->ms; + integrity.tuple_size = head->ms; blk_integrity_register(disk, &integrity); blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); } #else -static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, - u32 max_integrity_segments) +static void nvme_init_integrity(struct gendisk *disk, + struct nvme_ns_head *head, u32 max_integrity_segments) { } #endif /* CONFIG_BLK_DEV_INTEGRITY */ -static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) +static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk, + struct nvme_ns_head *head) { - struct nvme_ctrl *ctrl = ns->ctrl; struct request_queue *queue = disk->queue; - u32 size = queue_logical_block_size(queue); + u32 max_discard_sectors; - if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX)) - ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl); - - if (ctrl->max_discard_sectors == 0) { + if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) { + max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl); + } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { + max_discard_sectors = UINT_MAX; + } else { blk_queue_max_discard_sectors(queue, 0); return; } @@ -1745,14 +1803,22 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < NVME_DSM_MAX_RANGES); - queue->limits.discard_granularity = size; - - /* If discard is already enabled, don't reset queue limits */ + /* + * If discard is already enabled, don't reset queue limits. + * + * This works around the fact that the block layer can't cope well with + * updating the hardware limits when overridden through sysfs. This is + * harmless because discard limits in NVMe are purely advisory. 
+ */ if (queue->limits.max_discard_sectors) return; - blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors); - blk_queue_max_discard_segments(queue, ctrl->max_discard_segments); + blk_queue_max_discard_sectors(queue, max_discard_sectors); + if (ctrl->dmrl) + blk_queue_max_discard_segments(queue, ctrl->dmrl); + else + blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); + queue->limits.discard_granularity = queue_logical_block_size(queue); if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); @@ -1766,21 +1832,21 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) a->csi == b->csi; } -static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) +static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head, + struct nvme_id_ns *id) { bool first = id->dps & NVME_NS_DPS_PI_FIRST; unsigned lbaf = nvme_lbaf_index(id->flbas); - struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_command c = { }; struct nvme_id_ns_nvm *nvm; int ret = 0; u32 elbaf; - ns->pi_size = 0; - ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + head->pi_size = 0; + head->ms = le16_to_cpu(id->lbaf[lbaf].ms); if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { - ns->pi_size = sizeof(struct t10_pi_tuple); - ns->guard_type = NVME_NVM_NS_16B_GUARD; + head->pi_size = sizeof(struct t10_pi_tuple); + head->guard_type = NVME_NVM_NS_16B_GUARD; goto set_pi; } @@ -1789,11 +1855,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) return -ENOMEM; c.identify.opcode = nvme_admin_identify; - c.identify.nsid = cpu_to_le32(ns->head->ns_id); + c.identify.nsid = cpu_to_le32(head->ns_id); c.identify.cns = NVME_ID_CNS_CS_NS; c.identify.csi = NVME_CSI_NVM; - ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); + ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm)); if (ret) goto free_data; @@ -1803,13 +1869,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) if (nvme_elbaf_sts(elbaf)) goto free_data; - ns->guard_type = nvme_elbaf_guard_type(elbaf); - switch (ns->guard_type) { + head->guard_type = nvme_elbaf_guard_type(elbaf); + switch (head->guard_type) { case NVME_NVM_NS_64B_GUARD: - ns->pi_size = sizeof(struct crc64_pi_tuple); + head->pi_size = sizeof(struct crc64_pi_tuple); break; case NVME_NVM_NS_16B_GUARD: - ns->pi_size = sizeof(struct t10_pi_tuple); + head->pi_size = sizeof(struct t10_pi_tuple); break; default: break; @@ -1818,25 +1884,25 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) free_data: kfree(nvm); set_pi: - if (ns->pi_size && (first || ns->ms == ns->pi_size)) - ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; + if (head->pi_size && (first || head->ms == head->pi_size)) + head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; else - ns->pi_type = 0; + head->pi_type = 0; return ret; } -static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static int nvme_configure_metadata(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head, struct nvme_id_ns *id) { - struct nvme_ctrl *ctrl = ns->ctrl; int ret; - ret = nvme_init_ms(ns, id); + ret = nvme_init_ms(ctrl, head, id); if (ret) return ret; - ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); - if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) + head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); + if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) return 0; if (ctrl->ops->flags & NVME_F_FABRICS) { @@ -1848,7 +1914,7 @@ static int 
nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) return 0; - ns->features |= NVME_NS_EXT_LBAS; + head->features |= NVME_NS_EXT_LBAS; /* * The current fabrics transport drivers support namespace @@ -1859,8 +1925,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * Note, this check will need to be modified if any drivers * gain the ability to use other metadata formats. */ - if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) - ns->features |= NVME_NS_METADATA_SUPPORTED; + if (ctrl->max_integrity_segments && nvme_ns_has_pi(head)) + head->features |= NVME_NS_METADATA_SUPPORTED; } else { /* * For PCIe controllers, we can't easily remap the separate @@ -1869,9 +1935,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * We allow extended LBAs for the passthrough interface, though. */ if (id->flbas & NVME_NS_FLBAS_META_EXT) - ns->features |= NVME_NS_EXT_LBAS; + head->features |= NVME_NS_EXT_LBAS; else - ns->features |= NVME_NS_METADATA_SUPPORTED; + head->features |= NVME_NS_METADATA_SUPPORTED; } return 0; } @@ -1894,11 +1960,11 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_write_cache(q, vwc, vwc); } -static void nvme_update_disk_info(struct gendisk *disk, - struct nvme_ns *ns, struct nvme_id_ns *id) +static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk, + struct nvme_ns_head *head, struct nvme_id_ns *id) { - sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); - u32 bs = 1U << ns->lba_shift; + sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze)); + u32 bs = 1U << head->lba_shift; u32 atomic_bs, phys_bs, io_opt = 0; /* @@ -1906,7 +1972,7 @@ static void nvme_update_disk_info(struct gendisk *disk, * or smaller than a sector size yet, so catch this early and don't * allow block I/O. */ - if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) { + if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { capacity = 0; bs = (1 << 9); } @@ -1923,7 +1989,7 @@ static void nvme_update_disk_info(struct gendisk *disk, if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else - atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + atomic_bs = (1 + ctrl->subsys->awupf) * bs; } if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { @@ -1949,20 +2015,20 @@ static void nvme_update_disk_info(struct gendisk *disk, * I/O to namespaces with metadata except when the namespace supports * PI, as it can strip/insert in that case. 
*/ - if (ns->ms) { + if (head->ms) { if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && - (ns->features & NVME_NS_METADATA_SUPPORTED)) - nvme_init_integrity(disk, ns, - ns->ctrl->max_integrity_segments); - else if (!nvme_ns_has_pi(ns)) + (head->features & NVME_NS_METADATA_SUPPORTED)) + nvme_init_integrity(disk, head, + ctrl->max_integrity_segments); + else if (!nvme_ns_has_pi(head)) capacity = 0; } set_capacity_and_notify(disk, capacity); - nvme_config_discard(disk, ns); + nvme_config_discard(ctrl, disk, head); blk_queue_max_write_zeroes_sectors(disk->queue, - ns->ctrl->max_zeroes_sectors); + ctrl->max_zeroes_sectors); } static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) @@ -1985,7 +2051,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) is_power_of_2(ctrl->max_hw_sectors)) iob = ctrl->max_hw_sectors; else - iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); + iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); if (!iob) return; @@ -2052,16 +2118,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, blk_mq_freeze_queue(ns->disk->queue); lbaf = nvme_lbaf_index(id->flbas); - ns->lba_shift = id->lbaf[lbaf].ds; + ns->head->lba_shift = id->lbaf[lbaf].ds; + ns->head->nuse = le64_to_cpu(id->nuse); nvme_set_queue_limits(ns->ctrl, ns->queue); - ret = nvme_configure_metadata(ns, id); + ret = nvme_configure_metadata(ns->ctrl, ns->head, id); if (ret < 0) { blk_mq_unfreeze_queue(ns->disk->queue); goto out; } nvme_set_chunk_sectors(ns, id); - nvme_update_disk_info(ns->disk, ns, id); + nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id); if (ns->head->ids.csi == NVME_CSI_ZNS) { ret = nvme_update_zone_info(ns, lbaf); @@ -2078,7 +2145,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * do not return zeroes. 
*/ if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) - ns->features |= NVME_NS_DEAC; + ns->head->features |= NVME_NS_DEAC; set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); set_bit(NVME_NS_READY, &ns->flags); blk_mq_unfreeze_queue(ns->disk->queue); @@ -2091,7 +2158,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (nvme_ns_head_multipath(ns->head)) { blk_mq_freeze_queue(ns->head->disk->queue); - nvme_update_disk_info(ns->head->disk, ns, id); + nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id); set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); nvme_mpath_revalidate_paths(ns); blk_stack_limits(&ns->head->disk->queue->limits, @@ -2154,7 +2221,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l cmd.common.cdw11 = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, - NVME_QID_ANY, 1, 0); + NVME_QID_ANY, NVME_SUBMIT_AT_HEAD); } static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) @@ -2920,14 +2987,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) struct nvme_id_ctrl_nvm *id; int ret; - if (ctrl->oncs & NVME_CTRL_ONCS_DSM) { - ctrl->max_discard_sectors = UINT_MAX; - ctrl->max_discard_segments = NVME_DSM_MAX_RANGES; - } else { - ctrl->max_discard_sectors = 0; - ctrl->max_discard_segments = 0; - } - /* * Even though NVMe spec explicitly states that MDTS is not applicable * to the write-zeroes, we are cautious and limit the size to the @@ -2957,8 +3016,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) if (ret) goto free_data; - if (id->dmrl) - ctrl->max_discard_segments = id->dmrl; + ctrl->dmrl = id->dmrl; ctrl->dmrsl = le32_to_cpu(id->dmrsl); if (id->wzsl) ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); @@ -3026,6 +3084,42 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) return 0; } +static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + /* + * In fabrics we need to verify the cntlid matches the + * admin connect + */ + if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { + dev_err(ctrl->device, + "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n", + ctrl->cntlid, le16_to_cpu(id->cntlid)); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { + dev_err(ctrl->device, + "keep-alive support is mandatory for fabrics\n"); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { + dev_err(ctrl->device, + "I/O queue command capsule supported size %d < 4\n", + ctrl->ioccsz); + return -EINVAL; + } + + if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { + dev_err(ctrl->device, + "I/O queue response capsule supported size %d < 1\n", + ctrl->iorcsz); + return -EINVAL; + } + + return 0; +} + static int nvme_init_identify(struct nvme_ctrl *ctrl) { struct nvme_id_ctrl *id; @@ -3138,25 +3232,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->iorcsz = le32_to_cpu(id->iorcsz); ctrl->maxcmd = le16_to_cpu(id->maxcmd); - /* - * In fabrics we need to verify the cntlid matches the - * admin connect - */ - if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { - dev_err(ctrl->device, - "Mismatching cntlid: Connect %u vs Identify " - "%u, rejecting\n", - ctrl->cntlid, le16_to_cpu(id->cntlid)); - ret = -EINVAL; - goto out_free; - } - - if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { - dev_err(ctrl->device, - "keep-alive support is mandatory for fabrics\n"); - ret = -EINVAL; + ret = nvme_check_ctrl_fabric_info(ctrl, 
id); + if (ret) goto out_free; - } } else { ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); @@ -3415,6 +3493,8 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ns_id = info->nsid; head->ids = info->ids; head->shared = info->is_shared; + ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); + ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); kref_init(&head->ref); if (head->ids.csi) { @@ -3674,7 +3754,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) up_write(&ctrl->namespaces_rwsem); nvme_get_ctrl(ctrl); - if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) + if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) goto out_cleanup_ns_from_list; if (!nvme_ns_head_multipath(ns->head)) @@ -3683,6 +3763,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) nvme_mpath_add_disk(ns, info->anagrpid); nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); + /* + * Set ns->disk->device->driver_data to ns so we can access + * ns->head->passthru_err_log_enabled in + * nvme_io_passthru_err_log_enabled_[store | show](). + */ + dev_set_drvdata(disk_to_dev(ns->disk), ns); + return; out_cleanup_ns_from_list: @@ -4488,6 +4575,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, int ret; WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); + ctrl->passthru_err_log_enabled = false; clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); spin_lock_init(&ctrl->lock); mutex_init(&ctrl->scan_lock); @@ -4825,5 +4913,6 @@ static void __exit nvme_core_exit(void) MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("NVMe host core framework"); module_init(nvme_core_init); module_exit(nvme_core_exit); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index aa88606a44..495c171dae 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) cmd.prop_set.value = cpu_to_le64(val); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, - NVME_QID_ANY, 0, 0); + NVME_QID_ANY, 0); if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", @@ -387,8 +387,8 @@ static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl, uuid_copy(&data->hostid, &ctrl->opts->host->id); data->cntlid = cpu_to_le16(cntlid); - strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); - strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); + strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); + strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); return data; } @@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, - data, sizeof(*data), NVME_QID_ANY, 1, - BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + data, sizeof(*data), NVME_QID_ANY, + 
NVME_SUBMIT_AT_HEAD | + NVME_SUBMIT_NOWAIT | + NVME_SUBMIT_RESERVED); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); @@ -525,11 +527,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) return -ENOMEM; ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, - data, sizeof(*data), qid, 1, - BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT); + data, sizeof(*data), qid, + NVME_SUBMIT_AT_HEAD | + NVME_SUBMIT_RESERVED | + NVME_SUBMIT_NOWAIT); if (ret) { nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32), &cmd, data); + goto out_free_data; } result = le32_to_cpu(res.u32); if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { @@ -1488,6 +1493,7 @@ static void __exit nvmf_exit(void) } MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("NVMe host fabrics library"); module_init(nvmf_init); module_exit(nvmf_exit); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index e4acccc315..37c974c38d 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -178,9 +178,11 @@ static inline bool nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { - if (ctrl->state == NVME_CTRL_DELETING || - ctrl->state == NVME_CTRL_DELETING_NOIO || - ctrl->state == NVME_CTRL_DEAD || + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (state == NVME_CTRL_DELETING || + state == NVME_CTRL_DELETING_NOIO || + state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || !uuid_equal(&opts->host->id, &ctrl->opts->host->id)) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e0f4129c3a..68a5d97165 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1211,10 +1211,10 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, /* Linux supports only Dynamic controllers */ assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); - strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, - min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); - strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, - min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); + strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, + sizeof(assoc_rqst->assoc_cmd.hostnqn)); + strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, + sizeof(assoc_rqst->assoc_cmd.subnqn)); lsop->queue = queue; lsreq->rqstaddr = assoc_rqst; @@ -2567,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; + u16 qnum = op->queue->qnum; struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; @@ -2575,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq) * will detect the aborted io and will fail the connection. 
*/ dev_info(ctrl->ctrl.device, - "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " + "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: " "x%08x/x%08x\n", - ctrl->cnum, op->queue->qnum, sqe->common.opcode, - sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); + ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype, + nvme_fabrics_opcode_str(qnum, sqe), + sqe->common.cdw10, sqe->common.cdw11); if (__nvme_fc_abort_op(ctrl, op)) nvme_fc_error_recovery(ctrl, "io timeout abort failed"); @@ -3491,10 +3493,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->ctrl.opts = opts; ctrl->ctrl.nr_reconnects = 0; - if (lport->dev) - ctrl->ctrl.numa_node = dev_to_node(lport->dev); - else - ctrl->ctrl.numa_node = NUMA_NO_NODE; INIT_LIST_HEAD(&ctrl->ctrl_list); ctrl->lport = lport; ctrl->rport = rport; @@ -3539,6 +3537,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); if (ret) goto out_free_queues; + if (lport->dev) + ctrl->ctrl.numa_node = dev_to_node(lport->dev); /* at this point, teardown path changes to ref counting on nvme ctrl */ @@ -3570,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, flush_delayed_work(&ctrl->connect_work); dev_info(ctrl->ctrl.device, - "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", - ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl)); + "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n", + ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn); return &ctrl->ctrl; @@ -3971,4 +3971,5 @@ static void __exit nvme_fc_exit_module(void) module_init(nvme_fc_init_module); module_exit(nvme_fc_exit_module); +MODULE_DESCRIPTION("NVMe host FC transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 4939ed3563..3dfd5ae99a 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -5,7 +5,7 @@ */ #include <linux/ptrace.h> /* for force_successful_syscall_return */ #include <linux/nvme_ioctl.h> -#include <linux/io_uring.h> +#include <linux/io_uring/cmd.h> #include "nvme.h" enum { @@ -97,58 +97,6 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval) return (void __user *)ptrval; } -static void *nvme_add_user_metadata(struct request *req, void __user *ubuf, - unsigned len, u32 seed) -{ - struct bio_integrity_payload *bip; - int ret = -ENOMEM; - void *buf; - struct bio *bio = req->bio; - - buf = kmalloc(len, GFP_KERNEL); - if (!buf) - goto out; - - if (req_op(req) == REQ_OP_DRV_OUT) { - ret = -EFAULT; - if (copy_from_user(buf, ubuf, len)) - goto out_free_meta; - } else { - memset(buf, 0, len); - } - - bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); - if (IS_ERR(bip)) { - ret = PTR_ERR(bip); - goto out_free_meta; - } - - bip->bip_iter.bi_sector = seed; - ret = bio_integrity_add_page(bio, virt_to_page(buf), len, - offset_in_page(buf)); - if (ret != len) { - ret = -ENOMEM; - goto out_free_meta; - } - - req->cmd_flags |= REQ_INTEGRITY; - return buf; -out_free_meta: - kfree(buf); -out: - return ERR_PTR(ret); -} - -static int nvme_finish_user_metadata(struct request *req, void __user *ubuf, - void *meta, unsigned len, int ret) -{ - if (!ret && req_op(req) == REQ_OP_DRV_IN && - copy_to_user(ubuf, meta, len)) - ret = -EFAULT; - kfree(meta); - return ret; -} - static struct request *nvme_alloc_user_request(struct request_queue *q, struct nvme_command *cmd, blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags) @@ -165,14 +113,12 @@ static struct request 
*nvme_alloc_user_request(struct request_queue *q, static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd, - unsigned int flags) + u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags) { struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ? ns->disk->part0 : NULL; struct bio *bio = NULL; - void *meta = NULL; int ret; if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { @@ -194,18 +140,17 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, if (ret) goto out; + bio = req->bio; - if (bdev) + if (bdev) { bio_set_dev(bio, bdev); - - if (bdev && meta_buffer && meta_len) { - meta = nvme_add_user_metadata(req, meta_buffer, meta_len, - meta_seed); - if (IS_ERR(meta)) { - ret = PTR_ERR(meta); - goto out_unmap; + if (meta_buffer && meta_len) { + ret = bio_integrity_map_user(bio, meta_buffer, meta_len, + meta_seed); + if (ret) + goto out_unmap; + req->cmd_flags |= REQ_INTEGRITY; } - *metap = meta; } return ret; @@ -226,7 +171,6 @@ static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_ns *ns = q->queuedata; struct nvme_ctrl *ctrl; struct request *req; - void *meta = NULL; struct bio *bio; u32 effects; int ret; @@ -238,7 +182,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, req->timeout = timeout; if (ubuffer && bufflen) { ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, - meta_len, meta_seed, &meta, NULL, flags); + meta_len, meta_seed, NULL, flags); if (ret) return ret; } @@ -250,9 +194,6 @@ static int nvme_submit_user_cmd(struct request_queue *q, ret = nvme_execute_rq(req, false); if (result) *result = le64_to_cpu(nvme_req(req)->result.u64); - if (meta) - ret = nvme_finish_user_metadata(req, meta_buffer, meta, - meta_len, ret); if (bio) blk_rq_unmap_user(bio); blk_mq_free_request(req); @@ -284,10 +225,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) return -EINVAL; } - length = (io.nblocks + 1) << ns->lba_shift; + length = (io.nblocks + 1) << ns->head->lba_shift; if ((io.control & NVME_RW_PRINFO_PRACT) && - ns->ms == sizeof(struct t10_pi_tuple)) { + (ns->head->ms == ns->head->pi_size)) { /* * Protection information is stripped/inserted by the * controller. @@ -297,11 +238,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) meta_len = 0; metadata = NULL; } else { - meta_len = (io.nblocks + 1) * ns->ms; + meta_len = (io.nblocks + 1) * ns->head->ms; metadata = nvme_to_user_ptr(io.metadata); } - if (ns->features & NVME_NS_EXT_LBAS) { + if (ns->head->features & NVME_NS_EXT_LBAS) { length += meta_len; meta_len = 0; } else if (meta_len) { @@ -447,19 +388,10 @@ struct nvme_uring_data { * Expect build errors if this grows larger than that. 
*/ struct nvme_uring_cmd_pdu { - union { - struct bio *bio; - struct request *req; - }; - u32 meta_len; - u32 nvme_status; - union { - struct { - void *meta; /* kernel-resident buffer */ - void __user *meta_buffer; - }; - u64 result; - } u; + struct request *req; + struct bio *bio; + u64 result; + int status; }; static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( @@ -468,31 +400,6 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; } -static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd, - unsigned issue_flags) -{ - struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - struct request *req = pdu->req; - int status; - u64 result; - - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) - status = -EINTR; - else - status = nvme_req(req)->status; - - result = le64_to_cpu(nvme_req(req)->result.u64); - - if (pdu->meta_len) - status = nvme_finish_user_metadata(req, pdu->u.meta_buffer, - pdu->u.meta, pdu->meta_len, status); - if (req->bio) - blk_rq_unmap_user(req->bio); - blk_mq_free_request(req); - - io_uring_cmd_done(ioucmd, status, result, issue_flags); -} - static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags) { @@ -500,8 +407,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, if (pdu->bio) blk_rq_unmap_user(pdu->bio); - - io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags); + io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); } static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, @@ -510,53 +416,24 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, struct io_uring_cmd *ioucmd = req->end_io_data; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - req->bio = pdu->bio; - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { - pdu->nvme_status = -EINTR; - } else { - pdu->nvme_status = nvme_req(req)->status; - if (!pdu->nvme_status) - pdu->nvme_status = blk_status_to_errno(err); - } - pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + pdu->status = -EINTR; + else + pdu->status = nvme_req(req)->status; + pdu->result = le64_to_cpu(nvme_req(req)->result.u64); /* * For iopoll, complete it directly. * Otherwise, move the completion to task work. */ - if (blk_rq_is_poll(req)) { - WRITE_ONCE(ioucmd->cookie, NULL); + if (blk_rq_is_poll(req)) nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); - } else { + else io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); - } return RQ_END_IO_FREE; } -static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req, - blk_status_t err) -{ - struct io_uring_cmd *ioucmd = req->end_io_data; - struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - - req->bio = pdu->bio; - pdu->req = req; - - /* - * For iopoll, complete it directly. - * Otherwise, move the completion to task work. 
- */ - if (blk_rq_is_poll(req)) { - WRITE_ONCE(ioucmd->cookie, NULL); - nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED); - } else { - io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb); - } - - return RQ_END_IO_NONE; -} - static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) { @@ -568,7 +445,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct request *req; blk_opf_t rq_flags = REQ_ALLOC_CACHE; blk_mq_req_flags_t blk_flags = 0; - void *meta = NULL; int ret; c.common.opcode = READ_ONCE(cmd->opcode); @@ -616,27 +492,16 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, if (d.addr && d.data_len) { ret = nvme_map_user_request(req, d.addr, d.data_len, nvme_to_user_ptr(d.metadata), - d.metadata_len, 0, &meta, ioucmd, vec); + d.metadata_len, 0, ioucmd, vec); if (ret) return ret; } - if (blk_rq_is_poll(req)) { - ioucmd->flags |= IORING_URING_CMD_POLLED; - WRITE_ONCE(ioucmd->cookie, req); - } - /* to free bio on completion, as req->bio will be null at that time */ pdu->bio = req->bio; - pdu->meta_len = d.metadata_len; + pdu->req = req; req->end_io_data = ioucmd; - if (pdu->meta_len) { - pdu->u.meta = meta; - pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata); - req->end_io = nvme_uring_cmd_end_io_meta; - } else { - req->end_io = nvme_uring_cmd_end_io; - } + req->end_io = nvme_uring_cmd_end_io; blk_execute_rq_nowait(req, false); return -EIOCBQUEUED; } @@ -787,16 +652,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd, struct io_comp_batch *iob, unsigned int poll_flags) { - struct request *req; - int ret = 0; - - if (!(ioucmd->flags & IORING_URING_CMD_POLLED)) - return 0; + struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); + struct request *req = pdu->req; - req = READ_ONCE(ioucmd->cookie); if (req && blk_rq_is_poll(req)) - ret = blk_rq_poll(req, iob, poll_flags); - return ret; + return blk_rq_poll(req, iob, poll_flags); + return 0; } #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 0a88d7bdc5..74de1e64ae 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) if (!ns->head->disk) continue; kblockd_schedule_work(&ns->head->requeue_work); - if (ctrl->state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) disk_uevent(ns->head->disk, KOBJ_CHANGE); } up_read(&ctrl->namespaces_rwsem); @@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns) static bool nvme_path_is_disabled(struct nvme_ns *ns) { + enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl); + /* * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should * still be able to complete assuming that the controller is connected. * Otherwise it will fail immediately and return to the requeue list. 
*/ - if (ns->ctrl->state != NVME_CTRL_LIVE && - ns->ctrl->state != NVME_CTRL_DELETING) + if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING) return true; if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || !test_bit(NVME_NS_READY, &ns->flags)) @@ -331,7 +332,7 @@ out: static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { - return ns->ctrl->state == NVME_CTRL_LIVE && + return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && ns->ana_state == NVME_ANA_OPTIMIZED; } @@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) list_for_each_entry_rcu(ns, &head->list, siblings) { if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) continue; - switch (ns->ctrl->state) { + switch (nvme_ctrl_state(ns->ctrl)) { case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: @@ -579,7 +580,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) */ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { rc = device_add_disk(&head->subsys->dev, head->disk, - nvme_ns_id_attr_groups); + nvme_ns_attr_groups); if (rc) { clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); return; @@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, * controller is ready. */ if (nvme_state_is_live(ns->ana_state) && - ns->ctrl->state == NVME_CTRL_LIVE) + nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); } @@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); - if (ctrl->state != NVME_CTRL_LIVE) + if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) return; nvme_read_ana_log(ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index e7411dac00..7b87763e2f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -16,6 +16,7 @@ #include <linux/rcupdate.h> #include <linux/wait.h> #include <linux/t10-pi.h> +#include <linux/ratelimit_types.h> #include <trace/events/block.h> @@ -262,6 +263,7 @@ enum nvme_ctrl_flags { struct nvme_ctrl { bool comp_seen; bool identified; + bool passthru_err_log_enabled; enum nvme_ctrl_state state; spinlock_t lock; struct mutex scan_lock; @@ -302,14 +304,13 @@ struct nvme_ctrl { u32 max_hw_sectors; u32 max_segments; u32 max_integrity_segments; - u32 max_discard_sectors; - u32 max_discard_segments; u32 max_zeroes_sectors; #ifdef CONFIG_BLK_DEV_ZONED u32 max_zone_append; #endif u16 crdt[3]; u16 oncs; + u8 dmrl; u32 dmrsl; u16 oacs; u16 sqsize; @@ -450,13 +451,28 @@ struct nvme_ns_head { struct list_head list; struct srcu_struct srcu; struct nvme_subsystem *subsys; - unsigned ns_id; struct nvme_ns_ids ids; struct list_head entry; struct kref ref; bool shared; + bool passthru_err_log_enabled; int instance; struct nvme_effects_log *effects; + u64 nuse; + unsigned ns_id; + int lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; + u16 sgs; + u32 sws; +#ifdef CONFIG_BLK_DEV_ZONED + u64 zsze; +#endif + unsigned long features; + + struct ratelimit_state rs_nuse; struct cdev cdev; struct device cdev_device; @@ -498,17 +514,6 @@ struct nvme_ns { struct kref kref; struct nvme_ns_head *head; - int lba_shift; - u16 ms; - u16 pi_size; - u16 sgs; - u32 sws; - u8 pi_type; - u8 guard_type; -#ifdef CONFIG_BLK_DEV_ZONED - u64 zsze; -#endif - unsigned long features; unsigned long flags; #define NVME_NS_REMOVING 0 #define NVME_NS_ANA_PENDING 2 @@ -519,13 +524,12 @@ struct nvme_ns { struct device cdev_device; struct nvme_fault_inject fault_inject; - }; /* NVMe ns supports metadata actions 
by the controller (generate/strip) */ -static inline bool nvme_ns_has_pi(struct nvme_ns *ns) +static inline bool nvme_ns_has_pi(struct nvme_ns_head *head) { - return ns->pi_type && ns->ms == ns->pi_size; + return head->pi_type && head->ms == head->pi_size; } struct nvme_ctrl_ops { @@ -657,17 +661,17 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) /* * Convert a 512B sector number to a device logical block number. */ -static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector) +static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector) { - return sector >> (ns->lba_shift - SECTOR_SHIFT); + return sector >> (head->lba_shift - SECTOR_SHIFT); } /* * Convert a device logical block number to a 512B sector number. */ -static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba) +static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba) { - return lba << (ns->lba_shift - SECTOR_SHIFT); + return lba << (head->lba_shift - SECTOR_SHIFT); } /* @@ -802,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req); blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, struct request *req); bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, - bool queue_live); + bool queue_live, enum nvme_ctrl_state state); static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, bool queue_live) { - if (likely(ctrl->state == NVME_CTRL_LIVE)) + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); + + if (likely(state == NVME_CTRL_LIVE)) return true; - if (ctrl->ops->flags & NVME_F_FABRICS && - ctrl->state == NVME_CTRL_DELETING) + if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING) return queue_live; - return __nvme_check_ready(ctrl, rq, queue_live); + return __nvme_check_ready(ctrl, rq, queue_live, state); } /* @@ -833,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl, (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS); } +/* + * Flags for __nvme_submit_sync_cmd() + */ +typedef __u32 __bitwise nvme_submit_flags_t; + +enum { + /* Insert request at the head of the queue */ + NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0), + /* Set BLK_MQ_REQ_NOWAIT when allocating request */ + NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), + /* Set BLK_MQ_REQ_RESERVED when allocating request */ + NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), + /* Retry command when NVME_SC_DNR is not set in the result */ + NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), +}; + int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, void *buf, unsigned bufflen); int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, union nvme_result *result, void *buffer, unsigned bufflen, - int qid, int at_head, - blk_mq_req_flags_t flags); + int qid, nvme_submit_flags_t flags); int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result); @@ -873,10 +893,12 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); +int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, + struct nvme_id_ns **id); int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo); int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); -extern const struct attribute_group *nvme_ns_id_attr_groups[]; +extern const 
struct attribute_group *nvme_ns_attr_groups[]; extern const struct pr_ops nvme_pr_ops; extern const struct block_device_operations nvme_ns_head_ops; extern const struct attribute_group nvme_dev_attrs_group; @@ -926,6 +948,10 @@ extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute subsys_attr_iopolicy; +static inline bool nvme_disk_is_ns_head(struct gendisk *disk) +{ + return disk->fops == &nvme_ns_head_ops; +} #else #define multipath false static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) @@ -1003,6 +1029,10 @@ static inline void nvme_mpath_start_request(struct request *rq) static inline void nvme_mpath_end_request(struct request *rq) { } +static inline bool nvme_disk_is_ns_head(struct gendisk *disk) +{ + return false; +} #endif /* CONFIG_NVME_MULTIPATH */ int nvme_revalidate_zones(struct nvme_ns *ns); @@ -1031,7 +1061,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) { - return dev_to_disk(dev)->private_data; + struct gendisk *disk = dev_to_disk(dev); + + WARN_ON(nvme_disk_is_ns_head(disk)); + return disk->private_data; } #ifdef CONFIG_NVME_HWMON @@ -1108,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) } #ifdef CONFIG_NVME_VERBOSE_ERRORS -const unsigned char *nvme_get_error_status_str(u16 status); -const unsigned char *nvme_get_opcode_str(u8 opcode); -const unsigned char *nvme_get_admin_opcode_str(u8 opcode); -const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode); +const char *nvme_get_error_status_str(u16 status); +const char *nvme_get_opcode_str(u8 opcode); +const char *nvme_get_admin_opcode_str(u8 opcode); +const char *nvme_get_fabrics_opcode_str(u8 opcode); #else /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const unsigned char *nvme_get_error_status_str(u16 status) +static inline const char *nvme_get_error_status_str(u16 status) { return "I/O Error"; } -static inline const unsigned char *nvme_get_opcode_str(u8 opcode) +static inline const char *nvme_get_opcode_str(u8 opcode) { return "I/O Cmd"; } -static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode) +static inline const char *nvme_get_admin_opcode_str(u8 opcode) { return "Admin Cmd"; } -static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) +static inline const char *nvme_get_fabrics_opcode_str(u8 opcode) { return "Fabrics Cmd"; } #endif /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype) +static inline const char *nvme_opcode_str(int qid, u8 opcode) { - if (opcode == nvme_fabrics_command) - return nvme_get_fabrics_opcode_str(fctype); return qid ? 
nvme_get_opcode_str(opcode) : nvme_get_admin_opcode_str(opcode); } + +static inline const char *nvme_fabrics_opcode_str( + int qid, const struct nvme_command *cmd) +{ + if (nvme_is_fabrics(cmd)) + return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype); + + return nvme_opcode_str(qid, cmd->common.opcode); +} #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 61af7ff1a9..8e0bb96926 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1284,6 +1284,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) struct request *abort_req; struct nvme_command cmd = { }; u32 csts = readl(dev->bar + NVME_REG_CSTS); + u8 opcode; /* If PCI error recovery process is happening, we cannot reset or * the recovery mechanism will surely fail. @@ -1310,8 +1311,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { dev_warn(dev->ctrl.device, - "I/O %d QID %d timeout, completion polled\n", - req->tag, nvmeq->qid); + "I/O tag %d (%04x) QID %d timeout, completion polled\n", + req->tag, nvme_cid(req), nvmeq->qid); return BLK_EH_DONE; } @@ -1327,8 +1328,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) fallthrough; case NVME_CTRL_DELETING: dev_warn_ratelimited(dev->ctrl.device, - "I/O %d QID %d timeout, disable controller\n", - req->tag, nvmeq->qid); + "I/O tag %d (%04x) QID %d timeout, disable controller\n", + req->tag, nvme_cid(req), nvmeq->qid); nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, true); return BLK_EH_DONE; @@ -1343,10 +1344,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * command was already aborted once before and still hasn't been * returned to the driver, or if this is the admin queue. */ + opcode = nvme_req(req)->cmd->common.opcode; if (!nvmeq->qid || iod->aborted) { dev_warn(dev->ctrl.device, - "I/O %d QID %d timeout, reset controller\n", - req->tag, nvmeq->qid); + "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", + req->tag, nvme_cid(req), opcode, + nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); nvme_req(req)->flags |= NVME_REQ_CANCELLED; goto disable; } @@ -1362,10 +1365,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) cmd.abort.sqid = cpu_to_le16(nvmeq->qid); dev_warn(nvmeq->dev->ctrl.device, - "I/O %d (%s) QID %d timeout, aborting\n", - req->tag, - nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), - nvmeq->qid); + "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n", + req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode), + nvmeq->qid, blk_op_str(req_op(req)), req_op(req), + blk_rq_bytes(req)); abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); @@ -2743,10 +2746,10 @@ static void nvme_reset_work(struct work_struct *work) * controller around but remove all namespaces. 
*/ if (dev->online_queues > 1) { + nvme_dbbuf_set(dev); nvme_unquiesce_io_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); nvme_pci_update_nr_queues(dev); - nvme_dbbuf_set(dev); nvme_unfreeze(&dev->ctrl); } else { dev_warn(dev->ctrl.device, "IO queues lost\n"); @@ -3360,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_BOGUS_NID, }, { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_BOGUS_NID, }, @@ -3408,6 +3414,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ @@ -3538,5 +3546,6 @@ static void __exit nvme_exit(void) MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("NVMe host PCIe transport driver"); module_init(nvme_init); module_exit(nvme_exit); diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index 391b1465eb..fc3eed00f9 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -98,7 +98,7 @@ static int nvme_send_pr_command(struct block_device *bdev, struct nvme_command *c, void *data, unsigned int data_len) { if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && - bdev->bd_disk->fops == &nvme_ns_head_ops) + nvme_disk_is_ns_head(bdev->bd_disk)) return nvme_send_ns_head_pr_command(bdev, c, data, data_len); return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 81e2621169..20fdd40b18 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, struct nvme_ns *ns = rq->q->queuedata; struct bio *bio = rq->bio; struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; + struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); + u32 xfer_len; int nr; req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); @@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, if (unlikely(nr)) goto mr_put; - nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, - req->mr->sig_attrs, ns->pi_type); + nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type); nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); @@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, IB_ACCESS_REMOTE_WRITE; sg->addr = cpu_to_le64(req->mr->iova); - put_unaligned_le24(req->mr->length, sg->length); + xfer_len = req->mr->length; + /* Check if PI is added by the HW */ + if (!pi_count) + xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size; + put_unaligned_le24(xfer_len, sg->length); put_unaligned_le32(req->mr->rkey, sg->key); sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; @@ -1946,9 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq) struct nvme_rdma_request 
*req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; struct nvme_rdma_ctrl *ctrl = queue->ctrl; + struct nvme_command *cmd = req->req.cmd; + int qid = nvme_rdma_queue_idx(queue); - dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", - rq->tag, nvme_rdma_queue_idx(queue)); + dev_warn(ctrl->ctrl.device, + "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n", + rq->tag, nvme_cid(rq), cmd->common.opcode, + nvme_fabrics_opcode_str(qid, cmd), qid); if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) { /* @@ -2017,7 +2026,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, queue->pi_support && (c->common.opcode == nvme_cmd_write || c->common.opcode == nvme_cmd_read) && - nvme_ns_has_pi(ns)) + nvme_ns_has_pi(ns->head)) req->use_sig_mr = true; else req->use_sig_mr = false; @@ -2291,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (ret) goto out_uninit_ctrl; - dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", - nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n", + nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); @@ -2395,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void) module_init(nvme_rdma_init_module); module_exit(nvme_rdma_cleanup_module); +MODULE_DESCRIPTION("NVMe host RDMA transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index c6b7fbd4d3..09fcaa519e 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -35,16 +35,71 @@ static ssize_t nvme_sysfs_rescan(struct device *dev, } static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); +static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + return sysfs_emit(buf, + ctrl->passthru_err_log_enabled ? "on\n" : "off\n"); +} + +static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + bool passthru_err_log_enabled; + int err; + + err = kstrtobool(buf, &passthru_err_log_enabled); + if (err) + return -EINVAL; + + ctrl->passthru_err_log_enabled = passthru_err_log_enabled; + + return count; +} + static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); - if (disk->fops == &nvme_bdev_ops) - return nvme_get_ns_from_dev(dev)->head; - else + if (nvme_disk_is_ns_head(disk)) return disk->private_data; + return nvme_get_ns_from_dev(dev)->head; } +static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + + return sysfs_emit(buf, head->passthru_err_log_enabled ? 
"on\n" : "off\n"); +} + +static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + bool passthru_err_log_enabled; + int err; + + err = kstrtobool(buf, &passthru_err_log_enabled); + if (err) + return -EINVAL; + head->passthru_err_log_enabled = passthru_err_log_enabled; + + return count; +} + +static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \ + __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ + nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store); + +static struct device_attribute dev_attr_io_passthru_err_log_enabled = \ + __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \ + nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store); + static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -114,20 +169,103 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(nsid); -static struct attribute *nvme_ns_id_attrs[] = { +static ssize_t csi_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi); +} +static DEVICE_ATTR_RO(csi); + +static ssize_t metadata_bytes_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms); +} +static DEVICE_ATTR_RO(metadata_bytes); + +static int ns_head_update_nuse(struct nvme_ns_head *head) +{ + struct nvme_id_ns *id; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + /* Avoid issuing commands too often by rate limiting the update */ + if (!__ratelimit(&head->rs_nuse)) + return 0; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (!ns) + goto out_unlock; + + ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id); + if (ret) + goto out_unlock; + + head->nuse = le64_to_cpu(id->nuse); + kfree(id); + +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +static int ns_update_nuse(struct nvme_ns *ns) +{ + struct nvme_id_ns *id; + int ret; + + /* Avoid issuing commands too often by rate limiting the update. 
*/ + if (!__ratelimit(&ns->head->rs_nuse)) + return 0; + + ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id); + if (ret) + return ret; + + ns->head->nuse = le64_to_cpu(id->nuse); + kfree(id); + return 0; +} + +static ssize_t nuse_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns_head *head = dev_to_ns_head(dev); + struct gendisk *disk = dev_to_disk(dev); + struct block_device *bdev = disk->part0; + int ret; + + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && + bdev->bd_disk->fops == &nvme_ns_head_ops) + ret = ns_head_update_nuse(head); + else + ret = ns_update_nuse(bdev->bd_disk->private_data); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", head->nuse); +} +static DEVICE_ATTR_RO(nuse); + +static struct attribute *nvme_ns_attrs[] = { &dev_attr_wwid.attr, &dev_attr_uuid.attr, &dev_attr_nguid.attr, &dev_attr_eui.attr, + &dev_attr_csi.attr, &dev_attr_nsid.attr, + &dev_attr_metadata_bytes.attr, + &dev_attr_nuse.attr, #ifdef CONFIG_NVME_MULTIPATH &dev_attr_ana_grpid.attr, &dev_attr_ana_state.attr, #endif + &dev_attr_io_passthru_err_log_enabled.attr, NULL, }; -static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, +static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); @@ -148,7 +286,8 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, } #ifdef CONFIG_NVME_MULTIPATH if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { - if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */ + /* per-path attr */ + if (nvme_disk_is_ns_head(dev_to_disk(dev))) return 0; if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) return 0; @@ -157,13 +296,13 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, return a->mode; } -static const struct attribute_group nvme_ns_id_attr_group = { - .attrs = nvme_ns_id_attrs, - .is_visible = nvme_ns_id_attrs_are_visible, +static const struct attribute_group nvme_ns_attr_group = { + .attrs = nvme_ns_attrs, + .is_visible = nvme_ns_attrs_are_visible, }; -const struct attribute_group *nvme_ns_id_attr_groups[] = { - &nvme_ns_id_attr_group, +const struct attribute_group *nvme_ns_attr_groups[] = { + &nvme_ns_attr_group, NULL, }; @@ -226,6 +365,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, char *buf) { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + unsigned state = (unsigned)nvme_ctrl_state(ctrl); static const char *const state_name[] = { [NVME_CTRL_NEW] = "new", [NVME_CTRL_LIVE] = "live", @@ -236,9 +376,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, [NVME_CTRL_DEAD] = "dead", }; - if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && - state_name[ctrl->state]) - return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); + if (state < ARRAY_SIZE(state_name) && state_name[state]) + return sysfs_emit(buf, "%s\n", state_name[state]); return sysfs_emit(buf, "unknown state\n"); } @@ -570,6 +709,7 @@ static struct attribute *nvme_dev_attrs[] = { #ifdef CONFIG_NVME_TCP_TLS &dev_attr_tls_key.attr, #endif + &dev_attr_adm_passthru_err_log_enabled.attr, NULL }; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 08805f0278..a6d596e056 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1922,14 +1922,13 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) ctrl->opts->subsysnqn); if (!pskid) { dev_err(ctrl->device, "no valid PSK found\n"); - ret = -ENOKEY; - goto out_free_queue; + return -ENOKEY; } } 
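The passthru_err_log_enabled attributes added to sysfs.c above both follow the kernel's stock boolean-attribute shape: kstrtobool() to parse the store, sysfs_emit() to format the show. A minimal self-contained sketch of that shape for reference; struct demo_ctrl and its quiet flag are hypothetical stand-ins, not part of this patch:

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

struct demo_ctrl {
	bool quiet;	/* hypothetical flag, like passthru_err_log_enabled */
};

static ssize_t quiet_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct demo_ctrl *ctrl = dev_get_drvdata(dev);

	/* sysfs_emit() bounds the output to the one page sysfs hands us */
	return sysfs_emit(buf, ctrl->quiet ? "on\n" : "off\n");
}

static ssize_t quiet_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct demo_ctrl *ctrl = dev_get_drvdata(dev);
	bool val;

	/* kstrtobool() accepts 0/1, y/n and on/off in any case */
	if (kstrtobool(buf, &val))
		return -EINVAL;
	ctrl->quiet = val;
	return count;
}
static DEVICE_ATTR_RW(quiet);

Writing "on", "off", "1" or "y" to the file flips the flag; reading it back yields "on\n" or "off\n", matching the behaviour of the new NVMe attributes.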
ret = nvme_tcp_alloc_queue(ctrl, 0, pskid); if (ret) - goto out_free_queue; + return ret; ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); if (ret) @@ -2429,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); - u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; + struct nvme_command *cmd = &pdu->cmd; int qid = nvme_tcp_queue_id(req->queue); dev_warn(ctrl->device, - "queue %d: timeout cid %#x type %d opcode %#x (%s)\n", - nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type, - opc, nvme_opcode_str(qid, opc, fctype)); + "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n", + rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode, + nvme_fabrics_opcode_str(qid, cmd), qid); if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { /* @@ -2754,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, if (ret) goto out_uninit_ctrl; - dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", - nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); + dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n", + nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); @@ -2827,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void) module_init(nvme_tcp_init_module); module_exit(nvme_tcp_cleanup_module); +MODULE_DESCRIPTION("NVMe host TCP transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index ec8557810c..499bbb0eee 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -11,7 +11,7 @@ int nvme_revalidate_zones(struct nvme_ns *ns) { struct request_queue *q = ns->queue; - blk_queue_chunk_sectors(q, ns->zsze); + blk_queue_chunk_sectors(q, ns->head->zsze); blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append); return blk_revalidate_disk_zones(ns->disk, NULL); @@ -99,16 +99,17 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf) goto free_data; } - ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze)); - if (!is_power_of_2(ns->zsze)) { + ns->head->zsze = + nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze)); + if (!is_power_of_2(ns->head->zsze)) { dev_warn(ns->ctrl->device, "invalid zone size:%llu for namespace:%u\n", - ns->zsze, ns->head->ns_id); + ns->head->zsze, ns->head->ns_id); status = -ENODEV; goto free_data; } - disk_set_zoned(ns->disk, BLK_ZONED_HM); + disk_set_zoned(ns->disk); blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1); disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1); @@ -128,7 +129,7 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, sizeof(struct nvme_zone_descriptor); nr_zones = min_t(unsigned int, nr_zones, - get_capacity(ns->disk) >> ilog2(ns->zsze)); + get_capacity(ns->disk) >> ilog2(ns->head->zsze)); bufsize = sizeof(struct nvme_zone_report) + nr_zones * sizeof(struct nvme_zone_descriptor); @@ -147,7 +148,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, return NULL; } -static int nvme_zone_parse_entry(struct nvme_ns *ns, +static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl, + struct nvme_ns_head *head, struct nvme_zone_descriptor *entry, unsigned int idx, report_zones_cb cb, void *data) @@ -155,20 +157,20 @@ 
static int nvme_zone_parse_entry(struct nvme_ns *ns, struct blk_zone zone = { }; if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { - dev_err(ns->ctrl->device, "invalid zone type %#x\n", + dev_err(ctrl->device, "invalid zone type %#x\n", entry->zt); return -EINVAL; } zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone.cond = entry->zs >> 4; - zone.len = ns->zsze; - zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap)); - zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba)); + zone.len = head->zsze; + zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap)); + zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba)); if (zone.cond == BLK_ZONE_COND_FULL) zone.wp = zone.start + zone.len; else - zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); + zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp)); return cb(&zone, idx, data); } @@ -196,11 +198,11 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL; c.zmr.pr = NVME_REPORT_ZONE_PARTIAL; - sector &= ~(ns->zsze - 1); + sector &= ~(ns->head->zsze - 1); while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { memset(report, 0, buflen); - c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector)); + c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector)); ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen); if (ret) { if (ret > 0) @@ -213,14 +215,15 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, break; for (i = 0; i < nz && zone_idx < nr_zones; i++) { - ret = nvme_zone_parse_entry(ns, &report->entries[i], + ret = nvme_zone_parse_entry(ns->ctrl, ns->head, + &report->entries[i], zone_idx, cb, data); if (ret) goto out_free; zone_idx++; } - sector += ns->zsze * nz; + sector += ns->head->zsze * nz; } if (zone_idx > 0) @@ -239,7 +242,7 @@ blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, c->zms.opcode = nvme_cmd_zone_mgmt_send; c->zms.nsid = cpu_to_le32(ns->head->ns_id); - c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); c->zms.zsa = action; if (req_op(req) == REQ_OP_ZONE_RESET_ALL) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index d937fe0512..2482a0db25 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1276,7 +1276,7 @@ static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, return -EINVAL; down_write(&nvmet_config_sem); - if (cntlid_min >= to_subsys(item)->cntlid_max) + if (cntlid_min > to_subsys(item)->cntlid_max) goto out_unlock; to_subsys(item)->cntlid_min = cntlid_min; up_write(&nvmet_config_sem); @@ -1306,7 +1306,7 @@ static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, return -EINVAL; down_write(&nvmet_config_sem); - if (cntlid_max <= to_subsys(item)->cntlid_min) + if (cntlid_max < to_subsys(item)->cntlid_min) goto out_unlock; to_subsys(item)->cntlid_max = cntlid_max; up_write(&nvmet_config_sem); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 3935165048..8658e9c085 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) continue; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_NS_CHANGED, 
NVME_LOG_CHANGED_NS); } @@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys, continue; if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) continue; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_ANA, NVME_LOG_ANA); } mutex_unlock(&subsys->lock); @@ -1425,9 +1425,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (!ctrl->sqs) goto out_free_changed_ns_list; - if (subsys->cntlid_min > subsys->cntlid_max) - goto out_free_sqs; - ret = ida_alloc_range(&cntlid_ida, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); @@ -1708,4 +1705,5 @@ static void __exit nvmet_exit(void) module_init(nvmet_init); module_exit(nvmet_exit); +MODULE_DESCRIPTION("NVMe target core framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 668d257fa9..68e82ccc0e 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port, if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE)) return; - nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + nvmet_add_async_event(ctrl, NVME_AER_NOTICE, NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC); } diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index d8da840a1c..9964ffe347 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -209,7 +209,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; - u16 status = 0; + u16 status; int ret; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) @@ -290,7 +290,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl; u16 qid = le16_to_cpu(c->qid); - u16 status = 0; + u16 status; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 666130878e..fd229f310c 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -497,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) * message is normal. Otherwise, send unless the hostport has * already been invalidated by the lldd. 
*/ - if (!tgtport->ops->ls_req || !assoc->hostport || - assoc->hostport->invalid) + if (!tgtport->ops->ls_req || assoc->hostport->invalid) return; lsop = kzalloc((sizeof(*lsop) + @@ -1030,7 +1029,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) list_for_each_entry(host, &tgtport->host_list, host_list) { if (host->hosthandle == hosthandle && !host->invalid) { if (nvmet_fc_hostport_get(host)) - return (host); + return host; } } @@ -1115,14 +1114,27 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) queue_work(nvmet_wq, &assoc->del_work); } +static bool +nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id) +{ + struct nvmet_fc_tgt_assoc *a; + + list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { + if (association_id == a->association_id) + return true; + } + + return false; +} + static struct nvmet_fc_tgt_assoc * nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { - struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; + struct nvmet_fc_tgt_assoc *assoc; unsigned long flags; + bool done; u64 ran; int idx; - bool needrandom = true; if (!tgtport->pe) return NULL; @@ -1135,12 +1147,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) if (idx < 0) goto out_free_assoc; - if (!nvmet_fc_tgtport_get(tgtport)) - goto out_ida; - assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); if (IS_ERR(assoc->hostport)) - goto out_put; + goto out_ida; assoc->tgtport = tgtport; assoc->a_id = idx; @@ -1149,29 +1158,24 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); atomic_set(&assoc->terminating, 0); - while (needrandom) { + done = false; + do { get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); ran = ran << BYTES_FOR_QID_SHIFT; spin_lock_irqsave(&tgtport->lock, flags); - needrandom = false; - list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { - if (ran == tmpassoc->association_id) { - needrandom = true; - break; - } - } - if (!needrandom) { + rcu_read_lock(); + if (!nvmet_fc_assoc_exits(tgtport, ran)) { assoc->association_id = ran; list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); + done = true; } + rcu_read_unlock(); spin_unlock_irqrestore(&tgtport->lock, flags); - } + } while (!done); return assoc; -out_put: - nvmet_fc_tgtport_put(tgtport); out_ida: ida_free(&tgtport->assoc_cnt, idx); out_free_assoc: @@ -1208,7 +1212,6 @@ nvmet_fc_target_assoc_free(struct kref *ref) dev_info(tgtport->dev, "{%d:%d} Association freed\n", tgtport->fc_target_port.port_num, assoc->a_id); - nvmet_fc_tgtport_put(tgtport); kfree(assoc); } @@ -1552,8 +1555,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry_safe(assoc, next, &tgtport->assoc_list, a_list) { - if (!assoc->hostport || - assoc->hostport->hosthandle != hosthandle) + if (assoc->hostport->hosthandle != hosthandle) continue; if (!nvmet_fc_tgt_a_get(assoc)) continue; @@ -2962,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void) module_init(nvmet_fc_init_module); module_exit(nvmet_fc_exit_module); +MODULE_DESCRIPTION("NVMe target FC transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index e6d4226827..1471af250e 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -995,11 +995,6 @@ fcloop_nport_free(struct kref *ref) { struct fcloop_nport *nport = 
container_of(ref, struct fcloop_nport, ref); - unsigned long flags; - - spin_lock_irqsave(&fcloop_lock, flags); - list_del(&nport->nport_list); - spin_unlock_irqrestore(&fcloop_lock, flags); kfree(nport); } @@ -1357,6 +1352,8 @@ __unlink_remote_port(struct fcloop_nport *nport) nport->tport->remoteport = NULL; nport->rport = NULL; + list_del(&nport->nport_list); + return rport; } @@ -1653,4 +1650,5 @@ static void __exit fcloop_exit(void) module_init(fcloop_init); module_exit(fcloop_exit); +MODULE_DESCRIPTION("NVMe target FC loop transport driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 9cb434c580..e589915dde 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) } nvme_quiesce_admin_queue(&ctrl->ctrl); - if (ctrl->ctrl.state == NVME_CTRL_LIVE) + if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE) nvme_disable_ctrl(&ctrl->ctrl, true); nvme_cancel_admin_tagset(&ctrl->ctrl); @@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) nvme_loop_shutdown_ctrl(ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { - if (ctrl->ctrl.state != NVME_CTRL_DELETING && - ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO) + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + + if (state != NVME_CTRL_DELETING && + state != NVME_CTRL_DELETING_NOIO) /* state change failure for non-deleted ctrl? */ WARN_ON_ONCE(1); return; @@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void) module_init(nvme_loop_init_module); module_exit(nvme_loop_cleanup_module); +MODULE_DESCRIPTION("NVMe target loop transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 9fe07d7efa..f2d963e1fe 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -602,7 +602,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys) goto out_put_file; } - old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL, + old = xa_cmpxchg(&passthru_subsystems, ctrl->instance, NULL, subsys, GFP_KERNEL); if (xa_is_err(old)) { ret = xa_err(old); @@ -635,7 +635,7 @@ out_unlock: static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys) { if (subsys->passthru_ctrl) { - xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid); + xa_erase(&passthru_subsystems, subsys->passthru_ctrl->instance); module_put(subsys->passthru_ctrl->ops->module); nvme_put_ctrl(subsys->passthru_ctrl); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 4597bca43a..3a0f2c170f 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -37,6 +37,8 @@ #define NVMET_RDMA_MAX_MDTS 8 #define NVMET_RDMA_MAX_METADATA_MDTS 5 +#define NVMET_RDMA_BACKLOG 128 + struct nvmet_rdma_srq; struct nvmet_rdma_cmd { @@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, } if (queue->host_qid == 0) { - /* Let inflight controller teardown complete */ - flush_workqueue(nvmet_wq); + struct nvmet_rdma_queue *q; + int pending = 0; + + /* Check for pending controller teardown */ + mutex_lock(&nvmet_rdma_queue_mutex); + list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) { + if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl && + q->state == NVMET_RDMA_Q_DISCONNECTING) + pending++; + } + mutex_unlock(&nvmet_rdma_queue_mutex); + if (pending 
> NVMET_RDMA_BACKLOG) + return NVME_SC_CONNECT_CTRL_BUSY; } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); @@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) goto out_destroy_id; } - ret = rdma_listen(cm_id, 128); + ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG); if (ret) { pr_err("listening to %pISpcs failed (%d)\n", addr, ret); goto out_destroy_id; @@ -2091,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void) module_init(nvmet_rdma_init); module_exit(nvmet_rdma_exit); +MODULE_DESCRIPTION("NVMe target RDMA transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index bb42ae42b1..c8655fc5aa 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -25,6 +25,7 @@ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE) #define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */ +#define NVMET_TCP_BACKLOG 128 static int param_store_val(const char *str, int *val, int min, int max) { @@ -985,8 +986,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) if (unlikely(data->ttag >= queue->nr_cmds)) { pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", queue->idx, data->ttag, queue->nr_cmds); - nvmet_tcp_fatal_error(queue); - return -EPROTO; + goto err_proto; } cmd = &queue->cmds[data->ttag]; } else { @@ -997,9 +997,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) pr_err("ttag %u unexpected data offset %u (expected %u)\n", data->ttag, le32_to_cpu(data->data_offset), cmd->rbytes_done); - /* FIXME: use path and transport errors */ - nvmet_tcp_fatal_error(queue); - return -EPROTO; + goto err_proto; } exp_data_len = le32_to_cpu(data->hdr.plen) - @@ -1012,9 +1010,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) cmd->pdu_len == 0 || cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); - /* FIXME: use proper transport errors */ - nvmet_tcp_fatal_error(queue); - return -EPROTO; + goto err_proto; } cmd->pdu_recv = 0; nvmet_tcp_build_pdu_iovec(cmd); @@ -1022,6 +1018,11 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) queue->rcv_state = NVMET_TCP_RECV_DATA; return 0; + +err_proto: + /* FIXME: use proper transport errors */ + nvmet_tcp_fatal_error(queue); + return -EPROTO; } static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) @@ -2067,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) goto err_sock; } - ret = kernel_listen(port->sock, 128); + ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG); if (ret) { pr_err("failed to listen %d on port sock\n", ret); goto err_sock; @@ -2133,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) container_of(sq, struct nvmet_tcp_queue, nvme_sq); if (sq->qid == 0) { - /* Let inflight controller teardown complete */ - flush_workqueue(nvmet_wq); + struct nvmet_tcp_queue *q; + int pending = 0; + + /* Check for pending controller teardown */ + mutex_lock(&nvmet_tcp_queue_mutex); + list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) { + if (q->nvme_sq.ctrl == sq->ctrl && + q->state == NVMET_TCP_Q_DISCONNECTING) + pending++; + } + mutex_unlock(&nvmet_tcp_queue_mutex); + if (pending > NVMET_TCP_BACKLOG) + return NVME_SC_CONNECT_CTRL_BUSY; } queue->nr_cmds = sq->size * 2; @@ -2210,5 +2222,6 @@ static void __exit nvmet_tcp_exit(void) module_init(nvmet_tcp_init); module_exit(nvmet_tcp_exit); 
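Both the RDMA and TCP hunks above replace an unbounded flush_workqueue() wait on the admin-queue connect path with the same bounded check: count queues of the same controller that are still in the DISCONNECTING state and, once the count exceeds the listen backlog, fail the connect with NVME_SC_CONNECT_CTRL_BUSY so the host backs off and retries. A generic sketch of the pattern; the demo_* names are stand-ins for the per-transport types, not part of the patch:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/types.h>

#define DEMO_BACKLOG	128	/* mirrors NVMET_RDMA_BACKLOG / NVMET_TCP_BACKLOG */

enum demo_queue_state {
	DEMO_Q_LIVE,
	DEMO_Q_DISCONNECTING,
};

struct demo_queue {
	struct list_head	entry;
	void			*ctrl;
	enum demo_queue_state	state;
};

static DEFINE_MUTEX(demo_queue_mutex);
static LIST_HEAD(demo_queue_list);

static u16 demo_install_admin_queue(struct demo_queue *queue)
{
	struct demo_queue *q;
	int pending = 0;

	/* Count queues of the same controller still tearing down */
	mutex_lock(&demo_queue_mutex);
	list_for_each_entry(q, &demo_queue_list, entry) {
		if (q->ctrl == queue->ctrl && q->state == DEMO_Q_DISCONNECTING)
			pending++;
	}
	mutex_unlock(&demo_queue_mutex);

	/* Push back on the host instead of blocking in flush_workqueue() */
	if (pending > DEMO_BACKLOG)
		return NVME_SC_CONNECT_CTRL_BUSY;
	return 0;
}

The design point, as it appears from the hunks, is that the connect handler never sleeps waiting for teardown work; it either proceeds or returns a retryable status.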
+MODULE_DESCRIPTION("NVMe target TCP transport driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */ diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c index bff454d462..6ee1f3db81 100644 --- a/drivers/nvme/target/trace.c +++ b/drivers/nvme/target/trace.c @@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name) return ret; } -const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl) +const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id) { const char *ret = trace_seq_buffer_ptr(p); @@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl) * If we can know the extra data of the connect command in this stage, * we can update this print statement later. */ - if (ctrl) - trace_seq_printf(p, "%d", ctrl->cntlid); + if (ctrl_id) + trace_seq_printf(p, "%d", ctrl_id); else trace_seq_printf(p, "_"); trace_seq_putc(p, 0); diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h index 974d99d47f..7f7ebf9558 100644 --- a/drivers/nvme/target/trace.h +++ b/drivers/nvme/target/trace.h @@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype, nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \ nvmet_trace_parse_admin_cmd(p, opcode, cdw10))) -const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl); -#define __print_ctrl_name(ctrl) \ - nvmet_trace_ctrl_name(p, ctrl) +const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id); +#define __print_ctrl_id(ctrl_id) \ + nvmet_trace_ctrl_id(p, ctrl_id) const char *nvmet_trace_disk_name(struct trace_seq *p, char *name); #define __print_disk_name(name) \ nvmet_trace_disk_name(p, name) #ifndef TRACE_HEADER_MULTI_READ -static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req) +static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req) { - return req->sq->ctrl; + /* + * The queue and controller pointers are not valid until an association + * has been established. 
+ */
+	if (!req->sq || !req->sq->ctrl)
+		return 0;
+	return req->sq->ctrl->cntlid;
 }
 
 static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -62,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
 	TP_ARGS(req, cmd),
 	TP_STRUCT__entry(
 		__field(struct nvme_command *, cmd)
-		__field(struct nvmet_ctrl *, ctrl)
+		__field(u16, ctrl_id)
 		__array(char, disk, DISK_NAME_LEN)
 		__field(int, qid)
 		__field(u16, cid)
@@ -75,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
 	),
 	TP_fast_assign(
 		__entry->cmd = cmd;
-		__entry->ctrl = nvmet_req_to_ctrl(req);
+		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
 		__assign_req_name(__entry->disk, req);
 		__entry->qid = req->sq->qid;
 		__entry->cid = cmd->common.command_id;
@@ -89,7 +95,7 @@ TRACE_EVENT(nvmet_req_init,
 	),
 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
 		"meta=%#llx, cmd=(%s, %s)",
-		__print_ctrl_name(__entry->ctrl),
+		__print_ctrl_id(__entry->ctrl_id),
 		__print_disk_name(__entry->disk),
 		__entry->qid, __entry->cid, __entry->nsid,
 		__entry->flags, __entry->metadata,
@@ -103,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
 	TP_PROTO(struct nvmet_req *req),
 	TP_ARGS(req),
 	TP_STRUCT__entry(
-		__field(struct nvmet_ctrl *, ctrl)
+		__field(u16, ctrl_id)
 		__array(char, disk, DISK_NAME_LEN)
 		__field(int, qid)
 		__field(int, cid)
@@ -111,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
 		__field(u16, status)
 	),
 	TP_fast_assign(
-		__entry->ctrl = nvmet_req_to_ctrl(req);
+		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
 		__entry->qid = req->cq->qid;
 		__entry->cid = req->cqe->command_id;
 		__entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -119,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
 		__assign_req_name(__entry->disk, req);
 	),
 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
-		__print_ctrl_name(__entry->ctrl),
+		__print_ctrl_id(__entry->ctrl_id),
 		__print_disk_name(__entry->disk),
 		__entry->qid, __entry->cid,
 		__entry->result, __entry->status)
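On the host side, the nvme_rdma_map_sg_pi() change earlier in this patch widens the keyed SGL length when the HCA does not interleave protection information (pi_count == 0): one pi_size tuple is added per 2^interval_exp-byte data interval. A standalone arithmetic check with example values (4 KiB intervals, 8-byte DIF tuples, a 32 KiB transfer; the numbers are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xfer_len = 32768;	/* req->mr->length: 32 KiB of data */
	unsigned int interval_exp = 12;	/* bi->interval_exp: 4096-byte blocks */
	uint32_t pi_size = 8;		/* ns->head->pi_size: 8-byte DIF tuple */

	/* one PI tuple per data interval, appended to the wire length */
	xfer_len += (xfer_len >> interval_exp) * pi_size;
	printf("%u\n", xfer_len);	/* 32768 + 8 * 8 = 32832 */
	return 0;
}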
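The nuse attribute added in sysfs.c rate-limits its Identify round trip through __ratelimit(), serving the cached head->nuse whenever the window is closed. The pattern in isolation, with a hypothetical 5-second window and demo_query() standing in for nvme_identify_ns():

#include <linux/jiffies.h>
#include <linux/ratelimit.h>
#include <linux/types.h>

/* allow at most one refresh per 5 seconds */
static DEFINE_RATELIMIT_STATE(demo_rs, 5 * HZ, 1);
static u64 demo_cached_nuse;

static u64 demo_query(void)
{
	return 42;	/* pretend this came back from the device */
}

static u64 demo_get_nuse(void)
{
	/* __ratelimit() returns nonzero while calls are still allowed */
	if (__ratelimit(&demo_rs))
		demo_cached_nuse = demo_query();
	return demo_cached_nuse;
}

The effect is the same as in ns_update_nuse()/ns_head_update_nuse(): a userspace loop cat'ing the attribute costs at most one admin command per window.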
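nvme_update_zone_info() in the zns.c hunks rejects zone sizes that are not powers of two precisely so that nvme_ns_report_zones() can round a sector down to a zone boundary with a mask (sector &= ~(zsze - 1)) instead of a 64-bit division. A standalone demonstration with made-up values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t zsze = 1ULL << 19;		/* zone size in 512B sectors */
	uint64_t sector = 3 * zsze + 12345;	/* somewhere inside zone 3 */

	assert((zsze & (zsze - 1)) == 0);	/* what is_power_of_2() enforces */
	sector &= ~(zsze - 1);			/* round down to the zone start */
	printf("%llu\n", (unsigned long long)sector);	/* 1572864 == 3 * zsze */
	return 0;
}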
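The trace.h rework above stores the 16-bit controller ID in the trace record instead of a struct nvmet_ctrl pointer, so decoding the event later never dereferences a controller that may already have been freed, and a request without an established association simply records 0. A hosted-C illustration of the capture-by-value idea; all names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl { uint16_t cntlid; };

struct record {
	uint16_t ctrl_id;	/* was: struct ctrl *ctrl */
};

/* capture by value at event time, as nvmet_req_to_ctrl_id() now does;
 * 0 stands for "no association established yet" */
static struct record capture(const struct ctrl *ctrl)
{
	struct record r = { .ctrl_id = ctrl ? ctrl->cntlid : 0 };
	return r;
}

int main(void)
{
	struct ctrl *c = malloc(sizeof(*c));
	struct record r;

	if (!c)
		return 1;
	c->cntlid = 7;
	r = capture(c);
	free(c);			/* controller torn down before the print */
	printf("%u\n", r.ctrl_id);	/* still safe: prints 7 */
	return 0;
}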