Diffstat (limited to 'drivers/nvme/target')
 drivers/nvme/target/admin-cmd.c        |   2
 drivers/nvme/target/auth.c             |  38
 drivers/nvme/target/configfs.c         | 146
 drivers/nvme/target/core.c             |  38
 drivers/nvme/target/discovery.c        |   2
 drivers/nvme/target/fabrics-cmd-auth.c |  52
 drivers/nvme/target/fabrics-cmd.c      |  22
 drivers/nvme/target/fc.c               |  19
 drivers/nvme/target/fcloop.c           |  17
 drivers/nvme/target/io-cmd-bdev.c      |  16
 drivers/nvme/target/nvmet.h            |  16
 drivers/nvme/target/passthru.c         |   8
 drivers/nvme/target/rdma.c             |  50
 drivers/nvme/target/tcp.c              |   5
 drivers/nvme/target/trace.c            |  98
 drivers/nvme/target/zns.c              |  15
 16 files changed, 389 insertions(+), 155 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 39cb570f83..f5b7054a4a 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->cqes = (0x4 << 4) | 0x4; /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index fb518b00f7..8bc3f431c7 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -44,6 +44,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, dhchap_secret = kstrdup(secret, GFP_KERNEL); if (!dhchap_secret) return -ENOMEM; + down_write(&nvmet_config_sem); if (set_ctrl) { kfree(host->dhchap_ctrl_secret); host->dhchap_ctrl_secret = strim(dhchap_secret); @@ -53,6 +54,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, host->dhchap_secret = strim(dhchap_secret); host->dhchap_key_hash = key_hash; } + up_write(&nvmet_config_sem); return 0; } @@ -124,12 +126,11 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) return ret; } -int nvmet_setup_auth(struct nvmet_ctrl *ctrl) +u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl) { int ret = 0; struct nvmet_host_link *p; struct nvmet_host *host = NULL; - const char *hash_name; down_read(&nvmet_config_sem); if (nvmet_is_disc_subsys(ctrl->subsys)) @@ -147,13 +148,16 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl) } if (!host) { pr_debug("host %s not found\n", ctrl->hostnqn); - ret = -EPERM; + ret = NVME_AUTH_DHCHAP_FAILURE_FAILED; goto out_unlock; } ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id); - if (ret < 0) + if (ret < 0) { pr_warn("Failed to setup DH group"); + ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; + goto out_unlock; + } if (!host->dhchap_secret) { pr_debug("No authentication provided\n"); @@ -164,12 +168,6 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl) pr_debug("Re-use existing hash ID %d\n", ctrl->shash_id); } else { - hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); - if (!hash_name) { - pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id); - ret = -EINVAL; - goto out_unlock; - } ctrl->shash_id = host->dhchap_hash_id; } @@ -178,7 +176,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl) ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10, host->dhchap_key_hash); if (IS_ERR(ctrl->host_key)) { - ret = PTR_ERR(ctrl->host_key); + ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE; ctrl->host_key = NULL; goto out_free_hash; } @@ -196,7 +194,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl) ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10, host->dhchap_ctrl_key_hash); if (IS_ERR(ctrl->ctrl_key)) { - ret = PTR_ERR(ctrl->ctrl_key); + ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE; ctrl->ctrl_key = NULL; goto out_free_hash; } @@ -316,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", @@ -327,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); @@ -363,9 
+361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c1) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: @@ -429,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c2, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; @@ -473,14 +472,15 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c2) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: crypto_free_shash(shash_tfm); - return 0; + return ret; } int nvmet_auth_ctrl_exponential(struct nvmet_req *req, diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 3ef6bc6556..685e89b35d 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, CONFIGFS_ATTR(nvmet_, param_inline_data_size); +static ssize_t nvmet_param_max_queue_size_show(struct config_item *item, + char *page) +{ + struct nvmet_port *port = to_nvmet_port(item); + + return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size); +} + +static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_port *port = to_nvmet_port(item); + int ret; + + if (nvmet_is_port_enabled(port, __func__)) + return -EACCES; + ret = kstrtoint(page, 0, &port->max_queue_size); + if (ret) { + pr_err("Invalid value '%s' for max_queue_size\n", page); + return -EINVAL; + } + return count; +} + +CONFIGFS_ATTR(nvmet_, param_max_queue_size); + #ifdef CONFIG_BLK_DEV_INTEGRITY static ssize_t nvmet_param_pi_enable_show(struct config_item *item, char *page) @@ -384,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item, return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name); } } - return sprintf(page, "reserved\n"); + return sprintf(page, "\n"); +} + +static u8 nvmet_addr_tsas_rdma_store(const char *page) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { + if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name)) + return nvmet_addr_tsas_rdma[i].type; + } + return NVMF_RDMA_QPTYPE_INVALID; +} + +static u8 nvmet_addr_tsas_tcp_store(const char *page) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { + if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) + return nvmet_addr_tsas_tcp[i].type; + } + return NVMF_TCP_SECTYPE_INVALID; } static ssize_t nvmet_addr_tsas_store(struct config_item *item, @@ -392,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item, { struct nvmet_port *port = to_nvmet_port(item); u8 treq = nvmet_port_disc_addr_treq_mask(port); - u8 sectype; - int i; + u8 sectype, qptype; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; - if (port->disc_addr.trtype != NVMF_TRTYPE_TCP) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { - if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) { - sectype = nvmet_addr_tsas_tcp[i].type; + if 
(port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { + qptype = nvmet_addr_tsas_rdma_store(page); + if (qptype == port->disc_addr.tsas.rdma.qptype) + return count; + } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { + sectype = nvmet_addr_tsas_tcp_store(page); + if (sectype != NVMF_TCP_SECTYPE_INVALID) goto found; - } } pr_err("Invalid value '%s' for tsas\n", page); @@ -650,10 +697,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item, if (kstrtobool(page, &enable)) return -EINVAL; + /* + * take a global nvmet_config_sem because the disable routine has a + * window where it releases the subsys-lock, giving a chance to + * a parallel enable to concurrently execute causing the disable to + * have a misaccounting of the ns percpu_ref. + */ + down_write(&nvmet_config_sem); if (enable) ret = nvmet_ns_enable(ns); else nvmet_ns_disable(ns); + up_write(&nvmet_config_sem); return ret ? ret : count; } @@ -1599,6 +1654,11 @@ static struct config_group *nvmet_subsys_make(struct config_group *group, return ERR_PTR(-EINVAL); } + if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) { + pr_err("can't create subsystem using unique discovery NQN\n"); + return ERR_PTR(-EINVAL); + } + subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); if (IS_ERR(subsys)) return ERR_CAST(subsys); @@ -1871,6 +1931,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = { &nvmet_attr_addr_trtype, &nvmet_attr_addr_tsas, &nvmet_attr_param_inline_data_size, + &nvmet_attr_param_max_queue_size, #ifdef CONFIG_BLK_DEV_INTEGRITY &nvmet_attr_param_pi_enable, #endif @@ -1929,6 +1990,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, INIT_LIST_HEAD(&port->subsystems); INIT_LIST_HEAD(&port->referrals); port->inline_data_size = -1; /* < 0 == let the transport choose */ + port->max_queue_size = -1; /* < 0 == let the transport choose */ port->disc_addr.portid = cpu_to_le16(portid); port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; @@ -1974,11 +2036,17 @@ static struct config_group nvmet_ports_group; static ssize_t nvmet_host_dhchap_key_show(struct config_item *item, char *page) { - u8 *dhchap_secret = to_host(item)->dhchap_secret; + u8 *dhchap_secret; + ssize_t ret; + down_read(&nvmet_config_sem); + dhchap_secret = to_host(item)->dhchap_secret; if (!dhchap_secret) - return sprintf(page, "\n"); - return sprintf(page, "%s\n", dhchap_secret); + ret = sprintf(page, "\n"); + else + ret = sprintf(page, "%s\n", dhchap_secret); + up_read(&nvmet_config_sem); + return ret; } static ssize_t nvmet_host_dhchap_key_store(struct config_item *item, @@ -2002,10 +2070,16 @@ static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item, char *page) { u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret; + ssize_t ret; + down_read(&nvmet_config_sem); + dhchap_secret = to_host(item)->dhchap_ctrl_secret; if (!dhchap_secret) - return sprintf(page, "\n"); - return sprintf(page, "%s\n", dhchap_secret); + ret = sprintf(page, "\n"); + else + ret = sprintf(page, "%s\n", dhchap_secret); + up_read(&nvmet_config_sem); + return ret; } static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item, @@ -2143,7 +2217,49 @@ static const struct config_item_type nvmet_hosts_type = { static struct config_group nvmet_hosts_group; +static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn); +} + +static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, + const char *page, size_t count) +{ + struct list_head 
*entry; + size_t len; + + len = strcspn(page, "\n"); + if (!len || len > NVMF_NQN_FIELD_LEN - 1) + return -EINVAL; + + down_write(&nvmet_config_sem); + list_for_each(entry, &nvmet_subsystems_group.cg_children) { + struct config_item *item = + container_of(entry, struct config_item, ci_entry); + + if (!strncmp(config_item_name(item), page, len)) { + pr_err("duplicate NQN %s\n", config_item_name(item)); + up_write(&nvmet_config_sem); + return -EINVAL; + } + } + memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN); + memcpy(nvmet_disc_subsys->subsysnqn, page, len); + up_write(&nvmet_config_sem); + + return len; +} + +CONFIGFS_ATTR(nvmet_root_, discovery_nqn); + +static struct configfs_attribute *nvmet_root_attrs[] = { + &nvmet_root_attr_discovery_nqn, + NULL, +}; + static const struct config_item_type nvmet_root_type = { + .ct_attrs = nvmet_root_attrs, .ct_owner = THIS_MODULE, }; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 7a6b3d37cc..4ff460ba28 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port) if (port->inline_data_size < 0) port->inline_data_size = 0; + /* + * If the transport didn't set the max_queue_size properly, then clamp + * it to the target limits. Also set default values in case the + * transport didn't set it at all. + */ + if (port->max_queue_size < 0) + port->max_queue_size = NVMET_MAX_QUEUE_SIZE; + else + port->max_queue_size = clamp_t(int, port->max_queue_size, + NVMET_MIN_QUEUE_SIZE, + NVMET_MAX_QUEUE_SIZE); + port->enabled = true; port->tr_ops = ops; return 0; @@ -806,6 +818,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) percpu_ref_exit(&sq->ref); nvmet_auth_sq_free(sq); + /* + * we must reference the ctrl again after waiting for inflight IO + * to complete. Because admin connect may have sneaked in after we + * store sq->ctrl locally, but before we killed the percpu_ref. the + * admin connect allocates and assigns sq->ctrl, which now needs a + * final ref put, as this ctrl is going away. 
+ */ + ctrl = sq->ctrl; + if (ctrl) { /* * The teardown flow may take some time, and the host may not @@ -936,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->metadata_sg_cnt = 0; req->transfer_len = 0; req->metadata_len = 0; + req->cqe->result.u64 = 0; req->cqe->status = 0; req->cqe->sq_head = 0; req->ns = NULL; @@ -1226,9 +1248,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl) ctrl->cap |= (15ULL << 24); /* maximum queue entries supported: */ if (ctrl->ops->get_max_queue_size) - ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1; + ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl), + ctrl->port->max_queue_size) - 1; else - ctrl->cap |= NVMET_QUEUE_SIZE - 1; + ctrl->cap |= ctrl->port->max_queue_size - 1; if (nvmet_is_passthru_subsys(ctrl->subsys)) nvmet_passthrough_override_cap(ctrl); @@ -1414,6 +1437,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, kref_init(&ctrl->ref); ctrl->subsys = subsys; + ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; nvmet_init_cap(ctrl); WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); @@ -1530,6 +1554,13 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, } down_read(&nvmet_config_sem); + if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn, + NVMF_NQN_SIZE)) { + if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) { + up_read(&nvmet_config_sem); + return nvmet_disc_subsys; + } + } list_for_each_entry(p, &port->subsystems, entry) { if (!strncmp(p->subsys->subsysnqn, subsysnqn, NVMF_NQN_SIZE)) { @@ -1665,7 +1696,8 @@ static int __init nvmet_init(void) if (!buffered_io_wq) goto out_free_zbd_work_queue; - nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0); + nvmet_wq = alloc_workqueue("nvmet-wq", + WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!nvmet_wq) goto out_free_buffered_work_queue; diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 68e82ccc0e..ce54da8c6b 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) id->lpa = (1 << 2); /* no enforcement soft-limit for maxcmd - pick arbitrary high value */ - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->flags & NVMF_KEYED_SGLS) diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index eb7785be0c..cb34d644ed 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -31,7 +31,7 @@ void nvmet_auth_sq_init(struct nvmet_sq *sq) sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; } -static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d) +static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_auth_dhchap_negotiate_data *data = d; @@ -109,7 +109,7 @@ static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d) return 0; } -static u16 nvmet_auth_reply(struct nvmet_req *req, void *d) +static u8 nvmet_auth_reply(struct nvmet_req *req, void *d) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_auth_dhchap_reply_data *data = d; @@ -172,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d) return 0; } -static u16 nvmet_auth_failure2(void *d) +static u8 nvmet_auth_failure2(void *d) { struct nvmf_auth_dhchap_failure_data *data = d; @@ -186,6 +186,7 @@ void 
nvmet_execute_auth_send(struct nvmet_req *req) void *d; u32 tl; u16 status = 0; + u8 dhchap_status; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; @@ -237,30 +238,32 @@ void nvmet_execute_auth_send(struct nvmet_req *req) if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) { if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) { /* Restart negotiation */ - pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__, - ctrl->cntlid, req->sq->qid); + pr_debug("%s: ctrl %d qid %d reset negotiation\n", + __func__, ctrl->cntlid, req->sq->qid); if (!req->sq->qid) { - if (nvmet_setup_auth(ctrl) < 0) { - status = NVME_SC_INTERNAL; - pr_err("ctrl %d qid 0 failed to setup" - "re-authentication", + dhchap_status = nvmet_setup_auth(ctrl); + if (dhchap_status) { + pr_err("ctrl %d qid 0 failed to setup re-authentication\n", ctrl->cntlid); - goto done_failure1; + req->sq->dhchap_status = dhchap_status; + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; + goto done_kfree; } } - req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; + req->sq->dhchap_step = + NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; } else if (data->auth_id != req->sq->dhchap_step) goto done_failure1; /* Validate negotiation parameters */ - status = nvmet_auth_negotiate(req, d); - if (status == 0) + dhchap_status = nvmet_auth_negotiate(req, d); + if (dhchap_status == 0) req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE; else { req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; - req->sq->dhchap_status = status; - status = 0; + req->sq->dhchap_status = dhchap_status; } goto done_kfree; } @@ -284,15 +287,14 @@ void nvmet_execute_auth_send(struct nvmet_req *req) switch (data->auth_id) { case NVME_AUTH_DHCHAP_MESSAGE_REPLY: - status = nvmet_auth_reply(req, d); - if (status == 0) + dhchap_status = nvmet_auth_reply(req, d); + if (dhchap_status == 0) req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1; else { req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1; - req->sq->dhchap_status = status; - status = 0; + req->sq->dhchap_status = dhchap_status; } goto done_kfree; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: @@ -301,13 +303,12 @@ void nvmet_execute_auth_send(struct nvmet_req *req) __func__, ctrl->cntlid, req->sq->qid); goto done_kfree; case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2: - status = nvmet_auth_failure2(d); - if (status) { + dhchap_status = nvmet_auth_failure2(d); + if (dhchap_status) { pr_warn("ctrl %d qid %d: authentication failed (%d)\n", - ctrl->cntlid, req->sq->qid, status); - req->sq->dhchap_status = status; + ctrl->cntlid, req->sq->qid, dhchap_status); + req->sq->dhchap_status = dhchap_status; req->sq->authenticated = false; - status = 0; } goto done_kfree; default: @@ -332,7 +333,6 @@ done: pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", __func__, ctrl->cntlid, req->sq->qid, status, req->error_loc); - req->cqe->result.u64 = 0; if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { unsigned long auth_expire_secs = ctrl->kato ? 
ctrl->kato : 120; @@ -515,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, d, al); kfree(d); done: - req->cqe->result.u64 = 0; - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) nvmet_auth_sq_free(req->sq); else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 9964ffe347..69d77d34be 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; } - if (sqsize > mqes) { + /* for fabrics, this value applies to only the I/O Submission Queues */ + if (qid && sqsize > mqes) { pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n", sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); @@ -210,7 +211,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; u16 status; - int ret; + u8 dhchap_status; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; @@ -225,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); @@ -251,15 +249,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) if (status) goto out; - ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; - uuid_copy(&ctrl->hostid, &d->hostid); - ret = nvmet_setup_auth(ctrl); - if (ret < 0) { - pr_err("Failed to setup authentication, error %d\n", ret); + dhchap_status = nvmet_setup_auth(ctrl); + if (dhchap_status) { + pr_err("Failed to setup authentication, dhchap status %u\n", + dhchap_status); nvmet_ctrl_put(ctrl); - if (ret == -EPERM) + if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); else status = NVME_SC_INTERNAL; @@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index fd229f310c..381b439473 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue { struct workqueue_struct *work_q; struct kref ref; /* array of fcp_iods */ - struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); + struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */; } __aligned(sizeof(unsigned long long)); struct nvmet_fc_hostport { @@ -1115,16 +1115,21 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) } static bool -nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id) +nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id) { struct nvmet_fc_tgt_assoc *a; + bool found = false; + rcu_read_lock(); list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { - if (association_id == a->association_id) - return true; + if (association_id == a->association_id) { + found = true; + break; + } } + rcu_read_unlock(); - return false; + return found; } static struct 
nvmet_fc_tgt_assoc * @@ -1164,13 +1169,11 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) ran = ran << BYTES_FOR_QID_SHIFT; spin_lock_irqsave(&tgtport->lock, flags); - rcu_read_lock(); - if (!nvmet_fc_assoc_exits(tgtport, ran)) { + if (!nvmet_fc_assoc_exists(tgtport, ran)) { assoc->association_id = ran; list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); done = true; } - rcu_read_unlock(); spin_unlock_irqrestore(&tgtport->lock, flags); } while (!done); diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1471af250e..913cd2ec7a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -1556,7 +1556,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = { NULL, }; -static struct class *fcloop_class; +static const struct class fcloop_class = { + .name = "fcloop", +}; static struct device *fcloop_device; @@ -1564,15 +1566,14 @@ static int __init fcloop_init(void) { int ret; - fcloop_class = class_create("fcloop"); - if (IS_ERR(fcloop_class)) { + ret = class_register(&fcloop_class); + if (ret) { pr_err("couldn't register class fcloop\n"); - ret = PTR_ERR(fcloop_class); return ret; } fcloop_device = device_create_with_groups( - fcloop_class, NULL, MKDEV(0, 0), NULL, + &fcloop_class, NULL, MKDEV(0, 0), NULL, fcloop_dev_attr_groups, "ctl"); if (IS_ERR(fcloop_device)) { pr_err("couldn't create ctl device!\n"); @@ -1585,7 +1586,7 @@ static int __init fcloop_init(void) return 0; out_destroy_class: - class_destroy(fcloop_class); + class_unregister(&fcloop_class); return ret; } @@ -1643,8 +1644,8 @@ static void __exit fcloop_exit(void) put_device(fcloop_device); - device_destroy(fcloop_class, MKDEV(0, 0)); - class_destroy(fcloop_class); + device_destroy(&fcloop_class, MKDEV(0, 0)); + class_unregister(&fcloop_class); } module_init(fcloop_init); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index f11400a908..6426aac263 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -50,10 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) void nvmet_bdev_ns_disable(struct nvmet_ns *ns) { - if (ns->bdev_handle) { - bdev_release(ns->bdev_handle); + if (ns->bdev_file) { + fput(ns->bdev_file); ns->bdev = NULL; - ns->bdev_handle = NULL; + ns->bdev_file = NULL; } } @@ -85,18 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) if (ns->buffered_io) return -ENOTBLK; - ns->bdev_handle = bdev_open_by_path(ns->device_path, + ns->bdev_file = bdev_file_open_by_path(ns->device_path, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); - if (IS_ERR(ns->bdev_handle)) { - ret = PTR_ERR(ns->bdev_handle); + if (IS_ERR(ns->bdev_file)) { + ret = PTR_ERR(ns->bdev_file); if (ret != -ENOTBLK) { pr_err("failed to open block device %s: (%d)\n", ns->device_path, ret); } - ns->bdev_handle = NULL; + ns->bdev_file = NULL; return ret; } - ns->bdev = ns->bdev_handle->bdev; + ns->bdev = file_bdev(ns->bdev_file); ns->size = bdev_nr_bytes(ns->bdev); ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 477416abf8..2f22b07eab 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -58,7 +58,7 @@ struct nvmet_ns { struct percpu_ref ref; - struct bdev_handle *bdev_handle; + struct file *bdev_file; struct block_device *bdev; struct file *file; bool readonly; @@ -113,8 +113,8 @@ struct nvmet_sq { bool authenticated; struct delayed_work 
auth_expired_work; u16 dhchap_tid; - u16 dhchap_status; - int dhchap_step; + u8 dhchap_status; + u8 dhchap_step; u8 *dhchap_c1; u8 *dhchap_c2; u32 dhchap_s1; @@ -163,6 +163,7 @@ struct nvmet_port { void *priv; bool enabled; int inline_data_size; + int max_queue_size; const struct nvmet_fabrics_ops *tr_ops; bool pi_enable; }; @@ -544,9 +545,10 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page); bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid); -#define NVMET_QUEUE_SIZE 1024 +#define NVMET_MIN_QUEUE_SIZE 16 +#define NVMET_MAX_QUEUE_SIZE 1024 #define NVMET_NR_QUEUES 128 -#define NVMET_MAX_CMD NVMET_QUEUE_SIZE +#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1) /* * Nice round number that makes a list of nsids fit into a page. @@ -712,7 +714,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req); int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, bool set_ctrl); int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash); -int nvmet_setup_auth(struct nvmet_ctrl *ctrl); +u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl); void nvmet_auth_sq_init(struct nvmet_sq *sq); void nvmet_destroy_auth(struct nvmet_ctrl *ctrl); void nvmet_auth_sq_free(struct nvmet_sq *sq); @@ -731,7 +733,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req, int nvmet_auth_ctrl_sesskey(struct nvmet_req *req, u8 *buf, int buf_size); #else -static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl) +static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl) { return 0; } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index f2d963e1fe..f003782d4e 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); - id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); /* don't support fuse commands */ id->fuses = 0; @@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) req->cmd->common.opcode == nvme_admin_identify) { switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: - nvmet_passthru_override_id_ctrl(req); + status = nvmet_passthru_override_id_ctrl(req); break; case NVME_ID_CNS_NS: - nvmet_passthru_override_id_ns(req); + status = nvmet_passthru_override_id_ns(req); break; case NVME_ID_CNS_NS_DESC_LIST: - nvmet_passthru_override_id_descs(req); + status = nvmet_passthru_override_id_descs(req); break; } } else if (status < 0) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 3a0f2c170f..689bb5d3cf 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -53,7 +53,6 @@ struct nvmet_rdma_cmd { enum { NVMET_RDMA_REQ_INLINE_DATA = (1 << 0), - NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1), }; struct nvmet_rdma_rsp { @@ -475,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) return 0; out_free: - while (--i >= 0) { - struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; - - list_del(&rsp->free_list); - nvmet_rdma_free_rsp(ndev, rsp); - } + while (--i >= 0) + nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); out: return ret; @@ -491,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) struct nvmet_rdma_device *ndev = queue->dev; int i, nr_rsps = queue->recv_queue_size * 2; - for (i = 0; i < nr_rsps; i++) { - struct nvmet_rdma_rsp *rsp 
= &queue->rsps[i]; - - list_del(&rsp->free_list); - nvmet_rdma_free_rsp(ndev, rsp); - } + for (i = 0; i < nr_rsps; i++) + nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); kfree(queue->rsps); } @@ -722,7 +713,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) struct rdma_cm_id *cm_id = rsp->queue->cm_id; struct ib_send_wr *first_wr; - if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { + if (rsp->invalidate_rkey) { rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; } else { @@ -905,10 +896,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, goto error_out; rsp->n_rdma += ret; - if (invalidate) { + if (invalidate) rsp->invalidate_rkey = key; - rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; - } return 0; @@ -1047,6 +1036,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) rsp->req.cmd = cmd->nvme_cmd; rsp->req.port = queue->port; rsp->n_rdma = 0; + rsp->invalidate_rkey = 0; if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { unsigned long flags; @@ -1816,18 +1806,14 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) { - struct nvmet_rdma_queue *queue; + struct nvmet_rdma_queue *queue, *n; -restart: mutex_lock(&nvmet_rdma_queue_mutex); - list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { - if (queue->nvme_sq.ctrl == ctrl) { - list_del_init(&queue->queue_list); - mutex_unlock(&nvmet_rdma_queue_mutex); - - __nvmet_rdma_queue_disconnect(queue); - goto restart; - } + list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) { + if (queue->nvme_sq.ctrl != ctrl) + continue; + list_del_init(&queue->queue_list); + __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); } @@ -1956,6 +1942,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport) nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; } + if (nport->max_queue_size < 0) { + nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE; + } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) { + pr_warn("max_queue_size %u is too large, reducing to %u\n", + nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE); + nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE; + } + ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, nport->disc_addr.trsvcid, &port->addr); if (ret) { @@ -2015,6 +2009,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl) { + if (ctrl->pi_support) + return NVME_RDMA_MAX_METADATA_QUEUE_SIZE; return NVME_RDMA_MAX_QUEUE_SIZE; } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 8d4531a160..380f22ee3e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -899,6 +899,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) pr_err("bad nvme-tcp pdu length (%d)\n", le32_to_cpu(icreq->hdr.plen)); nvmet_tcp_fatal_error(queue); + return -EPROTO; } if (icreq->pfv != NVME_TCP_PFV_1_0) { @@ -1588,7 +1589,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) static void nvmet_tcp_release_queue_work(struct work_struct *w) { - struct page *page; struct nvmet_tcp_queue *queue = container_of(w, struct nvmet_tcp_queue, release_work); @@ -1612,8 +1612,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) if (queue->hdr_digest || queue->data_digest) nvmet_tcp_free_crypto(queue); ida_free(&nvmet_tcp_queue_ida, queue->idx); - page = 
virt_to_head_page(queue->pf_cache.va); - __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); + page_frag_cache_drain(&queue->pf_cache); kfree(queue); } diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c index 6ee1f3db81..8d1806a828 100644 --- a/drivers/nvme/target/trace.c +++ b/drivers/nvme/target/trace.c @@ -119,6 +119,67 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p, } } +static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10) +{ + static const char * const zsa_strs[] = { + [0x01] = "close zone", + [0x02] = "finish zone", + [0x03] = "open zone", + [0x04] = "reset zone", + [0x05] = "offline zone", + [0x10] = "set zone descriptor extension" + }; + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + const char *zsa_str; + u8 zsa = cdw10[12]; + u8 all = cdw10[13]; + + if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa]) + zsa_str = zsa_strs[zsa]; + else + zsa_str = "reserved"; + + trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u", + slba, zsa, zsa_str, all); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) +{ + static const char * const zrasf_strs[] = { + [0x00] = "list all zones", + [0x01] = "list the zones in the ZSE: Empty state", + [0x02] = "list the zones in the ZSIO: Implicitly Opened state", + [0x03] = "list the zones in the ZSEO: Explicitly Opened state", + [0x04] = "list the zones in the ZSC: Closed state", + [0x05] = "list the zones in the ZSF: Full state", + [0x06] = "list the zones in the ZSRO: Read Only state", + [0x07] = "list the zones in the ZSO: Offline state", + [0x09] = "list the zones that have the zone attribute" + }; + const char *ret = trace_seq_buffer_ptr(p); + u64 slba = get_unaligned_le64(cdw10); + u32 numd = get_unaligned_le32(&cdw10[8]); + u8 zra = cdw10[12]; + u8 zrasf = cdw10[13]; + const char *zrasf_str; + u8 pr = cdw10[14]; + + if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf]) + zrasf_str = zrasf_strs[zrasf]; + else + zrasf_str = "reserved"; + + trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u", + slba, numd, zra, zrasf, zrasf_str, pr); + trace_seq_putc(p, 0); + + return ret; +} + const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, u8 *cdw10) { @@ -126,9 +187,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, case nvme_cmd_read: case nvme_cmd_write: case nvme_cmd_write_zeroes: + case nvme_cmd_zone_append: return nvmet_trace_read_write(p, cdw10); case nvme_cmd_dsm: return nvmet_trace_dsm(p, cdw10); + case nvme_cmd_zone_mgmt_send: + return nvmet_trace_zone_mgmt_send(p, cdw10); + case nvme_cmd_zone_mgmt_recv: + return nvmet_trace_zone_mgmt_recv(p, cdw10); default: return nvmet_trace_common(p, cdw10); } @@ -176,6 +242,34 @@ static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p, return ret; } +static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 spsp0 = spc[1]; + u8 spsp1 = spc[2]; + u8 secp = spc[3]; + u32 tl = get_unaligned_le32(spc + 4); + + trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u", + spsp0, spsp1, secp, tl); + trace_seq_putc(p, 0); + return ret; +} + +static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc) +{ + const char *ret = trace_seq_buffer_ptr(p); + u8 spsp0 = spc[1]; + u8 spsp1 = spc[2]; + u8 secp = spc[3]; + u32 al = get_unaligned_le32(spc + 4); + + trace_seq_printf(p, "spsp0=%02x, 
spsp1=%02x, secp=%02x, al=%u", + spsp0, spsp1, secp, al); + trace_seq_putc(p, 0); + return ret; +} + static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc) { const char *ret = trace_seq_buffer_ptr(p); @@ -195,6 +289,10 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, return nvmet_trace_fabrics_connect(p, spc); case nvme_fabrics_type_property_get: return nvmet_trace_fabrics_property_get(p, spc); + case nvme_fabrics_type_auth_send: + return nvmet_trace_fabrics_auth_send(p, spc); + case nvme_fabrics_type_auth_receive: + return nvmet_trace_fabrics_auth_receive(p, spc); default: return nvmet_trace_fabrics_common(p, spc); } diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 5b5c1e4817..0021d06041 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns) if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1)) return false; /* - * ZNS does not define a conventional zone type. If the underlying - * device has a bitmap set indicating the existence of conventional - * zones, reject the device. Otherwise, use report zones to detect if - * the device has conventional zones. + * ZNS does not define a conventional zone type. Use report zones + * to detect if the device has conventional zones and reject it if + * it does. */ - if (ns->bdev->bd_disk->conv_zones_bitmap) - return false; - ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev), validate_conv_zones_cb, NULL); if (ret < 0) @@ -456,8 +452,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) switch (zsa_req_op(req->cmd->zms.zsa)) { case REQ_OP_ZONE_RESET: ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, - get_capacity(req->ns->bdev->bd_disk), - GFP_KERNEL); + get_capacity(req->ns->bdev->bd_disk)); if (ret < 0) return blkdev_zone_mgmt_errno_to_nvme_status(ret); break; @@ -508,7 +503,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) goto out; } - ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL); + ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors); if (ret < 0) status = blkdev_zone_mgmt_errno_to_nvme_status(ret); |
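For readers following the queue-size changes above (the new param_max_queue_size port attribute, the clamp in nvmet_enable_port(), the MQES computation in nvmet_init_cap(), and the NVMET_MAX_CMD(ctrl) macro), here is a standalone sketch of how the pieces fit together. It is not the kernel code itself: the helper functions effective_port_queue_size(), cap_with_mqes() and clamp_int() are hypothetical stand-ins, and the transport ceiling of 128 used in main() is an assumed value for illustration only; the NVMET_* constants and NVME_CAP_MQES() mirror the definitions visible in the hunks above.

/*
 * Standalone sketch (not kernel code) of the queue-size plumbing in the
 * series above.  The NVMET_* constants and NVME_CAP_MQES() mirror the
 * definitions visible in the diff; the helpers are hypothetical stand-ins
 * for the nvmet_enable_port()/nvmet_init_cap() logic.
 */
#include <stdint.h>
#include <stdio.h>

#define NVMET_MIN_QUEUE_SIZE  16
#define NVMET_MAX_QUEUE_SIZE  1024

/* MQES occupies bits 15:0 of the CAP property and is zero-based. */
#define NVME_CAP_MQES(cap)    ((cap) & 0xffff)

static int clamp_int(int val, int lo, int hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}

/* What nvmet_enable_port() does with the configfs param_max_queue_size. */
static int effective_port_queue_size(int configured)
{
        if (configured < 0)     /* < 0 == "let the transport choose" */
                return NVMET_MAX_QUEUE_SIZE;
        return clamp_int(configured, NVMET_MIN_QUEUE_SIZE,
                         NVMET_MAX_QUEUE_SIZE);
}

/* What nvmet_init_cap() does: MQES = min(transport, port) - 1, zero-based. */
static uint64_t cap_with_mqes(int transport_max, int port_max)
{
        int queue_size = transport_max < port_max ? transport_max : port_max;

        return (uint64_t)(queue_size - 1);      /* other CAP fields omitted */
}

int main(void)
{
        /* Attribute left unset in configfs; 128 is an assumed transport ceiling. */
        int port_max = effective_port_queue_size(-1);
        uint64_t cap = cap_with_mqes(128, port_max);

        /* NVMET_MAX_CMD(ctrl) in the series expands to MQES + 1. */
        printf("MQES=%u MAXCMD=%u\n",
               (unsigned int)NVME_CAP_MQES(cap),
               (unsigned int)NVME_CAP_MQES(cap) + 1);
        return 0;
}

The net effect, visible in nvmet_execute_identify_ctrl() and nvmet_execute_disc_identify(), is that the advertised MAXCMD now tracks the MQES the controller actually reports (bounded by both the transport limit and the administrator-chosen per-port value) instead of the old fixed NVMET_QUEUE_SIZE.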