Diffstat (limited to 'drivers/nvme')
 drivers/nvme/host/apple.c              |  28
 drivers/nvme/host/auth.c               |   6
 drivers/nvme/host/core.c               | 111
 drivers/nvme/host/fabrics.c            |  57
 drivers/nvme/host/fabrics.h            |   2
 drivers/nvme/host/fc.c                 |   4
 drivers/nvme/host/ioctl.c              |  43
 drivers/nvme/host/multipath.c          |  23
 drivers/nvme/host/nvme.h               |  10
 drivers/nvme/host/pci.c                |  21
 drivers/nvme/host/rdma.c               |  23
 drivers/nvme/host/tcp.c                |  30
 drivers/nvme/target/auth.c             |  36
 drivers/nvme/target/configfs.c         |  63
 drivers/nvme/target/core.c             |  10
 drivers/nvme/target/fabrics-cmd-auth.c |  52
 drivers/nvme/target/fabrics-cmd.c      |  17
 drivers/nvme/target/fc.c               |   2
 drivers/nvme/target/nvmet.h            |   8
 drivers/nvme/target/rdma.c             |  16
 drivers/nvme/target/zns.c              |  10
 21 files changed, 345 insertions(+), 227 deletions(-)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index dd6ec08651..25ecc1a005 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data)
mempool_destroy(data);
}
-static int apple_nvme_probe(struct platform_device *pdev)
+static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct apple_nvme *anv;
@@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
if (!anv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
anv->dev = get_device(dev);
anv->adminq.is_adminq = true;
@@ -1516,10 +1516,26 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
+ return anv;
+put_dev:
+ put_device(anv->dev);
+ return ERR_PTR(ret);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = apple_nvme_alloc(pdev);
+ if (IS_ERR(anv))
+ return PTR_ERR(anv);
+
anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
if (IS_ERR(anv->ctrl.admin_q)) {
ret = -ENOMEM;
- goto put_dev;
+ anv->ctrl.admin_q = NULL;
+ goto out_uninit_ctrl;
}
nvme_reset_ctrl(&anv->ctrl);
@@ -1527,8 +1543,9 @@ static int apple_nvme_probe(struct platform_device *pdev)
return 0;
-put_dev:
- put_device(anv->dev);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&anv->ctrl);
+ nvme_put_ctrl(&anv->ctrl);
return ret;
}
@@ -1602,4 +1619,5 @@ static struct platform_driver apple_nvme_driver = {
module_platform_driver(apple_nvme_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
MODULE_LICENSE("GPL");
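A minimal sketch of the alloc/probe split applied above, assuming the
usual ERR_PTR() convention; "struct foo" and both helpers are
illustrative names, not part of the patch:

	struct foo {
		struct device *dev;
	};

	/* Allocation helper: returns a valid pointer or an ERR_PTR() code. */
	static struct foo *foo_alloc(struct platform_device *pdev)
	{
		struct foo *f = devm_kzalloc(&pdev->dev, sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		return f;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *f = foo_alloc(pdev);

		if (IS_ERR(f))
			return PTR_ERR(f);
		/*
		 * Later failures unwind the initialized controller instead of
		 * just dropping the device reference, as in the hunk above.
		 */
		return 0;
	}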
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index a264b3ae07..371e14f0a2 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -730,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
if (ret) {
chap->status = ret;
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
return;
}
@@ -797,7 +797,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
if (ret) {
chap->status = ret;
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
return;
}
@@ -818,7 +818,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
ret = nvme_auth_process_dhchap_success1(ctrl, chap);
if (ret) {
/* Controller authentication failed */
- chap->error = -ECONNREFUSED;
+ chap->error = -EKEYREJECTED;
goto fail2;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d513fd2758..782090ce0b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -383,14 +383,14 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
- if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
- return AUTHENTICATE;
-
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (req->cmd_flags & REQ_NVME_MPATH) {
if (nvme_is_path_error(nvme_req(req)->status) ||
blk_queue_dying(req->q))
@@ -678,7 +678,7 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
-static inline bool nvme_get_ns(struct nvme_ns *ns)
+bool nvme_get_ns(struct nvme_ns *ns)
{
return kref_get_unless_zero(&ns->kref);
}
@@ -998,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(bvec_virt(&req->special_vec));
+ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -2137,7 +2138,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ ret = blk_revalidate_disk_zones(ns->disk);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
@@ -3684,9 +3685,10 @@ out_unlock:
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
if (!nvme_get_ns(ns))
continue;
@@ -3696,7 +3698,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->head->ns_id > nsid)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
@@ -3710,7 +3712,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
if (tmp->head->ns_id < ns->head->ns_id) {
- list_add(&ns->list, &tmp->list);
+ list_add_rcu(&ns->list, &tmp->list);
return;
}
}
@@ -3776,17 +3778,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
/*
* Ensure that no namespaces are added to the ctrl list after the queues
* are frozen, thereby avoiding a deadlock between scan and reset.
*/
if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
goto out_unlink_ns;
}
nvme_ns_add_to_ctrl_list(ns);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
nvme_get_ctrl(ctrl);
if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
@@ -3809,9 +3812,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
out_cleanup_ns_from_list:
nvme_put_ctrl(ctrl);
- down_write(&ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3861,9 +3865,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
del_gendisk(ns->disk);
- down_write(&ns->ctrl->namespaces_rwsem);
- list_del_init(&ns->list);
- up_write(&ns->ctrl->namespaces_rwsem);
+ mutex_lock(&ns->ctrl->namespaces_lock);
+ list_del_rcu(&ns->list);
+ mutex_unlock(&ns->ctrl->namespaces_lock);
+ synchronize_srcu(&ns->ctrl->srcu);
if (last_path)
nvme_mpath_shutdown_disk(ns->head);
@@ -3953,16 +3958,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
struct nvme_ns *ns, *next;
LIST_HEAD(rm_list);
- down_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
- list_move_tail(&ns->list, &rm_list);
+ if (ns->head->ns_id > nsid) {
+ list_del_rcu(&ns->list);
+ synchronize_srcu(&ctrl->srcu);
+ list_add_tail_rcu(&ns->list, &rm_list);
+ }
}
- up_write(&ctrl->namespaces_rwsem);
+ mutex_unlock(&ctrl->namespaces_lock);
list_for_each_entry_safe(ns, next, &rm_list, list)
nvme_ns_remove(ns);
-
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -4132,9 +4139,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
/* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
- down_write(&ctrl->namespaces_rwsem);
- list_splice_init(&ctrl->namespaces, &ns_list);
- up_write(&ctrl->namespaces_rwsem);
+ mutex_lock(&ctrl->namespaces_lock);
+ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
+ mutex_unlock(&ctrl->namespaces_lock);
+ synchronize_srcu(&ctrl->srcu);
list_for_each_entry_safe(ns, next, &ns_list, list)
nvme_ns_remove(ns);
@@ -4582,6 +4590,7 @@ static void nvme_free_ctrl(struct device *dev)
key_put(ctrl->tls_key);
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ cleanup_srcu_struct(&ctrl->srcu);
nvme_auth_stop(ctrl);
nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
@@ -4614,10 +4623,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->namespaces_lock);
+
+ ret = init_srcu_struct(&ctrl->srcu);
+ if (ret)
+ return ret;
+
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
xa_init(&ctrl->cels);
- init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
@@ -4697,6 +4711,7 @@ out_release_instance:
out:
if (ctrl->discard_page)
__free_page(ctrl->discard_page);
+ cleanup_srcu_struct(&ctrl->srcu);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
@@ -4705,22 +4720,24 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mark_disk_dead(ns->disk);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
@@ -4728,14 +4745,15 @@ EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
if (timeout <= 0)
break;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -4743,23 +4761,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_mq_freeze_queue_wait(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -4802,11 +4822,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
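For readers tracking the locking conversion in core.c: a condensed
sketch of the new scheme, with generic names standing in for the nvme
structures. Readers walk the list under SRCU (so they may sleep),
writers serialize on a mutex and call synchronize_srcu() before the
removed entry can be freed:

	struct entry {
		struct list_head list;
	};

	struct owner {
		struct srcu_struct srcu;
		struct mutex lock;		/* writer-side only */
		struct list_head entries;
	};

	static void reader_walk(struct owner *o)
	{
		struct entry *e;
		int idx = srcu_read_lock(&o->srcu);

		list_for_each_entry_rcu(e, &o->entries, list)
			do_something(e);	/* may block: SRCU, not plain RCU */
		srcu_read_unlock(&o->srcu, idx);
	}

	static void writer_remove(struct owner *o, struct entry *e)
	{
		mutex_lock(&o->lock);
		list_del_rcu(&e->list);
		mutex_unlock(&o->lock);
		synchronize_srcu(&o->srcu);	/* wait for in-flight readers */
		kfree(e);
	}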
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1f0ea1f32d..ceb9c0ed31 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0);
+ NVME_QID_ANY, NVME_SUBMIT_RESERVED);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -428,12 +428,6 @@ static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
* fabrics-protocol connection of the NVMe Admin queue between the
* host system device and the allocated NVMe controller on the
* target system via a NVMe Fabrics "Connect" command.
- *
- * Return:
- * 0: success
- * > 0: NVMe error status code
- * < 0: Linux errno error code
- *
*/
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
@@ -467,7 +461,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
- ret = NVME_SC_AUTH_REQUIRED;
+ ret = -EOPNOTSUPP;
goto out_free_data;
}
/* Authentication required */
@@ -475,14 +469,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (ret) {
dev_warn(ctrl->device,
"qid 0: authentication setup failed\n");
- ret = NVME_SC_AUTH_REQUIRED;
goto out_free_data;
}
ret = nvme_auth_wait(ctrl, 0);
- if (ret)
+ if (ret) {
dev_warn(ctrl->device,
- "qid 0: authentication failed\n");
- else
+ "qid 0: authentication failed, error %d\n",
+ ret);
+ } else
dev_info(ctrl->device,
"qid 0: authenticated\n");
}
@@ -542,7 +536,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
- ret = NVME_SC_AUTH_REQUIRED;
+ ret = -EOPNOTSUPP;
goto out_free_data;
}
/* Authentication required */
@@ -550,12 +544,13 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (ret) {
dev_warn(ctrl->device,
"qid %d: authentication setup failed\n", qid);
- ret = NVME_SC_AUTH_REQUIRED;
- } else {
- ret = nvme_auth_wait(ctrl, qid);
- if (ret)
- dev_warn(ctrl->device,
- "qid %u: authentication failed\n", qid);
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed, error %d\n",
+ qid, ret);
}
}
out_free_data:
@@ -564,8 +559,26 @@ out_free_data:
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
+/*
+ * Evaluate the status information returned by the transport in order to decide
+ * if a reconnect attempt should be scheduled.
+ *
+ * Do not retry when:
+ *
+ * - the DNR bit is set, as the specification states that no further connect
+ * attempts with the same set of parameters should be made.
+ *
+ * - the authentication attempt failed because the key was invalid.
+ * This error code is set on the host side.
+ */
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
+ if (status > 0 && (status & NVME_SC_DNR))
+ return false;
+
+ if (status == -EKEYREJECTED)
+ return false;
+
if (ctrl->opts->max_reconnects == -1 ||
ctrl->nr_reconnects < ctrl->opts->max_reconnects)
return true;
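The status argument is interpreted in two ranges: positive values are
NVMe status words (where NVME_SC_DNR is the Do Not Retry bit), negative
values are Linux errnos such as the -EKEYREJECTED now produced by the
host auth code above. A sketch of how a transport is expected to feed
it back in (the foo_* names are hypothetical; compare the fc.c hunk
below):

	static void foo_reconnect_or_remove(struct nvme_ctrl *ctrl, int status)
	{
		if (nvmf_should_reconnect(ctrl, status))
			queue_delayed_work(nvme_wq, &foo_connect_work,
					   ctrl->opts->reconnect_delay * HZ);
		else
			nvme_delete_ctrl(ctrl);	/* DNR or rejected key: give up */
	}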
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 37c974c38d..602135910a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -223,7 +223,7 @@ int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
-bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a5b29e9ad3..f0b0813327 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3310,12 +3310,10 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
- if (status > 0 && (status & NVME_SC_DNR))
- recon = false;
} else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false;
- if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (recon && nvmf_should_reconnect(&ctrl->ctrl, status)) {
if (portptr->port_state == FC_OBJSTATE_ONLINE)
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: Reconnect attempt in %ld "
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3dfd5ae99a..8b69427a44 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
+static void nvme_unmap_bio(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_unmap_free_user(bio);
+ blk_rq_unmap_user(bio);
+}
+
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- blk_rq_unmap_user(bio);
+ nvme_unmap_bio(bio);
blk_mq_free_request(req);
if (effects)
@@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
+ nvme_unmap_bio(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -423,13 +430,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly.
+ * For iopoll, complete it directly. Note that using the uring_cmd
+ * helper for this is safe only because we check blk_rq_is_poll():
+ * it returns false when we are not on a polled queue, so the polled
+ * completion helper is only ever used where it is valid.
+ *
* Otherwise, move the completion to task work.
*/
- if (blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
- else
+ if (blk_rq_is_poll(req)) {
+ if (pdu->bio)
+ nvme_unmap_bio(pdu->bio);
+ io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+ } else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ }
return RQ_END_IO_FREE;
}
@@ -782,15 +796,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
bool open_for_write)
{
struct nvme_ns *ns;
- int ret;
+ int ret, srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
if (list_empty(&ctrl->namespaces)) {
ret = -ENOTTY;
goto out_unlock;
}
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
@@ -800,15 +814,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
+ if (!nvme_get_ns(ns)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
out_unlock:
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return ret;
}
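The new nvme_unmap_bio() helper pairs the existing blk_rq_unmap_user()
with bio_integrity_unmap_free_user(), so user-mapped integrity
(metadata) buffers are copied back and freed on every completion path,
and in that order: metadata first, then the data pages are unpinned. A
condensed sketch of the resulting teardown (the wrapper name is
illustrative):

	static void user_req_teardown(struct request *req, struct bio *bio)
	{
		if (bio) {
			if (bio_integrity(bio))
				bio_integrity_unmap_free_user(bio);
			blk_rq_unmap_user(bio);
		}
		blk_mq_free_request(req);
	}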
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a4e46eb20b..d8b6b4648e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -151,16 +151,17 @@ void nvme_mpath_end_request(struct request *rq)
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
static const char *nvme_ana_state_names[] = {
@@ -194,13 +195,14 @@ out:
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ int srcu_idx;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -596,7 +598,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
int node, srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- for_each_node(node)
+ for_each_online_node(node)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
@@ -681,6 +683,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
unsigned *nr_change_groups = data;
struct nvme_ns *ns;
+ int srcu_idx;
dev_dbg(ctrl->device, "ANA group %d: %s.\n",
le32_to_cpu(desc->grpid),
@@ -692,8 +695,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
unsigned nsid;
again:
nsid = le32_to_cpu(desc->nsids[n]);
@@ -706,7 +709,7 @@ again:
if (ns->head->ns_id > nsid)
goto again;
}
- up_read(&ctrl->namespaces_rwsem);
+ srcu_read_unlock(&ctrl->srcu, srcu_idx);
return 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d7bcc6d51e..68b400f9c4 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -282,7 +282,8 @@ struct nvme_ctrl {
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
- struct rw_semaphore namespaces_rwsem;
+ struct mutex namespaces_lock;
+ struct srcu_struct srcu;
struct device ctrl_device;
struct device *device; /* char device */
#ifdef CONFIG_NVME_HWMON
@@ -471,8 +472,6 @@ struct nvme_ns_head {
u8 pi_type;
u8 pi_offset;
u8 guard_type;
- u16 sgs;
- u32 sws;
#ifdef CONFIG_BLK_DEV_ZONED
u64 zsze;
#endif
@@ -503,7 +502,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -1149,7 +1148,7 @@ static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
- return NVME_SC_AUTH_REQUIRED;
+ return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
#endif
@@ -1162,6 +1161,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
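The NVME_NS_DEAC change in the enum above is a real bitmask fix, not
cosmetics: with implicit enumeration the value following 1 << 1 is 3,
which aliases the OR of the two preceding flags when tested as a bit.
A compile-time illustration:

	enum flags {
		F_A = 1 << 0,
		F_B = 1 << 1,
		F_BROKEN,		/* implicitly 3: not a single bit */
		F_FIXED = 1 << 2,	/* distinct bit, as in the patch */
	};
	_Static_assert(F_BROKEN == (F_A | F_B), "3 aliases both flags");
	_Static_assert(F_FIXED == 4, "explicit shift yields a new bit");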
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 710043086d..a823330567 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+ bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
@@ -825,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct bio_vec bv = rq_integrity_vec(req);
- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
- rq_dma_dir(req), 0);
+ iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
@@ -862,7 +863,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
nvme_start_request(req);
return BLK_STS_OK;
out_unmap_data:
- nvme_unmap_data(dev, req);
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -966,7 +968,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ rq_integrity_vec(req).bv_len, rq_dma_dir(req));
}
if (blk_rq_nr_phys_segments(req))
@@ -1273,7 +1275,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
dev_warn(dev->ctrl.device,
"Does your device have a faulty power saving mode enabled?\n");
dev_warn(dev->ctrl.device,
- "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
+ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
}
static enum blk_eh_timer_return nvme_timeout(struct request *req)
@@ -2931,6 +2933,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
}
+ /*
+ * NVMe SSD drops off the PCIe bus after system idle
+ * for 10 hours on a Lenovo N60z board.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
+ return NVME_QUIRK_NO_APST;
+
return 0;
}
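The masked-offset check in nvme_map_data() widens the fast PRP path:
only the offset within a controller page matters for PRP construction,
and with multi-page bvecs bv_offset may exceed NVME_CTRL_PAGE_SIZE. A
worked instance, assuming NVME_CTRL_PAGE_SIZE == 4096:

	/* bvec starting 1024 bytes into the second 4K page of a folio, 4K long */
	#define EX_OFF	5120
	#define EX_LEN	4096
	/* old test: 5120 + 4096 = 9216 > 8192, falls back to full mapping */
	/* new test: (5120 & 4095) + 4096 = 5120 <= 8192, two PRPs suffice */
	_Static_assert((EX_OFF & (4096 - 1)) + EX_LEN <= 4096 * 2,
		       "fits the simple two-PRP setup");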
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 366f0bb4eb..51a62b0c64 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -982,7 +982,8 @@ free_ctrl:
kfree(ctrl);
}
-static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
+ int status)
{
enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
@@ -992,7 +993,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
return;
}
- if (nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
ctrl->ctrl.opts->reconnect_delay);
queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
@@ -1104,10 +1105,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_rdma_ctrl, reconnect_work);
+ int ret;
++ctrl->ctrl.nr_reconnects;
- if (nvme_rdma_setup_ctrl(ctrl, false))
+ ret = nvme_rdma_setup_ctrl(ctrl, false);
+ if (ret)
goto requeue;
dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
@@ -1118,9 +1121,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
requeue:
- dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
- ctrl->ctrl.nr_reconnects);
- nvme_rdma_reconnect_or_remove(ctrl);
+ dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
+ ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
+ nvme_rdma_reconnect_or_remove(ctrl, ret);
}
static void nvme_rdma_error_recovery_work(struct work_struct *work)
@@ -1145,7 +1148,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
return;
}
- nvme_rdma_reconnect_or_remove(ctrl);
+ nvme_rdma_reconnect_or_remove(ctrl, 0);
}
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
@@ -2169,6 +2172,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl =
container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
+ int ret;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -2179,14 +2183,15 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
}
- if (nvme_rdma_setup_ctrl(ctrl, false))
+ ret = nvme_rdma_setup_ctrl(ctrl, false);
+ if (ret)
goto out_fail;
return;
out_fail:
++ctrl->ctrl.nr_reconnects;
- nvme_rdma_reconnect_or_remove(ctrl);
+ nvme_rdma_reconnect_or_remove(ctrl, ret);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 28bc2f373c..8b5e4327fe 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2161,7 +2161,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_tcp_destroy_io_queues(ctrl, remove);
}
-static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
+ int status)
{
enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
@@ -2171,13 +2172,14 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
return;
}
- if (nvmf_should_reconnect(ctrl)) {
+ if (nvmf_should_reconnect(ctrl, status)) {
dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
ctrl->opts->reconnect_delay);
queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
ctrl->opts->reconnect_delay * HZ);
} else {
- dev_info(ctrl->device, "Removing controller...\n");
+ dev_info(ctrl->device, "Removing controller (%d)...\n",
+ status);
nvme_delete_ctrl(ctrl);
}
}
@@ -2258,23 +2260,25 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
struct nvme_tcp_ctrl, connect_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ int ret;
++ctrl->nr_reconnects;
- if (nvme_tcp_setup_ctrl(ctrl, false))
+ ret = nvme_tcp_setup_ctrl(ctrl, false);
+ if (ret)
goto requeue;
- dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
- ctrl->nr_reconnects);
+ dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n",
+ ctrl->nr_reconnects, ctrl->opts->max_reconnects);
ctrl->nr_reconnects = 0;
return;
requeue:
- dev_info(ctrl->device, "Failed reconnect attempt %d\n",
- ctrl->nr_reconnects);
- nvme_tcp_reconnect_or_remove(ctrl);
+ dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n",
+ ctrl->nr_reconnects, ctrl->opts->max_reconnects);
+ nvme_tcp_reconnect_or_remove(ctrl, ret);
}
static void nvme_tcp_error_recovery_work(struct work_struct *work)
@@ -2301,7 +2305,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
return;
}
- nvme_tcp_reconnect_or_remove(ctrl);
+ nvme_tcp_reconnect_or_remove(ctrl, 0);
}
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
@@ -2321,6 +2325,7 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, reset_work);
+ int ret;
nvme_stop_ctrl(ctrl);
nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2334,14 +2339,15 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
return;
}
- if (nvme_tcp_setup_ctrl(ctrl, false))
+ ret = nvme_tcp_setup_ctrl(ctrl, false);
+ if (ret)
goto out_fail;
return;
out_fail:
++ctrl->nr_reconnects;
- nvme_tcp_reconnect_or_remove(ctrl);
+ nvme_tcp_reconnect_or_remove(ctrl, ret);
}
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 4f08362aee..8bc3f431c7 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -44,6 +44,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
dhchap_secret = kstrdup(secret, GFP_KERNEL);
if (!dhchap_secret)
return -ENOMEM;
+ down_write(&nvmet_config_sem);
if (set_ctrl) {
kfree(host->dhchap_ctrl_secret);
host->dhchap_ctrl_secret = strim(dhchap_secret);
@@ -53,6 +54,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
host->dhchap_secret = strim(dhchap_secret);
host->dhchap_key_hash = key_hash;
}
+ up_write(&nvmet_config_sem);
return 0;
}
@@ -124,12 +126,11 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
return ret;
}
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
int ret = 0;
struct nvmet_host_link *p;
struct nvmet_host *host = NULL;
- const char *hash_name;
down_read(&nvmet_config_sem);
if (nvmet_is_disc_subsys(ctrl->subsys))
@@ -147,13 +148,16 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
}
if (!host) {
pr_debug("host %s not found\n", ctrl->hostnqn);
- ret = -EPERM;
+ ret = NVME_AUTH_DHCHAP_FAILURE_FAILED;
goto out_unlock;
}
ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
- if (ret < 0)
+ if (ret < 0) {
pr_warn("Failed to setup DH group");
+ ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ goto out_unlock;
+ }
if (!host->dhchap_secret) {
pr_debug("No authentication provided\n");
@@ -164,12 +168,6 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
pr_debug("Re-use existing hash ID %d\n",
ctrl->shash_id);
} else {
- hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
- if (!hash_name) {
- pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
- ret = -EINVAL;
- goto out_unlock;
- }
ctrl->shash_id = host->dhchap_hash_id;
}
@@ -178,7 +176,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
host->dhchap_key_hash);
if (IS_ERR(ctrl->host_key)) {
- ret = PTR_ERR(ctrl->host_key);
+ ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
ctrl->host_key = NULL;
goto out_free_hash;
}
@@ -196,7 +194,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
host->dhchap_ctrl_key_hash);
if (IS_ERR(ctrl->ctrl_key)) {
- ret = PTR_ERR(ctrl->ctrl_key);
+ ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
ctrl->ctrl_key = NULL;
goto out_free_hash;
}
@@ -316,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c1,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
@@ -327,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
ret = crypto_shash_init(shash);
@@ -363,9 +361,10 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c1)
kfree(challenge);
- kfree(shash);
out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
@@ -429,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c2,
challenge, shash_len);
if (ret)
- goto out_free_response;
+ goto out_free_challenge;
}
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
- goto out_free_response;
+ goto out_free_challenge;
}
shash->tfm = shash_tfm;
@@ -473,9 +472,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
+ kfree(shash);
+out_free_challenge:
if (challenge != req->sq->dhchap_c2)
kfree(challenge);
- kfree(shash);
out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
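Both hash helpers above get the same unwind fix: the challenge buffer
is now freed even when the shash descriptor allocation fails, by
introducing a label that sits after the kfree(shash) that case cannot
need. The generic shape of the pattern, with illustrative names:

	static int setup_two(void)
	{
		void *a, *b;
		int ret = 0;

		a = kzalloc(16, GFP_KERNEL);
		if (!a)
			return -ENOMEM;
		b = kzalloc(16, GFP_KERNEL);
		if (!b) {
			ret = -ENOMEM;
			goto out_free_a;	/* b was never allocated */
		}
		/* ... use a and b ... */
		kfree(b);
	out_free_a:
		kfree(a);
		return ret;
	}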
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index dfdff6aba6..685e89b35d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item,
return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
}
}
- return sprintf(page, "reserved\n");
+ return sprintf(page, "\n");
+}
+
+static u8 nvmet_addr_tsas_rdma_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
+ return nvmet_addr_tsas_rdma[i].type;
+ }
+ return NVMF_RDMA_QPTYPE_INVALID;
+}
+
+static u8 nvmet_addr_tsas_tcp_store(const char *page)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+ if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
+ return nvmet_addr_tsas_tcp[i].type;
+ }
+ return NVMF_TCP_SECTYPE_INVALID;
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
@@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = nvmet_port_disc_addr_treq_mask(port);
- u8 sectype;
- int i;
+ u8 sectype, qptype;
if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
- if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
- sectype = nvmet_addr_tsas_tcp[i].type;
+ if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
+ qptype = nvmet_addr_tsas_rdma_store(page);
+ if (qptype == port->disc_addr.tsas.rdma.qptype)
+ return count;
+ } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
+ sectype = nvmet_addr_tsas_tcp_store(page);
+ if (sectype != NVMF_TCP_SECTYPE_INVALID)
goto found;
- }
}
pr_err("Invalid value '%s' for tsas\n", page);
@@ -2015,11 +2036,17 @@ static struct config_group nvmet_ports_group;
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
char *page)
{
- u8 *dhchap_secret = to_host(item)->dhchap_secret;
+ u8 *dhchap_secret;
+ ssize_t ret;
+ down_read(&nvmet_config_sem);
+ dhchap_secret = to_host(item)->dhchap_secret;
if (!dhchap_secret)
- return sprintf(page, "\n");
- return sprintf(page, "%s\n", dhchap_secret);
+ ret = sprintf(page, "\n");
+ else
+ ret = sprintf(page, "%s\n", dhchap_secret);
+ up_read(&nvmet_config_sem);
+ return ret;
}
static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
@@ -2043,10 +2070,16 @@ static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
char *page)
{
u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+ ssize_t ret;
+ down_read(&nvmet_config_sem);
+ dhchap_secret = to_host(item)->dhchap_ctrl_secret;
if (!dhchap_secret)
- return sprintf(page, "\n");
- return sprintf(page, "%s\n", dhchap_secret);
+ ret = sprintf(page, "\n");
+ else
+ ret = sprintf(page, "%s\n", dhchap_secret);
+ up_read(&nvmet_config_sem);
+ return ret;
}
static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2fde223236..4ff460ba28 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -818,6 +818,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
nvmet_auth_sq_free(sq);
+ /*
+ * We must reference the ctrl again after waiting for inflight IO
+ * to complete, because an admin connect may have sneaked in after
+ * we stored sq->ctrl locally but before we killed the percpu_ref.
+ * That admin connect allocates and assigns sq->ctrl, which now
+ * needs a final ref put, as this ctrl is going away.
+ */
+ ctrl = sq->ctrl;
+
if (ctrl) {
/*
* The teardown flow may take some time, and the host may not
@@ -948,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index eb7785be0c..cb34d644ed 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -31,7 +31,7 @@ void nvmet_auth_sq_init(struct nvmet_sq *sq)
sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}
-static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_auth_dhchap_negotiate_data *data = d;
@@ -109,7 +109,7 @@ static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_auth_dhchap_reply_data *data = d;
@@ -172,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_failure2(void *d)
+static u8 nvmet_auth_failure2(void *d)
{
struct nvmf_auth_dhchap_failure_data *data = d;
@@ -186,6 +186,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
void *d;
u32 tl;
u16 status = 0;
+ u8 dhchap_status;
if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -237,30 +238,32 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
/* Restart negotiation */
- pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
- ctrl->cntlid, req->sq->qid);
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n",
+ __func__, ctrl->cntlid, req->sq->qid);
if (!req->sq->qid) {
- if (nvmet_setup_auth(ctrl) < 0) {
- status = NVME_SC_INTERNAL;
- pr_err("ctrl %d qid 0 failed to setup"
- "re-authentication",
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
ctrl->cntlid);
- goto done_failure1;
+ req->sq->dhchap_status = dhchap_status;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ goto done_kfree;
}
}
- req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
} else if (data->auth_id != req->sq->dhchap_step)
goto done_failure1;
/* Validate negotiation parameters */
- status = nvmet_auth_negotiate(req, d);
- if (status == 0)
+ dhchap_status = nvmet_auth_negotiate(req, d);
+ if (dhchap_status == 0)
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
else {
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
- req->sq->dhchap_status = status;
- status = 0;
+ req->sq->dhchap_status = dhchap_status;
}
goto done_kfree;
}
@@ -284,15 +287,14 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
switch (data->auth_id) {
case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
- status = nvmet_auth_reply(req, d);
- if (status == 0)
+ dhchap_status = nvmet_auth_reply(req, d);
+ if (dhchap_status == 0)
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
else {
req->sq->dhchap_step =
NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
- req->sq->dhchap_status = status;
- status = 0;
+ req->sq->dhchap_status = dhchap_status;
}
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
@@ -301,13 +303,12 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
__func__, ctrl->cntlid, req->sq->qid);
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
- status = nvmet_auth_failure2(d);
- if (status) {
+ dhchap_status = nvmet_auth_failure2(d);
+ if (dhchap_status) {
pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
- ctrl->cntlid, req->sq->qid, status);
- req->sq->dhchap_status = status;
+ ctrl->cntlid, req->sq->qid, dhchap_status);
+ req->sq->dhchap_status = dhchap_status;
req->sq->authenticated = false;
- status = 0;
}
goto done_kfree;
default:
@@ -332,7 +333,6 @@ done:
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -515,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
-
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index b23f4cf840..69d77d34be 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -211,7 +211,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status;
- int ret;
+ u8 dhchap_status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
@@ -254,11 +251,12 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
uuid_copy(&ctrl->hostid, &d->hostid);
- ret = nvmet_setup_auth(ctrl);
- if (ret < 0) {
- pr_err("Failed to setup authentication, error %d\n", ret);
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("Failed to setup authentication, dhchap status %u\n",
+ dhchap_status);
nvmet_ctrl_put(ctrl);
- if (ret == -EPERM)
+ if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
else
status = NVME_SC_INTERNAL;
@@ -304,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 337ee1cb09..381b439473 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue {
struct workqueue_struct *work_q;
struct kref ref;
/* array of fcp_iods */
- struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
+ struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_hostport {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c1306de1f4..2f22b07eab 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -113,8 +113,8 @@ struct nvmet_sq {
bool authenticated;
struct delayed_work auth_expired_work;
u16 dhchap_tid;
- u16 dhchap_status;
- int dhchap_step;
+ u8 dhchap_status;
+ u8 dhchap_step;
u8 *dhchap_c1;
u8 *dhchap_c2;
u32 dhchap_s1;
@@ -714,7 +714,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -733,7 +733,7 @@ int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *buf, int buf_size);
#else
-static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
return 0;
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6e1b4140cd..689bb5d3cf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1806,18 +1806,14 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
- struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_queue *queue, *n;
-restart:
mutex_lock(&nvmet_rdma_queue_mutex);
- list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
- if (queue->nvme_sq.ctrl == ctrl) {
- list_del_init(&queue->queue_list);
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- __nvmet_rdma_queue_disconnect(queue);
- goto restart;
- }
+ list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) {
+ if (queue->nvme_sq.ctrl != ctrl)
+ continue;
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
}
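The rdma target loop above replaces a drop-lock-and-restart pattern
with list_for_each_entry_safe(), which caches the next node so the
current one may be unlinked mid-walk and the mutex can stay held for
the whole pass. The pattern in isolation (struct item is illustrative):

	struct item {
		struct list_head list;
	};

	static void drop_matching(struct mutex *lock, struct list_head *head)
	{
		struct item *it, *next;

		mutex_lock(lock);
		list_for_each_entry_safe(it, next, head, list) {
			list_del_init(&it->list);
			/* teardown of 'it' may now happen under 'lock' */
		}
		mutex_unlock(lock);
	}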
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3148d9f1bd..0021d06041 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
return false;
/*
- * ZNS does not define a conventional zone type. If the underlying
- * device has a bitmap set indicating the existence of conventional
- * zones, reject the device. Otherwise, use report zones to detect if
- * the device has conventional zones.
+ * ZNS does not define a conventional zone type. Use report zones
+ * to detect if the device has conventional zones and reject it if
+ * it does.
*/
- if (ns->bdev->bd_disk->conv_zones_bitmap)
- return false;
-
ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
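The callback body is outside this hunk; a minimal implementation
consistent with the comment above (reject the device on the first
conventional zone) could look like the following sketch. The return
value is an assumption here, not taken from the patch:

	static int validate_conv_zones_cb(struct blk_zone *z,
					  unsigned int idx, void *data)
	{
		if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return -EPROTONOSUPPORT;	/* assumed errno */
		return 0;
	}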