authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-08 16:58:07 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-08 16:58:07 +0000
commit5a6d13c86f2fe6304450e907fc1d8d72da82efef (patch)
tree1bd4e8854203c575eabaa99e2c509f8886065733 /drivers/nvme
parentAdding upstream version 6.1.76. (diff)
Adding upstream version 6.1.82. (tag: upstream/6.1.82)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/fc.c       |  47
-rw-r--r--  drivers/nvme/target/fc.c     | 131
-rw-r--r--  drivers/nvme/target/fcloop.c |   6
-rw-r--r--  drivers/nvme/target/tcp.c    |   1
4 files changed, 85 insertions(+), 100 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 177a365b8..3dbf926fd 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
- complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -3869,10 +3862,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
- if (!nvme_fc_wq)
- return -ENOMEM;
-
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3890,7 +3879,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- goto out_destroy_wq;
+ return ret;
}
/*
@@ -3914,8 +3903,6 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
-out_destroy_wq:
- destroy_workqueue(nvme_fc_wq);
return ret;
}
@@ -3935,45 +3922,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
-
- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
- list_for_each_entry(rport, &lport->endp_list, endp_list) {
- nvme_fc_delete_controllers(rport);
- }
- }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
unsigned long flags;
- bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
- nvme_fc_waiting_to_unload = true;
- if (!list_empty(&nvme_fc_lport_list)) {
- need_cleanup = true;
- nvme_fc_cleanup_for_unload();
- }
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+ list_for_each_entry(rport, &lport->endp_list, endp_list)
+ nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- if (need_cleanup) {
- pr_info("%s: waiting for ctlr deletes\n", __func__);
- wait_for_completion(&nvme_fc_unload_proceed);
- pr_info("%s: ctrl deletes complete\n", __func__);
- }
+ flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
- ida_destroy(&nvme_fc_local_port_cnt);
- ida_destroy(&nvme_fc_ctrl_cnt);
-
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
- destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
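
[Editor's note] For orientation: the host-side hunks above drop the driver-private nvme_fc_wq together with the completion-based unload handshake; on exit the driver now walks the lport/rport lists, queues the controller deletions, and flushes the shared delete workqueue before unregistering. Below is a minimal, self-contained sketch of that shutdown pattern — demo_* names are hypothetical, and demo_delete_wq merely stands in for the shared nvme_delete_wq the patch flushes.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_ctrl {
        struct list_head        entry;
        struct work_struct      delete_work;    /* INIT_WORK()'d with demo_delete_work at creation */
};

static LIST_HEAD(demo_ctrl_list);
static DEFINE_SPINLOCK(demo_lock);
static struct workqueue_struct *demo_delete_wq; /* stand-in for the shared nvme_delete_wq */

static void demo_delete_work(struct work_struct *work)
{
        struct demo_ctrl *ctrl = container_of(work, struct demo_ctrl, delete_work);
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        list_del(&ctrl->entry);
        spin_unlock_irqrestore(&demo_lock, flags);
        kfree(ctrl);                            /* real controller teardown would go here */
}

static int __init demo_init(void)
{
        demo_delete_wq = alloc_workqueue("demo_delete_wq", WQ_MEM_RECLAIM, 0);
        return demo_delete_wq ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        struct demo_ctrl *ctrl;
        unsigned long flags;

        /* queue a deletion for every remaining controller ... */
        spin_lock_irqsave(&demo_lock, flags);
        list_for_each_entry(ctrl, &demo_ctrl_list, entry)
                queue_work(demo_delete_wq, &ctrl->delete_work);
        spin_unlock_irqrestore(&demo_lock, flags);

        /* ... and wait for all of them instead of waiting on a private completion */
        flush_workqueue(demo_delete_wq);
        destroy_workqueue(demo_delete_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
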
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1ab6601fd..8a02ed63b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
+
+ struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -165,7 +167,7 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
struct rcu_head rcu;
@@ -248,6 +250,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(work, struct nvmet_fc_tgtport, put_work);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
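
[Editor's note] The new put_work member and nvmet_fc_put_tgtport_work() exist so the target-port reference can be dropped from workqueue context rather than inline in the LS completion path (see the __nvmet_fc_finish_ls_req hunk below). A minimal sketch of that deferred-put pattern, with hypothetical demo_* names:

#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_port {
        struct kref             ref;
        struct work_struct      put_work;
};

static void demo_port_free(struct kref *ref)
{
        kfree(container_of(ref, struct demo_port, ref));
}

static void demo_port_put(struct demo_port *port)
{
        kref_put(&port->ref, demo_port_free);
}

/* runs on a workqueue; mirrors nvmet_fc_put_tgtport_work() */
static void demo_port_put_work(struct work_struct *work)
{
        struct demo_port *port = container_of(work, struct demo_port, put_work);

        demo_port_put(port);
}

static struct demo_port *demo_port_alloc(void)
{
        struct demo_port *port = kzalloc(sizeof(*port), GFP_KERNEL);

        if (!port)
                return NULL;
        kref_init(&port->ref);
        INIT_WORK(&port->put_work, demo_port_put_work);
        return port;
}

/* callers that must not drop the final reference inline defer the put */
static void demo_port_put_async(struct demo_port *port)
{
        queue_work(system_wq, &port->put_work);
}
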
@@ -359,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
if (!lsop->req_queued) {
spin_unlock_irqrestore(&tgtport->lock, flags);
- return;
+ goto out_putwork;
}
list_del(&lsop->lsreq_list);
@@ -372,7 +381,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
- nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+ queue_work(nvmet_wq, &tgtport->put_work);
}
static int
@@ -801,14 +811,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
- if (!nvmet_fc_tgt_a_get(assoc))
- goto out_free_queue;
-
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
- goto out_a_put;
+ goto out_free_queue;
queue->qid = qid;
queue->sqsize = sqsize;
@@ -830,15 +837,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- rcu_assign_pointer(assoc->queues[qid], queue);
+ assoc->queues[qid] = queue;
return queue;
out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
-out_a_put:
- nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
@@ -851,12 +856,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
- nvmet_fc_tgt_a_put(queue->assoc);
-
destroy_workqueue(queue->work_q);
kfree_rcu(queue, rcu);
@@ -968,7 +969,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = rcu_dereference(assoc->queues[qid]);
+ queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
@@ -1077,8 +1078,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
/* new allocation not needed */
kfree(newhost);
newhost = match;
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
@@ -1093,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_tgtport_get(assoc->tgtport);
+ queue_work(nvmet_wq, &assoc->del_work);
}
static struct nvmet_fc_tgt_assoc *
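
[Editor's note] nvmet_fc_schedule_delete_assoc() pairs a target-port reference get with the queued delete work, and nvmet_fc_delete_assoc_work() drops that reference once the association is gone, so the port cannot be freed while the work item is still pending. A compact sketch of this pin-then-queue pattern, with hypothetical demo_* names and del_work assumed to be INIT_WORK()'d at allocation time:

#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_parent {
        struct kref             ref;
};

static void demo_parent_release(struct kref *ref)
{
        kfree(container_of(ref, struct demo_parent, ref));
}

struct demo_child {
        struct demo_parent      *parent;
        struct work_struct      del_work;       /* INIT_WORK()'d with demo_child_del_work */
};

/* teardown of the child itself; stands in for nvmet_fc_delete_assoc() */
static void demo_child_delete(struct demo_child *child)
{
        kfree(child);
}

static void demo_child_del_work(struct work_struct *work)
{
        struct demo_child *child = container_of(work, struct demo_child, del_work);
        struct demo_parent *parent = child->parent;

        demo_child_delete(child);
        kref_put(&parent->ref, demo_parent_release);    /* balances the get below */
}

/* mirrors nvmet_fc_schedule_delete_assoc(): pin the parent, then queue */
static void demo_schedule_child_delete(struct demo_child *child)
{
        kref_get(&child->parent->ref);
        queue_work(system_wq, &child->del_work);
}
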
@@ -1111,6 +1125,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
int idx;
bool needrandom = true;
+ if (!tgtport->pe)
+ return NULL;
+
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
return NULL;
@@ -1130,7 +1147,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
while (needrandom) {
@@ -1171,13 +1188,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ int i;
+
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ nvmet_fc_delete_target_queue(assoc->queues[i]);
+ }
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1207,7 +1229,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1216,29 +1238,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del_rcu(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- rcu_read_lock();
- queue = rcu_dereference(assoc->queues[i]);
- if (!queue) {
- rcu_read_unlock();
- continue;
- }
+ synchronize_rcu();
- if (!nvmet_fc_tgt_q_get(queue)) {
- rcu_read_unlock();
- continue;
- }
- rcu_read_unlock();
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
+ /* ensure all in-flight I/Os have been processed */
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
-
- nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
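
[Editor's note] Alongside the queue-pointer changes (the __rcu annotation and the rcu_assign_pointer()/rcu_dereference() calls are gone), deletion now unlinks the association with list_del_rcu() under the port lock and then calls synchronize_rcu() before the per-queue workqueues are flushed. A minimal sketch of that unlink, wait, then tear down ordering, with hypothetical demo_* names:

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_assoc {
        struct list_head        a_list;
        struct rcu_head         rcu;
};

static LIST_HEAD(demo_assoc_list);
static DEFINE_SPINLOCK(demo_list_lock);

/* reader side: lockless traversal, as in nvmet_fc_find_target_queue() */
static int demo_count(void)
{
        struct demo_assoc *assoc;
        int n = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(assoc, &demo_assoc_list, a_list)
                n++;
        rcu_read_unlock();
        return n;
}

/* writer side: unlink, wait for readers, then tear down */
static void demo_delete(struct demo_assoc *assoc)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_list_lock, flags);
        list_del_rcu(&assoc->a_list);
        spin_unlock_irqrestore(&demo_list_lock, flags);

        synchronize_rcu();      /* no reader can still observe assoc */

        kfree(assoc);           /* or kfree_rcu(assoc, rcu) without the synchronize_rcu() */
}
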
@@ -1414,6 +1428,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
+ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1491,9 +1506,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
@@ -1546,9 +1560,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1580,7 +1593,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
- queue = rcu_dereference(assoc->queues[0]);
+ queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
@@ -1592,9 +1605,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1624,6 +1636,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ flush_workqueue(nvmet_wq);
+
/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
@@ -1869,9 +1883,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
- /* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(assoc);
-
/*
* The rules for LS response says the response cannot
* go back until ABTS's have been sent for all outstanding
@@ -1886,8 +1897,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_assoc(assoc);
-
if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
@@ -1903,6 +1912,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+
return false;
}
@@ -2539,8 +2551,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- if (tgtport->pe)
- fod->req.port = tgtport->pe->port;
+ if (!tgtport->pe)
+ goto transport_error;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2901,6 +2914,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
nvmet_fc_portentry_unbind(pe);
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(pe->tgtport);
+
kfree(pe);
}
@@ -2932,6 +2948,9 @@ static int __init nvmet_fc_init_module(void)
static void __exit nvmet_fc_exit_module(void)
{
+ /* ensure any shutdown operation, e.g. delete ctrls have finished */
+ flush_workqueue(nvmet_wq);
+
/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c780af36c..f5b8442b6 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
if (!tport->remoteport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
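
[Editor's note] All three fcloop hunks are the same one-line fix: list_add_tail() takes the new entry first and the list head second, so the old calls — with the arguments swapped — linked the port's list head into the request instead of queueing the request on the port's ls_list, and the work handler walking that list never saw it. A minimal illustration with hypothetical demo_* names:

#include <linux/list.h>

struct demo_req {
        struct list_head        ls_list;        /* node, lives inside the request */
};

static LIST_HEAD(demo_pending);                 /* list head, lives in the port */

static void demo_queue_req(struct demo_req *req)
{
        /* correct: append the request's node to the port's list */
        list_add_tail(&req->ls_list, &demo_pending);

        /*
         * Buggy (the old argument order): list_add_tail(&demo_pending,
         * &req->ls_list) links the shared list head into the request's
         * private list, so the request never appears on demo_pending.
         */
}
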
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ce42afe8f..348076827 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1884,6 +1884,7 @@ static void __exit nvmet_tcp_exit(void)
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
+ ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
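
[Editor's note] Finally, the tcp.c hunk adds the ida_destroy() that releases the queue-ID allocator's internal memory once the module goes away. A minimal sketch of that IDA lifecycle, with hypothetical demo_* names:

#include <linux/idr.h>

static DEFINE_IDA(demo_queue_ida);

static int demo_queue_create(void)
{
        int idx = ida_alloc(&demo_queue_ida, GFP_KERNEL);

        if (idx < 0)
                return idx;
        /* ... use idx as the queue number ... */
        return idx;
}

static void demo_queue_destroy(int idx)
{
        ida_free(&demo_queue_ida, idx);
}

static void demo_module_exit(void)
{
        /* all IDs have been ida_free()'d; release the allocator's own memory */
        ida_destroy(&demo_queue_ida);
}
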