path: root/drivers/scsi/elx/libefc
Diffstat (limited to 'drivers/scsi/elx/libefc')
-rw-r--r--  drivers/scsi/elx/libefc/efc.h           52
-rw-r--r--  drivers/scsi/elx/libefc/efc_cmds.c     782
-rw-r--r--  drivers/scsi/elx/libefc/efc_cmds.h      35
-rw-r--r--  drivers/scsi/elx/libefc/efc_device.c  1602
-rw-r--r--  drivers/scsi/elx/libefc/efc_device.h    72
-rw-r--r--  drivers/scsi/elx/libefc/efc_domain.c  1088
-rw-r--r--  drivers/scsi/elx/libefc/efc_domain.h    54
-rw-r--r--  drivers/scsi/elx/libefc/efc_els.c     1094
-rw-r--r--  drivers/scsi/elx/libefc/efc_els.h      107
-rw-r--r--  drivers/scsi/elx/libefc/efc_fabric.c  1563
-rw-r--r--  drivers/scsi/elx/libefc/efc_fabric.h   116
-rw-r--r--  drivers/scsi/elx/libefc/efc_node.c    1102
-rw-r--r--  drivers/scsi/elx/libefc/efc_node.h     191
-rw-r--r--  drivers/scsi/elx/libefc/efc_nport.c    777
-rw-r--r--  drivers/scsi/elx/libefc/efc_nport.h     50
-rw-r--r--  drivers/scsi/elx/libefc/efc_sm.c        54
-rw-r--r--  drivers/scsi/elx/libefc/efc_sm.h       197
-rw-r--r--  drivers/scsi/elx/libefc/efclib.c        81
-rw-r--r--  drivers/scsi/elx/libefc/efclib.h       623
19 files changed, 9640 insertions, 0 deletions
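
Most of this patch is the libefc state-machine library: efc_cmds.c wraps the SLI-4 mailbox commands (INIT_VPI/REG_VPI, INIT_VFI/REG_VFI, REG_RPI/UNREG_RPI) behind asynchronous completion callbacks, while efc_device.c implements the remote-device node state machine on top of them. Every node state below follows one dispatch convention; a rough sketch for orientation (illustrative only, not part of the patch, and assuming the definitions from efc_sm.h and efclib.h added by this series):

static void __efc_d_example_state(struct efc_sm_ctx *ctx,
				  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;   /* ctx->app carries the node */

	switch (evt) {
	case EFC_EVT_ENTER:                 /* posted on entering the state */
		efc_node_hold_frames(node);
		break;
	case EFC_EVT_EXIT:                  /* posted on leaving the state */
		efc_node_accept_frames(node);
		break;
	default:                            /* shared handling for the rest */
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

efc_node_transition() swaps the node's state function and posts EFC_EVT_EXIT/EFC_EVT_ENTER around the swap, which is why nearly every state below brackets its work with hold/accept frames.
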
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
new file mode 100644
index 000000000..468ff3cc9
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_H__
+#define __EFC_H__
+
+#include "../include/efc_common.h"
+#include "efclib.h"
+#include "efc_sm.h"
+#include "efc_cmds.h"
+#include "efc_domain.h"
+#include "efc_nport.h"
+#include "efc_node.h"
+#include "efc_fabric.h"
+#include "efc_device.h"
+#include "efc_els.h"
+
+#define EFC_MAX_REMOTE_NODES 2048
+#define NODE_SPARAMS_SIZE 256
+
+enum efc_scsi_del_initiator_reason {
+ EFC_SCSI_INITIATOR_DELETED,
+ EFC_SCSI_INITIATOR_MISSING,
+};
+
+enum efc_scsi_del_target_reason {
+ EFC_SCSI_TARGET_DELETED,
+ EFC_SCSI_TARGET_MISSING,
+};
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+#define domain_sm_trace(domain) \
+ efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \
+ domain->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define domain_trace(domain, fmt, ...) \
+ efc_log_debug(domain->efc, \
+ "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \
+
+#define node_sm_trace() \
+ efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
+ node->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define nport_sm_trace(nport) \
+ efc_log_debug(nport->efc, \
+ "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \
+
+#endif /* __EFC_H__ */
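
Note that node_sm_trace() takes no arguments: it expands references to node and evt from the enclosing scope, so it can only be used inside handlers that declare both names, as every handler in efc_device.c below does (domain_sm_trace() and nport_sm_trace() likewise pick up evt implicitly). A minimal usage sketch with a hypothetical handler name:

static void __efc_d_example(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	node_sm_trace();   /* logs node->display_name, __func__ and evt */
	/* ... event handling ... */
}
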
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
new file mode 100644
index 000000000..da4ac8a4c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efclib.h"
+#include "../libefc_sli/sli4.h"
+#include "efc_cmds.h"
+#include "efc_sm.h"
+
+static void
+efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Clear the nport attached flag */
+ nport->attached = false;
+
+ /* Free the service parameters buffer */
+ if (nport->dma.virt) {
+ dma_free_coherent(&efc->pci->dev, nport->dma.size,
+ nport->dma.virt, nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+
+ efc_nport_cb(efc, evt, nport);
+}
+
+static int
+efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
+{
+ struct efc *efc = nport->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
+ nport->indicator, status, le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ int evt = EFC_EVT_NPORT_FREE_OK;
+ int rc;
+
+ rc = efc_nport_get_mbox_status(nport, mqe, status);
+ if (rc)
+ evt = EFC_EVT_NPORT_FREE_FAIL;
+
+ efc_nport_free_resources(nport, evt, mqe);
+ return rc;
+}
+
+static void
+efc_nport_free_unreg_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
+ SLI4_UNREG_TYPE_PORT);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_free_unreg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ }
+}
+
+static void
+efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Now inform the registered callbacks */
+ efc_nport_cb(efc, evt, nport);
+
+ /* Set the nport attached flag */
+ if (evt == EFC_EVT_NPORT_ATTACH_OK)
+ nport->attached = true;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending)
+ efc_nport_free_unreg_vpi(nport);
+}
+
+static int
+efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_nport_alloc_init_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
+ return;
+ }
+
+ rc = sli_cmd_init_vpi(efc->sli, data,
+ nport->indicator, nport->domain->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_init_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ u8 *payload = NULL;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ payload = nport->dma.virt;
+
+ memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
+ sizeof(nport->sli_wwpn));
+ memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
+ sizeof(nport->sli_wwnn));
+
+ dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
+ nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ efc_nport_alloc_init_vpi(nport);
+ return 0;
+}
+
+static void
+efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
+{
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* Allocate memory for the service parameters */
+ nport->dma.size = EFC_SPARAM_DMA_SZ;
+ nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ nport->dma.size, &nport->dma.phys,
+ GFP_KERNEL);
+ if (!nport->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = sli_cmd_read_sparm64(efc->sli, data,
+ &nport->dma, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_read_sparm64_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn)
+{
+ u32 index;
+
+ nport->indicator = U32_MAX;
+ nport->free_req_pending = false;
+
+ if (wwpn)
+ memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));
+
+ /*
+ * Allocate a VPI object for the port and store it in the
+ * indicator field of the port object.
+ */
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
+ &nport->indicator, &index)) {
+ efc_log_err(efc, "VPI allocation failure\n");
+ return -EIO;
+ }
+
+ if (domain) {
+ /*
+ * If the WWPN is NULL, fetch the default
+ * WWPN and WWNN before initializing the VPI
+ */
+ if (!wwpn)
+ efc_nport_alloc_read_sparm64(efc, nport);
+ else
+ efc_nport_alloc_init_vpi(nport);
+ } else if (!wwpn) {
+ /* domain NULL and wwpn NULL */
+ efc_log_err(efc, "need WWN for physical port\n");
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ nport->attaching = false;
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!nport) {
+ efc_log_err(efc, "bad param(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ nport->fc_id = fc_id;
+
+ /* register previously-allocated VPI with the device */
+ rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
+ nport->sli_wwpn, nport->indicator,
+ nport->domain->indicator, false);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ return rc;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_nport_attach_reg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ } else {
+ nport->attaching = true;
+ }
+
+ return rc;
+}
+
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
+{
+ if (!nport) {
+ efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ /* Issue the UNREG_VPI command to free the assigned VPI context */
+ if (nport->attached)
+ efc_nport_free_unreg_vpi(nport);
+ else if (nport->attaching)
+ nport->free_req_pending = true;
+ else
+ efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
+
+ return 0;
+}
+
+static int
+efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
+{
+ struct efc *efc = domain->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
+ domain->indicator, status,
+ le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Free the service parameters buffer */
+ if (domain->dma.virt) {
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);
+
+ efc_domain_cb(efc, evt, domain);
+}
+
+static void
+efc_domain_send_nport_evt(struct efc_domain *domain,
+ int port_evt, int domain_evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Send alloc/attach ok to the physical nport */
+ efc_nport_send_evt(domain->nport, port_evt, NULL);
+
+ /* Now inform the registered callbacks */
+ efc_domain_cb(efc, domain_evt, domain);
+}
+
+static int
+efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_domain_alloc_read_sparm64(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_read_sparm64_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_alloc_read_sparm64(domain);
+ return 0;
+}
+
+static void
+efc_domain_alloc_init_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_nport *nport = domain->nport;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /*
+ * For FC, the HW has already registered an FCFI.
+ * Copy FCF information into the domain and jump to INIT_VFI.
+ */
+ domain->fcf_indicator = efc->fcfi;
+ rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
+ domain->fcf_indicator, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ efc_log_err(efc, "%s issue mbox\n", __func__);
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_init_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
+{
+ u32 index;
+
+ if (!domain || !domain->nport) {
+ efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
+ domain, domain ? domain->nport : NULL);
+ return -EIO;
+ }
+
+ /* allocate memory for the service parameters */
+ domain->dma.size = EFC_SPARAM_DMA_SZ;
+ domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ domain->dma.size,
+ &domain->dma.phys, GFP_KERNEL);
+ if (!domain->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ return -EIO;
+ }
+
+ domain->fcf = fcf;
+ domain->fcf_indicator = U32_MAX;
+ domain->indicator = U32_MAX;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
+ &index)) {
+ efc_log_err(efc, "VFI allocation failure\n");
+
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+
+ return -EIO;
+ }
+
+ efc_domain_alloc_init_vfi(domain);
+ return 0;
+}
+
+static int
+efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!domain) {
+ efc_log_err(efc, "bad param(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ domain->nport->fc_id = fc_id;
+
+ rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
+ domain->fcf_indicator, domain->dma,
+ domain->nport->indicator, domain->nport->sli_wwpn,
+ domain->nport->fc_id);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_domain_attach_reg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return rc;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);
+
+ return rc;
+}
+
+static int
+efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_domain *domain = arg;
+ int evt = EFC_HW_DOMAIN_FREE_OK;
+ int rc;
+
+ rc = efc_domain_get_mbox_status(domain, mqe, status);
+ if (rc) {
+ evt = EFC_HW_DOMAIN_FREE_FAIL;
+ rc = -EIO;
+ }
+
+ efc_domain_free_resources(domain, evt, mqe);
+ return rc;
+}
+
+static void
+efc_domain_free_unreg_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
+ SLI4_UNREG_TYPE_DOMAIN);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_free_unreg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
+}
+
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
+{
+ if (!domain) {
+ efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ efc_domain_free_unreg_vfi(domain);
+ return 0;
+}
+
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport)
+{
+ /* Fail if an RPI is already allocated to this node */
+ if (rnode->indicator != U32_MAX) {
+ efc_log_err(efc,
+ "RPI allocation failure addr=%#x rpi=%#x\n",
+ fc_addr, rnode->indicator);
+ return -EIO;
+ }
+
+ /* NULL SLI port indicates an unallocated remote node */
+ rnode->nport = NULL;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
+ &rnode->indicator, &rnode->index)) {
+ efc_log_err(efc, "RPI allocation failure addr=%#x\n",
+ fc_addr);
+ return -EIO;
+ }
+
+ rnode->fc_id = fc_addr;
+ rnode->nport = nport;
+
+ return 0;
+}
+
+static int
+efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_ATTACH_FAIL;
+ } else {
+ rnode->attached = true;
+ evt = EFC_EVT_NODE_ATTACH_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return 0;
+}
+
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms)
+{
+ int rc = -EIO;
+ u8 buf[SLI4_BMBX_SIZE];
+
+ if (!rnode || !sparms) {
+ efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
+ rnode, sparms);
+ return -EIO;
+ }
+
+ /*
+ * A valid node index means an RPI was allocated by
+ * efc_cmd_node_alloc(); require it before registering the RPI.
+ */
+ if (rnode->index == U32_MAX) {
+ efc_log_err(efc, "bad parameter rnode->index invalid\n");
+ return -EIO;
+ }
+
+ /* Update a remote node object with the remote port's service params */
+ if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
+ rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_attach_cb, rnode);
+
+ return rc;
+}
+
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
+{
+ int rc = 0;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (rnode->attached) {
+ efc_log_err(efc, "rnode is still attached\n");
+ return -EIO;
+ }
+ if (rnode->indicator != U32_MAX) {
+ if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
+ rnode->indicator)) {
+ efc_log_err(efc,
+ "RPI free fail RPI %d addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ rc = -EIO;
+ } else {
+ rnode->indicator = U32_MAX;
+ rnode->index = U32_MAX;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int
+efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = EFC_EVT_NODE_FREE_FAIL;
+ int rc = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+
+ /*
+ * In certain cases, a non-zero MQE status is OK (all must be
+ * true):
+ * - node is attached
+ * - status is 0x1400
+ */
+ if (!rnode->attached ||
+ (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
+ rc = -EIO;
+ }
+
+ if (!rc) {
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_FREE_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return rc;
+}
+
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = -EIO;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (!rnode->attached)
+ return -EIO;
+
+ rc = -EIO;
+
+ if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
+ SLI4_RSRC_RPI, U32_MAX))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_free_cb, rnode);
+
+ if (rc != 0) {
+ efc_log_err(efc, "UNREG_RPI failed\n");
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
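
Every command in efc_cmds.c above follows the same two-phase shape: a sli_cmd_*() helper formats the mailbox command into a SLI4_BMBX_SIZE buffer on the caller's stack (the transport copies it out), then efc->tt.issue_mbox_rqst() queues it with a completion callback, which must check both the completion status argument and the status word inside the mailbox header. A condensed sketch of that pattern, using INIT_VPI as the example (illustrative only; error logging trimmed):

static int example_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	/* both the CQE status and the MQE header status must be clean */
	if (status || le16_to_cpu(hdr->status))
		return -EIO;
	return 0;
}

static int example_issue(struct efc *efc, struct efc_nport *nport)
{
	u8 buf[SLI4_BMBX_SIZE];		/* formatted command lives here */

	if (sli_cmd_init_vpi(efc->sli, buf, nport->indicator,
			     nport->domain->indicator))
		return -EIO;		/* format failure */

	/* asynchronous: example_cb() runs when the mailbox completes */
	return efc->tt.issue_mbox_rqst(efc->base, buf, example_cb, nport);
}

This is also why the free paths (efc_nport_free_resources(), efc_domain_free_resources()) are reachable from both the issue site and the callback: a command can fail to format, fail to issue, or fail in completion.
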
diff --git a/drivers/scsi/elx/libefc/efc_cmds.h b/drivers/scsi/elx/libefc/efc_cmds.h
new file mode 100644
index 000000000..4d353ab04
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_CMDS_H__
+#define __EFC_CMDS_H__
+
+#define EFC_SPARAM_DMA_SZ 112
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn);
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id);
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport);
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf);
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id);
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain);
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms);
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport);
+
+#endif /* __EFC_CMDS_H__ */
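
The nport entry points above are driven by the port state machine in a fixed order, each step completing asynchronously through efc_nport_cb() events before the next is issued. A hedged sketch of the intended call order (the real caller is efc_nport.c; these are not meant to be called back to back):

/* Illustrative order only -- each call completes via efc_nport_cb() */
efc_cmd_nport_alloc(efc, nport, domain, wwpn);	/* -> EFC_EVT_NPORT_ALLOC_OK/FAIL */
efc_cmd_nport_attach(efc, nport, fc_id);	/* -> EFC_EVT_NPORT_ATTACH_OK/FAIL */
efc_cmd_nport_free(efc, nport);			/* -> EFC_EVT_NPORT_FREE_OK/FAIL */

The domain calls mirror this (alloc/attach/free with EFC_HW_DOMAIN_* events), and the node calls (alloc/attach/detach) report EFC_EVT_NODE_* events through efc_remote_node_cb().
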
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
new file mode 100644
index 000000000..52be01333
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * device_sm Node State Machine: Remote Device States
+ */
+
+#include "efc.h"
+#include "efc_device.h"
+#include "efc_fabric.h"
+
+void
+efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
+{
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ struct efc *efc = node->efc;
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;
+
+ /*
+ * Wait for backend session registration
+ * to complete before sending PRLI resp
+ */
+
+ if (node->init) {
+ efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_new_node(efc, node);
+ }
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
+}
+
+static void
+__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ /*
+ * State is entered when a node sends a delete initiator/target call
+ * to the target-server/initiator-client and needs to wait for that
+ * work to complete.
+ */
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ /*
+ * node has either been detached or is in the process
+ * of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ efc_node_transition(node, __efc_d_wait_del_node, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+
+ /* assume no wait needed */
+ node->els_io_enabled = false;
+
+ /* make necessary delete upcall(s) */
+ if (node->init && !node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (initiator) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+
+ } else if (node->targ && !node->init) {
+ efc_log_info(node->efc,
+ "[%s] delete (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+
+ } else if (node->init && node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (I+T) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ efc_node_transition(node, __efc_d_wait_del_ini_tgt,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ /* assume no wait needed */
+ rc = EFC_SCSI_CALL_COMPLETE;
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+
+ /* we've initiated the upcalls as needed, now kick off the node
+ * detach to precipitate the aborting of outstanding exchanges
+ * associated with said node
+ *
+ * Beware: if we've made upcall(s), we've already transitioned
+ * to a new state by the time we execute this.
+ * consider doing this before the upcalls?
+ */
+ if (node->attached) {
+ /* issue hw node free; don't care if succeeds right
+ * away or sometime later, will check node->attached
+ * later in shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0)
+ node_printf(node,
+ "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+
+ /* if neither initiator nor target, proceed to cleanup */
+ if (!node->init && !node->targ) {
+ /*
+ * node has either been detached or is in
+ * the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ }
+ break;
+ }
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* Ignore, this can happen if an ELS is
+ * aborted while in a delay/retry state
+ */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ /* send PLOGI automatically if initiator */
+ efc_node_init_device(node, true);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls)
+{
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+
+ /* Save the OX_ID for sending LS_ACC sometime later */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = ls;
+ node->ls_acc_did = ntoh24(hdr->fh_d_id);
+}
+
+void
+efc_process_prli_payload(struct efc_node *node, void *prli)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = prli;
+ node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
+ node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
+}
+
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* LOGO response received, post shutdown */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node_printf(node,
+ "LOGO sent (evt=%s), shutdown node\n",
+ efc_sm_event_name(evt));
+ /* sm: / post explicit logout */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi)
+{
+ node->send_plogi = send_plogi;
+ if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
+ (node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ node->nodedb_state = __efc_d_init;
+ efc_node_transition(node, __efc_node_paused, NULL);
+ } else {
+ efc_node_transition(node, __efc_d_init, NULL);
+ }
+}
+
+static void
+efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
+{
+ switch (node->nport->topology) {
+ case EFC_NPORT_TOPO_P2P:
+ /* we're not attached and nport is p2p,
+ * need to attach
+ */
+ efc_domain_attach(node->nport->domain, d_id);
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_FABRIC:
+ /* we're not attached and nport is fabric, domain
+ * attach should have already been requested as part
+ * of the fabric state machine, wait for it
+ */
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_UNKNOWN:
+ /* Two possibilities:
+ * 1. received a PLOGI before our FLOGI has completed
+ * (possible since completion comes in on another
+ * CQ), thus we don't know what we're connected to
+ * yet; transition to a state to wait for the
+ * fabric node to tell us;
+ * 2. PLOGI received before link went down and we
+ * haven't performed domain attach yet.
+ * Note: we cannot distinguish between 1. and 2.
+ * so have to assume PLOGI
+ * was received after link back up.
+ */
+ node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
+ d_id);
+ efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
+ break;
+ default:
+ node_printf(node, "received PLOGI, unexpected topology %d\n",
+ node->nport->topology);
+ }
+}
+
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a node is instantiated,
+ * either having been discovered from a name services query,
+ * or having received a PLOGI/FLOGI.
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->send_plogi)
+ break;
+ /* only send if we have initiator capability,
+ * and domain is attached
+ */
+ if (node->nport->enable_ini &&
+ node->nport->domain->attached) {
+ efc_send_plogi(node);
+
+ efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
+ } else {
+ node_printf(node, "not sending plogi nport.ini=%d,",
+ node->nport->enable_ini);
+ node_printf(node, "domain attached=%d\n",
+ node->nport->domain->attached);
+ }
+ break;
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ int rc;
+
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* domain not attached; several possibilities: */
+ if (!node->nport->domain->attached) {
+ efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
+ break;
+ }
+
+ /* domain already attached */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_FDISC_RCVD: {
+ __efc_d_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ case EFC_EVT_FLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ u32 d_id = ntoh24(hdr->fh_d_id);
+
+ /* sm: / save sparams, send FLOGI acc */
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->payload->dma.virt,
+ sizeof(struct fc_els_flogi));
+
+ /* send FC LS_ACC response, override s_id */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+
+ efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);
+
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node, "p2p failed, shutting down node\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and shut node down w/ "explicit logout"
+ * so pending frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* note: problem, we're now expecting an ELS REQ completion
+ * from both the LOGO and PLOGI
+ */
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a
+ * link down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ /* Send LOGO */
+ node_printf(node, "FCP_CMND received, send LOGO\n");
+ if (efc_send_logo(node)) {
+ /*
+ * failed to send LOGO, go ahead and cleanup node
+ * anyways
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node,
+ __efc_d_wait_logo_rsp, NULL);
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ /* received PLOGI with svc parms, go ahead and attach node
+ * when PLOGI that was sent ultimately completes, it'll be a
+ * no-op
+ *
+ * If there is an outstanding PLOGI sent, can we set a flag
+ * to indicate that we don't want to retry it if it times out?
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+ /* sm: domain->attached / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* Our PLOGI was rejected, this is ok in some cases */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* not logged in yet and outstanding PLOGI so don't send LOGO,
+ * just drop
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /*
+ * Since we've received a PRLI, we have a port login and just
+ * need to wait for the PLOGI response to do the node attach;
+ * then we can send the LS_ACC for the PRLI. During this time
+ * we may receive FCP_CMNDs (possible, since we've already sent
+ * a PRLI and our peer may have accepted it). We are not waiting
+ * on any other unsolicited frames to continue the login process,
+ * so it does not hurt to hold frames here.
+ */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
+ enum efc_nport_topology *topology = arg;
+
+ WARN_ON(node->nport->domain->attached);
+
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ node_printf(node, "topology notification, topology=%d\n",
+ *topology);
+
+ /* At the time the PLOGI was received, the topology was unknown,
+ * so we didn't know which node would perform the domain attach:
+ * 1. The node from which the PLOGI was sent (p2p) or
+ * 2. The node to which the FLOGI was sent (fabric).
+ */
+ if (*topology == EFC_NPORT_TOPO_P2P) {
+ /* if this is p2p, need to attach to the domain using
+ * the d_id from the PLOGI received
+ */
+ efc_domain_attach(node->nport->domain,
+ node->ls_acc_did);
+ }
+ /* else, if this is fabric, the domain attach
+ * should be performed by the fabric node (node sending FLOGI);
+ * just wait for attach to complete
+ */
+
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ node_printf(node, "domain attach ok\n");
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PLOGI: {
+ /* sm: send_plogi_acc is set / send PLOGI acc */
+ /* Normal case for T, or I+T */
+ efc_send_plogi_acc(node, node->ls_acc_oxid);
+ efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
+ NULL);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node, node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set */
+ efc_node_transition(node,
+ __efc_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node,
+ __efc_d_wait_attach_evt_shutdown, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* Normal case for I or I+T */
+ if (node->nport->enable_ini &&
+ !(node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ /* sm: if enable_ini / send PRLI */
+ efc_send_prli(node);
+ /* can now expect ELS_REQ_OK/FAIL/RJT */
+ }
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* Normal case for T or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+ /* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_NODE_SESS_REG_OK:
+ if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
+ efc_send_prli_acc(node, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_NODE_SESS_REG_FAIL:
+ efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
+ ELS_EXPL_UNSUPR, 0);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
+ /* Normal case for I or I+T */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process PRLI payload */
+ efc_process_prli_payload(node, cbdata->els_rsp.virt);
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
+ /* I, I+T, assume some link failure, shutdown node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT: {
+ /* PRLI rejected by remote
+ * Normal for I, I+T (connected to an I)
+ * Node doesn't want to be a target, stay here and wait for a
+ * PRLI from the remote node
+ * if it really wants to connect to us as target
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: {
+ /* Normal T, I+T, target-server rejected the process login */
+ /* This would be received only in the case where we sent
+ * LS_RJT for the PRLI, so
+ * do nothing. (note: as T only we could shutdown the node)
+ */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc,
+ * post implicit logout
+ * Save PLOGI parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt),
+ node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* sm: / post explicit logout */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ if (evt != EFC_EVT_FCP_CMD_RCVD)
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node->fcp_enabled = true;
+ if (node->targ) {
+ efc_log_info(efc,
+ "[%s] found (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->nport->enable_ini)
+ efc->tt.scsi_new_node(efc, node);
+ }
+ break;
+
+ case EFC_EVT_EXIT:
+ node->fcp_enabled = false;
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* T, I+T: remote initiator is slow to get started */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+			/* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_PRLO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send PRLO acc */
+ efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+		/* TODO: is an implicit logout needed here? */
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_ADISC_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send ADISC acc */
+ efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: / process ABTS */
+ efc_log_err(efc, "Unexpected event:%s\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ break;
+
+ case EFC_EVT_NODE_MISSING:
+ if (node->nport->enable_rscn)
+ efc_node_transition(node, __efc_d_device_gone, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ /* T, or I+T, PRLI accept completed ok */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* T, or I+T, PRLI accept failed to complete */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node_printf(node, "Failed to send PRLI LS_ACC\n");
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
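+
+/*
+ * The anonymous { prli; spp } struct used in the PRLI cases above
+ * mirrors the wire layout of a single-page PRLI: the fc_els_prli
+ * header followed immediately by one FCP service parameter page
+ * (fc_els_spp), so a single sizeof(*pp) covers the whole payload.
+ */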
+
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ int rc_2 = EFC_SCSI_CALL_COMPLETE;
+ static const char * const labels[] = {
+ "none", "initiator", "target", "initiator+target"
+ };
+
+ efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n",
+ node->display_name,
+ labels[(node->targ << 1) | (node->init)],
+ node->wwpn, node->wwnn);
+
+ switch (efc_node_get_enable(node)) {
+ case EFC_NODE_ENABLE_T_TO_T:
+ case EFC_NODE_ENABLE_I_TO_T:
+ case EFC_NODE_ENABLE_IT_TO_T:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_I:
+ case EFC_NODE_ENABLE_I_TO_I:
+ case EFC_NODE_ENABLE_IT_TO_I:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_I_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_IT_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ rc_2 = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ default:
+ rc = EFC_SCSI_CALL_COMPLETE;
+ break;
+ }
+
+ if (rc == EFC_SCSI_CALL_COMPLETE &&
+ rc_2 == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+
+ break;
+ }
+ case EFC_EVT_NODE_REFOUND:
+ /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
+
+ /* reauthenticate with PLOGI/PRLI */
+ /* efc_node_transition(node, __efc_d_discovered, NULL); */
+
+ /* reauthenticate with ADISC */
+ /* sm: / send ADISC */
+ efc_send_adisc(node);
+ efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters, and send
+ * ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+		/* Most likely a stale frame (received prior to link down);
+		 * if we attempt to send a LOGO, it will probably time out
+		 * and eat up 20s, so drop the FCP_CMND.
+		 */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
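+
+/*
+ * The EFC_NODE_ENABLE_<a>_TO_<b> values decoded above read as
+ * "<local initiator/target enables>_TO_<remote node roles>": e.g.
+ * EFC_NODE_ENABLE_I_TO_T means we are an initiator and the remote
+ * node is a target, so a missing node is reported to the SCSI layer
+ * as EFC_SCSI_TARGET_MISSING (interpretation inferred from the
+ * switch above; see efc_node_get_enable()).
+ */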
+
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+		/* Received an LS_RJT: post the shutdown (explicit logo)
+		 * event, which will unregister the node, and start over
+		 * with PLOGI.
+		 */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / post explicit logout */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* In this case, we have the equivalent of an LS_RJT for
+ * the ADISC, so we need to abort the ADISC, and re-login
+ * with PLOGI
+ */
+ /* sm: / request abort, send LOGO acc */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_device.h b/drivers/scsi/elx/libefc/efc_device.h
new file mode 100644
index 000000000..3cf1d8c66
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Node state machine functions for remote device nodes
+ */
+
+#ifndef __EFCT_DEVICE_H__
+#define __EFCT_DEVICE_H__
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi);
+void
+efc_process_prli_payload(struct efc_node *node,
+ void *prli);
+void
+efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls);
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+#endif /* __EFCT_DEVICE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_domain.c b/drivers/scsi/elx/libefc/efc_domain.c
new file mode 100644
index 000000000..ca9d7ff2c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * domain_sm Domain State Machine: States
+ */
+
+#include "efc.h"
+
+int
+efc_domain_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_domain *domain = NULL;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (event != EFC_HW_DOMAIN_FOUND)
+ domain = data;
+
+ /* Accept domain callback events from the user driver */
+ spin_lock_irqsave(&efc->lock, flags);
+ switch (event) {
+ case EFC_HW_DOMAIN_FOUND: {
+ u64 fcf_wwn = 0;
+ struct efc_domain_record *drec = data;
+
+ /* extract the fcf_wwn */
+ fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));
+
+ efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn);
+
+ /* lookup domain, or allocate a new one */
+ domain = efc->domain;
+ if (!domain) {
+ domain = efc_domain_alloc(efc, fcf_wwn);
+ if (!domain) {
+ efc_log_err(efc, "efc_domain_alloc() failed\n");
+ rc = -1;
+ break;
+ }
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ }
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec);
+ break;
+ }
+
+ case EFC_HW_DOMAIN_LOST:
+ domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n");
+ efc->hold_frames = true;
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL,
+ NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n");
+ efc_domain_post_event(domain,
+ EFC_EVT_DOMAIN_ATTACH_FAIL, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL);
+ break;
+
+ default:
+ efc_log_warn(efc, "unsupported event %#x\n", event);
+ }
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ if (efc->domain && domain->req_accept_frames) {
+ domain->req_accept_frames = false;
+ efc->hold_frames = false;
+ }
+
+ return rc;
+}
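+
+/*
+ * Usage sketch (illustrative; the HW driver supplies the real call
+ * sites): the hardware layer funnels its asynchronous domain events
+ * into this callback, which translates them into state machine
+ * events under efc->lock:
+ *
+ *	efc_domain_cb(efc, EFC_HW_DOMAIN_FOUND, &drec);
+ *	...
+ *	efc_domain_cb(efc, EFC_HW_DOMAIN_ALLOC_OK, domain);
+ */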
+
+static void
+_efc_domain_free(struct kref *arg)
+{
+ struct efc_domain *domain = container_of(arg, struct efc_domain, ref);
+ struct efc *efc = domain->efc;
+
+ if (efc->domain_free_cb)
+ (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg);
+
+ kfree(domain);
+}
+
+void
+efc_domain_free(struct efc_domain *domain)
+{
+ struct efc *efc;
+
+ efc = domain->efc;
+
+	/* Stop holding frames; the domain pointer is being cleared
+	 * from the xport lookup
+	 */
+	efc->hold_frames = false;
+
+ efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn);
+
+ xa_destroy(&domain->lookup);
+ efc->domain = NULL;
+ kref_put(&domain->ref, domain->release);
+}
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn)
+{
+ struct efc_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_ATOMIC);
+ if (!domain)
+ return NULL;
+
+ domain->efc = efc;
+ domain->drvsm.app = domain;
+
+ /* initialize refcount */
+ kref_init(&domain->ref);
+ domain->release = _efc_domain_free;
+
+ xa_init(&domain->lookup);
+
+ INIT_LIST_HEAD(&domain->nport_list);
+ efc->domain = domain;
+ domain->fcf_wwn = fcf_wwn;
+ efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn);
+
+ return domain;
+}
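+
+/*
+ * Lifetime sketch: the domain is refcounted; the final
+ * kref_put(&domain->ref, domain->release) runs _efc_domain_free(),
+ * which invokes the registered domain_free_cb and frees the memory:
+ *
+ *	kref_get(&domain->ref);                   // additional user
+ *	...
+ *	kref_put(&domain->ref, domain->release);  // may free
+ */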
+
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg)
+{
+ /* Register a callback to be called when the domain is freed */
+ efc->domain_free_cb = callback;
+ efc->domain_free_cb_arg = arg;
+ if (!efc->domain && callback)
+ (*callback)(efc, arg);
+}
+
+static void
+__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /*
+ * this can arise if an FLOGI fails on the NPORT,
+ * and the NPORT is shutdown
+ */
+ break;
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+static void
+__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ break;
+ case EFC_EVT_DOMAIN_FOUND:
+ /* save drec, mark domain_found_pending */
+ memcpy(&domain->pending_drec, arg,
+ sizeof(domain->pending_drec));
+ domain->domain_found_pending = true;
+ break;
+ case EFC_EVT_DOMAIN_LOST:
+ /* unmark domain_found_pending */
+ domain->domain_found_pending = false;
+ break;
+
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+#define std_domain_state_decl(...)\
+ struct efc_domain *domain = NULL;\
+ struct efc *efc = NULL;\
+ \
+ WARN_ON(!ctx || !ctx->app);\
+ domain = ctx->app;\
+ WARN_ON(!domain->efc);\
+ efc = domain->efc
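+
+/*
+ * std_domain_state_decl() opens every domain state handler below;
+ * the macro's last statement deliberately omits the semicolon so the
+ * invocation itself supplies it:
+ *
+ *	std_domain_state_decl();
+ */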
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ domain->attached = false;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND: {
+ u32 i;
+ struct efc_domain_record *drec = arg;
+ struct efc_nport *nport;
+
+ u64 my_wwnn = efc->req_wwnn;
+ u64 my_wwpn = efc->req_wwpn;
+ __be64 bewwpn;
+
+ if (my_wwpn == 0 || my_wwnn == 0) {
+ efc_log_debug(efc, "using default hardware WWN config\n");
+ my_wwpn = efc->def_wwpn;
+ my_wwnn = efc->def_wwnn;
+ }
+
+ efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n",
+ my_wwpn, my_wwnn);
+
+ /* Allocate a nport and transition to __efc_nport_allocated */
+ nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX,
+ efc->enable_ini, efc->enable_tgt);
+
+ if (!nport) {
+ efc_log_err(efc, "efc_nport_alloc() failed\n");
+ break;
+ }
+ efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL);
+
+ bewwpn = cpu_to_be64(nport->wwpn);
+
+		/* allocate struct efc_nport object for the local port
+		 * Note: drec->fc_id is the ALPA from read_topology, valid
+		 * only in loop topology
+		 */
+ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ efc_nport_free(nport);
+ break;
+ }
+
+ domain->is_loop = drec->is_loop;
+
+		/*
+		 * If the loop position map includes ALPA == 0, then we are
+		 * in a public loop (NL_PORT). Note that the first element
+		 * of loopmap[] contains the count of elements, and if
+		 * ALPA == 0 is present, it occupies the first location
+		 * after the count.
+		 */
+ domain->is_nlport = drec->map.loop[1] == 0x00;
+
+ if (!domain->is_loop) {
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+
+ efc_log_debug(efc, "%s fc_id=%#x speed=%d\n",
+ drec->is_loop ?
+ (domain->is_nlport ?
+ "public-loop" : "loop") : "other",
+ drec->fc_id, drec->speed);
+
+ nport->fc_id = drec->fc_id;
+ nport->topology = EFC_NPORT_TOPO_FC_AL;
+ snprintf(nport->display_name, sizeof(nport->display_name),
+ "s%06x", drec->fc_id);
+
+ if (efc->enable_ini) {
+ u32 count = drec->map.loop[0];
+
+ efc_log_debug(efc, "%d position map entries\n",
+ count);
+ for (i = 1; i <= count; i++) {
+ if (drec->map.loop[i] != drec->fc_id) {
+ struct efc_node *node;
+
+ efc_log_debug(efc, "%#x -> %#x\n",
+ drec->fc_id,
+ drec->map.loop[i]);
+ node = efc_node_alloc(nport,
+ drec->map.loop[i],
+ false, true);
+ if (!node) {
+ efc_log_err(efc,
+ "efc_node_alloc() failed\n");
+ break;
+ }
+ efc_node_transition(node,
+ __efc_d_wait_loop,
+ NULL);
+ }
+ }
+ }
+
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+ struct efc_nport *nport;
+
+ nport = domain->nport;
+ if (WARN_ON(!nport))
+ return;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ /* Save the domain service parameters */
+ memcpy(domain->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+ memcpy(nport->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+
+ /*
+ * Update the nport's service parameters,
+ * user might have specified non-default names
+ */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * Take the loop topology path,
+ * unless we are an NL_PORT (public loop)
+ */
+ if (domain->is_loop && !domain->is_nlport) {
+			/*
+			 * For loop, we already have our FC ID and don't
+			 * need fabric login. Transition to the allocated
+			 * state and post an event to attach to the domain.
+			 * Note that this breaks the normal action/transition
+			 * pattern here to avoid a race with the domain
+			 * attach callback.
+			 */
+ /* sm: is_loop / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ __efc_domain_attach_internal(domain, nport->fc_id);
+ break;
+ }
+ {
+ struct efc_node *node;
+
+ /* alloc fabric node, send FLOGI */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (node) {
+ efc_log_err(efc,
+ "Fabric Controller node already exists\n");
+ break;
+ }
+ node = efc_node_alloc(nport, FC_FID_FLOGI,
+ false, false);
+ if (!node) {
+ efc_log_err(efc,
+ "Error: efc_node_alloc() failed\n");
+ } else {
+ efc_node_transition(node,
+ __efc_fabric_init, NULL);
+ }
+ /* Accept frames */
+ domain->req_accept_frames = true;
+ }
+ /* sm: / start fabric logins */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;",
+ efc_sm_event_name(evt));
+ efc_log_err(efc, "shutting down domain\n");
+ domain->req_domain_free = true;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ efc_log_debug(efc,
+ "%s received while waiting for hw_domain_alloc()\n",
+ efc_sm_event_name(evt));
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ int rc = 0;
+ u32 fc_id;
+
+ if (WARN_ON(!arg))
+ return;
+
+ fc_id = *((u32 *)arg);
+ efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n",
+ fc_id);
+ /* Update nport lookup */
+ rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
+ GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return;
+ }
+
+ /* Update display name for the nport */
+ efc_node_fcid_display(fc_id, domain->nport->display_name,
+ sizeof(domain->nport->display_name));
+
+ /* Issue domain attach call */
+ rc = efc_cmd_domain_attach(efc, domain, fc_id);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_attach failed: %d\n",
+ rc);
+ return;
+ }
+ /* sm: / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_wait_attach, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST: {
+ efc_log_debug(efc,
+ "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
+ efc_sm_event_name(evt));
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each
+ * nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_node *node = NULL;
+ struct efc_nport *nport, *next_nport;
+ unsigned long index;
+
+ /*
+ * Set domain notify pending state to avoid
+ * duplicate domain event post
+ */
+ domain->domain_notify_pend = true;
+
+ /* Mark as attached */
+ domain->attached = true;
+
+ /* Transition to ready */
+ /* sm: / forward event to all nports and nodes */
+ efc_sm_transition(ctx, __efc_domain_ready, NULL);
+
+ /* We have an FCFI, so we can accept frames */
+ domain->req_accept_frames = true;
+
+ /*
+ * Notify all nodes that the domain attach request
+ * has completed
+ * Note: nport will have already received notification
+ * of nport attached as a result of the HW's port attach.
+ */
+ list_for_each_entry_safe(nport, next_nport,
+ &domain->nport_list, list_entry) {
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ }
+ domain->domain_notify_pend = false;
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_debug(efc,
+ "%s received while waiting for hw attach\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ /*
+ * Domain lost while waiting for an attach to complete,
+ * go to a state that waits for the domain attach to
+ * complete, then handle domain lost
+ */
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH:
+ /*
+ * In P2P we can get an attach request from
+ * the other FLOGI path, so drop this one
+ */
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ /* start any pending vports */
+ if (efc_vport_start(domain)) {
+ efc_log_debug(domain->efc,
+ "efc_vport_start didn't start vports\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_LOST: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to wait state
+ * and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ /* can happen during p2p */
+ u32 fc_id;
+
+ fc_id = *((u32 *)arg);
+
+ /* Assume that the domain is attached */
+ WARN_ON(!domain->attached);
+
+ /*
+ * Verify that the requested FC_ID
+ * is the same as the one we're working with
+ */
+ WARN_ON(domain->nport->fc_id != fc_id);
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /* Wait for nodes to free prior to the domain shutdown */
+ switch (evt) {
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ int rc;
+
+ /* sm: / efc_hw_domain_free */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL);
+
+ /* Request efc_hw_domain_free and wait for completion */
+ rc = efc_cmd_domain_free(efc, domain);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_free() failed: %d\n",
+ rc);
+ }
+ break;
+ }
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_FREE_OK:
+ /* sm: / domain_free */
+ if (domain->domain_found_pending) {
+			/*
+			 * Save fcf_wwn and drec from this domain, free the
+			 * current domain, and allocate a new one with the
+			 * same fcf_wwn. (Could a SLI-4 "re-register VPI"
+			 * operation be used here instead?)
+			 */
+ u64 fcf_wwn = domain->fcf_wwn;
+ struct efc_domain_record drec = domain->pending_drec;
+
+ efc_log_debug(efc, "Reallocating domain\n");
+ domain->req_domain_free = true;
+ domain = efc_domain_alloc(efc, fcf_wwn);
+
+ if (!domain) {
+ efc_log_err(efc,
+ "efc_domain_alloc() failed\n");
+ return;
+ }
+			/*
+			 * Got a new domain; at this point there are at
+			 * least two domains. Once the req_domain_free flag
+			 * is processed, the associated domain will be
+			 * removed.
+			 */
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ efc_sm_post_event(&domain->drvsm,
+ EFC_EVT_DOMAIN_FOUND, &drec);
+ } else {
+ domain->req_domain_free = true;
+ }
+ break;
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /*
+ * Wait for the domain alloc/attach completion
+ * after receiving a domain lost.
+ */
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK:
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free() failed\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_err(efc, "[domain] %-20s: failed\n",
+ efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id)
+{
+ memcpy(domain->dma.virt,
+ ((uint8_t *)domain->flogi_service_params) + 4,
+ sizeof(struct fc_els_flogi) - 4);
+ (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH,
+ &s_id);
+}
+
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id)
+{
+ __efc_domain_attach_internal(domain, s_id);
+}
+
+int
+efc_domain_post_event(struct efc_domain *domain,
+ enum efc_sm_event event, void *arg)
+{
+ int rc;
+ bool req_domain_free;
+
+ rc = efc_sm_post_event(&domain->drvsm, event, arg);
+
+ req_domain_free = domain->req_domain_free;
+ domain->req_domain_free = false;
+
+ if (req_domain_free)
+ efc_domain_free(domain);
+
+ return rc;
+}
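+
+/*
+ * req_domain_free implements deferred destruction: state handlers
+ * must not free the domain while the state machine is still running
+ * on the stack, so they only set the flag, and the actual
+ * efc_domain_free() happens here after efc_sm_post_event() returns.
+ */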
+
+static void
+efct_domain_process_pending(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (efc->hold_frames)
+ break;
+
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ if (!list_empty(&efc->pend_frames)) {
+ seq = list_first_entry(&efc->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+
+ if (!seq) {
+ processed = efc->pend_frames_processed;
+ efc->pend_frames_processed = 0;
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+ break;
+ }
+ efc->pend_frames_processed++;
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+
+ seq = NULL;
+ }
+
+ if (processed != 0)
+ efc_log_debug(efc, "%u domain frames held and processed\n",
+ processed);
+}
+
+void
+efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = efc->domain;
+
+ /*
+ * If we are holding frames or the domain is not yet registered or
+ * there's already frames on the pending list,
+ * then add the new frame to pending list
+ */
+ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &efc->pend_frames);
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ if (domain) {
+ /* immediately process pending frames */
+ efct_domain_process_pending(domain);
+ }
+ } else {
+		/*
+		 * We are not holding frames and the pending list is empty:
+		 * just process the frame. A non-zero return means the frame
+		 * was not handled, so clean up.
+		 */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+ }
+}
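+
+/*
+ * Ordering note: a new frame is appended to pend_frames under
+ * pend_frames_lock before the list is drained, so frames are
+ * dispatched in arrival order; a frame is handed directly to
+ * efc_domain_dispatch_frame() only when nothing is pending.
+ */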
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = (struct efc_domain *)arg;
+ struct efc *efc = domain->efc;
+ struct fc_frame_header *hdr;
+ struct efc_node *node = NULL;
+ struct efc_nport *nport = NULL;
+ unsigned long flags = 0;
+ u32 s_id, d_id, rc = EFC_HW_SEQ_FREE;
+
+ if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) {
+ efc_log_err(efc, "Sequence header or payload is null\n");
+ return rc;
+ }
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ spin_lock_irqsave(&efc->lock, flags);
+
+ nport = efc_nport_find(domain, d_id);
+ if (!nport) {
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ /* Drop frame */
+ efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
+ d_id);
+ goto out;
+ }
+
+ /* p2p will use this case */
+ nport = domain->nport;
+ if (!nport || !kref_get_unless_zero(&nport->ref)) {
+ efc_log_err(efc, "Physical nport is NULL\n");
+ goto out;
+ }
+ }
+
+ /* Lookup the node given the remote s_id */
+ node = efc_node_find(nport, s_id);
+
+ /* If not found, then create a new node */
+ if (!node) {
+ /*
+ * If this is solicited data or control based on R_CTL and
+ * there is no node context, then we can drop the frame
+ */
+ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
+ (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
+ efc_log_debug(efc, "sol data/ctrl frame without node\n");
+ goto out_release;
+ }
+
+ node = efc_node_alloc(nport, s_id, false, false);
+ if (!node) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ goto out_release;
+ }
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+
+ if (node->hold_frames || !list_empty(&node->pend_frames)) {
+ /* add frame to node's pending list */
+ spin_lock(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &node->pend_frames);
+ spin_unlock(&node->pend_frames_lock);
+ rc = EFC_HW_SEQ_HOLD;
+ goto out_release;
+ }
+
+ /* now dispatch frame to the node frame handler */
+ efc_node_dispatch_frame(node, seq);
+
+out_release:
+ kref_put(&nport->ref, nport->release);
+out:
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return rc;
+}
+
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ u32 port_id;
+ struct efc_node *node = (struct efc_node *)arg;
+ struct efc *efc = node->efc;
+
+ port_id = ntoh24(hdr->fh_s_id);
+
+ if (WARN_ON(port_id != node->rnode.fc_id))
+ return;
+
+ if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) ||
+ !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) {
+ node_printf(node,
+ "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ cpu_to_be32(((u32 *)hdr)[0]),
+ cpu_to_be32(((u32 *)hdr)[1]),
+ cpu_to_be32(((u32 *)hdr)[2]),
+ cpu_to_be32(((u32 *)hdr)[3]),
+ cpu_to_be32(((u32 *)hdr)[4]),
+ cpu_to_be32(((u32 *)hdr)[5]));
+ return;
+ }
+
+ switch (hdr->fh_r_ctl) {
+ case FC_RCTL_ELS_REQ:
+ case FC_RCTL_ELS_REP:
+ efc_node_recv_els_frame(node, seq);
+ break;
+
+ case FC_RCTL_BA_ABTS:
+ case FC_RCTL_BA_ACC:
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_NOP:
+ efc_log_err(efc, "Received ABTS:\n");
+ break;
+
+ case FC_RCTL_DD_UNSOL_CMD:
+ case FC_RCTL_DD_UNSOL_CTL:
+ switch (hdr->fh_type) {
+ case FC_TYPE_FCP:
+ if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) {
+ if (!node->fcp_enabled) {
+ efc_node_recv_fcp_cmd(node, seq);
+ break;
+ }
+ efc_log_err(efc, "Recvd FCP CMD. Drop IO\n");
+ } else if ((hdr->fh_r_ctl & 0xf) ==
+ FC_RCTL_DD_SOL_DATA) {
+ node_printf(node,
+ "solicited data recvd. Drop IO\n");
+ }
+ break;
+
+ case FC_TYPE_CT:
+ efc_node_recv_ct_frame(node, seq);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl);
+ }
+}
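+
+/*
+ * Routing summary for the switch above: ELS frames go to the node's
+ * ELS handler, BLS frames (ABTS et al.) are only logged, and an
+ * unsolicited FCP command is accepted via efc_node_recv_fcp_cmd()
+ * only while fcp_enabled is still false (before the PRLI handshake
+ * completes, when the command must be held), presumably because once
+ * the session is up FCP traffic belongs to the HW driver's fast
+ * path, so seeing one here is an error.
+ */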
diff --git a/drivers/scsi/elx/libefc/efc_domain.h b/drivers/scsi/elx/libefc/efc_domain.h
new file mode 100644
index 000000000..5468ea7ab
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declarations for the driver's exported domain handler interface
+ */
+
+#ifndef __EFCT_DOMAIN_H__
+#define __EFCT_DOMAIN_H__
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn);
+void
+efc_domain_free(struct efc_domain *domain);
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id);
+int
+efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event,
+ void *arg);
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id);
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+
+#endif /* __EFCT_DOMAIN_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
new file mode 100644
index 000000000..84bc81d7c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -0,0 +1,1094 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Functions to build and send ELS/CT/BLS commands and responses.
+ */
+
+#include "efc.h"
+#include "efc_els.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_LOG_ENABLE_ELS_TRACE(efc) \
+ (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)
+
+#define node_els_trace() \
+ do { \
+ if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \
+ efc_log_info(efc, "[%s] %-20s\n", \
+ node->display_name, __func__); \
+ } while (0)
+
+#define els_io_printf(els, fmt, ...) \
+ efc_log_err((struct efc *)els->node->efc,\
+ "[%s] %-8s " fmt, \
+ els->node->display_name,\
+ els->display_name, ##__VA_ARGS__)
+
+#define EFC_ELS_RSP_LEN 1024
+#define EFC_ELS_GID_PT_RSP_LEN 8096
+
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen)
+{
+ return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
+}
+
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
+{
+ struct efc *efc;
+ struct efc_els_io_req *els;
+ unsigned long flags = 0;
+
+ efc = node->efc;
+
+ if (!node->els_io_enabled) {
+ efc_log_err(efc, "els io alloc disabled\n");
+ return NULL;
+ }
+
+ els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
+ if (!els) {
+ atomic_add_return(1, &efc->els_io_alloc_failed_count);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&els->ref);
+ els->release = _efc_els_io_free;
+
+ /* populate generic io fields */
+ els->node = node;
+
+ /* now allocate DMA for request and response */
+ els->io.req.size = reqlen;
+ els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
+ &els->io.req.phys, GFP_KERNEL);
+ if (!els->io.req.virt) {
+ mempool_free(els, efc->els_io_pool);
+ return NULL;
+ }
+
+ els->io.rsp.size = rsplen;
+ els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
+ &els->io.rsp.phys, GFP_KERNEL);
+ if (!els->io.rsp.virt) {
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+ mempool_free(els, efc->els_io_pool);
+ els = NULL;
+ }
+
+ if (els) {
+ /* initialize fields */
+ els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;
+
+ /* add els structure to ELS IO list */
+ INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ }
+
+ return els;
+}
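+
+/*
+ * Allocation sketch (illustrative): callers size the request buffer
+ * for the ELS payload and take the default response buffer, e.g.
+ *
+ *	els = efc_els_io_alloc(node, sizeof(struct fc_els_logo));
+ *	if (!els)
+ *		return -EIO;
+ *	els->display_name = "logo";
+ *
+ * If either dma_alloc_coherent() fails, the earlier allocation is
+ * unwound before returning NULL, so no partial IO object escapes.
+ */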
+
+void
+efc_els_io_free(struct efc_els_io_req *els)
+{
+ kref_put(&els->ref, els->release);
+}
+
+void
+_efc_els_io_free(struct kref *arg)
+{
+ struct efc_els_io_req *els =
+ container_of(arg, struct efc_els_io_req, ref);
+ struct efc *efc;
+ struct efc_node *node;
+	bool send_empty_event = false;
+ unsigned long flags = 0;
+
+ node = els->node;
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ list_del(&els->list_entry);
+	/* Send the list-empty event if the IO allocator is disabled and
+	 * the list is empty. If node->els_io_enabled were not checked,
+	 * the event would be posted continually.
+	 */
+ send_empty_event = (!node->els_io_enabled &&
+ list_empty(&node->els_ios_list));
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+
+ /* free ELS request and response buffers */
+ dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
+ els->io.rsp.virt, els->io.rsp.phys);
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+
+ mempool_free(els, efc->els_io_pool);
+
+ if (send_empty_event)
+ efc_scsi_io_list_empty(node->efc, node);
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els);
+
+static void
+efc_els_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_els_io_req *els = from_timer(els, t, delay_timer);
+
+ /* Retry delay timer expired, retry the ELS request */
+ efc_els_retry(els);
+}
+
+static int
+efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 reason_code;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ if (status)
+ els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
+
+ /* set the response len element of els->rsp */
+ els->io.rsp.len = length;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+	/* set the response length in the callback data */
+ cbdata.rsp_len = length;
+
+ /* FW returns the number of bytes received on the link in
+ * the WCQE, not the amount placed in the buffer; use this info to
+ * check if there was an overrun.
+ */
+ if (length > els->io.rsp.size) {
+ efc_log_warn(efc,
+ "ELS response returned len=%d > buflen=%zu\n",
+ length, els->io.rsp.size);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ return 0;
+ }
+
+ /* Post event to ELS IO object */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ reason_code = (ext_status >> 16) & 0xff;
+
+ /* delay and retry if reason code is Logical Busy */
+ switch (reason_code) {
+ case ELS_RJT_BUSY:
+ els->node->els_req_cnt--;
+ els_io_printf(els,
+ "LS_RJT Logical Busy, delay and retry\n");
+ timer_setup(&els->delay_timer,
+ efc_els_delay_timer_cb, 0);
+ mod_timer(&els->delay_timer,
+ jiffies + msecs_to_jiffies(5000));
+ break;
+ default:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
+ &cbdata);
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
+ efc_els_retry(els);
+ break;
+ default:
+ efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
+ ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
+ &cbdata);
+ break;
+ }
+ break;
+ default: /* Other error */
+ efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status)
+{
+ struct efc_els_io_req *els =
+ container_of(io, struct efc_els_io_req, io);
+
+ WARN_ON_ONCE(!els->cb);
+
+	((efc_hw_srrs_cb_t)els->cb)(els, len, status, ext_status);
+}
+
+static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
+ enum efc_disc_io_type io_type)
+{
+ int rc = 0;
+ struct efc *efc = node->efc;
+ struct efc_node_cb cbdata;
+
+ /* update ELS request counter */
+ els->node->els_req_cnt++;
+
+ /* Prepare the IO request details */
+ els->io.io_type = io_type;
+ els->io.xmit_len = els->io.req.size;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ els->io.s_id = node->nport->fc_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->rnode.attached)
+ els->io.rpi_registered = true;
+
+ els->cb = efc_els_req_cb;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_log_err(efc, "efc_els_send failed: %d\n", rc);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+
+ return rc;
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els)
+{
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 rc;
+
+ efc = els->node->efc;
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+
+ if (els->els_retries_remaining) {
+ els->els_retries_remaining--;
+ rc = efc->tt.send_els(efc, &els->io);
+ } else {
+ rc = -EIO;
+ }
+
+ if (rc) {
+ efc_log_err(efc, "ELS retries exhausted\n");
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ }
+}
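+
+/*
+ * Retry flow: an ELS_RJT_BUSY response arms the 5 s delay_timer,
+ * whose callback re-enters efc_els_retry(); each pass consumes one
+ * of the EFC_FC_ELS_DEFAULT_RETRIES attempts, and exhaustion is
+ * reported to the node state machine as EFC_EVT_SRRS_ELS_REQ_FAIL.
+ */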
+
+static int
+efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* Post node event */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
+ break;
+
+ default: /* Other error */
+ efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
+ node->display_name, els->display_name,
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
+{
+ int rc = 0;
+ struct efc_node_cb cbdata;
+ struct efc_node *node = els->node;
+ struct efc *efc = node->efc;
+
+ /* increment ELS completion counter */
+ node->els_cmpl_cnt++;
+
+ els->io.io_type = EFC_DISC_IO_ELS_RESP;
+ els->cb = efc_els_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.xmit_len = rsplen;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ if (node->nport->fc_id != U32_MAX)
+ els->io.s_id = node->nport->fc_id;
+ else
+ els->io.s_id = els->io.iparam.els.s_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->attached)
+ els->io.rpi_registered = true;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+
+ return rc;
+}
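+
+/*
+ * Note the s_id selection above: before the nport has been assigned
+ * an FC_ID (fc_id == U32_MAX, the p2p FLOGI case), the response is
+ * sent from the s_id the caller stashed in io.iparam.els.s_id; see
+ * efc_send_flogi_p2p_acc() below.
+ */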
+
+int
+efc_send_plogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_flogi *plogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+ els->display_name = "plogi";
+
+ /* Build PLOGI request */
+ plogi = els->io.req.virt;
+
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+
+ plogi->fl_cmd = ELS_PLOGI;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_flogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *flogi;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi";
+
+ /* Build FLOGI request */
+ flogi = els->io.req.virt;
+
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_FLOGI;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_fdisc(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *fdisc;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*fdisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "fdisc";
+
+ /* Build FDISC request */
+ fdisc = els->io.req.virt;
+
+ memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
+ fdisc->fl_cmd = ELS_FDISC;
+ memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_prli(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli";
+
+ /* Build PRLI request */
+ pp = els->io.req.virt;
+
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = 16;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_logo(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_logo *logo;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo";
+
+ /* Build LOGO request */
+
+ logo = els->io.req.virt;
+
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
+ logo->fl_n_port_wwn = sparams->fl_wwpn;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_adisc(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+ struct efc_nport *nport = node->nport;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc";
+
+ /* Build ADISC request */
+
+ adisc = els->io.req.virt;
+
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ hton24(adisc->adisc_hard_addr, nport->fc_id);
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_scr(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_scr *req;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*req));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "scr";
+
+ req = els->io.req.virt;
+
+ memset(req, 0, sizeof(*req));
+ req->scr_cmd = ELS_SCR;
+ req->scr_reg_func = ELS_SCRF_FULL;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_rjt *rjt;
+
+ els = efc_els_io_alloc(node, sizeof(*rjt));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ node_els_trace();
+
+ els->display_name = "ls_rjt";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ rjt = els->io.req.virt;
+ memset(rjt, 0, sizeof(*rjt));
+
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason_code;
+ rjt->er_explan = reason_code_expl;
+
+ return efc_els_send_rsp(els, sizeof(*rjt));
+}
+
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *plogi;
+ struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "plogi_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ plogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+ plogi->fl_cmd = ELS_LS_ACC;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+	/* Mirror the broadcast support bit if the requester set it */
+ if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
+ plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);
+
+ return efc_els_send_rsp(els, sizeof(*plogi));
+}
+
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *flogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi_p2p_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+ els->io.iparam.els.s_id = s_id;
+
+ flogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_LS_ACC;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));
+
+ return efc_els_send_rsp(els, sizeof(*flogi));
+}
+
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = 0x10;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;
+
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prlo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));
+
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_RESP_ACK;
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_acc *acc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*acc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "ls_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ acc = els->io.req.virt;
+ memset(acc, 0, sizeof(*acc));
+
+ acc->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*acc));
+}
+
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct fc_els_ls_acc *logo;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ logo = els->io.req.virt;
+ memset(logo, 0, sizeof(*logo));
+
+ logo->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*logo));
+}
+
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc_acc";
+
+ /* Go ahead and send the ELS_ACC */
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+ adisc = els->io.req.virt;
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_rsp(els, sizeof(*adisc));
+}
+
+static inline void
+fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
+{
+ hdr->ct_rev = FC_CT_REV;
+ hdr->ct_fs_type = FC_FST_DIR;
+ hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ hdr->ct_options = 0;
+ hdr->ct_cmd = cpu_to_be16(cmd);
+	/* the CT maximum/residual size is expressed in 32-bit words */
+ hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
+ hdr->ct_reason = 0;
+ hdr->ct_explan = 0;
+ hdr->ct_vendor = 0;
+}
+
+int
+efc_ns_send_rftid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rft_id rftid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rftid";
+
+ ct = els->io.req.virt;
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
+ sizeof(struct fc_ns_rft_id));
+
+ hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
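+	/*
+	 * RFT_ID registers our supported FC-4 types with the name server
+	 * as a bitmap of FC_NS_BPW-bit words; set only the FCP type bit.
+	 */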
+ ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
+ cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_rffid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rff_id rffid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rffid";
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
+ sizeof(struct fc_ns_rff_id));
+
+ hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ if (node->nport->enable_ini)
+ ct->rffid.fr_feat |= FCP_FEAT_INIT;
+ if (node->nport->enable_tgt)
+ ct->rffid.fr_feat |= FCP_FEAT_TARG;
+ ct->rffid.fr_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_gidpt(struct efc_node *node)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_gid_pt gidpt;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "gidpt";
+
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
+ sizeof(struct fc_ns_gid_pt));
+
+ ct->gidpt.fn_pt_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
+{
+	/* We don't want any further events (e.g. abort requests from the
+	 * node state machine), so mark the request free to disable
+	 * further state machine processing.
+	 */
+ els->els_req_free = true;
+ efc_node_post_els_resp(els->node, evt, arg);
+
+ efc_els_io_free(els);
+}
+
+static int
+efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els = arg;
+
+ efc_els_io_free(els);
+
+ return 0;
+}
+
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
+ u32 reason_code, u32 reason_code_explanation)
+{
+ struct efc_els_io_req *els = NULL;
+ struct fc_ct_hdr *rsp = NULL;
+
+ els = efc_els_io_alloc(node, 256);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ rsp = els->io.rsp.virt;
+
+ *rsp = *ct_hdr;
+
+ fcct_build_req_header(rsp, cmd_rsp_code, 0);
+ rsp->ct_reason = reason_code;
+ rsp->ct_explan = reason_code_explanation;
+
+ els->display_name = "ct_rsp";
+ els->cb = efc_ct_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.io_type = EFC_DISC_IO_CT_RESP;
+ els->io.xmit_len = sizeof(*rsp);
+
+ els->io.rpi = node->rnode.indicator;
+ els->io.d_id = node->rnode.fc_id;
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+
+ els->io.iparam.ct.ox_id = ox_id;
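+	/* R_CTL 3 is FC_RCTL_DD_SOL_CTL (solicited control), the routing
+	 * control value for a CT reply
+	 */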
+ els->io.iparam.ct.r_ctl = 3;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = 5;
+
+ if (efc->tt.send_els(efc, &els->io)) {
+ efc_els_io_free(els);
+ return -EIO;
+ }
+ return 0;
+}
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
+{
+ struct sli_bls_params bls;
+ struct fc_ba_acc *acc;
+ struct efc *efc = node->efc;
+
+ memset(&bls, 0, sizeof(bls));
+ bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls.s_id = ntoh24(hdr->fh_d_id);
+ bls.d_id = node->rnode.fc_id;
+ bls.rpi = node->rnode.indicator;
+ bls.vpi = node->nport->indicator;
+
+ acc = (void *)bls.payload;
+ acc->ba_ox_id = cpu_to_be16(bls.ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls.rx_id);
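+	/* A low/high SEQ_CNT range of 0x0000-0xffff (the low count is
+	 * left at zero by the memset above) indicates the BA_ACC applies
+	 * to the entire exchange, not a single sequence.
+	 */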
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
+}
diff --git a/drivers/scsi/elx/libefc/efc_els.h b/drivers/scsi/elx/libefc/efc_els.h
new file mode 100644
index 000000000..3c4f820f6
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_ELS_H__
+#define __EFC_ELS_H__
+
+#define EFC_STATUS_INVALID INT_MAX
+#define EFC_ELS_IO_POOL_SZ 1024
+
+struct efc_els_io_req {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc_node *node;
+ void *cb;
+ u32 els_retries_remaining;
+ bool els_req_free;
+ struct timer_list delay_timer;
+
+ const char *display_name;
+
+ struct efc_disc_io io;
+};
+
+typedef int (*efc_hw_srrs_cb_t)(void *arg, u32 length, int status,
+ u32 ext_status);
+
+void _efc_els_io_free(struct kref *arg);
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen);
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
+void efc_els_io_free(struct efc_els_io_req *els);
+
+/* ELS command send */
+typedef void (*els_cb_t)(struct efc_node *node,
+ struct efc_node_cb *cbdata, void *arg);
+int
+efc_send_plogi(struct efc_node *node);
+int
+efc_send_flogi(struct efc_node *node);
+int
+efc_send_fdisc(struct efc_node *node);
+int
+efc_send_prli(struct efc_node *node);
+int
+efc_send_prlo(struct efc_node *node);
+int
+efc_send_logo(struct efc_node *node);
+int
+efc_send_adisc(struct efc_node *node);
+int
+efc_send_pdisc(struct efc_node *node);
+int
+efc_send_scr(struct efc_node *node);
+int
+efc_ns_send_rftid(struct efc_node *node);
+int
+efc_ns_send_rffid(struct efc_node *node);
+int
+efc_ns_send_gidpt(struct efc_node *node);
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg);
+
+/* ELS acc send */
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique);
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
+int
+efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id);
+
+int
+efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
+ struct fc_frame_header *hdr);
+int
+efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+/* CT */
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code,
+ u32 reason_code_explanation);
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);
+
+#endif /* __EFC_ELS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
new file mode 100644
index 000000000..9661eea93
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -0,0 +1,1563 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * This file implements remote node state machines for:
+ * - Fabric logins.
+ * - Fabric controller events.
+ * - Name/directory services interaction.
+ * - Point-to-point logins.
+ */
+
+/*
+ * fabric_sm Node State Machine: Fabric States
+ * ns_sm Node State Machine: Name/Directory Services States
+ * p2p_sm Node State Machine: Point-to-Point Node States
+ */
+
+#include "efc.h"
+
+static void
+efc_fabric_initiate_shutdown(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+
+ node->els_io_enabled = false;
+
+ if (node->attached) {
+ int rc;
+
+		/* Issue the HW node free; we don't care whether it
+		 * succeeds right away or sometime later, as
+		 * node->attached is checked again later in the shutdown
+		 * process.
+		 */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0) {
+ node_printf(node, "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+ }
+ /*
+ * node has either been detached or is in the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+}
+
+static void
+__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+
+ node = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ case EFC_EVT_SHUTDOWN:
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_REENTER:
+ efc_log_debug(efc, ">>> reenter !!\n");
+ fallthrough;
+
+ case EFC_EVT_ENTER:
+ /* send FLOGI */
+ efc_send_flogi(node);
+ efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology)
+{
+ node->nport->topology = topology;
+}
+
+void
+efc_fabric_notify_topology(struct efc_node *node)
+{
+ struct efc_node *tmp_node;
+ unsigned long index;
+
+ /*
+ * now loop through the nodes in the nport
+ * and send topology notification
+ */
+ xa_for_each(&node->nport->lookup, index, tmp_node) {
+ if (tmp_node != node) {
+ efc_node_post_event(tmp_node,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ &node->nport->topology);
+ }
+ }
+}
+
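+/*
+ * The F_Port bit in the FLOGI LS_ACC common service parameters tells
+ * us what answered: set means a fabric F_Port, clear means another
+ * N_Port (a point-to-point link).
+ */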
+static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
+{
+ return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
+}
+
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->els_rsp.virt,
+ sizeof(struct fc_els_flogi));
+
+		/* Check to see if the fabric is an F_PORT or an N_PORT */
+ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
+ /* sm: if not nport / efc_domain_attach */
+ /* ext_status has the fc_id, attach domain */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
+ efc_fabric_notify_topology(node);
+ WARN_ON(node->nport->domain->attached);
+ efc_domain_attach(node->nport->domain,
+ cbdata->ext_status);
+ efc_node_transition(node,
+ __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ /* sm: if nport and p2p_winner / efc_domain_attach */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node,
+ "p2p setup failed, shutting down node\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (node->nport->domain->attached &&
+ !node->nport->domain->domain_notify_pend) {
+ /*
+ * already attached,
+ * just send ATTACH_OK
+ */
+ node_printf(node,
+ "p2p winner, domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /*
+ * peer is p2p winner;
+ * PLOGI will be received on the
+ * remote SID=1 node;
+ * this node has served its purpose
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+
+ break;
+ }
+
+ case EFC_EVT_ELS_REQ_ABORTED:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ struct efc_nport *nport = node->nport;
+ /*
+ * with these errors, we have no recovery,
+ * so shutdown the nport, leave the link
+ * up and the domain ready
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node,
+ "FLOGI failed evt=%s, shutting down nport [%s]\n",
+ efc_sm_event_name(evt), nport->display_name);
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send FDISC */
+ efc_send_fdisc(node);
+ efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ /* fc_id is in ext_status */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / efc_nport_attach */
+ efc_nport_attach(node->nport, cbdata->ext_status);
+ efc_node_transition(node, __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
+ /* sm: / shutdown nport */
+ efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_start_ns_node(struct efc_nport *nport)
+{
+ struct efc_node *ns;
+
+ /* Instantiate a name services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (!ns) {
+ ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
+ if (!ns)
+ return -EIO;
+ }
+	/*
+	 * For a found ns node, should we be transitioning from here?
+	 * Transitions are only safe from within the state machine or
+	 * right after allocation.
+	 */
+ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
+ efc_node_pause(ns, __efc_ns_init);
+ else
+ efc_node_transition(ns, __efc_ns_init, NULL);
+ return 0;
+}
+
+static int
+efc_start_fabctl_node(struct efc_nport *nport)
+{
+ struct efc_node *fabctl;
+
+ fabctl = efc_node_find(nport, FC_FID_FCTRL);
+ if (!fabctl) {
+ fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
+ false, false);
+ if (!fabctl)
+ return -EIO;
+ }
+	/*
+	 * For a found fabctl node, should we be transitioning from here?
+	 * Transitions are only safe from within the state machine or
+	 * right after allocation.
+	 */
+ efc_node_transition(fabctl, __efc_fabctl_init, NULL);
+ return 0;
+}
+
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ int rc;
+
+ rc = efc_start_ns_node(node->nport);
+ if (rc)
+ return;
+
+ /* sm: if enable_ini / start fabctl node */
+ /* Instantiate the fabric controller (sends SCR) */
+ if (node->nport->enable_rscn) {
+ rc = efc_start_fabctl_node(node->nport);
+ if (rc)
+ return;
+ }
+ efc_node_transition(node, __efc_fabric_idle, NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ /* Save service parameters */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ /* sm: / send RFTID */
+ efc_ns_send_rftid(node);
+ efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+
+	/*
+	 * If an RSCN is received, just ignore it; we haven't sent
+	 * GID_PT yet (the LS_ACC is sent by the fabctl node)
+	 */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ /* ignore shutdown event as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / send RFFID */
+ efc_ns_send_rffid(node);
+ efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
+ break;
+
+	/*
+	 * If an RSCN is received, just ignore it; we haven't sent
+	 * GID_PT yet (the LS_ACC is sent by the fabctl node)
+	 */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Waits for an RFFID response event;
+ * if rscn enabled, a GIDPT name services request is issued.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ if (node->nport->enable_rscn) {
+ /* sm: if enable_rscn / send GIDPT */
+ efc_ns_send_gidpt(node);
+
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ } else {
+ /* if 'T' only, we're done, go to idle */
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ }
+ break;
+ }
+	/*
+	 * If an RSCN is received, just ignore it; we haven't sent
+	 * GID_PT yet (the LS_ACC is sent by the fabctl node)
+	 */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
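+/*
+ * Process a GID_PT response: mark known nodes that are still reported
+ * as present, post EFC_EVT_NODE_MISSING for nodes no longer reported,
+ * and allocate (or refound) nodes for newly reported port IDs.
+ */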
+static int
+efc_process_gidpt_payload(struct efc_node *node,
+ void *data, u32 gidpt_len)
+{
+ u32 i, j;
+ struct efc_node *newnode;
+ struct efc_nport *nport = node->nport;
+ struct efc *efc = node->efc;
+ u32 port_id = 0, port_count, plist_count;
+ struct efc_node *n;
+ struct efc_node **active_nodes;
+ int residual;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_gid_pn_resp pn_rsp;
+ } *rsp;
+ struct fc_gid_pn_resp *gidpt;
+ unsigned long index;
+
+ rsp = data;
+ gidpt = &rsp->pn_rsp;
+ residual = be16_to_cpu(rsp->hdr.ct_mr_size);
+
+ if (residual != 0)
+ efc_log_debug(node->efc, "residual is %u words\n", residual);
+
+ if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
+ node_printf(node,
+ "GIDPT request failed: rsn x%x rsn_expl x%x\n",
+ rsp->hdr.ct_reason, rsp->hdr.ct_explan);
+ return -EIO;
+ }
+
+ plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
+
+ /* Count the number of nodes */
+ port_count = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_count++;
+ }
+
+ /* Allocate a buffer for all nodes */
+ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
+ if (!active_nodes) {
+ node_printf(node, "efc_malloc failed\n");
+ return -EIO;
+ }
+
+ /* Fill buffer with fc_id of active nodes */
+ i = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_id = n->rnode.fc_id;
+ switch (port_id) {
+ case FC_FID_FLOGI:
+ case FC_FID_FCTRL:
+ case FC_FID_DIR_SERV:
+ break;
+ default:
+ if (port_id != FC_FID_DOM_MGR)
+ active_nodes[i++] = n;
+ break;
+ }
+ }
+
+ /* update the active nodes buffer */
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ for (j = 0; j < port_count; j++) {
+ if (active_nodes[j] &&
+ port_id == active_nodes[j]->rnode.fc_id) {
+ active_nodes[j] = NULL;
+ }
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+
+ /* Those remaining in the active_nodes[] are now gone ! */
+ for (i = 0; i < port_count; i++) {
+ /*
+ * if we're an initiator and the remote node
+ * is a target, then post the node missing event.
+ * if we're target and we have enabled
+ * target RSCN, then post the node missing event.
+ */
+ if (!active_nodes[i])
+ continue;
+
+ if ((node->nport->enable_ini && active_nodes[i]->targ) ||
+ (node->nport->enable_tgt && enable_target_rscn(efc))) {
+ efc_node_post_event(active_nodes[i],
+ EFC_EVT_NODE_MISSING, NULL);
+ } else {
+ node_printf(node,
+ "GID_PT: skipping non-tgt port_id x%06x\n",
+ active_nodes[i]->rnode.fc_id);
+ }
+ }
+ kfree(active_nodes);
+
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ /* Don't create node for ourselves */
+ if (port_id == node->rnode.nport->fc_id) {
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ continue;
+ }
+
+ newnode = efc_node_find(nport, port_id);
+ if (!newnode) {
+ if (!node->nport->enable_ini)
+ continue;
+
+ newnode = efc_node_alloc(nport, port_id, false, false);
+ if (!newnode) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return -EIO;
+ }
+ /*
+ * send PLOGI automatically
+ * if initiator
+ */
+ efc_node_init_device(newnode, true);
+ }
+
+ if (node->nport->enable_ini && newnode->targ) {
+ efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
+ NULL);
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+ return 0;
+}
+
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /*
+ * Wait for a GIDPT response from the name server. Process the FC_IDs
+ * that are reported by creating new remote ports, as needed.
+ */
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process GIDPT payload */
+ efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
+ cbdata->els_rsp.len);
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ /* not much we can do; will retry with the next RSCN */
+ node_printf(node, "GID_PT failed to complete\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+	/* if an RSCN is received here, queue up another round of discovery */
+ case EFC_EVT_RSCN_RCVD: {
+ node_printf(node, "RSCN received during GID_PT processing\n");
+ node->rscn_pending = true;
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Wait for RSCN received events (posted from the fabric controller)
+ * and restart the GIDPT name services query and processing.
+ */
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->rscn_pending)
+ break;
+
+ node_printf(node, "RSCN pending, restart discovery\n");
+ node->rscn_pending = false;
+ fallthrough;
+
+ case EFC_EVT_RSCN_RCVD: {
+ /* sm: / send GIDPT */
+ /*
+ * If target RSCN processing is enabled,
+ * and this is target only (not initiator),
+ * and tgt_rscn_delay is non-zero,
+ * then we delay issuing the GID_PT
+ */
+ if (efc->tgt_rscn_delay_msec != 0 &&
+ !node->nport->enable_ini && node->nport->enable_tgt &&
+ enable_target_rscn(efc)) {
+ efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
+ } else {
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+gidpt_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
+
+ del_timer(&node->gidpt_delay_timer);
+
+ efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
+}
+
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ u64 delay_msec, tmp;
+
+		/*
+		 * Compute the delay time: default to tgt_rscn_delay; if
+		 * the time since the last GID_PT is less than
+		 * tgt_rscn_period, use tgt_rscn_period instead.
+		 */
+ delay_msec = efc->tgt_rscn_delay_msec;
+ tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
+ if (tmp < efc->tgt_rscn_period_msec)
+ delay_msec = efc->tgt_rscn_period_msec;
+
+ timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
+ 0);
+ mod_timer(&node->gidpt_delay_timer,
+ jiffies + msecs_to_jiffies(delay_msec));
+
+ break;
+ }
+
+ case EFC_EVT_GIDPT_DELAY_EXPIRED:
+ node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
+
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_RSCN_RCVD: {
+ efc_log_debug(efc,
+ "RSCN received while in GIDPT delay - no action\n");
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* no need to login to fabric controller, just send SCR */
+ efc_send_scr(node);
+ efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine:
+ * Wait for an SCR response from the fabric controller.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
+{
+ struct efc *efc = node->efc;
+ struct efc_nport *nport = node->nport;
+ struct efc_node *ns;
+
+ /* Forward this event to the name-services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (ns)
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
+ else
+ efc_log_warn(efc, "can't find name server node\n");
+}
+
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine: Ready.
+ * In this state, the fabric controller sends a RSCN, which is received
+ * by this node and is forwarded to the name services node object; and
+ * the RSCN LS_ACC is sent.
+ */
+ switch (evt) {
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * sm: / process RSCN (forward to name services node),
+ * send LS_ACC
+ */
+ efc_process_rscn(node, cbdata);
+ efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
+ NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static uint64_t
+efc_get_wwpn(struct fc_els_flogi *sp)
+{
+	return be64_to_cpu(sp->fl_wwpn);
+}
+
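+/*
+ * Point-to-point winner selection: the port with the higher WWPN wins
+ * and assigns the N_Port IDs. Returns 1 if the remote port wins, 0 if
+ * the local port wins, and -1 if the WWPNs match (no winner).
+ */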
+static int
+efc_rnode_is_winner(struct efc_nport *nport)
+{
+ struct fc_els_flogi *remote_sp;
+ u64 remote_wwpn;
+ u64 local_wwpn = nport->wwpn;
+ u64 wwn_bump = 0;
+
+ remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
+ remote_wwpn = efc_get_wwpn(remote_sp);
+
+ local_wwpn ^= wwn_bump;
+
+ efc_log_debug(nport->efc, "r: %llx\n",
+ be64_to_cpu(remote_sp->fl_wwpn));
+ efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
+
+ if (remote_wwpn == local_wwpn) {
+ efc_log_warn(nport->efc,
+ "WWPN of remote node [%08x %08x] matches local WWPN\n",
+ (u32)(local_wwpn >> 32ll),
+ (u32)local_wwpn);
+ return -1;
+ }
+
+ return (remote_wwpn > local_wwpn);
+}
+
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_nport *nport = node->nport;
+ struct efc_node *rnode;
+
+ /*
+ * this transient node (SID=0 (recv'd FLOGI)
+ * or DID=fabric (sent FLOGI))
+ * is the p2p winner, will use a separate node
+ * to send PLOGI to peer
+ */
+ WARN_ON(!node->nport->p2p_winner);
+
+ rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
+ if (rnode) {
+ /*
+ * the "other" transient p2p node has
+ * already kicked off the
+ * new node from which PLOGI is sent
+ */
+ node_printf(node,
+ "Node with fc_id x%x already exists\n",
+ rnode->rnode.fc_id);
+ } else {
+ /*
+ * create new node (SID=1, DID=2)
+ * from which to send PLOGI
+ */
+ rnode = efc_node_alloc(nport,
+ nport->p2p_remote_port_id,
+ false, false);
+ if (!rnode) {
+ efc_log_err(efc, "node alloc failed\n");
+ return;
+ }
+
+ efc_fabric_notify_topology(node);
+ /* sm: / allocate p2p remote node */
+ efc_node_transition(rnode, __efc_p2p_rnode_init,
+ NULL);
+ }
+
+ /*
+ * the transient node (SID=0 or DID=fabric)
+ * has served its purpose
+ */
+ if (node->rnode.fc_id == 0) {
+ /*
+ * if this is the SID=0 node,
+ * move to the init state in case peer
+ * has restarted FLOGI discovery and FLOGI is pending
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ } else {
+ /*
+ * if this is the DID=fabric node
+ * (we initiated FLOGI), shut it down
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
+ break;
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+
+ /* sm: if p2p_winner / domain_attach */
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (!node->nport->domain->attached) {
+ node_printf(node, "Domain not attached\n");
+ efc_domain_attach(node->nport->domain,
+ node->nport->p2p_port_id);
+ } else {
+ node_printf(node, "Domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /* this node has served its purpose;
+ * we'll expect a PLOGI on a separate
+ * node (remote SID=0x1); return this node
+ * to init state in case peer
+ * restarts discovery -- it may already
+ * have (pending frames may exist).
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /*
+ * LS_ACC failed, possibly due to link down;
+ * shutdown node and wait
+ * for FLOGI discovery to restart
+ */
+ node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_ABTS_RCVD: {
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node, "PLOGI failed, shutting down\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* if we're in external loopback mode, just send LS_ACC */
+ if (node->efc->external_loopback) {
+ efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ } else {
+ /*
+ * if this isn't external loopback,
+ * pass to default handler
+ */
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+ break;
+ }
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+		/* We sent a PLOGI and, before its completion was seen,
+		 * received a PRLI from the remote node (WCQEs and RCQEs
+		 * arrive on different queues, so ordering cannot be
+		 * assumed). Save the OX_ID so the PRLI can be answered
+		 * after the attach, and continue waiting for the PLOGI
+		 * response.
+		 */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+		/*
+		 * Since we've received a PRLI, we have a port login and just
+		 * need to wait for the PLOGI response to do the node attach;
+		 * then we can send the LS_ACC for the PRLI. During this time
+		 * we may receive FCP_CMNDs (possible since we've already sent
+		 * a PRLI and our peer may have accepted it). We are not
+		 * waiting on any other unsolicited frames to continue the
+		 * login process, so it will not hurt to hold frames here.
+		 */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
+ int rc;
+
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node->ls_acc_io,
+ node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node, __efc_d_port_logged_in,
+ NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_PRLI_RCVD:
+ node_printf(node, "%s: PRLI received before node is attached\n",
+ efc_sm_event_name(evt));
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+int
+efc_p2p_setup(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rnode_winner;
+
+ rnode_winner = efc_rnode_is_winner(nport);
+
+	/*
+	 * Set nport flags to indicate the p2p "winner": if the remote
+	 * port wins, leave the IDs at 0 (the peer will assign them); if
+	 * the local port wins, take N_Port ID 1 and assign 2 to the peer.
+	 */
+ if (rnode_winner == 1) {
+ nport->p2p_remote_port_id = 0;
+ nport->p2p_port_id = 0;
+ nport->p2p_winner = false;
+ } else if (rnode_winner == 0) {
+ nport->p2p_remote_port_id = 2;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ /* no winner; only okay if external loopback enabled */
+ if (nport->efc->external_loopback) {
+ /*
+ * External loopback mode enabled;
+ * local nport and remote node
+ * will be registered with an NPortID = 1;
+ */
+ efc_log_debug(efc,
+ "External loopback mode enabled\n");
+ nport->p2p_remote_port_id = 1;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ efc_log_warn(efc,
+ "failed to determine p2p winner\n");
+ return rnode_winner;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.h b/drivers/scsi/elx/libefc/efc_fabric.h
new file mode 100644
index 000000000..b0947ae6f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declarations for the interface exported by efc_fabric
+ */
+
+#ifndef __EFCT_FABRIC_H__
+#define __EFCT_FABRIC_H__
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx,
+		       enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_p2p_setup(struct efc_nport *nport);
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology);
+void efc_fabric_notify_topology(struct efc_node *node);
+
+#endif /* __EFCT_FABRIC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c
new file mode 100644
index 000000000..a1b4ce6a2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efc.h"
+
+int
+efc_remote_node_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_remote_node *rnode = data;
+ struct efc_node *node = rnode->node;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return 0;
+}
+
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 port_id)
+{
+ /* Find an FC node structure given the FC port ID */
+ return xa_load(&nport->lookup, port_id);
+}
+
+static void
+_efc_node_free(struct kref *arg)
+{
+ struct efc_node *node = container_of(arg, struct efc_node, ref);
+ struct efc *efc = node->efc;
+ struct efc_dma *dma;
+
+ dma = &node->sparm_dma_buf;
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+ mempool_free(node, efc->node_pool);
+}
+
+struct efc_node *efc_node_alloc(struct efc_nport *nport,
+ u32 port_id, bool init, bool targ)
+{
+ int rc;
+ struct efc_node *node = NULL;
+ struct efc *efc = nport->efc;
+ struct efc_dma *dma;
+
+ if (nport->shutting_down) {
+ efc_log_debug(efc, "node allocation when shutting down %06x",
+ port_id);
+ return NULL;
+ }
+
+ node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
+ if (!node) {
+ efc_log_err(efc, "node allocation failed %06x", port_id);
+ return NULL;
+ }
+ memset(node, 0, sizeof(*node));
+
+ dma = &node->sparm_dma_buf;
+ dma->size = NODE_SPARAMS_SIZE;
+ dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
+ if (!dma->virt) {
+ efc_log_err(efc, "node dma alloc failed\n");
+ goto dma_fail;
+ }
+ node->rnode.indicator = U32_MAX;
+ node->nport = nport;
+
+ node->efc = efc;
+ node->init = init;
+ node->targ = targ;
+
+ spin_lock_init(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&node->pend_frames);
+ spin_lock_init(&node->els_ios_lock);
+ INIT_LIST_HEAD(&node->els_ios_list);
+ node->els_io_enabled = true;
+
+ rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
+ goto hw_alloc_fail;
+ }
+
+ node->rnode.node = node;
+ node->sm.app = node;
+ node->evtdepth = 0;
+
+ efc_node_update_display_name(node);
+
+ rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Node lookup store failed: %d\n", rc);
+ goto xa_fail;
+ }
+
+ /* initialize refcount */
+ kref_init(&node->ref);
+ node->release = _efc_node_free;
+ kref_get(&nport->ref);
+
+ return node;
+
+xa_fail:
+ efc_node_free_resources(efc, &node->rnode);
+hw_alloc_fail:
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+dma_fail:
+ mempool_free(node, efc->node_pool);
+ return NULL;
+}
+
+void
+efc_node_free(struct efc_node *node)
+{
+ struct efc_nport *nport;
+ struct efc *efc;
+ int rc = 0;
+ struct efc_node *ns = NULL;
+
+	nport = node->nport;
+	efc = node->efc;
+
+	node_printf(node, "Free'd\n");
+
+	/* check for a double free before nport is used any further */
+	if (!nport) {
+		efc_log_err(efc, "Node already Freed\n");
+		return;
+	}
+
+	if (node->refound) {
+		/*
+		 * Save the name server node. We will send a fake RSCN event
+		 * at the end to handle any RSCN events that were ignored
+		 * during node deletion.
+		 */
+		ns = efc_node_find(nport, FC_FID_DIR_SERV);
+	}
+
+ /* Free HW resources */
+ rc = efc_node_free_resources(efc, &node->rnode);
+ if (rc < 0)
+ efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
+
+ /* if the gidpt_delay_timer is still running, then delete it */
+ if (timer_pending(&node->gidpt_delay_timer))
+ del_timer(&node->gidpt_delay_timer);
+
+ xa_erase(&nport->lookup, node->rnode.fc_id);
+
+ /*
+ * If the node_list is empty,
+ * then post a ALL_CHILD_NODES_FREE event to the nport,
+ * after the lock is released.
+ * The nport may be free'd as a result of the event.
+ */
+ if (xa_empty(&nport->lookup))
+ efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ node->nport = NULL;
+ node->sm.current_state = NULL;
+
+ kref_put(&nport->ref, nport->release);
+ kref_put(&node->ref, node->release);
+
+ if (ns) {
+ /* sending fake RSCN event to name server node */
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
+ }
+}
+
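+/* Copy @buffer into the DMA buffer, truncating to the DMA buffer size */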
+static void
+efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
+{
+ if (!dma || !buffer || !buffer_length)
+ return;
+
+ if (buffer_length > dma->size)
+ buffer_length = dma->size;
+
+ memcpy(dma->virt, buffer, buffer_length);
+ dma->len = buffer_length;
+}
+
+int
+efc_node_attach(struct efc_node *node)
+{
+ int rc = 0;
+ struct efc_nport *nport = node->nport;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = node->efc;
+
+ if (!domain->attached) {
+ efc_log_err(efc, "Warning: unattached domain\n");
+ return -EIO;
+ }
+ /* Update node->wwpn/wwnn */
+
+ efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
+ efc_node_get_wwpn(node));
+ efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
+ efc_node_get_wwnn(node));
+
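+	/*
+	 * Skip the first 4 bytes (the ELS command code at the start of
+	 * the stored FLOGI/PLOGI payload) so only the service parameters
+	 * themselves are handed to the hardware.
+	 */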
+ efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
+ sizeof(node->service_params) - 4);
+
+ /* take lock to protect node->rnode.attached */
+ rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
+ if (rc < 0)
+ efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);
+
+ return rc;
+}
+
+void
+efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
+{
+ switch (fc_id) {
+ case FC_FID_FLOGI:
+ snprintf(buffer, buffer_length, "fabric");
+ break;
+ case FC_FID_FCTRL:
+ snprintf(buffer, buffer_length, "fabctl");
+ break;
+ case FC_FID_DIR_SERV:
+ snprintf(buffer, buffer_length, "nserve");
+ break;
+ default:
+		if ((fc_id & 0xffff00) == FC_FID_DOM_MGR) {
+ snprintf(buffer, buffer_length, "dctl%02x",
+ (fc_id & 0x0000ff));
+ } else {
+ snprintf(buffer, buffer_length, "%06x", fc_id);
+ }
+ break;
+ }
+}
+
+void
+efc_node_update_display_name(struct efc_node *node)
+{
+ u32 port_id = node->rnode.fc_id;
+ struct efc_nport *nport = node->nport;
+ char portid_display[16];
+
+ efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
+
+ snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
+ nport->display_name, portid_display);
+}
+
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node)
+{
+ if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
+ efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
+ node->display_name, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ }
+}
+
+static void efc_node_handle_implicit_logo(struct efc_node *node)
+{
+ int rc;
+
+	/*
+	 * Currently, the only case for an implicit logout is a received
+	 * PLOGI. Thus, the node's pending ELS IO list won't be empty
+	 * (the PLOGI will be on it).
+	 */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+ node_printf(node, "Reason: implicit logout, re-authenticate\n");
+
+ /* Re-attach node with the same HW node resources */
+ node->req_free = false;
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ node->els_io_enabled = true;
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+}
+
+static void efc_node_handle_explicit_logo(struct efc_node *node)
+{
+ s8 pend_frames_empty;
+ unsigned long flags = 0;
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+ pend_frames_empty = list_empty(&node->pend_frames);
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ /*
+ * there are two scenarios where we want to keep
+ * this node alive:
+ * 1. there are pending frames that need to be
+ * processed or
+ * 2. we're an initiator and the remote node is
+ * a target and we need to re-authenticate
+ */
+ node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
+ node_printf(node, "nport.ini=%d node.tgt=%d\n",
+ node->nport->enable_ini, node->targ);
+ if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
+ u8 send_plogi = false;
+
+ if (node->nport->enable_ini && node->targ) {
+ /*
+ * we're an initiator and
+ * node shutting down is a target;
+ * we'll need to re-authenticate in
+ * initial state
+ */
+ send_plogi = true;
+ }
+
+ /*
+ * transition to __efc_d_init
+ * (will retain HW node resources)
+ */
+ node->els_io_enabled = true;
+ node->req_free = false;
+
+ /*
+ * either pending frames exist or we are re-authenticating
+ * with PLOGI (or both); in either case, return to initial
+ * state
+ */
+ efc_node_init_device(node, send_plogi);
+ }
+ /* else: let node shutdown occur */
+}
+
+static void
+efc_node_purge_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+}
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ efc_node_hold_frames(node);
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ /* by default, we will be freeing node after we unwind */
+ node->req_free = true;
+
+ switch (node->shutdown_reason) {
+ case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
+			/* Node shutdown because a PLOGI was received while
+			 * the node was already logged in. We have the PLOGI
+			 * service parameters, so submit a node attach; we
+			 * won't be freeing this node.
+			 */
+
+ efc_node_handle_implicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
+ efc_node_handle_explicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_DEFAULT:
+ default: {
+ /*
+ * shutdown due to link down,
+ * node going away (xport event) or
+ * nport shutdown, purge pending and
+ * proceed to cleanup node
+ */
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ node_printf(node,
+ "Shutdown reason: default, purge pending\n");
+ efc_node_purge_pending(node);
+ break;
+ }
+ }
+
+ break;
+ }
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+static bool
+efc_node_check_els_quiesced(struct efc_node *node)
+{
+ /* check to see if ELS requests, completions are quiesced */
+ if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
+ efc_els_io_list_empty(node, &node->els_ios_list)) {
+ if (!node->attached) {
+ /* hw node detach already completed, proceed */
+ node_printf(node, "HW node not attached\n");
+ efc_node_transition(node,
+ __efc_node_wait_ios_shutdown,
+ NULL);
+ } else {
+ /*
+ * hw node detach hasn't completed,
+ * transition and wait
+ */
+ node_printf(node, "HW node still attached\n");
+ efc_node_transition(node, __efc_node_wait_node_free,
+ NULL);
+ }
+ return true;
+ }
+ return false;
+}
+
+void
+efc_node_initiate_cleanup(struct efc_node *node)
+{
+	/*
+	 * If the ELS IOs have already been quiesced, move to the next
+	 * state; if not, abort them.
+	 */
+ if (!efc_node_check_els_quiesced(node)) {
+ efc_node_hold_frames(node);
+ efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
+ }
+}
+
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ bool check_quiesce = false;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /* Node state machine: Wait for all ELSs to complete */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ if (efc_els_io_list_empty(node, &node->els_ios_list)) {
+ node_printf(node, "All ELS IOs complete\n");
+ check_quiesce = true;
+ }
+ break;
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* all ELS IO's complete */
+ node_printf(node, "All ELS IOs complete\n");
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+
+ if (check_quiesce)
+ efc_node_check_els_quiesced(node);
+}
+
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ /* node is officially no longer attached */
+ node->attached = false;
+ efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+		/* As IOs and ELS IOs complete, we expect to get these events */
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+
+ /* first check to see if no ELS IOs are outstanding */
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+			/* No ELS IOs outstanding; proceed to shutdown */
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+		/* Can happen as ELS IOs complete */
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
+ efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+ struct efc_node_cb *cbdata = arg;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
+ case EFC_EVT_NODE_MISSING:
+ case EFC_EVT_FCP_CMD_RCVD:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ node->refound = true;
+ break;
+
+ /*
+ * node->attached must be set appropriately
+ * for all node attach/detach events
+ */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ break;
+
+ /*
+ * handle any ELS completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ break;
+
+ /*
+ * handle any ELS request completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_ELS_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * Unsupported ELS was received,
+ * send LS_RJT, command not supported
+ */
+ efc_log_debug(efc,
+ "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
+ node->display_name, funcname,
+ ((u8 *)cbdata->payload->dma.virt)[0]);
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD:
+ case EFC_EVT_FLOGI_RCVD:
+ case EFC_EVT_LOGO_RCVD:
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /* sm: / send ELS_RJT */
+ efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ /* if we didn't catch this in a state, send generic LS_RJT */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
+ break;
+ }
+ case EFC_EVT_ABTS_RCVD: {
+ efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+efc_node_save_sparms(struct efc_node *node, void *payload)
+{
+ memcpy(node->service_params, payload, sizeof(node->service_params));
+}
+
+void
+efc_node_post_event(struct efc_node *node,
+ enum efc_sm_event evt, void *arg)
+{
+ bool free_node = false;
+
+ node->evtdepth++;
+
+ efc_sm_post_event(&node->sm, evt, arg);
+
+	/* If our event call depth is one and we're not holding frames,
+	 * we can dispatch any pending frames. We don't want to allow
+	 * the efc_process_node_pending() call to recurse.
+	 */
+ if (!node->hold_frames && node->evtdepth == 1)
+ efc_process_node_pending(node);
+
+ node->evtdepth--;
+
+ /*
+ * Free the node object if so requested,
+ * and we're at an event call depth of zero
+ */
+ if (node->evtdepth == 0 && node->req_free)
+ free_node = true;
+
+ if (free_node)
+ efc_node_free(node);
+}
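+
+/*
+ * Recursion-guard sketch (illustrative only; EFC_EVT_FOO/BAR are
+ * placeholders, not real events): evtdepth is what keeps pending-frame
+ * dispatch from re-entering when a handler posts a nested event:
+ *
+ *	efc_node_post_event(node, EFC_EVT_FOO, NULL);	// depth 0 -> 1
+ *		// the state handler posts a nested event:
+ *		efc_node_post_event(node, EFC_EVT_BAR, NULL);	// depth 2,
+ *		// so pending frames are NOT dispatched here
+ *	// back at depth 1, pending frames are dispatched exactly once
+ */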
+
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+{
+ struct efc_sm_ctx *ctx = &node->sm;
+
+ if (ctx->current_state == state) {
+ efc_node_post_event(node, EFC_EVT_REENTER, data);
+ } else {
+ efc_node_post_event(node, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_node_post_event(node, EFC_EVT_ENTER, data);
+ }
+}
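+
+/*
+ * Handler skeleton (illustrative, using a hypothetical __efc_d_example
+ * state): each transition above synthesizes EXIT for the old state and
+ * ENTER for the new one, so handlers bracket setup/teardown on those
+ * events:
+ *
+ *	void __efc_d_example(struct efc_sm_ctx *ctx,
+ *			     enum efc_sm_event evt, void *arg)
+ *	{
+ *		struct efc_node *node = ctx->app;
+ *
+ *		switch (evt) {
+ *		case EFC_EVT_ENTER:
+ *			efc_node_hold_frames(node);
+ *			break;
+ *		case EFC_EVT_EXIT:
+ *			efc_node_accept_frames(node);
+ *			break;
+ *		default:
+ *			__efc_node_common(__func__, ctx, evt, arg);
+ *		}
+ *	}
+ */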
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
+{
+ memset(buf, 0, buf_len);
+
+ snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
+}
+
+u64
+efc_node_get_wwpn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwpn);
+}
+
+u64
+efc_node_get_wwnn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwnn);
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
+{
+ int empty;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ empty = list_empty(list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return empty;
+}
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *))
+
+{
+ node->nodedb_state = state;
+ efc_node_transition(node, __efc_node_paused, NULL);
+}
+
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+	/*
+	 * This state is entered when a state is "paused". When resumed, the
+	 * node is transitioned to the previously saved state
+	 * (node->nodedb_state).
+	 */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node_printf(node, "Paused\n");
+ break;
+
+ case EFC_EVT_RESUME: {
+ void (*pf)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+ pf = node->nodedb_state;
+
+ node->nodedb_state = NULL;
+ efc_node_transition(node, pf, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node->req_free = true;
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
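+
+/*
+ * Pause/resume sketch (illustrative): a caller parks a node in
+ * __efc_node_paused and later resumes it into the saved state;
+ * __efc_d_init is just an example resume target.
+ *
+ *	efc_node_pause(node, __efc_d_init);
+ *	...
+ *	efc_node_post_event(node, EFC_EVT_RESUME, NULL);
+ */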
+
+void
+efc_node_recv_els_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
+ struct {
+ u32 cmd;
+ enum efc_sm_event evt;
+ u32 payload_size;
+ } els_cmd_list[] = {
+ {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
+ {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
+ {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
+ {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
+ {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ };
+ struct efc_node_cb cbdata;
+ u8 *buf = seq->payload->dma.virt;
+ enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
+ u32 i;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS command */
+ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
+ if (els_cmd_list[i].cmd == buf[0]) {
+ evt = els_cmd_list[i].evt;
+ break;
+ }
+ }
+
+ efc_node_post_event(node, evt, &cbdata);
+}
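+
+/*
+ * Dispatch example (illustrative): for a received PLOGI, the first payload
+ * byte is ELS_PLOGI (0x03), so the lookup above posts EFC_EVT_PLOGI_RCVD.
+ * A command missing from the table falls through to the generic
+ * EFC_EVT_ELS_RCVD, which __efc_node_common() answers with an LS_RJT.
+ */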
+
+void
+efc_node_recv_ct_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fc_ct_hdr *iu = seq->payload->dma.virt;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efc *efc = node->efc;
+ u16 gscmd = be16_to_cpu(iu->ct_cmd);
+
+ efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
+ node->display_name, gscmd);
+ efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
+ FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
+}
+
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
+{
+ struct efc_node_cb cbdata;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
+}
+
+void
+efc_process_node_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 pend_frames_processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (node->hold_frames)
+ break;
+
+ seq = NULL;
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ if (!list_empty(&node->pend_frames)) {
+ seq = list_first_entry(&node->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ if (!seq) {
+ pend_frames_processed = node->pend_frames_processed;
+ node->pend_frames_processed = 0;
+ break;
+ }
+ node->pend_frames_processed++;
+
+ /* now dispatch frame(s) to dispatch function */
+ efc_node_dispatch_frame(node, seq);
+ efc->tt.hw_seq_free(efc, seq);
+ }
+
+ if (pend_frames_processed != 0)
+ efc_log_debug(efc, "%u node frames held and processed\n",
+ pend_frames_processed);
+}
+
+void
+efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
+{
+ unsigned long flags = 0;
+ enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
+ struct efc *efc = node->efc;
+
+ if (status)
+ evt = EFC_EVT_NODE_SESS_REG_FAIL;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, evt, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
+{
+ struct efc *efc = node->efc;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, evt, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_shutdown(struct efc_node *node, void *arg)
+{
+ unsigned long flags = 0;
+ struct efc *efc = node->efc;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h
new file mode 100644
index 000000000..e9c600ac4
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFC_NODE_H__)
+#define __EFC_NODE_H__
+#include "scsi/fc/fc_ns.h"
+
+#define EFC_NODEDB_PAUSE_FABRIC_LOGIN (1 << 0)
+#define EFC_NODEDB_PAUSE_NAMESERVER (1 << 1)
+#define EFC_NODEDB_PAUSE_NEW_NODES (1 << 2)
+
+#define MAX_ACC_REJECT_PAYLOAD sizeof(struct fc_els_ls_rjt)
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
+ io->node->display_name, io->instance_index, io->init_task_tag, \
+ io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+static inline void
+efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ const char *handler)
+{
+ struct efc_node *node = ctx->app;
+
+ if (evt == EFC_EVT_ENTER) {
+ strncpy(node->current_state_name, handler,
+ sizeof(node->current_state_name));
+ } else if (evt == EFC_EVT_EXIT) {
+ strncpy(node->prev_state_name, node->current_state_name,
+ sizeof(node->prev_state_name));
+ strncpy(node->current_state_name, "invalid",
+ sizeof(node->current_state_name));
+ }
+ node->prev_evt = node->current_evt;
+ node->current_evt = evt;
+}
+
+/**
+ * hold frames in pending frame list
+ *
+ * Unsolicited receive frames are held on the node pending frame list,
+ * rather than being processed.
+ */
+
+static inline void
+efc_node_hold_frames(struct efc_node *node)
+{
+ node->hold_frames = true;
+}
+
+/**
+ * accept frames
+ *
+ * Unsolicited receive frames are processed rather than being held on the
+ * node pending frame list.
+ */
+
+static inline void
+efc_node_accept_frames(struct efc_node *node)
+{
+ node->hold_frames = false;
+}
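+
+/*
+ * Bracketing pattern (illustrative): shutdown-path states hold frames on
+ * ENTER and accept them again on EXIT, as in __efc_node_shutdown():
+ *
+ *	case EFC_EVT_ENTER:
+ *		efc_node_hold_frames(node);
+ *		break;
+ *	case EFC_EVT_EXIT:
+ *		efc_node_accept_frames(node);
+ *		break;
+ */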
+
+/*
+ * Node initiator/target enable defines
+ * All combinations of the SLI port (nport) initiator/target enable
+ * and the remote node initiator/target enable are enumerated.
+ * e.g., EFC_NODE_ENABLE_T_TO_IT means target mode is enabled on the
+ * SLI port and the remote node is enabled as both initiator and target.
+ */
+enum efc_node_enable {
+ EFC_NODE_ENABLE_x_TO_x,
+ EFC_NODE_ENABLE_x_TO_T,
+ EFC_NODE_ENABLE_x_TO_I,
+ EFC_NODE_ENABLE_x_TO_IT,
+ EFC_NODE_ENABLE_T_TO_x,
+ EFC_NODE_ENABLE_T_TO_T,
+ EFC_NODE_ENABLE_T_TO_I,
+ EFC_NODE_ENABLE_T_TO_IT,
+ EFC_NODE_ENABLE_I_TO_x,
+ EFC_NODE_ENABLE_I_TO_T,
+ EFC_NODE_ENABLE_I_TO_I,
+ EFC_NODE_ENABLE_I_TO_IT,
+ EFC_NODE_ENABLE_IT_TO_x,
+ EFC_NODE_ENABLE_IT_TO_T,
+ EFC_NODE_ENABLE_IT_TO_I,
+ EFC_NODE_ENABLE_IT_TO_IT,
+};
+
+static inline enum efc_node_enable
+efc_node_get_enable(struct efc_node *node)
+{
+ u32 retval = 0;
+
+ if (node->nport->enable_ini)
+ retval |= (1U << 3);
+ if (node->nport->enable_tgt)
+ retval |= (1U << 2);
+ if (node->init)
+ retval |= (1U << 1);
+ if (node->targ)
+ retval |= (1U << 0);
+ return (enum efc_node_enable)retval;
+}
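+
+/*
+ * Worked example (illustrative): an initiator-only nport talking to a
+ * target-only remote node sets bit 3 (nport ini) and bit 0 (node targ),
+ * yielding 0b1001 == 9 == EFC_NODE_ENABLE_I_TO_T.
+ */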
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_attach(struct efc_node *node);
+struct efc_node *
+efc_node_alloc(struct efc_nport *nport, u32 port_id,
+ bool init, bool targ);
+void
+efc_node_free(struct efc_node *efc);
+void
+efc_node_update_display_name(struct efc_node *node);
+void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
+ void *arg);
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+efc_node_save_sparms(struct efc_node *node, void *payload);
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *, enum efc_sm_event,
+ void *), void *data);
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+efc_node_initiate_cleanup(struct efc_node *node);
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name);
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg));
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+int
+efc_node_active_ios_empty(struct efc_node *node);
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+void
+efc_process_node_pending(struct efc_node *node);
+
+u64 efc_node_get_wwnn(struct efc_node *node);
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 id);
+void
+efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
+void
+efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
+void
+efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __EFC_NODE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
new file mode 100644
index 000000000..2e83a6679
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * NPORT
+ *
+ * Port object for physical port and NPIV ports.
+ */
+
+/*
+ * NPORT REFERENCE COUNTING
+ *
+ * A nport reference should be taken when:
+ * - an nport is allocated
+ * - a vport populates associated nport
+ * - a remote node is allocated
+ * - an unsolicited frame is processed
+ * The reference should be dropped when:
+ * - the unsolicited frame processing is done
+ * - the remote node is removed
+ * - the vport is removed
+ * - the nport is removed
+ */
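+
+/*
+ * Pairing sketch (illustrative): efc_nport_find() takes a reference via
+ * kref_get_unless_zero(), so an unsolicited-frame path is expected to drop
+ * it when processing completes:
+ *
+ *	nport = efc_nport_find(domain, d_id);
+ *	if (nport) {
+ *		... process the frame ...
+ *		kref_put(&nport->ref, nport->release);
+ *	}
+ */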
+
+#include "efc.h"
+
+void
+efc_nport_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_nport *nport = data;
+ unsigned long flags = 0;
+
+ efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_sm_post_event(&nport->sm, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+static struct efc_nport *
+efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
+{
+ struct efc_nport *nport = NULL;
+
+ /* Find a nport, given the WWNN and WWPN */
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwnn == wwnn && nport->wwpn == wwpn)
+ return nport;
+ }
+ return NULL;
+}
+
+static void
+_efc_nport_free(struct kref *arg)
+{
+ struct efc_nport *nport = container_of(arg, struct efc_nport, ref);
+
+ kfree(nport);
+}
+
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt)
+{
+ struct efc_nport *nport;
+
+ if (domain->efc->enable_ini)
+ enable_ini = 0;
+
+ /* Return a failure if this nport has already been allocated */
+ if ((wwpn != 0) || (wwnn != 0)) {
+ nport = efc_nport_find_wwn(domain, wwnn, wwpn);
+ if (nport) {
+ efc_log_err(domain->efc,
+ "NPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ return NULL;
+ }
+ }
+
+ nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
+ if (!nport)
+ return nport;
+
+ /* initialize refcount */
+ kref_init(&nport->ref);
+ nport->release = _efc_nport_free;
+
+ nport->efc = domain->efc;
+ snprintf(nport->display_name, sizeof(nport->display_name), "------");
+ nport->domain = domain;
+ xa_init(&nport->lookup);
+ nport->instance_index = domain->nport_count++;
+ nport->sm.app = nport;
+ nport->enable_ini = enable_ini;
+ nport->enable_tgt = enable_tgt;
+ nport->enable_rscn = (nport->enable_ini ||
+ (nport->enable_tgt && enable_target_rscn(nport->efc)));
+
+ /* Copy service parameters from domain */
+ memcpy(nport->service_params, domain->service_params,
+ sizeof(struct fc_els_flogi));
+
+ /* Update requested fc_id */
+ nport->fc_id = fc_id;
+
+ /* Update the nport's service parameters for the new wwn's */
+ nport->wwpn = wwpn;
+ nport->wwnn = wwnn;
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
+ (unsigned long long)wwnn);
+
+ /*
+ * if this is the "first" nport of the domain,
+ * then make it the "phys" nport
+ */
+ if (list_empty(&domain->nport_list))
+ domain->nport = nport;
+
+ INIT_LIST_HEAD(&nport->list_entry);
+ list_add_tail(&nport->list_entry, &domain->nport_list);
+
+ kref_get(&domain->ref);
+
+ efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);
+
+ return nport;
+}
+
+void
+efc_nport_free(struct efc_nport *nport)
+{
+ struct efc_domain *domain;
+
+ if (!nport)
+ return;
+
+ domain = nport->domain;
+ efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
+ list_del(&nport->list_entry);
+ /*
+ * if this is the physical nport,
+ * then clear it out of the domain
+ */
+ if (nport == domain->nport)
+ domain->nport = NULL;
+
+ xa_destroy(&nport->lookup);
+ xa_erase(&domain->lookup, nport->fc_id);
+
+ if (list_empty(&domain->nport_list))
+ efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ kref_put(&domain->ref, domain->release);
+ kref_put(&nport->ref, nport->release);
+}
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id)
+{
+ struct efc_nport *nport;
+
+ /* Find a nport object, given an FC_ID */
+ nport = xa_load(&domain->lookup, d_id);
+ if (!nport || !kref_get_unless_zero(&nport->ref))
+ return NULL;
+
+ return nport;
+}
+
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id)
+{
+ int rc;
+ struct efc_node *node;
+ struct efc *efc = nport->efc;
+ unsigned long index;
+
+ /* Set our lookup */
+ rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update our display_name */
+ efc_node_fcid_display(fc_id, nport->display_name,
+ sizeof(nport->display_name));
+
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_update_display_name(node);
+ }
+
+ efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
+ nport->display_name, fc_id);
+
+ /* Register a nport, given an FC_ID */
+ rc = efc_cmd_nport_attach(efc, nport, fc_id);
+ if (rc < 0) {
+ efc_log_err(nport->efc,
+ "efc_hw_port_attach failed: %d\n", rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efc_nport_shutdown(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_node *node;
+ unsigned long index;
+
+ xa_for_each(&nport->lookup, index, node) {
+ if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+		/*
+		 * If this is a vport, log out of the fabric
+		 * controller so that it deletes the vport
+		 * on the switch.
+		 */
+		/* if the link is down, don't send a LOGO */
+ if (efc->link_status == EFC_LINK_STATUS_DOWN) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
+ node->display_name);
+
+ if (!efc_send_logo(node)) {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
+ continue;
+ }
+
+		/*
+		 * failed to send LOGO; go ahead and
+		 * clean up the node anyway
+		 */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ }
+}
+
+static void
+efc_vport_link_down(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+
+ /* Clear the nport reference in the vport specification */
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ kref_put(&nport->ref, nport->release);
+ vport->nport = NULL;
+ break;
+ }
+ }
+}
+
+static void
+__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ break;
+ case EFC_EVT_NPORT_ATTACH_OK:
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN:
+ /* Flag this nport as shutting down */
+ nport->shutting_down = true;
+
+ if (nport->is_vport)
+ efc_vport_link_down(nport);
+
+ if (xa_empty(&nport->lookup)) {
+ /* Remove the nport from the domain's lookup table */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free,
+ NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_debug(nport->efc,
+ "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyways */
+ efc_nport_free(nport);
+ }
+ } else {
+ /* sm: node list is not empty / shutdown nodes */
+ efc_sm_transition(ctx,
+ __efc_nport_wait_shutdown, NULL);
+ efc_nport_shutdown(nport);
+ }
+ break;
+ default:
+ efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
+ nport->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ /* the physical nport is attached */
+ case EFC_EVT_NPORT_ATTACH_OK:
+ WARN_ON(nport != domain->nport);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+
+ case EFC_EVT_NPORT_ALLOC_OK:
+ /* ignore */
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ __be64 be_wwpn = cpu_to_be64(nport->wwpn);
+
+ if (nport->wwpn == 0)
+ efc_log_debug(efc, "vport: letting f/w select WWN\n");
+
+ if (nport->fc_id != U32_MAX) {
+ efc_log_debug(efc, "vport: hard coding port id: %x\n",
+ nport->fc_id);
+ }
+
+ efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
+		/* If wwpn is zero, then we'll let the f/w assign the wwpn */
+ if (efc_cmd_nport_alloc(efc, nport, nport->domain,
+ nport->wwpn == 0 ? NULL :
+ (uint8_t *)&be_wwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ break;
+ }
+
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ if (nport->wwnn == 0) {
+ nport->wwnn = be64_to_cpu(nport->sli_wwnn);
+ nport->wwpn = be64_to_cpu(nport->sli_wwpn);
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ }
+
+ /* Update the nport's service parameters */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * if nport->fc_id is uninitialized,
+ * then request that the fabric node use FDISC
+ * to find an fc_id.
+ * Otherwise we're restoring vports, or we're in
+ * fabric emulation mode, so attach the fc_id
+ */
+ if (nport->fc_id == U32_MAX) {
+ struct efc_node *fabric;
+
+ fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
+ false);
+ if (!fabric) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return;
+ }
+ efc_node_transition(fabric, __efc_vport_fabric_init,
+ NULL);
+ } else {
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ efc_nport_attach(nport, nport->fc_id);
+ }
+ efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+	/*
+	 * This state is entered after the nport is allocated; it then
+	 * waits for a fabric node FDISC to complete, which requests a
+	 * nport attach. The nport attach completion is handled in this
+	 * state.
+	 */
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ struct efc_node *node;
+
+ /* Find our fabric node, and forward this event */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (!node) {
+ efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
+ break;
+ }
+ /* sm: / forward nport attach to fabric node */
+ efc_node_post_event(node, evt, NULL);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_vport_update_spec(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ vport->wwnn = nport->wwnn;
+ vport->wwpn = nport->wwpn;
+ vport->tgt_data = nport->tgt_data;
+ vport->ini_data = nport->ini_data;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ struct efc_node *node;
+ unsigned long index;
+
+ efc_log_debug(efc,
+ "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ xa_for_each(&nport->lookup, index, node)
+ efc_node_update_display_name(node);
+
+ efc->tt.new_nport(efc, nport);
+
+		/*
+		 * Update the vport parameters
+		 * (if it's not the physical nport)
+		 */
+ if (nport->is_vport)
+ efc_vport_update_spec(nport);
+ break;
+ }
+
+ case EFC_EVT_EXIT:
+ efc_log_debug(efc,
+			      "[%s] NPORT detached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ efc->tt.del_nport(efc, nport);
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK:
+ case EFC_EVT_NPORT_ALLOC_FAIL:
+ case EFC_EVT_NPORT_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_FAIL:
+ /* ignore these events - just wait for the all free event */
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+		/*
+		 * Remove the nport from the domain's lookup table
+		 */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_err(nport->efc, "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyways */
+ efc_nport_free(nport);
+ }
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK:
+ /* Ignore as we are waiting for the free CB */
+ break;
+ case EFC_EVT_NPORT_FREE_OK: {
+ /* All done, free myself */
+ efc_nport_free(nport);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
+{
+ struct efc_nport *nport;
+
+ lockdep_assert_held(&domain->efc->lock);
+
+ nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
+ vport->enable_ini, vport->enable_tgt);
+ vport->nport = nport;
+ if (!nport)
+ return -EIO;
+
+ kref_get(&nport->ref);
+ nport->is_vport = true;
+ nport->tgt_data = vport->tgt_data;
+ nport->ini_data = vport->ini_data;
+
+ efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);
+
+ return 0;
+}
+
+int
+efc_vport_start(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ /* Use the vport spec to find the associated vports and start them */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (!vport->nport) {
+ if (efc_vport_nport_alloc(domain, vport))
+ rc = -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool ini, bool tgt, void *tgt_data,
+ void *ini_data)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (ini && domain->efc->enable_ini == 0) {
+ efc_log_debug(efc, "driver initiator mode not enabled\n");
+ return -EIO;
+ }
+
+ if (tgt && domain->efc->enable_tgt == 0) {
+ efc_log_debug(efc, "driver target mode not enabled\n");
+ return -EIO;
+ }
+
+	/*
+	 * Create a vport spec so that we can recreate
+	 * this vport after a link-up event
+	 */
+ vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
+ tgt_data, ini_data);
+ if (!vport) {
+ efc_log_err(efc, "failed to create vport object entry\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ rc = efc_vport_nport_alloc(domain, vport);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return rc;
+}
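+
+/*
+ * Usage sketch (illustrative; the WWN values are placeholders): a backend
+ * creating an NPIV port with explicit WWNs and an FDISC-assigned fc_id
+ * might call:
+ *
+ *	rc = efc_nport_vport_new(domain, 0x10000090fa000001ULL,
+ *				 0x20000090fa000001ULL, U32_MAX,
+ *				 true, false, NULL, NULL);
+ */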
+
+int
+efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, uint64_t wwnn)
+{
+ struct efc_nport *nport;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ /* walk the efc_vport_list and remove from there */
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ if (!domain) {
+ /* No domain means no nport to look for */
+ return 0;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+ kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return 0;
+}
+
+void
+efc_vport_del_all(struct efc *efc)
+{
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
+ u32 fc_id, bool enable_ini,
+ bool enable_tgt, void *tgt_data, void *ini_data)
+{
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+	/*
+	 * Walk the efc_vport_list and return failure if a valid vport
+	 * entry (one with a non-zero WWPN and WWNN) has already been
+	 * created.
+	 */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if ((wwpn && vport->wwpn == wwpn) &&
+ (wwnn && vport->wwnn == wwnn)) {
+ efc_log_err(efc,
+ "VPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+ }
+
+ vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
+ if (!vport) {
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+
+ vport->wwnn = wwnn;
+ vport->wwpn = wwpn;
+ vport->fc_id = fc_id;
+ vport->enable_tgt = enable_tgt;
+ vport->enable_ini = enable_ini;
+ vport->tgt_data = tgt_data;
+ vport->ini_data = ini_data;
+
+ INIT_LIST_HEAD(&vport->list_entry);
+ list_add_tail(&vport->list_entry, &efc->vport_list);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return vport;
+}
diff --git a/drivers/scsi/elx/libefc/efc_nport.h b/drivers/scsi/elx/libefc/efc_nport.h
new file mode 100644
index 000000000..b575ea205
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * EFC FC port (NPORT) exported declarations
+ */
+
+#ifndef __EFC_NPORT_H__
+#define __EFC_NPORT_H__
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id);
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt);
+void
+efc_nport_free(struct efc_nport *nport);
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id);
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_vport_start(struct efc_domain *domain);
+
+#endif /* __EFC_NPORT_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_sm.c b/drivers/scsi/elx/libefc/efc_sm.c
new file mode 100644
index 000000000..afd963782
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Generic state machine framework.
+ */
+#include "efc.h"
+#include "efc_sm.h"
+
+/**
+ * efc_sm_post_event() - Post an event to a context.
+ *
+ * @ctx: State machine context
+ * @evt: Event to post
+ * @data: Event-specific data (if any)
+ */
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data)
+{
+ if (!ctx->current_state)
+ return -EIO;
+
+ ctx->current_state(ctx, evt, data);
+ return 0;
+}
+
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+
+{
+ if (ctx->current_state == state) {
+ efc_sm_post_event(ctx, EFC_EVT_REENTER, data);
+ } else {
+ efc_sm_post_event(ctx, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_sm_post_event(ctx, EFC_EVT_ENTER, data);
+ }
+}
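+
+/*
+ * Usage sketch (illustrative, with a hypothetical my_state handler): a
+ * context is bound to its owning object via ctx->app, then driven through
+ * transitions and posted events:
+ *
+ *	ctx->app = my_object;
+ *	efc_sm_transition(ctx, my_state, NULL);	// posts EXIT, then ENTER
+ *	efc_sm_post_event(ctx, EFC_EVT_SHUTDOWN, NULL);
+ */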
+
+static char *event_name[] = EFC_SM_EVENT_NAME;
+
+const char *efc_sm_event_name(enum efc_sm_event evt)
+{
+ if (evt > EFC_EVT_LAST)
+ return "unknown";
+
+ return event_name[evt];
+}
diff --git a/drivers/scsi/elx/libefc/efc_sm.h b/drivers/scsi/elx/libefc/efc_sm.h
new file mode 100644
index 000000000..e26867b4d
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/**
+ * Generic state machine framework declarations.
+ */
+
+#ifndef _EFC_SM_H
+#define _EFC_SM_H
+
+struct efc_sm_ctx;
+
+/* State Machine events */
+enum efc_sm_event {
+ /* Common Events */
+ EFC_EVT_ENTER,
+ EFC_EVT_REENTER,
+ EFC_EVT_EXIT,
+ EFC_EVT_SHUTDOWN,
+ EFC_EVT_ALL_CHILD_NODES_FREE,
+ EFC_EVT_RESUME,
+ EFC_EVT_TIMER_EXPIRED,
+
+ /* Domain Events */
+ EFC_EVT_RESPONSE,
+ EFC_EVT_ERROR,
+
+ EFC_EVT_DOMAIN_FOUND,
+ EFC_EVT_DOMAIN_ALLOC_OK,
+ EFC_EVT_DOMAIN_ALLOC_FAIL,
+ EFC_EVT_DOMAIN_REQ_ATTACH,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ EFC_EVT_DOMAIN_ATTACH_FAIL,
+ EFC_EVT_DOMAIN_LOST,
+ EFC_EVT_DOMAIN_FREE_OK,
+ EFC_EVT_DOMAIN_FREE_FAIL,
+ EFC_EVT_HW_DOMAIN_REQ_ATTACH,
+ EFC_EVT_HW_DOMAIN_REQ_FREE,
+
+ /* Sport Events */
+ EFC_EVT_NPORT_ALLOC_OK,
+ EFC_EVT_NPORT_ALLOC_FAIL,
+ EFC_EVT_NPORT_ATTACH_OK,
+ EFC_EVT_NPORT_ATTACH_FAIL,
+ EFC_EVT_NPORT_FREE_OK,
+ EFC_EVT_NPORT_FREE_FAIL,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ EFC_EVT_HW_PORT_ALLOC_OK,
+ EFC_EVT_HW_PORT_ALLOC_FAIL,
+ EFC_EVT_HW_PORT_ATTACH_OK,
+ EFC_EVT_HW_PORT_REQ_ATTACH,
+ EFC_EVT_HW_PORT_REQ_FREE,
+ EFC_EVT_HW_PORT_FREE_OK,
+
+ /* Login Events */
+ EFC_EVT_SRRS_ELS_REQ_OK,
+ EFC_EVT_SRRS_ELS_CMPL_OK,
+ EFC_EVT_SRRS_ELS_REQ_FAIL,
+ EFC_EVT_SRRS_ELS_CMPL_FAIL,
+ EFC_EVT_SRRS_ELS_REQ_RJT,
+ EFC_EVT_NODE_ATTACH_OK,
+ EFC_EVT_NODE_ATTACH_FAIL,
+ EFC_EVT_NODE_FREE_OK,
+ EFC_EVT_NODE_FREE_FAIL,
+ EFC_EVT_ELS_FRAME,
+ EFC_EVT_ELS_REQ_TIMEOUT,
+ EFC_EVT_ELS_REQ_ABORTED,
+ /* request an ELS IO be aborted */
+ EFC_EVT_ABORT_ELS,
+ /* ELS abort process complete */
+ EFC_EVT_ELS_ABORT_CMPL,
+
+ EFC_EVT_ABTS_RCVD,
+
+ /* node is not in the GID_PT payload */
+ EFC_EVT_NODE_MISSING,
+ /* node is allocated and in the GID_PT payload */
+ EFC_EVT_NODE_REFOUND,
+ /* node shutting down due to PLOGI recvd (implicit logo) */
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ /* node shutting down due to LOGO recvd/sent (explicit logo) */
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+
+ EFC_EVT_PLOGI_RCVD,
+ EFC_EVT_FLOGI_RCVD,
+ EFC_EVT_LOGO_RCVD,
+ EFC_EVT_PRLI_RCVD,
+ EFC_EVT_PRLO_RCVD,
+ EFC_EVT_PDISC_RCVD,
+ EFC_EVT_FDISC_RCVD,
+ EFC_EVT_ADISC_RCVD,
+ EFC_EVT_RSCN_RCVD,
+ EFC_EVT_SCR_RCVD,
+ EFC_EVT_ELS_RCVD,
+
+ EFC_EVT_FCP_CMD_RCVD,
+
+ EFC_EVT_GIDPT_DELAY_EXPIRED,
+
+ /* SCSI Target Server events */
+ EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
+ EFC_EVT_NODE_DEL_INI_COMPLETE,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE,
+ EFC_EVT_NODE_SESS_REG_OK,
+ EFC_EVT_NODE_SESS_REG_FAIL,
+
+ /* Must be last */
+ EFC_EVT_LAST
+};
+
+/* State Machine event name lookup array */
+#define EFC_SM_EVENT_NAME { \
+ [EFC_EVT_ENTER] = "EFC_EVT_ENTER", \
+ [EFC_EVT_REENTER] = "EFC_EVT_REENTER", \
+ [EFC_EVT_EXIT] = "EFC_EVT_EXIT", \
+ [EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \
+ [EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\
+ [EFC_EVT_RESUME] = "EFC_EVT_RESUME", \
+ [EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \
+ [EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \
+ [EFC_EVT_ERROR] = "EFC_EVT_ERROR", \
+ [EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \
+ [EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \
+ [EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \
+ [EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \
+ [EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \
+ [EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \
+ [EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \
+ [EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \
+ [EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \
+ [EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\
+ [EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \
+ [EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \
+ [EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \
+ [EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \
+ [EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \
+ [EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \
+ [EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \
+ [EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\
+ [EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \
+ [EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \
+ [EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \
+ [EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \
+ [EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \
+ [EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \
+ [EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \
+ [EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \
+ [EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \
+ [EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \
+ [EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \
+ [EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \
+ [EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \
+ [EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \
+ [EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \
+ [EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \
+ [EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \
+ [EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \
+ [EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \
+ [EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \
+ [EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \
+ [EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\
+ [EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\
+ [EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \
+ [EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \
+ [EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \
+ [EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \
+ [EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \
+ [EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \
+ [EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \
+ [EFC_EVT_ADISC_RCVD] = "EFC_EVT_ADISC_RCVD", \
+ [EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \
+ [EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \
+ [EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \
+	[EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD",	\
+	[EFC_EVT_GIDPT_DELAY_EXPIRED] = "EFC_EVT_GIDPT_DELAY_EXPIRED",\
+	[EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY] = "EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY",\
+	[EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\
+	[EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\
+	[EFC_EVT_NODE_SESS_REG_OK] = "EFC_EVT_NODE_SESS_REG_OK",	\
+	[EFC_EVT_NODE_SESS_REG_FAIL] = "EFC_EVT_NODE_SESS_REG_FAIL",\
+	[EFC_EVT_LAST] = "EFC_EVT_LAST",			\
+}
+
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data);
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg),
+ void *data);
+void efc_sm_disable(struct efc_sm_ctx *ctx);
+const char *efc_sm_event_name(enum efc_sm_event evt);
+
+#endif /* ! _EFC_SM_H */
diff --git a/drivers/scsi/elx/libefc/efclib.c b/drivers/scsi/elx/libefc/efclib.c
new file mode 100644
index 000000000..dd3e3d0a4
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * LIBEFC LOCKING
+ *
+ * The critical sections protected by the efc's spinlock are quite broad and
+ * may be improved upon in the future. The libefc code and its locking don't
+ * influence the I/O path, so excessive locking doesn't impact I/O performance.
+ *
+ * The strategy is to lock whenever processing a request from the base driver.
+ * This means that the entry points into the libefc library are protected by
+ * the efc lock, so all the state machine transitions are protected.
+ */
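+
+/*
+ * Entry-point pattern (illustrative, mirroring efc_nport_cb() in
+ * efc_nport.c): callbacks from the base driver take the efc lock before
+ * posting into a state machine:
+ *
+ *	spin_lock_irqsave(&efc->lock, flags);
+ *	efc_sm_post_event(&nport->sm, event, NULL);
+ *	spin_unlock_irqrestore(&efc->lock, flags);
+ */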
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "efc.h"
+
+int efcport_init(struct efc *efc)
+{
+ u32 rc = 0;
+
+ spin_lock_init(&efc->lock);
+ INIT_LIST_HEAD(&efc->vport_list);
+ efc->hold_frames = false;
+ spin_lock_init(&efc->pend_frames_lock);
+ INIT_LIST_HEAD(&efc->pend_frames);
+
+ /* Create Node pool */
+ efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES,
+ sizeof(struct efc_node));
+ if (!efc->node_pool) {
+ efc_log_err(efc, "Can't allocate node pool\n");
+ return -ENOMEM;
+ }
+
+ efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev,
+ NODE_SPARAMS_SIZE, 0, 0);
+ if (!efc->node_dma_pool) {
+ efc_log_err(efc, "Can't allocate node dma pool\n");
+ mempool_destroy(efc->node_pool);
+ return -ENOMEM;
+ }
+
+	efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ,
+					sizeof(struct efc_els_io_req));
+	if (!efc->els_io_pool) {
+		efc_log_err(efc, "Can't allocate els io pool\n");
+		/* unwind the earlier pool allocations on this error path */
+		dma_pool_destroy(efc->node_dma_pool);
+		mempool_destroy(efc->node_pool);
+		return -ENOMEM;
+	}
+
+ return rc;
+}
+
+static void
+efc_purge_pending(struct efc *efc)
+{
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+}
+
+void efcport_destroy(struct efc *efc)
+{
+ efc_purge_pending(efc);
+ mempool_destroy(efc->els_io_pool);
+ mempool_destroy(efc->node_pool);
+ dma_pool_destroy(efc->node_dma_pool);
+}
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
new file mode 100644
index 000000000..57e338612
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -0,0 +1,623 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCLIB_H__
+#define __EFCLIB_H__
+
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+#include "scsi/fc/fc_gs.h"
+#include "scsi/fc_frame.h"
+#include "../include/efc_common.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_SERVICE_PARMS_LENGTH 120
+#define EFC_NAME_LENGTH 32
+#define EFC_SM_NAME_LENGTH 64
+#define EFC_DISPLAY_BUS_INFO_LENGTH 16
+
+#define EFC_WWN_LENGTH 32
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+/* Timeouts */
+#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT 0
+#define EFC_FC_FLOGI_TIMEOUT_SEC 5
+#define EFC_SHUTDOWN_TIMEOUT_USEC 30000000
+
+/* Return values for calls from base driver to libefc */
+#define EFC_SCSI_CALL_COMPLETE 0
+#define EFC_SCSI_CALL_ASYNC 1
+
+/* Local port topology */
+enum efc_nport_topology {
+ EFC_NPORT_TOPO_UNKNOWN = 0,
+ EFC_NPORT_TOPO_FABRIC,
+ EFC_NPORT_TOPO_P2P,
+ EFC_NPORT_TOPO_FC_AL,
+};
+
+#define enable_target_rscn(efc) 1
+
+enum efc_node_shutd_rsn {
+ EFC_NODE_SHUTDOWN_DEFAULT = 0,
+ EFC_NODE_SHUTDOWN_EXPLICIT_LOGO,
+ EFC_NODE_SHUTDOWN_IMPLICIT_LOGO,
+};
+
+enum efc_node_send_ls_acc {
+ EFC_NODE_SEND_LS_ACC_NONE = 0,
+ EFC_NODE_SEND_LS_ACC_PLOGI,
+ EFC_NODE_SEND_LS_ACC_PRLI,
+};
+
+#define EFC_LINK_STATUS_UP 0
+#define EFC_LINK_STATUS_DOWN 1
+
+enum efc_sm_event;
+
+/* State machine context header */
+struct efc_sm_ctx {
+ void (*current_state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+ const char *description;
+ void *app;
+};
+
+/* Description of discovered Fabric Domain */
+struct efc_domain_record {
+ u32 index;
+ u32 priority;
+ u8 address[6];
+ u8 wwn[8];
+ union {
+ u8 vlan[512];
+ u8 loop[128];
+ } map;
+ u32 speed;
+ u32 fc_id;
+ bool is_loop;
+ bool is_nport;
+};
+
+/* Domain events */
+enum efc_hw_domain_event {
+ EFC_HW_DOMAIN_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_FAIL,
+ EFC_HW_DOMAIN_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_FAIL,
+ EFC_HW_DOMAIN_FREE_OK,
+ EFC_HW_DOMAIN_FREE_FAIL,
+ EFC_HW_DOMAIN_LOST,
+ EFC_HW_DOMAIN_FOUND,
+ EFC_HW_DOMAIN_CHANGED,
+};
+
+/**
+ * Fibre Channel port object
+ *
+ * @list_entry: nport list entry
+ * @ref: reference count, each node takes a reference
+ * @release: function to free nport object
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: port display name
+ * @is_vport: Is NPIV port
+ * @free_req_pending: pending request to free resources
+ * @attached: mark attached if reg VPI succeeds
+ * @attaching: set while the nport attach (reg VPI) is in progress
+ * @p2p_winner: TRUE if we're the point-to-point winner
+ * @domain: pointer back to domain
+ * @wwpn: port wwpn
+ * @wwnn: port wwnn
+ * @tgt_data: target backend private port data
+ * @ini_data: initiator backend private port data
+ * @indicator: VPI
+ * @fc_id: port FC address
+ * @dma: memory for Service Parameters
+ * @wwnn_str: wwnn string
+ * @sli_wwpn: SLI provided wwpn
+ * @sli_wwnn: SLI provided wwnn
+ * @sm: nport state machine context
+ * @lookup: fc_id to node lookup object
+ * @enable_ini: SCSI initiator enabled for this port
+ * @enable_tgt: SCSI target enabled for this port
+ * @enable_rscn: port will be expecting RSCN
+ * @shutting_down: nport in process of shutting down
+ * @p2p_port_id: our port id for point-to-point
+ * @topology: port topology: fabric/p2p/unknown
+ * @service_params: login parameters
+ * @p2p_remote_port_id: remote node's port id for point-to-point
+ */
+struct efc_nport {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc *efc;
+ u32 instance_index;
+ char display_name[EFC_NAME_LENGTH];
+ bool is_vport;
+ bool free_req_pending;
+ bool attached;
+ bool attaching;
+ bool p2p_winner;
+ struct efc_domain *domain;
+ u64 wwpn;
+ u64 wwnn;
+ void *tgt_data;
+ void *ini_data;
+
+ u32 indicator;
+ u32 fc_id;
+ struct efc_dma dma;
+
+ u8 wwnn_str[EFC_WWN_LENGTH];
+ __be64 sli_wwpn;
+ __be64 sli_wwnn;
+
+ struct efc_sm_ctx sm;
+ struct xarray lookup;
+ bool enable_ini;
+ bool enable_tgt;
+ bool enable_rscn;
+ bool shutting_down;
+ u32 p2p_port_id;
+ enum efc_nport_topology topology;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u32 p2p_remote_port_id;
+};
+
+/**
+ * Fibre Channel domain object
+ *
+ * This object is a container for the various SLI components needed
+ * to connect to the domain of an FC or FCoE switch.
+ *
+ * @efc: pointer back to efc
+ * @display_name: domain display name
+ * @nport_list: linked list of nports associated with this domain
+ * @ref: Reference count, each nport takes a reference
+ * @release: Function to free domain object
+ * @ini_domain: initiator backend private domain data
+ * @tgt_domain: target backend private domain data
+ * @fcf: FC Forwarder table index
+ * @fcf_indicator: FCFI
+ * @indicator: VFI
+ * @nport_count: Number of nports allocated
+ * @dma: memory for Service Parameters
+ * @fcf_wwn: WWN for FCF/switch
+ * @drvsm: driver domain sm context
+ * @attached: set true after attach completes
+ * @is_fc: is FC
+ * @is_loop: is loop topology
+ * @is_nlport: is public loop
+ * @domain_found_pending: a domain-found event is pending; drec is updated
+ * @req_domain_free: true if the domain object should be freed
+ * @req_accept_frames: set in domain state machine to enable frames
+ * @domain_notify_pend: set in domain SM to avoid duplicate node event post
+ * @pending_drec: pending drec if a domain found is pending
+ * @service_params: any nport's service parameters
+ * @flogi_service_params: Fabric/P2P service parameters from FLOGI
+ * @lookup: d_id to node lookup object
+ * @nport: Pointer to first (physical) SLI port
+ */
+struct efc_domain {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct list_head nport_list;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ void *ini_domain;
+ void *tgt_domain;
+
+ /* Declarations private to HW/SLI */
+ u32 fcf;
+ u32 fcf_indicator;
+ u32 indicator;
+ u32 nport_count;
+ struct efc_dma dma;
+
+ /* Declarations private to FC transport */
+ u64 fcf_wwn;
+ struct efc_sm_ctx drvsm;
+ bool attached;
+ bool is_fc;
+ bool is_loop;
+ bool is_nlport;
+ bool domain_found_pending;
+ bool req_domain_free;
+ bool req_accept_frames;
+ bool domain_notify_pend;
+
+ struct efc_domain_record pending_drec;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u8 flogi_service_params[EFC_SERVICE_PARMS_LENGTH];
+
+ struct xarray lookup;
+
+ struct efc_nport *nport;
+};
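+
+/*
+ * Illustrative sketch (not part of this patch): @lookup maps a 24-bit d_id
+ * to its node, so frame dispatch can resolve a destination with a single
+ * xarray load; a NULL result means no login session exists for that d_id:
+ *
+ *	struct efc_node *node = xa_load(&domain->lookup, d_id);
+ */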
+
+/**
+ * Remote Node object
+ *
+ * This object represents a connection between the SLI port and another
+ * Nx_Port on the fabric. Note that this can be either a well-known port such
+ * as an F_Port (e.g. ff:ff:fe) or another N_Port.
+ *
+ * @indicator: RPI
+ * @fc_id: FC address
+ * @attached: true if attached
+ * @nport: associated SLI port
+ * @node: associated node
+ */
+struct efc_remote_node {
+ u32 indicator;
+ u32 index;
+ u32 fc_id;
+
+ bool attached;
+
+ struct efc_nport *nport;
+ void *node;
+};
+
+/**
+ * FC Node object
+ *
+ * @efc: pointer back to efc structure
+ * @display_name: Node display name
+ * @nport: associated nport pointer
+ * @ref: reference count
+ * @release: function to free node object
+ * @hold_frames: hold incoming frames if true
+ * @els_io_enabled: enable allocating ELS IOs for this node
+ * @els_ios_lock: lock to protect the els ios list
+ * @els_ios_list: ELS IOs for this node
+ * @ini_node: backend initiator private node data
+ * @tgt_node: backend target private node data
+ * @rnode: Remote node
+ * @sm: state machine context
+ * @evtdepth: current event posting nesting depth
+ * @req_free: this node is to be freed
+ * @attached: node is attached (REGLOGIN complete)
+ * @fcp_enabled: node is enabled to handle FCP
+ * @rscn_pending: an RSCN is pending for the name server node
+ * @send_plogi: send PLOGI accept, upon completion of node attach
+ * @send_plogi_acc: TRUE if io_alloc() is enabled.
+ * @send_ls_acc: type of LS acc to send
+ * @ls_acc_io: SCSI IO for LS acc
+ * @ls_acc_oxid: OX_ID for pending accept
+ * @ls_acc_did: D_ID for pending accept
+ * @shutdown_reason: reason for node shutdown
+ * @sparm_dma_buf: service parameters buffer
+ * @service_params: plogi/acc frame from remote device
+ * @pend_frames_lock: lock for inbound pending frames list
+ * @pend_frames: inbound pending frames list
+ * @pend_frames_processed: count of frames processed in hold frames interval
+ * @ox_id_in_use: used to verify one-at-a-time use of ox_id
+ * @els_retries_remaining: for ELS, number of retries remaining
+ * @els_req_cnt: number of outstanding ELS requests
+ * @els_cmpl_cnt: number of outstanding ELS completions
+ * @abort_cnt: abort counter for debugging purposes
+ * @current_state_name: current node state
+ * @prev_state_name: previous node state
+ * @current_evt: current event
+ * @prev_evt: previous event
+ * @targ: node is target capable
+ * @init: node is init capable
+ * @refound: Handle node refound case when node is being deleted
+ * @nodedb_state: Node debugging, saved state
+ * @gidpt_delay_timer: GIDPT delay timer
+ * @time_last_gidpt_msec: start time of last target RSCN GIDPT
+ * @wwnn: remote port WWNN
+ * @wwpn: remote port WWPN
+ */
+struct efc_node {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct efc_nport *nport;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ bool hold_frames;
+ bool els_io_enabled;
+ bool send_plogi_acc;
+ bool send_plogi;
+ bool rscn_pending;
+ bool fcp_enabled;
+ bool attached;
+ bool req_free;
+
+ spinlock_t els_ios_lock;
+ struct list_head els_ios_list;
+ void *ini_node;
+ void *tgt_node;
+
+ struct efc_remote_node rnode;
+ /* Declarations private to FC transport */
+ struct efc_sm_ctx sm;
+ u32 evtdepth;
+
+ enum efc_node_send_ls_acc send_ls_acc;
+ void *ls_acc_io;
+ u32 ls_acc_oxid;
+ u32 ls_acc_did;
+ enum efc_node_shutd_rsn shutdown_reason;
+ bool targ;
+ bool init;
+ bool refound;
+ struct efc_dma sparm_dma_buf;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ u32 pend_frames_processed;
+ u32 ox_id_in_use;
+ u32 els_retries_remaining;
+ u32 els_req_cnt;
+ u32 els_cmpl_cnt;
+ u32 abort_cnt;
+
+ char current_state_name[EFC_SM_NAME_LENGTH];
+ char prev_state_name[EFC_SM_NAME_LENGTH];
+ int current_evt;
+ int prev_evt;
+
+ void (*nodedb_state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+ struct timer_list gidpt_delay_timer;
+ u64 time_last_gidpt_msec;
+
+ char wwnn[EFC_WWN_LENGTH];
+ char wwpn[EFC_WWN_LENGTH];
+};
+
+/**
+ * NPIV port
+ *
+ * Collection of the information required to restore a virtual port across
+ * link events
+ * @wwnn: node name
+ * @wwpn: port name
+ * @fc_id: port id
+ * @tgt_data: target backend pointer
+ * @ini_data: initiator backend pointer
+ * @nport: used to match record after attaching for update
+ */
+struct efc_vport {
+ struct list_head list_entry;
+ u64 wwnn;
+ u64 wwpn;
+ u32 fc_id;
+ bool enable_tgt;
+ bool enable_ini;
+ void *tgt_data;
+ void *ini_data;
+ struct efc_nport *nport;
+};
+
+#define node_printf(node, fmt, args...) \
+ efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)
+
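+/*
+ * Example (illustrative): node_printf(node, "PLOGI rcvd, oxid %#x\n", oxid)
+ * logs through efc_log_info() prefixed with the node display name.
+ */
+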
+/* Node SM IO Context Callback structure */
+struct efc_node_cb {
+ int status;
+ int ext_status;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ struct efc_dma els_rsp;
+
+ /* Actual length of data received */
+ int rsp_len;
+};
+
+struct efc_hw_rq_buffer {
+ u16 rqindex;
+ struct efc_dma dma;
+};
+
+/**
+ * FC sequence object
+ *
+ * Defines a general FC sequence object
+ *
+ * @list_entry: sequence list entry
+ * @hw: HW that owns this sequence
+ * @fcfi: FCFI associated with sequence
+ * @header: received frame header
+ * @payload: received frame payload
+ * @hw_priv: HW private context
+ */
+struct efc_hw_sequence {
+ struct list_head list_entry;
+ void *hw;
+ u8 fcfi;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ void *hw_priv;
+};
+
+enum efc_disc_io_type {
+ EFC_DISC_IO_ELS_REQ,
+ EFC_DISC_IO_ELS_RESP,
+ EFC_DISC_IO_CT_REQ,
+ EFC_DISC_IO_CT_RESP
+};
+
+struct efc_io_els_params {
+ u32 s_id;
+ u16 ox_id;
+ u8 timeout;
+};
+
+struct efc_io_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+};
+
+union efc_disc_io_param {
+ struct efc_io_els_params els;
+ struct efc_io_ct_params ct;
+};
+
+struct efc_disc_io {
+ struct efc_dma req; /* send buffer */
+ struct efc_dma rsp; /* receive buffer */
+ enum efc_disc_io_type io_type; /* enum efc_disc_io_type value */
+ u16 xmit_len; /* length of the ELS request */
+ u16 rsp_len; /* max length of responses to be received */
+ u32 rpi; /* Registered RPI */
+ u32 vpi; /* VPI for this nport */
+ u32 s_id;
+ u32 d_id;
+ bool rpi_registered; /* if false, use tmp RPI */
+ union efc_disc_io_param iparam;
+};
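+
+/*
+ * Illustrative sketch (not part of this patch) of how an ELS request might
+ * be described before handing it to the send_els backend callback; cmd_size
+ * and rsp_size are hypothetical, and allocation of io and its DMA buffers
+ * is omitted:
+ *
+ *	io->io_type = EFC_DISC_IO_ELS_REQ;
+ *	io->xmit_len = cmd_size;
+ *	io->rsp_len = rsp_size;
+ *	io->rpi = node->rnode.indicator;
+ *	io->vpi = nport->indicator;
+ *	io->s_id = nport->fc_id;
+ *	io->d_id = node->rnode.fc_id;
+ *	io->iparam.els.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+ *	efc->tt.send_els(efc, io);
+ */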
+
+/* Return value indicating the sequence cannot be freed */
+#define EFC_HW_SEQ_HOLD 0
+/* Return value indicating the sequence can be freed */
+#define EFC_HW_SEQ_FREE 1
+
+struct libefc_function_template {
+ /* nport */
+ int (*new_nport)(struct efc *efc, struct efc_nport *sp);
+ void (*del_nport)(struct efc *efc, struct efc_nport *sp);
+
+ /* SCSI node */
+ int (*scsi_new_node)(struct efc *efc, struct efc_node *n);
+ int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason);
+
+ /* issue mailbox request */
+ int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
+ /* send ELS IO */
+ int (*send_els)(struct efc *efc, struct efc_disc_io *io);
+ /* send BLS IO */
+ int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls);
+ /* free HW frame */
+ int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq);
+};
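+
+/*
+ * Illustrative sketch (not part of this patch): a base driver fills in the
+ * template before calling efcport_init(); the my_* callbacks are
+ * hypothetical:
+ *
+ *	efc->tt = (struct libefc_function_template) {
+ *		.new_nport = my_new_nport,
+ *		.del_nport = my_del_nport,
+ *		.scsi_new_node = my_scsi_new_node,
+ *		.scsi_del_node = my_scsi_del_node,
+ *		.issue_mbox_rqst = my_issue_mbox_rqst,
+ *		.send_els = my_send_els,
+ *		.send_bls = my_send_bls,
+ *		.hw_seq_free = my_hw_seq_free,
+ *	};
+ *
+ *	if (efcport_init(efc))
+ *		goto fail;
+ */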
+
+#define EFC_LOG_LIB 0x01
+#define EFC_LOG_NODE 0x02
+#define EFC_LOG_PORT 0x04
+#define EFC_LOG_DOMAIN 0x08
+#define EFC_LOG_ELS 0x10
+#define EFC_LOG_DOMAIN_SM 0x20
+#define EFC_LOG_SM 0x40
+
+/* efc library port structure */
+struct efc {
+ void *base;
+ struct pci_dev *pci;
+ struct sli4 *sli;
+ u32 fcfi;
+ u64 req_wwpn;
+ u64 req_wwnn;
+
+ u64 def_wwpn;
+ u64 def_wwnn;
+ u64 max_xfer_size;
+ mempool_t *node_pool;
+ struct dma_pool *node_dma_pool;
+ u32 nodes_count;
+
+ u32 link_status;
+
+ struct list_head vport_list;
+ /* lock to protect the vport list */
+ spinlock_t vport_lock;
+
+ struct libefc_function_template tt;
+ /* lock to protect the discovery library.
+ * Refer to efclib.c for more details.
+ */
+ spinlock_t lock;
+
+ bool enable_ini;
+ bool enable_tgt;
+
+ u32 log_level;
+
+ struct efc_domain *domain;
+ void (*domain_free_cb)(struct efc *efc, void *arg);
+ void *domain_free_cb_arg;
+
+ u64 tgt_rscn_delay_msec;
+ u64 tgt_rscn_period_msec;
+
+ bool external_loopback;
+ u32 nodedb_mask;
+ u32 logmask;
+ mempool_t *els_io_pool;
+ atomic_t els_io_alloc_failed_count;
+
+ /* hold pending frames */
+ bool hold_frames;
+ /* lock to protect pending frames list access */
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ /* count of pending frames that were processed */
+ u32 pend_frames_processed;
+};
+
+/*
+ * EFC library registration
+ * **********************************/
+int efcport_init(struct efc *efc);
+void efcport_destroy(struct efc *efc);
+/*
+ * EFC Domain
+ * **********************************/
+int efc_domain_cb(void *arg, int event, void *data);
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg);
+
+/*
+ * EFC nport
+ * **********************************/
+void efc_nport_cb(void *arg, int event, void *data);
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id,
+ bool enable_ini, bool enable_tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn,
+ u64 wwnn, u32 fc_id, bool ini, bool tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, u64 wwnn);
+
+void efc_vport_del_all(struct efc *efc);
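+
+/*
+ * Illustrative sketch (not part of this patch): creating an NPIV port on the
+ * current domain; passing U32_MAX as fc_id is assumed to request a
+ * fabric-assigned address, and the vport spec recorded internally lets the
+ * port be restored across link events:
+ *
+ *	rc = efc_nport_vport_new(efc->domain, wwpn, wwnn, U32_MAX,
+ *				 false, true, NULL, NULL);
+ */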
+
+/*
+ * EFC Node
+ * **********************************/
+int efc_remote_node_cb(void *arg, int event, void *data);
+void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len);
+void efc_node_post_shutdown(struct efc_node *node, void *arg);
+u64 efc_node_get_wwpn(struct efc_node *node);
+
+/*
+ * EFC FCP/ELS/CT interface
+ * **********************************/
+void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq);
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status);
+
+/*
+ * EFC SCSI INTERACTION LAYER
+ * **********************************/
+void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
+void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);
+
+#endif /* __EFCLIB_H__ */