Diffstat (limited to 'drivers/scsi/elx/efct')
-rw-r--r--   drivers/scsi/elx/efct/efct_driver.c      782
-rw-r--r--   drivers/scsi/elx/efct/efct_driver.h      108
-rw-r--r--   drivers/scsi/elx/efct/efct_hw.c         3580
-rw-r--r--   drivers/scsi/elx/efct/efct_hw.h          764
-rw-r--r--   drivers/scsi/elx/efct/efct_hw_queues.c   677
-rw-r--r--   drivers/scsi/elx/efct/efct_io.c          190
-rw-r--r--   drivers/scsi/elx/efct/efct_io.h          174
-rw-r--r--   drivers/scsi/elx/efct/efct_lio.c        1695
-rw-r--r--   drivers/scsi/elx/efct/efct_lio.h         189
-rw-r--r--   drivers/scsi/elx/efct/efct_scsi.c       1157
-rw-r--r--   drivers/scsi/elx/efct/efct_scsi.h        203
-rw-r--r--   drivers/scsi/elx/efct/efct_unsol.c       492
-rw-r--r--   drivers/scsi/elx/efct/efct_unsol.h        17
-rw-r--r--   drivers/scsi/elx/efct/efct_xport.c      1111
-rw-r--r--   drivers/scsi/elx/efct/efct_xport.h       186
15 files changed, 11325 insertions, 0 deletions
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
new file mode 100644
index 000000000..49fd2cfed
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+
+#include "efct_hw.h"
+#include "efct_unsol.h"
+#include "efct_scsi.h"
+
+LIST_HEAD(efct_devices);
+
+static int logmask;
+module_param(logmask, int, 0444);
+MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
+
+static struct libefc_function_template efct_libefc_templ = {
+ .issue_mbox_rqst = efct_issue_mbox_rqst,
+ .send_els = efct_els_hw_srrs_send,
+ .send_bls = efct_efc_bls_send,
+
+ .new_nport = efct_scsi_tgt_new_nport,
+ .del_nport = efct_scsi_tgt_del_nport,
+ .scsi_new_node = efct_scsi_new_initiator,
+ .scsi_del_node = efct_scsi_del_initiator,
+ .hw_seq_free = efct_efc_hw_sequence_free,
+};
+
+static int
+efct_device_init(void)
+{
+ int rc;
+
+ /* driver-wide init for target-server */
+ rc = efct_scsi_tgt_driver_init();
+ if (rc) {
+ pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = efct_scsi_reg_fc_transport();
+ if (rc) {
+ efct_scsi_tgt_driver_exit();
+ pr_err("failed to register to FC host\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void
+efct_device_shutdown(void)
+{
+ efct_scsi_release_fc_transport();
+
+ efct_scsi_tgt_driver_exit();
+}
+
+static void *
+efct_device_alloc(u32 nid)
+{
+ struct efct *efct = NULL;
+
+ efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
+ if (!efct)
+ return efct;
+
+ INIT_LIST_HEAD(&efct->list_entry);
+ list_add_tail(&efct->list_entry, &efct_devices);
+
+ return efct;
+}
+
+static void
+efct_teardown_msix(struct efct *efct)
+{
+ u32 i;
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+ }
+
+ pci_free_irq_vectors(efct->pci);
+}
+
+static int
+efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
+{
+ struct efc *efc;
+ struct sli4 *sli;
+ int rc = 0;
+
+ efc = kzalloc(sizeof(*efc), GFP_KERNEL);
+ if (!efc)
+ return -ENOMEM;
+
+ efct->efcport = efc;
+
+ memcpy(&efc->tt, tt, sizeof(*tt));
+ efc->base = efct;
+ efc->pci = efct->pci;
+
+ efc->def_wwnn = efct_get_wwnn(&efct->hw);
+ efc->def_wwpn = efct_get_wwpn(&efct->hw);
+ efc->enable_tgt = 1;
+ efc->log_level = EFC_LOG_LIB;
+
+ sli = &efct->hw.sli;
+ efc->max_xfer_size = sli->sge_supported_length *
+ sli_get_max_sgl(&efct->hw.sli);
+ efc->sli = sli;
+ efc->fcfi = efct->hw.fcf_indicator;
+
+ rc = efcport_init(efc);
+ if (rc)
+ efc_log_err(efc, "efcport_init failed\n");
+
+ return rc;
+}
+
+static int efct_request_firmware_update(struct efct *efct);
+
+static const char *
+efct_pci_model(u16 device)
+{
+ switch (device) {
+ case EFCT_DEVICE_LANCER_G6: return "LPE31004";
+ case EFCT_DEVICE_LANCER_G7: return "LPE36000";
+ default: return "unknown";
+ }
+}
+
+static int
+efct_device_attach(struct efct *efct)
+{
+ u32 rc = 0, i = 0;
+
+ if (efct->attached) {
+ efc_log_err(efct, "Device is already attached\n");
+ return -EIO;
+ }
+
+ snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
+ efct->instance_index);
+
+ efct->logmask = logmask;
+ efct->filter_def = EFCT_DEFAULT_FILTER;
+ efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
+
+ efct->model = efct_pci_model(efct->pci->device);
+
+ efct->efct_req_fw_upgrade = true;
+
+ /* Allocate transport object and bring online */
+ efct->xport = efct_xport_alloc(efct);
+ if (!efct->xport) {
+ efc_log_err(efct, "failed to allocate transport object\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = efct_xport_attach(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to attach transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_xport_initialize(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_efclib_config(efct, &efct_libefc_templ);
+ if (rc) {
+ efc_log_err(efct, "failed to init efclib\n");
+ goto efclib_out;
+ }
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d enabled\n", i);
+ enable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ efct->attached = true;
+
+ if (efct->efct_req_fw_upgrade)
+ efct_request_firmware_update(efct);
+
+ return rc;
+
+efclib_out:
+ efct_xport_detach(efct->xport);
+xport_out:
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+out:
+ return rc;
+}
+
+static int
+efct_device_detach(struct efct *efct)
+{
+ int i;
+
+ if (!efct || !efct->attached) {
+ pr_err("Device is not attached\n");
+ return -EIO;
+ }
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
+ efc_log_err(efct, "Transport Shutdown timed out\n");
+
+ for (i = 0; i < efct->n_msix_vec; i++)
+ disable_irq(pci_irq_vector(efct->pci, i));
+
+ efct_xport_detach(efct->xport);
+
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+
+ efcport_destroy(efct->efcport);
+ kfree(efct->efcport);
+
+ efct->attached = false;
+
+ return 0;
+}
+
+static void
+efct_fw_write_cb(int status, u32 actual_write_length,
+ u32 change_status, void *arg)
+{
+ struct efct_fw_write_result *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ complete(&result->done);
+}
+
+static int
+efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
+ u8 *change_status)
+{
+ int rc = 0;
+ u32 bytes_left;
+ u32 xfer_size;
+ u32 offset;
+ struct efc_dma dma;
+ int last = 0;
+ struct efct_fw_write_result result;
+
+ init_completion(&result.done);
+
+ bytes_left = buf_len;
+ offset = 0;
+
+ dma.size = FW_WRITE_BUFSIZE;
+ dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ dma.size, &dma.phys, GFP_KERNEL);
+ if (!dma.virt)
+ return -ENOMEM;
+
+ while (bytes_left > 0) {
+ if (bytes_left > FW_WRITE_BUFSIZE)
+ xfer_size = FW_WRITE_BUFSIZE;
+ else
+ xfer_size = bytes_left;
+
+ memcpy(dma.virt, buf + offset, xfer_size);
+
+ if (bytes_left == xfer_size)
+ last = 1;
+
+ efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
+ last, efct_fw_write_cb, &result);
+
+ if (wait_for_completion_interruptible(&result.done) != 0) {
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0 || result.status != 0) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last)
+ *change_status = result.change_status;
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ }
+
+ dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
+ return rc;
+}
+
+static int
+efct_fw_reset(struct efct *efct)
+{
+ /*
+ * Firmware reset to activate the new firmware.
+ * Function 0 will update and load the new firmware
+ * during attach.
+ */
+ if (timer_pending(&efct->xport->stats_timer))
+ del_timer(&efct->xport->stats_timer);
+
+ if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
+ efc_log_info(efct, "failed to reset firmware\n");
+ return -EIO;
+ }
+
+ efc_log_info(efct, "successfully reset firmware.Now resetting port\n");
+
+ efct_device_detach(efct);
+ return efct_device_attach(efct);
+}
+
+static int
+efct_request_firmware_update(struct efct *efct)
+{
+ int rc = 0;
+ u8 file_name[256], fw_change_status = 0;
+ const struct firmware *fw;
+ struct efct_hw_grp_hdr *fw_image;
+
+ snprintf(file_name, 256, "%s.grp", efct->model);
+
+ rc = request_firmware(&fw, file_name, &efct->pci->dev);
+ if (rc) {
+ efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
+ return rc;
+ }
+
+ fw_image = (struct efct_hw_grp_hdr *)fw->data;
+
+ if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
+ strnlen(fw_image->revision, 16))) {
+ efc_log_debug(efct,
+ "Skip update. Firmware is already up to date.\n");
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
+ efct->hw.sli.fw_name[0], fw_image->revision);
+
+ rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
+ if (rc) {
+ efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware updated successfully\n");
+ switch (fw_change_status) {
+ case 0x00:
+ efc_log_info(efct, "New firmware is active.\n");
+ break;
+ case 0x01:
+ efc_log_info(efct,
+ "System reboot needed to activate the new firmware\n");
+ break;
+ case 0x02:
+ case 0x03:
+ efc_log_info(efct,
+ "firmware reset to activate the new firmware\n");
+ efct_fw_reset(efct);
+ break;
+ default:
+ efc_log_info(efct, "Unexpected value change_status:%d\n",
+ fw_change_status);
+ break;
+ }
+
+exit:
+ release_firmware(fw);
+
+ return rc;
+}
+
+static void
+efct_device_free(struct efct *efct)
+{
+ if (efct) {
+ list_del(&efct->list_entry);
+ kfree(efct);
+ }
+}
+
+static int
+efct_device_interrupts_required(struct efct *efct)
+{
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc < 0)
+ return rc;
+
+ return efct->hw.config.n_eq;
+}
+
+static irqreturn_t
+efct_intr_thread(int irq, void *handle)
+{
+ struct efct_intr_context *intr_ctx = handle;
+ struct efct *efct = intr_ctx->efct;
+
+ efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+efct_intr_msix(int irq, void *handle)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static int
+efct_setup_msix(struct efct *efct, u32 num_intrs)
+{
+ int rc = 0, i;
+
+ if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
+ dev_err(&efct->pci->dev,
+ "%s : MSI-X not available\n", __func__);
+ return -EIO;
+ }
+
+ efct->n_msix_vec = num_intrs;
+
+ rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (rc < 0) {
+ dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < num_intrs; i++) {
+ struct efct_intr_context *intr_ctx = NULL;
+
+ intr_ctx = &efct->intr_context[i];
+ intr_ctx->efct = efct;
+ intr_ctx->index = i;
+
+ rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
+ efct_intr_msix, efct_intr_thread, 0,
+ EFCT_DRIVER_NAME, intr_ctx);
+ if (rc) {
+ dev_err(&efct->pci->dev,
+ "Failed to register %d vector: %d\n", i, rc);
+ goto out;
+ }
+ }
+
+ return rc;
+
+out:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+
+ pci_free_irq_vectors(efct->pci);
+ return rc;
+}
+
+static struct pci_device_id efct_pci_table[] = {
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
+ {} /* terminate list */
+};
+
+static int
+efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efct *efct = NULL;
+ int rc;
+ u32 i, r;
+ int num_interrupts = 0;
+ int nid;
+
+ dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = pci_set_mwi(pdev);
+ if (rc) {
+ dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
+ goto mwi_out;
+ }
+
+ rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
+ goto req_regions_out;
+ }
+
+ /* Fetch the Numa node id for this device */
+ nid = dev_to_node(&pdev->dev);
+ if (nid < 0) {
+ dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
+ nid = 0;
+ }
+
+ /* Allocate efct */
+ efct = efct_device_alloc(nid);
+ if (!efct) {
+ dev_err(&pdev->dev, "Failed to allocate efct\n");
+ rc = -ENOMEM;
+ goto alloc_out;
+ }
+
+ efct->pci = pdev;
+ efct->numa_node = nid;
+
+ /* Map all memory BARs */
+ for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ efct->reg[r] = ioremap(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ r++;
+ }
+
+ /*
+ * If the 64-bit attribute is set, both this BAR and the
+ * next form the complete address. Skip processing the
+ * next BAR.
+ */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
+ i++;
+ }
+
+ pci_set_drvdata(pdev, efct);
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ goto dma_mask_out;
+ }
+
+ num_interrupts = efct_device_interrupts_required(efct);
+ if (num_interrupts < 0) {
+ efc_log_err(efct, "efct_device_interrupts_required failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+
+ /*
+ * Initialize MSI-X interrupts; note that
+ * efct_setup_msix() enables the interrupts
+ */
+ rc = efct_setup_msix(efct, num_interrupts);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't setup msix\n");
+ goto dma_mask_out;
+ }
+ /* Disable interrupts for now */
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d disabled\n", i);
+ disable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ rc = efct_device_attach(efct);
+ if (rc)
+ goto attach_out;
+
+ return 0;
+
+attach_out:
+ efct_teardown_msix(efct);
+dma_mask_out:
+ pci_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+ efct_device_free(efct);
+alloc_out:
+ pci_release_regions(pdev);
+req_regions_out:
+ pci_clear_mwi(pdev);
+mwi_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void
+efct_pci_remove(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ u32 i;
+
+ if (!efct)
+ return;
+
+ efct_device_detach(efct);
+
+ efct_teardown_msix(efct);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+
+ efct_device_free(efct);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
+{
+ if (efct) {
+ efc_log_debug(efct,
+ "PCI channel disable preparing for reset\n");
+ efct_device_detach(efct);
+ /* Disable interrupt and pci device */
+ efct_teardown_msix(efct);
+ }
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_recover(struct efct *efct)
+{
+ if (efct) {
+ efc_log_debug(efct, "PCI channel preparing for recovery\n");
+ efct_hw_io_abort_all(&efct->hw);
+ }
+}
+
+/**
+ * efct_pci_io_error_detected - method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * device error detected handling routine, which will perform the proper
+ * error detected operation.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_CAN_RECOVER - recovery possible without a reset
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ pci_ers_result_t rc;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ efct_device_prep_for_recover(efct);
+ rc = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen:
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure:
+ efct_device_detach(efct);
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ }
+
+ return rc;
+}
+
+static pci_ers_result_t
+efct_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ efc_log_err(efct, "failed to enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /*
+ * Because newer kernels' pci_restore_state() clears the device's
+ * saved_state flag, the restored state needs to be saved again.
+ */
+
+ pci_save_state(pdev);
+
+ pci_set_master(pdev);
+
+ rc = efct_setup_msix(efct, efct->n_msix_vec);
+ if (rc)
+ efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
+ rc);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device online */
+ efct_device_attach(efct);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void
+efct_pci_io_resume(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+ /* Bring device online */
+ efct_device_attach(efct);
+}
+
+MODULE_DEVICE_TABLE(pci, efct_pci_table);
+
+static struct pci_error_handlers efct_pci_err_handler = {
+ .error_detected = efct_pci_io_error_detected,
+ .slot_reset = efct_pci_io_slot_reset,
+ .resume = efct_pci_io_resume,
+};
+
+static struct pci_driver efct_pci_driver = {
+ .name = EFCT_DRIVER_NAME,
+ .id_table = efct_pci_table,
+ .probe = efct_pci_probe,
+ .remove = efct_pci_remove,
+ .err_handler = &efct_pci_err_handler,
+};
+
+static
+int __init efct_init(void)
+{
+ int rc;
+
+ rc = efct_device_init();
+ if (rc) {
+ pr_err("efct_device_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pci_register_driver(&efct_pci_driver);
+ if (rc) {
+ pr_err("pci_register_driver failed rc=%d\n", rc);
+ efct_device_shutdown();
+ }
+
+ return rc;
+}
+
+static void __exit efct_exit(void)
+{
+ pci_unregister_driver(&efct_pci_driver);
+ efct_device_shutdown();
+}
+
+module_init(efct_init);
+module_exit(efct_exit);
+MODULE_VERSION(EFCT_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h
new file mode 100644
index 000000000..0e3c931db
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_DRIVER_H__)
+#define __EFCT_DRIVER_H__
+
+/***************************************************************************
+ * OS specific includes
+ */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include "../include/efc_common.h"
+#include "../libefc/efclib.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+#include "efct_xport.h"
+
+#define EFCT_DRIVER_NAME "efct"
+#define EFCT_DRIVER_VERSION "1.0.0.0"
+
+/* EFCT_DEFAULT_FILTER -
+ * MRQ filter to segregate the IO flow.
+ */
+#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0"
+
+/* EFCT_OS_MAX_ISR_TIME_MSEC -
+ * maximum time driver code should spend in an interrupt
+ * or kernel thread context without yielding
+ */
+#define EFCT_OS_MAX_ISR_TIME_MSEC 1000
+
+#define EFCT_FC_MAX_SGL 64
+#define EFCT_FC_DIF_SEED 0
+
+/* Watermark */
+#define EFCT_WATERMARK_HIGH_PCT 90
+#define EFCT_WATERMARK_LOW_PCT 80
+#define EFCT_IO_WATERMARK_PER_INITIATOR 8
+
+#define EFCT_PCI_MAX_REGS 6
+#define MAX_PCI_INTERRUPTS 16
+
+struct efct_intr_context {
+ struct efct *efct;
+ u32 index;
+};
+
+struct efct {
+ struct pci_dev *pci;
+ void __iomem *reg[EFCT_PCI_MAX_REGS];
+
+ u32 n_msix_vec;
+ bool attached;
+ bool soft_wwn_enable;
+ u8 efct_req_fw_upgrade;
+ struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS];
+ u32 numa_node;
+
+ char name[EFC_NAME_LENGTH];
+ u32 instance_index;
+ struct list_head list_entry;
+ struct efct_scsi_tgt tgt_efct;
+ struct efct_xport *xport;
+ struct efc *efcport;
+ struct Scsi_Host *shost;
+ int logmask;
+ u32 max_isr_time_msec;
+
+ const char *desc;
+
+ const char *model;
+
+ struct efct_hw hw;
+
+ u32 rq_selection_policy;
+ char *filter_def;
+ int topology;
+
+ /* Look up for target node */
+ struct xarray lookup;
+
+ /*
+ * Target IO timer value:
+ * Zero: target command timeout disabled.
+ * Non-zero: Timeout value, in seconds, for target commands
+ */
+ u32 target_io_timer_sec;
+
+ int speed;
+ struct dentry *sess_debugfs_dir;
+};
+
+#define FW_WRITE_BUFSIZE (64 * 1024)
+
+struct efct_fw_write_result {
+ struct completion done;
+ int status;
+ u32 actual_xfer;
+ u32 change_status;
+};
+
+extern struct list_head efct_devices;
+
+#endif /* __EFCT_DRIVER_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
new file mode 100644
index 000000000..5a5525054
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -0,0 +1,3580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+struct efct_hw_link_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_host_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_fw_wr_cb_arg {
+ void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
+ void *arg;
+};
+
+struct efct_mbox_rqst_ctx {
+ int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
+ void *arg;
+};
+
+static int
+efct_hw_link_event_init(struct efct_hw *hw)
+{
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ hw->link.topology = SLI4_LINK_TOPO_NONE;
+ hw->link.medium = SLI4_LINK_MEDIUM_MAX;
+ hw->link.speed = 0;
+ hw->link.loop_map = NULL;
+ hw->link.fc_id = U32_MAX;
+
+ return 0;
+}
+
+static int
+efct_hw_read_max_dump_size(struct efct_hw *hw)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ struct efct *efct = hw->os;
+ int rc = 0;
+ struct sli4_rsp_cmn_set_dump_location *rsp;
+
+ /* attempt to determine the dump size for function 0 only. */
+ if (PCI_FUNC(efct->pci->devfn) != 0)
+ return rc;
+
+ if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
+ return -EIO;
+
+ rsp = (struct sli4_rsp_cmn_set_dump_location *)
+ (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc != 0) {
+ efc_log_debug(hw->os, "set dump location cmd failed\n");
+ return rc;
+ }
+
+ hw->dump_size =
+ le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+
+ efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
+
+ return rc;
+}
+
+static int
+__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_topology *read_topo =
+ (struct sli4_cmd_read_topology *)mqe;
+ u8 speed;
+ struct efc_domain_record drec = {0};
+ struct efct *efct = hw->os;
+
+ if (status || le16_to_cpu(read_topo->hdr.status)) {
+ efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(read_topo->hdr.status));
+ return -EIO;
+ }
+
+ switch (le32_to_cpu(read_topo->dw2_attentype) &
+ SLI4_READTOPO_ATTEN_TYPE) {
+ case SLI4_READ_TOPOLOGY_LINK_UP:
+ hw->link.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_DOWN:
+ hw->link.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
+ hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ break;
+ }
+
+ switch (read_topo->topology) {
+ case SLI4_READ_TOPO_NON_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_READ_TOPO_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_FC_AL;
+ if (hw->link.status == SLI4_LINK_STATUS_UP)
+ hw->link.loop_map = hw->loop_map.virt;
+ hw->link.fc_id = read_topo->acquired_al_pa;
+ break;
+ default:
+ hw->link.topology = SLI4_LINK_TOPO_MAX;
+ break;
+ }
+
+ hw->link.medium = SLI4_LINK_MEDIUM_FC;
+
+ speed = (le32_to_cpu(read_topo->currlink_state) &
+ SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
+ switch (speed) {
+ case SLI4_READ_TOPOLOGY_SPEED_1G:
+ hw->link.speed = 1 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_2G:
+ hw->link.speed = 2 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_4G:
+ hw->link.speed = 4 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_8G:
+ hw->link.speed = 8 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_16G:
+ hw->link.speed = 16 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_32G:
+ hw->link.speed = 32 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_64G:
+ hw->link.speed = 64 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_128G:
+ hw->link.speed = 128 * 1000;
+ break;
+ }
+
+ drec.speed = hw->link.speed;
+ drec.fc_id = hw->link.fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link(void *ctx, void *e)
+{
+ struct efct_hw *hw = ctx;
+ struct sli4_link_event *event = e;
+ struct efc_domain *d = NULL;
+ int rc = 0;
+ struct efct *efct = hw->os;
+
+ efct_hw_link_event_init(hw);
+
+ switch (event->status) {
+ case SLI4_LINK_STATUS_UP:
+
+ hw->link = *event;
+ efct->efcport->link_status = EFC_LINK_STATUS_UP;
+
+ if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
+ struct efc_domain_record drec = {0};
+
+ efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
+ event->speed);
+ drec.speed = event->speed;
+ drec.fc_id = event->fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
+ &drec);
+ } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
+ event->speed);
+
+ if (!sli_cmd_read_topology(&hw->sli, buf,
+ &hw->loop_map)) {
+ rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
+ __efct_read_topology_cb, NULL);
+ }
+
+ if (rc)
+ efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
+ } else {
+ efc_log_info(hw->os, "%s(%#x), speed is %d\n",
+ "Link Up, unsupported topology ",
+ event->topology, event->speed);
+ }
+ break;
+ case SLI4_LINK_STATUS_DOWN:
+ efc_log_info(hw->os, "Link down\n");
+
+ hw->link.status = event->status;
+ efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
+
+ d = efct->efcport->domain;
+ if (d)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
+ break;
+ default:
+ efc_log_debug(hw->os, "unhandled link status %#x\n",
+ event->status);
+ break;
+ }
+
+ return 0;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
+{
+ u32 i, max_sgl, cpus;
+
+ if (hw->hw_setup_called)
+ return 0;
+
+ /*
+ * efct_hw_init() relies on NULL pointers indicating that a structure
+ * needs allocation. If a structure is non-NULL, efct_hw_init() won't
+ * free/realloc that memory
+ */
+ memset(hw, 0, sizeof(struct efct_hw));
+
+ hw->hw_setup_called = true;
+
+ hw->os = os;
+
+ mutex_init(&hw->bmbx_lock);
+ spin_lock_init(&hw->cmd_lock);
+ INIT_LIST_HEAD(&hw->cmd_head);
+ INIT_LIST_HEAD(&hw->cmd_pending);
+ hw->cmd_head_count = 0;
+
+ /* Create mailbox command ctx pool */
+ hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_command_ctx));
+ if (!hw->cmd_ctx_pool) {
+ efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
+ return -EIO;
+ }
+
+ /* Create mailbox request ctx pool for library callback */
+ hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_mbox_rqst_ctx));
+ if (!hw->mbox_rqst_pool) {
+ efc_log_err(hw->os, "failed to allocate mbox request pool\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&hw->io_lock);
+ INIT_LIST_HEAD(&hw->io_inuse);
+ INIT_LIST_HEAD(&hw->io_free);
+ INIT_LIST_HEAD(&hw->io_wait_free);
+
+ atomic_set(&hw->io_alloc_failed_count, 0);
+
+ hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
+ if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
+ efc_log_err(hw->os, "SLI setup failed\n");
+ return -EIO;
+ }
+
+ efct_hw_link_event_init(hw);
+
+ sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
+
+ /*
+ * Set all the queue sizes to the maximum allowed.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
+ hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
+ /*
+ * Adjust the size of the WQs so that the CQ is twice as big as
+ * the WQ to allow for 2 completions per IO. This allows us to
+ * handle multi-phase as well as aborts.
+ */
+ hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
+
+ /*
+ * The RQ assignment for RQ pair mode.
+ */
+
+ hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
+ hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
+
+ cpus = num_possible_cpus();
+ hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
+
+ max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
+ max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
+ hw->config.n_sgl = max_sgl;
+
+ (void)efct_hw_read_max_dump_size(hw);
+
+ return 0;
+}
+
+static void
+efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
+{
+ efc_log_info(hw->os,
+ "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
+ j, hw->config.filter_def[j], i, id);
+}
+
+static inline void
+efct_hw_init_free_io(struct efct_hw_io *io)
+{
+ /*
+ * Set io->done to NULL, to avoid any callbacks, should
+ * a completion be received for one of these IOs
+ */
+ io->done = NULL;
+ io->abort_done = NULL;
+ io->status_saved = false;
+ io->abort_in_progress = false;
+ io->type = 0xFFFF;
+ io->wq = NULL;
+}
+
+static bool efct_hw_iotype_is_originator(u16 io_type)
+{
+ switch (io_type) {
+ case EFCT_HW_FC_CT:
+ case EFCT_HW_ELS_REQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* Restore the default */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+}
+
+static void
+efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 len = 0;
+ u32 ext = 0;
+
+ /* clear xbusy flag if WCQE[XB] is clear */
+ if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
+ io->xbusy = false;
+
+ /* get extended CQE status */
+ switch (io->type) {
+ case EFCT_HW_BLS_ACC:
+ case EFCT_HW_BLS_RJT:
+ break;
+ case EFCT_HW_ELS_REQ:
+ sli_fc_els_did(&hw->sli, cqe, &ext);
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_ELS_RSP:
+ case EFCT_HW_FC_CT_RSP:
+ break;
+ case EFCT_HW_FC_CT:
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_WRITE:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ break;
+ case EFCT_HW_IO_DNRX_REQUEUE:
+ /* release the count for re-posting the buffer */
+ /* efct_hw_io_free(hw, io); */
+ break;
+ default:
+ efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
+ io->type, io->indicator);
+ break;
+ }
+ if (status) {
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ /*
+ * If we are an originator IO and XB is set, then issue
+ * abort for the IO from within the HW
+ */
+ if (efct_hw_iotype_is_originator(io->type) &&
+ wcqe->flags & SLI4_WCQE_XB) {
+ int rc;
+
+ efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ /*
+ * Because targets may send a response when the IO
+ * completes using the same XRI, we must wait for the
+ * XRI_ABORTED CQE to issue the IO callback
+ */
+ rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
+ if (rc == 0) {
+ /*
+ * latch status to return after abort is
+ * complete
+ */
+ io->status_saved = true;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_efct_hw_wq_process_io;
+ } else if (rc == -EINPROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just return original
+ * error.
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x\n",
+ "abort in progress xri=",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log
+ * error
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
+ "Failed to abort xri=",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+ }
+
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ if (io->status_saved) {
+ /* use latched status if exists */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ }
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+ done(io, len, status, ext, io->arg);
+ }
+
+exit_efct_hw_wq_process_io:
+ return;
+}
+
+static int
+efct_hw_setup_io(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct efct_hw_io *io = NULL;
+ uintptr_t xfer_virt = 0;
+ uintptr_t xfer_phys = 0;
+ u32 index;
+ bool new_alloc = true;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ if (!hw->io) {
+ hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
+ if (!hw->io)
+ return -ENOMEM;
+
+ memset(hw->io, 0, hw->config.n_io * sizeof(io));
+
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!hw->io[i])
+ goto error;
+ }
+
+ /* Create WQE buffs for IO */
+ hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
+ GFP_KERNEL);
+ if (!hw->wqe_buffs) {
+ kfree(hw->io);
+ return -ENOMEM;
+ }
+
+ } else {
+ /* re-use existing IOs, including SGLs */
+ new_alloc = false;
+ }
+
+ if (new_alloc) {
+ dma = &hw->xfer_rdy;
+ dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys, GFP_KERNEL);
+ if (!dma->virt)
+ return -ENOMEM;
+ }
+ xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
+ xfer_phys = hw->xfer_rdy.phys;
+
+ /* Initialize the pool of HW IO objects */
+ for (i = 0; i < hw->config.n_io; i++) {
+ struct hw_wq_callback *wqcb;
+
+ io = hw->io[i];
+
+ /* initialize IO fields */
+ io->hw = hw;
+
+ /* Assign a WQE buff */
+ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
+
+ /* Allocate the request tag for this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+ io->reqtag = wqcb->instance_index;
+
+ /* Now for the fields that are initialized on each free */
+ efct_hw_init_free_io(io);
+
+ /* The XB flag isn't cleared on IO free, so init to zero */
+ io->xbusy = 0;
+
+ if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
+ &io->indicator, &index)) {
+ efc_log_err(hw->os,
+ "sli_resource_alloc failed @ %d\n", i);
+ return -ENOMEM;
+ }
+
+ if (new_alloc) {
+ dma = &io->def_sgl;
+ dma->size = hw->config.n_sgl *
+ sizeof(struct sli4_sge);
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys,
+ GFP_KERNEL);
+ if (!dma->virt) {
+ efc_log_err(hw->os, "dma_alloc fail %d\n", i);
+ memset(&io->def_sgl, 0,
+ sizeof(struct efc_dma));
+ return -ENOMEM;
+ }
+ }
+ io->def_sgl_count = hw->config.n_sgl;
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ if (hw->xfer_rdy.size) {
+ io->xfer_rdy.virt = (void *)xfer_virt;
+ io->xfer_rdy.phys = xfer_phys;
+ io->xfer_rdy.size = sizeof(struct fcp_txrdy);
+
+ xfer_virt += sizeof(struct fcp_txrdy);
+ xfer_phys += sizeof(struct fcp_txrdy);
+ }
+ }
+
+ return 0;
+error:
+ for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+
+ kfree(hw->io);
+ hw->io = NULL;
+
+ return -ENOMEM;
+}
+
+static int
+efct_hw_init_prereg_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ struct efct_hw_io *io = NULL;
+ u8 cmd[SLI4_BMBX_SIZE];
+ int rc = 0;
+ u32 n_rem;
+ u32 n = 0;
+ u32 sgls_per_request = 256;
+ struct efc_dma **sgls = NULL;
+ struct efc_dma req;
+ struct efct *efct = hw->os;
+
+ sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
+ if (!sgls)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(struct efc_dma));
+ req.size = 32 + sgls_per_request * 16;
+ req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
+ GFP_KERNEL);
+ if (!req.virt) {
+ kfree(sgls);
+ return -ENOMEM;
+ }
+
+ for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
+ /* Copy address of SGL's into local sgls[] array, break
+ * out if the xri is not contiguous.
+ */
+ u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
+
+ for (n = 0; n < min; n++) {
+ /* Check that we have contiguous xri values */
+ if (n > 0) {
+ if (hw->io[idx + n]->indicator !=
+ hw->io[idx + n - 1]->indicator + 1)
+ break;
+ }
+
+ sgls[n] = hw->io[idx + n]->sgl;
+ }
+
+ if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
+ hw->io[idx]->indicator, n, sgls, NULL, &req)) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
+ if (rc) {
+ efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
+ break;
+ }
+
+ /* Add to tail if successful */
+ for (i = 0; i < n; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+ }
+
+ dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
+ memset(&req, 0, sizeof(struct efc_dma));
+ kfree(sgls);
+
+ return rc;
+}
+
+static int
+efct_hw_init_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ bool prereg = false;
+ struct efct_hw_io *io = NULL;
+ int rc = 0;
+
+ prereg = hw->sli.params.sgl_pre_registered;
+
+ if (prereg)
+ return efct_hw_init_prereg_io(hw);
+
+ for (i = 0; i < hw->config.n_io; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
+
+ memset(&param, 0, sizeof(param));
+ param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
+ fdt_xfer_hint, rc);
+ else
+ efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
+ le32_to_cpu(param.fdt_xfer_hint));
+
+ return rc;
+}
+
+static int
+efct_hw_config_rq(struct efct_hw *hw)
+{
+ u32 min_rq_count, i, rc;
+ struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "using REG_FCFI standard\n");
+
+ /*
+ * Set the filter match/mask values from hw's
+ * filter_def values
+ */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = cpu_to_le16(0xffff);
+ rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ /*
+ * Update the rq_id's of the FCF configuration
+ * (don't update more than the number of rq_cfg
+ * elements)
+ */
+ min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
+ hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
+ for (i = 0; i < min_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+ u32 j;
+
+ for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
+ u32 mask = (rq->filter_mask != 0) ?
+ rq->filter_mask : 1;
+
+ if (!(mask & (1U << j)))
+ continue;
+
+ rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
+ efct_logfcfi(hw, j, i, rq->hdr->id);
+ }
+ }
+
+ rc = -EIO;
+ if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "FCFI registration failed\n");
+ return rc;
+ }
+ hw->fcf_indicator =
+ le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
+
+ return rc;
+}
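
Each 32-bit filter_def word (default "0x01ff22ff,0,0,0" from efct_driver.h, parsed by efct_hw_parse_filter() later in this file) is unpacked a byte at a time as shown in the loop above. Below is a minimal decode of the first default word; it is an illustrative helper for this review, not part of the patch.

/*
 * Illustrative only: mirrors the byte unpacking in efct_hw_config_rq().
 * 0x01ff22ff -> r_ctl_mask 0xff, r_ctl_match 0x22, type_mask 0xff,
 * type_match 0x01. Note that efct_hw_config_mrq() below unpacks the same
 * word with type in the low bytes and r_ctl in the high bytes.
 */
static void example_decode_filter_word(struct efct_hw *hw, u32 word)
{
	u8 r_ctl_mask  = (u8)word;		/* bits  7:0  */
	u8 r_ctl_match = (u8)(word >> 8);	/* bits 15:8  */
	u8 type_mask   = (u8)(word >> 16);	/* bits 23:16 */
	u8 type_match  = (u8)(word >> 24);	/* bits 31:24 */

	efc_log_debug(hw->os, "r_ctl %02x/%02x type %02x/%02x\n",
		      r_ctl_mask, r_ctl_match, type_mask, type_match);
}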
+
+static int
+efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
+{
+ u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
+ struct hw_rq *rq;
+ struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
+ struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ u32 rc, i;
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ goto issue_cmd;
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_filter[i].rq_id = cpu_to_le16(0xffff);
+ rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
+ rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ rq = hw->hw_rq[0];
+ rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
+ rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
+
+ mrq_bitmask = 0x2;
+issue_cmd:
+ efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
+ hw->hw_rq_count, hw->config.rq_selection_policy, mode);
+ /* Invoke REG_FCFI_MRQ */
+ rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
+ hw->config.rq_selection_policy, mrq_bitmask,
+ hw->hw_mrq_count, rq_filter);
+ if (rc) {
+ efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
+
+ if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
+ efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
+ rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
+ return -EIO;
+ }
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
+
+ return 0;
+}
+
+static void
+efct_hw_queue_hash_add(struct efct_queue_hash *hash,
+ u16 id, u16 index)
+{
+ u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the number of queues, we never
+ * have to worry about an infinite loop.
+ */
+ while (hash[hash_index].in_use)
+ hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /* not used, claim the entry */
+ hash[hash_index].id = id;
+ hash[hash_index].in_use = true;
+ hash[hash_index].index = index;
+}
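
The comment above relies on open addressing with linear probing: because the hash is larger than the number of queues, an unused slot always terminates a probe. The matching lookup helper is not part of this hunk; the following is only a sketch of how such a lookup would probe, under that assumption.

/*
 * Hypothetical lookup counterpart (not taken from the driver): probe from
 * the hashed slot until the id matches or an unused slot ends the search.
 */
static int example_queue_hash_find(struct efct_queue_hash *hash, u16 id)
{
	u32 start = id & (EFCT_HW_Q_HASH_SIZE - 1);
	u32 i = start;

	while (hash[i].in_use) {
		if (hash[i].id == id)
			return hash[i].index;	/* queue's index in its array */
		i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
		if (i == start)			/* wrapped; should not happen */
			break;
	}

	return -1;				/* not found */
}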
+
+static int
+efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_health_check param;
+ u32 health_check_flag = 0;
+
+ memset(&param, 0, sizeof(param));
+
+ if (enable)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
+
+ if (query)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
+
+ param.health_check_dword = cpu_to_le32(health_check_flag);
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
+ else
+ efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
+
+ return rc;
+}
+
+int
+efct_hw_init(struct efct_hw *hw)
+{
+ int rc;
+ u32 i = 0;
+ int rem_count;
+ unsigned long flags = 0;
+ struct efct_hw_io *temp;
+ struct efc_dma *dma;
+
+ /*
+ * Make sure the command lists are empty. If this is start-of-day,
+ * they'll be empty since they were just initialized in efct_hw_setup.
+ * If we've just gone through a reset, the command and command pending
+ * lists should have been cleaned up as part of the reset
+ * (efct_hw_reset()).
+ */
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on cmd list\n");
+ return -EIO;
+ }
+ if (!list_empty(&hw->cmd_pending)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on pending list\n");
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ /* Free RQ buffers if previously allocated */
+ efct_hw_rx_free(hw);
+
+ /*
+ * The IO queues must be initialized here for the reset case. The
+ * efct_hw_init_io() function will re-add the IOs to the free list.
+ * The cmd_head list should be OK since we free all entries in
+ * efct_hw_command_cancel() that is called in the efct_hw_reset().
+ */
+
+ /* If we are in this function due to a reset, there may be stale items
+ * on lists that need to be removed. Clean them up.
+ */
+ rem_count = 0;
+ while ((!list_empty(&hw->io_wait_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_inuse))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
+ rem_count);
+
+ /* If MRQ is not required, make sure we don't request the feature. */
+ if (hw->config.n_rq == 1)
+ hw->sli.features &= (~SLI4_REQFEAT_MRQP);
+
+ if (sli_init(&hw->sli)) {
+ efc_log_err(hw->os, "SLI failed to initialize\n");
+ return -EIO;
+ }
+
+ if (hw->sliport_healthcheck) {
+ rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
+ if (rc != 0) {
+ efc_log_err(hw->os, "Enable port Health check fail\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Set FDT transfer hint, only works on Lancer
+ */
+ if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
+ /*
+ * Non-fatal error. In particular, we can disregard failure to
+ * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
+ * that do not support EFCT_HW_FDT_XFER_HINT feature.
+ */
+ efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
+ }
+
+ /* zero the hashes */
+ memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
+ efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
+ efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
+ efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
+
+ rc = efct_hw_init_queues(hw);
+ if (rc)
+ return rc;
+
+ rc = efct_hw_map_wq_cpu(hw);
+ if (rc)
+ return rc;
+
+ /* Allocate and post RQ buffers */
+ rc = efct_hw_rx_allocate(hw);
+ if (rc) {
+ efc_log_err(hw->os, "rx_allocate failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_rx_post(hw);
+ if (rc) {
+ efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
+ return rc;
+ }
+
+ if (hw->config.n_eq == 1) {
+ rc = efct_hw_config_rq(hw);
+ if (rc) {
+ efc_log_err(hw->os, "config rq failed %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the WQ request tag pool, if not previously allocated
+ * (the request tag value is 16 bits, thus the pool allocation size
+ * of 64k)
+ */
+ hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
+ if (!hw->wq_reqtag_pool) {
+ efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = efct_hw_setup_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO allocation failure\n");
+ return rc;
+ }
+
+ rc = efct_hw_init_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO initialization failure\n");
+ return rc;
+ }
+
+ dma = &hw->loop_map;
+ dma->size = SLI4_MIN_LOOP_MAP_BYTES;
+ dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
+ GFP_KERNEL);
+ if (!dma->virt)
+ return -EIO;
+
+ /*
+ * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
+ * entries
+ */
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_arm(&hw->sli, &hw->eq[i], true);
+
+ /*
+ * Initialize RQ hash
+ */
+ for (i = 0; i < hw->rq_count; i++)
+ efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
+
+ /*
+ * Initialize WQ hash
+ */
+ for (i = 0; i < hw->wq_count; i++)
+ efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
+
+ /*
+ * Arming the CQ allows (e.g.) MQ completions to write CQ entries
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
+ sli_queue_arm(&hw->sli, &hw->cq[i], true);
+ }
+
+ /* Set RQ process limit*/
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
+ }
+
+ /* record the fact that the queues are functional */
+ hw->state = EFCT_HW_STATE_ACTIVE;
+ /*
+ * Allocate a HW IO for send frame.
+ */
+ hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
+ if (!hw->hw_wq[0]->send_frame_io)
+ efc_log_err(hw->os, "alloc for send_frame_io failed\n");
+
+ /* Initialize send frame sequence id */
+ atomic_set(&hw->send_frame_seq_id, 0);
+
+ return 0;
+}
+
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value)
+{
+ int rc = 0;
+ char *p = NULL;
+ char *token;
+ u32 idx = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
+ hw->config.filter_def[idx] = 0;
+
+ p = kstrdup(value, GFP_KERNEL);
+ if (!p || !*p) {
+ efc_log_err(hw->os, "p is NULL\n");
+ return -ENOMEM;
+ }
+
+ idx = 0;
+ while ((token = strsep(&p, ",")) && *token) {
+ if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
+ efc_log_err(hw->os, "kstrtoint failed\n");
+
+ if (!p || !*p)
+ break;
+
+ if (idx == ARRAY_SIZE(hw->config.filter_def))
+ break;
+ }
+ kfree(p);
+
+ return rc;
+}
+
+u64
+efct_get_wwnn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwnn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+u64
+efct_get_wwpn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwpn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+static struct efc_hw_rq_buffer *
+efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
+ u32 size)
+{
+ struct efct *efct = hw->os;
+ struct efc_hw_rq_buffer *rq_buf = NULL;
+ struct efc_hw_rq_buffer *prq;
+ u32 i;
+
+ if (!count)
+ return NULL;
+
+ rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
+ if (!rq_buf)
+ return NULL;
+ memset(rq_buf, 0, sizeof(*rq_buf) * count);
+
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ prq->rqindex = rqindex;
+ prq->dma.size = size;
+ prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ prq->dma.size,
+ &prq->dma.phys,
+ GFP_KERNEL);
+ if (!prq->dma.virt) {
+ efc_log_err(hw->os, "DMA allocation failed\n");
+ kfree(rq_buf);
+ return NULL;
+ }
+ }
+ return rq_buf;
+}
+
+static void
+efct_hw_rx_buffer_free(struct efct_hw *hw,
+ struct efc_hw_rq_buffer *rq_buf,
+ u32 count)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ struct efc_hw_rq_buffer *prq;
+
+ if (rq_buf) {
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ dma_free_coherent(&efct->pci->dev,
+ prq->dma.size, prq->dma.virt,
+ prq->dma.phys);
+ memset(&prq->dma, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(rq_buf);
+ }
+}
+
+int
+efct_hw_rx_allocate(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ int rc = 0;
+ u32 rqindex = 0;
+ u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
+ u32 payload_size = hw->config.rq_default_buffer_size;
+
+ rqindex = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ /* Allocate header buffers */
+ rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ hdr_size);
+ if (!rq->hdr_buf) {
+ efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
+ i, rq->hdr->id, rq->entry_count, hdr_size);
+
+ rqindex++;
+
+ /* Allocate payload buffers */
+ rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ payload_size);
+ if (!rq->payload_buf) {
+ efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
+ i, rq->data->id, rq->entry_count, payload_size);
+ rqindex++;
+ }
+
+ return rc ? -EIO : 0;
+}
+
+int
+efct_hw_rx_post(struct efct_hw *hw)
+{
+ u32 i;
+ u32 idx;
+ u32 rq_idx;
+ int rc = 0;
+
+ if (!hw->seq_pool) {
+ u32 count = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++)
+ count += hw->hw_rq[i]->entry_count;
+
+ hw->seq_pool = kmalloc_array(count,
+ sizeof(struct efc_hw_sequence), GFP_KERNEL);
+ if (!hw->seq_pool)
+ return -ENOMEM;
+ }
+
+ /*
+ * In RQ pair mode, we MUST post the header and payload buffer at the
+ * same time.
+ */
+ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
+ struct hw_rq *rq = hw->hw_rq[rq_idx];
+
+ for (i = 0; i < rq->entry_count - 1; i++) {
+ struct efc_hw_sequence *seq;
+
+ seq = hw->seq_pool + idx;
+ idx++;
+ seq->header = &rq->hdr_buf[i];
+ seq->payload = &rq->payload_buf[i];
+ rc = efct_hw_sequence_free(hw, seq);
+ if (rc)
+ break;
+ }
+ if (rc)
+ break;
+ }
+
+ if (rc && hw->seq_pool)
+ kfree(hw->seq_pool);
+
+ return rc;
+}
+
+void
+efct_hw_rx_free(struct efct_hw *hw)
+{
+ u32 i;
+
+ /* Free hw_rq buffers */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ if (rq) {
+ efct_hw_rx_buffer_free(hw, rq->hdr_buf,
+ rq->entry_count);
+ rq->hdr_buf = NULL;
+ efct_hw_rx_buffer_free(hw, rq->payload_buf,
+ rq->entry_count);
+ rq->payload_buf = NULL;
+ }
+ }
+}
+
+static int
+efct_hw_cmd_submit_pending(struct efct_hw *hw)
+{
+ int rc = 0;
+
+ /* Assumes lock held */
+
+ /* Only submit MQE if there's room */
+ while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
+ !list_empty(&hw->cmd_pending)) {
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_pending,
+ struct efct_command_ctx, list_entry);
+ if (!ctx)
+ break;
+
+ list_del_init(&ctx->list_entry);
+
+ list_add_tail(&ctx->list_entry, &hw->cmd_head);
+ hw->cmd_head_count++;
+ if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
+ efc_log_debug(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ rc = -EIO;
+ break;
+ }
+ }
+ return rc;
+}
+
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
+{
+ int rc = -EIO;
+ unsigned long flags = 0;
+ void *bmbx = NULL;
+
+ /*
+ * If the chip is in an error state (UE'd) then reject this mailbox
+ * command.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
+ efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(&hw->sli),
+ sli_reg_read_err1(&hw->sli),
+ sli_reg_read_err2(&hw->sli));
+
+ return -EIO;
+ }
+
+ /*
+ * Send a mailbox command to the hardware, and either wait for
+ * a completion (EFCT_CMD_POLL) or get an optional asynchronous
+ * completion (EFCT_CMD_NOWAIT).
+ */
+
+ if (opts == EFCT_CMD_POLL) {
+ mutex_lock(&hw->bmbx_lock);
+ bmbx = hw->sli.bmbx.virt;
+
+ memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
+
+ if (sli_bmbx_command(&hw->sli) == 0) {
+ rc = 0;
+ memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
+ }
+ mutex_unlock(&hw->bmbx_lock);
+ } else if (opts == EFCT_CMD_NOWAIT) {
+ struct efct_command_ctx *ctx = NULL;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "Can't send command, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -ENOSPC;
+
+ memset(ctx, 0, sizeof(struct efct_command_ctx));
+
+ if (cb) {
+ ctx->cb = cb;
+ ctx->arg = arg;
+ }
+
+ memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
+ ctx->ctx = hw;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /* Add to pending list */
+ INIT_LIST_HEAD(&ctx->list_entry);
+ list_add_tail(&ctx->list_entry, &hw->cmd_pending);
+
+ /* Submit as much of the pending list as we can */
+ rc = efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ }
+
+ return rc;
+}
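
As the comment inside efct_hw_command() above describes, callers either poll for the mailbox completion or receive it asynchronously through a callback. A minimal usage sketch with placeholder names follows (the callback signature is assumed to match __efct_read_topology_cb() earlier in this file); it is illustration only, not part of the patch.

/* Illustrative callers only; not part of the driver. */
static int example_mbox_done(struct efct_hw *hw, int status, u8 *mqe, void *arg)
{
	efc_log_debug(hw->os, "mailbox completed, status %d\n", status);
	return 0;
}

static int example_issue_mbox(struct efct_hw *hw, u8 *mbx_cmd)
{
	int rc;

	/* Synchronous: the response is copied back into mbx_cmd on success. */
	rc = efct_hw_command(hw, mbx_cmd, EFCT_CMD_POLL, NULL, NULL);
	if (rc)
		return rc;

	/* Asynchronous: example_mbox_done() runs when the MQE completes. */
	return efct_hw_command(hw, mbx_cmd, EFCT_CMD_NOWAIT,
			       example_mbox_done, NULL);
}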
+
+static int
+efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
+ size_t size)
+{
+ struct efct_command_ctx *ctx = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+ list_del_init(&ctx->list_entry);
+ }
+ if (!ctx) {
+ efc_log_err(hw->os, "no command context\n");
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ return -EIO;
+ }
+
+ hw->cmd_head_count--;
+
+ /* Post any pending requests */
+ efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ if (ctx->cb) {
+ memcpy(ctx->buf, mqe, size);
+ ctx->cb(hw, status, ctx->buf, ctx->arg);
+ }
+
+ mempool_free(ctx, hw->cmd_ctx_pool);
+
+ return 0;
+}
+
+static int
+efct_hw_mq_process(struct efct_hw *hw,
+ int status, struct sli4_queue *mq)
+{
+ u8 mqe[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_mq_read(&hw->sli, mq, mqe);
+ if (!rc)
+ rc = efct_hw_command_process(hw, status, mqe, mq->size);
+
+ return rc;
+}
+
+static int
+efct_hw_command_cancel(struct efct_hw *hw)
+{
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /*
+ * Manually clean up remaining commands. Note: since this calls
+ * efct_hw_command_process(), we'll also process the cmd_pending
+ * list, so no need to manually clean that out.
+ */
+ while (!list_empty(&hw->cmd_head)) {
+ u8 mqe[SLI4_BMBX_SIZE] = { 0 };
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+
+ efc_log_debug(hw->os, "hung command %08x\n",
+ !ctx ? U32_MAX : *((u32 *)ctx->buf));
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ return rc;
+}
+
+static void
+efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw->os->efcport, status, mqe,
+ ctx->arg);
+
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ }
+}
+
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx;
+ struct efct *efct = base;
+ struct efct_hw *hw = &efct->hw;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer);
+ * it must be persistent because the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -EIO;
+
+ ctx->callback = cb;
+ ctx->arg = arg;
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
+ if (rc) {
+ efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ return -EIO;
+ }
+
+ return 0;
+}
+
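+/*
+ * Pull a HW IO off the io_free list and move it to io_inuse: reset the
+ * reference count and bind the IO to the WQ mapped to the current CPU
+ * (falling back to hw_wq[0] if no WQ is assigned). Caller must hold
+ * hw->io_lock.
+ */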
+static inline struct efct_hw_io *
+_efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+
+ if (!list_empty(&hw->io_free)) {
+ io = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del(&io->list_entry);
+ }
+ if (io) {
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_inuse);
+ io->state = EFCT_HW_IO_STATE_INUSE;
+ io->abort_reqtag = U32_MAX;
+ io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
+ if (!io->wq) {
+ efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
+ raw_smp_processor_id());
+ io->wq = hw->hw_wq[0];
+ }
+ kref_init(&io->ref);
+ io->release = efct_hw_io_free_internal;
+ } else {
+ atomic_add(1, &hw->io_alloc_failed_count);
+ }
+
+ return io;
+}
+
+struct efct_hw_io *
+efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ io = _efct_hw_io_alloc(hw);
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+
+ return io;
+}
+
+static void
+efct_hw_io_free_move_correct_list(struct efct_hw *hw,
+ struct efct_hw_io *io)
+{
+ /*
+ * When an IO is freed, depending on the exchange busy flag,
+ * move it to the correct list.
+ */
+ if (io->xbusy) {
+ /*
+ * add to wait_free list and wait for XRI_ABORTED CQEs to clean
+ * up
+ */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_wait_free);
+ io->state = EFCT_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ io->state = EFCT_HW_IO_STATE_FREE;
+ }
+}
+
+static inline void
+efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* initialize IO fields */
+ efct_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+}
+
+void
+efct_hw_io_free_internal(struct kref *arg)
+{
+ unsigned long flags = 0;
+ struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
+ struct efct_hw *hw = io->hw;
+
+ /* perform common cleanup */
+ efct_hw_io_free_common(hw, io);
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ /* remove from in-use list */
+ if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+int
+efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return kref_put(&io->ref, io->release);
+}
+
+struct efct_hw_io *
+efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
+{
+ u32 ioindex;
+
+ ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
+ return hw->io[ioindex];
+}
+
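+/*
+ * Prime the default SGL for the given IO type:
+ *  - TARGET_WRITE: SGE[0] carries the host-resident XFER_RDY buffer,
+ *    followed by EFCT_TARGET_WRITE_SKIPS skip SGEs.
+ *  - TARGET_READ: the first EFCT_TARGET_READ_SKIPS entries are skip SGEs
+ *    (required by FCP_TSEND64).
+ *  - TARGET_RSP: no special leading entries.
+ * The SGE at the current tail position is flagged SLI4_SGE_LAST.
+ */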
+int
+efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
+ enum efct_hw_io_type type)
+{
+ struct sli4_sge *data = NULL;
+ u32 i = 0;
+ u32 skips = 0;
+ u32 sge_flags = 0;
+
+ if (!io) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE:
+
+ /* populate host resident XFER_RDY buffer */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ data->buffer_address_high =
+ cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
+ data->buffer_address_low =
+ cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
+ data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+
+ skips = EFCT_TARGET_WRITE_SKIPS;
+
+ io->n_sge = 1;
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGEs
+ */
+ skips = EFCT_TARGET_READ_SKIPS;
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return -EIO;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ return 0;
+}
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length)
+{
+ struct sli4_sge *data = NULL;
+ u32 sge_flags = 0;
+
+ if (!io || !addr || !length) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return -EIO;
+ }
+
+ if (length > hw->sli.sge_supported_length) {
+ efc_log_err(hw->os,
+ "length of SGE %d bigger than allowed %d\n",
+ length, hw->sli.sge_supported_length);
+ return -EIO;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
+ sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
+ sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
+
+ data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
+ data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
+ data->buffer_length = cpu_to_le32(length);
+
+ /*
+ * Always assume this is the last entry and mark as such.
+ * If this is not the first entry, unset the "last SGE"
+ * indication for the previous entry.
+ */
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ if (io->n_sge) {
+ sge_flags = le32_to_cpu(data[-1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_LAST;
+ data[-1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ /* Set first_data_sge if not previously set */
+ if (io->first_data_sge == 0)
+ io->first_data_sge = io->n_sge;
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ return 0;
+}
+
+void
+efct_hw_io_abort_all(struct efct_hw *hw)
+{
+ struct efct_hw_io *io_to_abort = NULL;
+ struct efct_hw_io *next_io = NULL;
+
+ list_for_each_entry_safe(io_to_abort, next_io,
+ &hw->io_inuse, list_entry) {
+ efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
+ }
+}
+
+static void
+efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ u32 ext = 0;
+ u32 len = 0;
+ struct hw_wq_callback *wqcb;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the
+ * callback here, depending on whether an XRI_ABORTED CQE is
+ * expected or not. If the status is Local Reject/No XRI, issue
+ * the callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ /*
+ * Use the latched status, as it is always saved for an internal
+ * abort. Note: we won't have both a done and an abort_done
+ * function, so don't worry about clobbering the len, status and
+ * ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ done(io, len, status, ext, io->arg);
+ }
+
+ if (io->abort_done) {
+ efct_hw_done_t done = io->abort_done;
+
+ io->abort_done = NULL;
+ done(io, len, status, ext, io->abort_arg);
+ }
+
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = false;
+
+ /* Free the WQ callback */
+ if (io->abort_reqtag == U32_MAX) {
+ efc_log_err(hw->os, "HW IO already freed\n");
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ efct_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call efct_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)efct_hw_io_free(hw, io);
+}
+
+static void
+efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
+{
+ struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
+
+ memset(abort, 0, hw->sli.wqe_size);
+
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_tag = cpu_to_le32(wqe->id);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+}
+
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg)
+{
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ if (!io_to_abort) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ efc_log_debug(hw->os,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -ENOENT;
+ }
+
+ /* Must have a valid WQ reference */
+ if (!io_to_abort->wq) {
+ efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -ENOENT;
+ }
+
+ /*
+ * Validation checks complete; now check whether an abort is already
+ * in progress and, if not, set the flag.
+ */
+ if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ efc_log_debug(hw->os,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -EINPROGRESS;
+ }
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri
+ * - io_to_abort->wq_index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - port owned xri:
+ * - rxri: io_to_abort->wq_index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ * - non-rxri
+ * - io_to_abort->index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - io_to_abort->index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = io_to_abort->indicator;
+ io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq) {
+ spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
+ if (io_to_abort->wqe.list_entry.next) {
+ io_to_abort->wqe.abort_wqe_submit_needed = true;
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
+ }
+
+ efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
+
+ /* ABORT_WQE does not actually utilize an XRI on the Port,
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state
+ */
+ if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
+ io_to_abort->abort_in_progress = false;
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_hw_reqtag_pool_free(struct efct_hw *hw)
+{
+ u32 i;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct hw_wq_callback *wqcb = NULL;
+
+ if (reqtag_pool) {
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = reqtag_pool->tags[i];
+ if (!wqcb)
+ continue;
+
+ kfree(wqcb);
+ }
+ kfree(reqtag_pool);
+ hw->wq_reqtag_pool = NULL;
+ }
+}
+
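+/*
+ * Allocate the WQ request-tag pool: U16_MAX callback contexts, each
+ * addressable by its instance_index and initially linked on the freelist.
+ * Tags are handed out by efct_hw_reqtag_alloc() and looked up on completion
+ * via efct_hw_reqtag_get_instance().
+ */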
+struct reqtag_pool *
+efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool;
+ struct hw_wq_callback *wqcb;
+
+ reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
+ if (!reqtag_pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&reqtag_pool->freelist);
+ /* initialize reqtag pool lock */
+ spin_lock_init(&reqtag_pool->lock);
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
+ if (!wqcb)
+ break;
+
+ reqtag_pool->tags[i] = wqcb;
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+
+ return reqtag_pool;
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ struct hw_wq_callback *wqcb = NULL;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ unsigned long flags = 0;
+
+ if (!callback)
+ return wqcb;
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+
+ if (!list_empty(&reqtag_pool->freelist)) {
+ wqcb = list_first_entry(&reqtag_pool->freelist,
+ struct hw_wq_callback, list_entry);
+ }
+
+ if (wqcb) {
+ list_del_init(&wqcb->list_entry);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ } else {
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ }
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
+{
+ unsigned long flags = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+
+ if (!wqcb->callback)
+ efc_log_err(hw->os, "WQCB is already freed\n");
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
+{
+ struct hw_wq_callback *wqcb;
+
+ wqcb = hw->wq_reqtag_pool->tags[instance_index];
+ if (!wqcb)
+ efc_log_err(hw->os, "wqcb for instance %d is null\n",
+ instance_index);
+
+ return wqcb;
+}
+
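+/*
+ * Look up a queue id in an open-addressed hash (linear probing over
+ * EFCT_HW_Q_HASH_SIZE slots) and return the driver's queue index, or -1 if
+ * the id is not present. Used, for example, to map a CQE's CQ id back to
+ * hw->hw_cq[] in efct_hw_eq_process().
+ */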
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
+{
+ int index = -1;
+ int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the maximum number of Qs,
+ * we never have to worry about an infinite loop: we will always
+ * find an unused entry.
+ */
+ do {
+ if (hash[i].in_use && hash[i].id == id)
+ index = hash[i].index;
+ else
+ i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+ } while (index == -1 && hash[i].in_use);
+
+ return index;
+}
+
+int
+efct_hw_process(struct efct_hw *hw, u32 vector,
+ u32 max_isr_time_msec)
+{
+ struct hw_eq *eq;
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
+ * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
+ * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
+ return 0;
+
+ /* Get pointer to struct hw_eq */
+ eq = hw->hw_eq[vector];
+ if (!eq)
+ return 0;
+
+ eq->use_count++;
+
+ return efct_hw_eq_process(hw, eq, max_isr_time_msec);
+}
+
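+/*
+ * Drain the EQ: each EQE is mapped to its CQ through cq_hash and that CQ is
+ * processed; an EQ-full sentinel causes every CQ to be swept. Time spent is
+ * bounded by max_isr_time_msec, checked once every
+ * EFCT_HW_TIMECHECK_ITERATIONS entries, and the EQ is re-armed on exit.
+ */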
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec)
+{
+ u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
+ u32 tcheck_count;
+ u64 tstart;
+ u64 telapsed;
+ bool done = false;
+
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
+ u16 cq_id = 0;
+ int rc;
+
+ rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
+ if (unlikely(rc)) {
+ if (rc == SLI4_EQE_STATUS_EQ_FULL) {
+ u32 i;
+
+ /*
+ * Received a sentinel EQE indicating the
+ * EQ is full. Process all CQs
+ */
+ for (i = 0; i < hw->cq_count; i++)
+ efct_hw_cq_process(hw, hw->hw_cq[i]);
+ continue;
+ } else {
+ return rc;
+ }
+ } else {
+ int index;
+
+ index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
+
+ if (likely(index >= 0))
+ efct_hw_cq_process(hw, hw->hw_cq[index]);
+ else
+ efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
+ }
+
+ if (eq->queue->n_posted > eq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, eq->queue, false);
+
+ if (tcheck_count && (--tcheck_count == 0)) {
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed >= max_isr_time_msec)
+ done = true;
+ }
+ }
+ sli_queue_eq_arm(&hw->sli, eq->queue, true);
+
+ return 0;
+}
+
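+/*
+ * Write a single WQE to the work queue, consuming one entry of free_count.
+ * Every wqec_set_count submissions the WQEC bit is set so the hardware posts
+ * a WQ-release completion; that completion drives hw_wq_submit_pending(),
+ * which credits entries back and drains the pending list. Caller must hold
+ * wq->queue->lock.
+ */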
+static int
+_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int queue_rc;
+
+ /* Every so often, set the WQEC bit to generate consumed completions */
+ if (wq->wqec_count)
+ wq->wqec_count--;
+
+ if (wq->wqec_count == 0) {
+ struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
+
+ genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
+ wq->wqec_count = wq->wqec_set_count;
+ }
+
+ /* Decrement WQ free count */
+ wq->free_count--;
+
+ queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
+
+ return (queue_rc < 0) ? -EIO : 0;
+}
+
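+/*
+ * Credit update_free_count entries back to the WQ and submit as many pending
+ * WQEs as the credits allow; deferred aborts flagged by efct_hw_io_abort()
+ * are built here and re-queued for submission.
+ */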
+static void
+hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
+{
+ struct efct_hw_wqe *wqe;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+
+ /* Update free count with value passed in */
+ wq->free_count += update_free_count;
+
+ while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
+ wqe = list_first_entry(&wq->pending_list,
+ struct efct_hw_wqe, list_entry);
+ list_del_init(&wqe->list_entry);
+ _efct_hw_wq_write(wq, wqe);
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+}
+
+void
+efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
+{
+ u8 cqe[sizeof(struct sli4_mcqe)];
+ u16 rid = U16_MAX;
+ /* completion type */
+ enum sli4_qentry ctype;
+ u32 n_processed = 0;
+ u32 tstart, telapsed;
+
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
+ int status;
+
+ status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
+ /*
+ * The sign of status is significant. If status is:
+ * == 0 : call completed correctly and
+ * the CQE indicated success
+ * > 0 : call completed correctly and
+ * the CQE indicated an error
+ * < 0 : call failed and no information is available about the
+ * CQE
+ */
+ if (status < 0) {
+ if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
+ /*
+ * Notification that an entry was consumed,
+ * but not completed
+ */
+ continue;
+
+ break;
+ }
+
+ switch (ctype) {
+ case SLI4_QENTRY_ASYNC:
+ sli_cqe_async(&hw->sli, cqe);
+ break;
+ case SLI4_QENTRY_MQ:
+ /*
+ * Process MQ entry. Note there is no way to determine
+ * the MQ_ID from the completion entry.
+ */
+ efct_hw_mq_process(hw, status, hw->mq);
+ break;
+ case SLI4_QENTRY_WQ:
+ efct_hw_wq_process(hw, cq, cqe, status, rid);
+ break;
+ case SLI4_QENTRY_WQ_RELEASE: {
+ u32 wq_id = rid;
+ int index;
+ struct hw_wq *wq = NULL;
+
+ index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
+
+ if (likely(index >= 0)) {
+ wq = hw->hw_wq[index];
+ } else {
+ efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
+ break;
+ }
+ /* Submit any HW IOs that are on the WQ pending list */
+ hw_wq_submit_pending(wq, wq->wqec_set_count);
+
+ break;
+ }
+
+ case SLI4_QENTRY_RQ:
+ efct_hw_rqpair_process_rq(hw, cq, cqe);
+ break;
+ case SLI4_QENTRY_XABT: {
+ efct_hw_xabt_process(hw, cq, cqe, rid);
+ break;
+ }
+ default:
+ efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
+ ctype, rid);
+ break;
+ }
+
+ n_processed++;
+ if (n_processed == cq->queue->proc_limit)
+ break;
+
+ if (cq->queue->n_posted >= cq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, cq->queue, false);
+ }
+
+ sli_queue_arm(&hw->sli, cq->queue, true);
+
+ if (n_processed > cq->queue->max_num_processed)
+ cq->queue->max_num_processed = n_processed;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed > cq->queue->max_process_time)
+ cq->queue->max_process_time = telapsed;
+}
+
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid)
+{
+ struct hw_wq_callback *wqcb;
+
+ if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
+ if (status)
+ efc_log_err(hw->os, "reque xri failed, status = %d\n",
+ status);
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, rid);
+ if (!wqcb) {
+ efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
+ return;
+ }
+
+ if (!wqcb->callback) {
+ efc_log_err(hw->os, "wqcb callback is NULL\n");
+ return;
+ }
+
+ (*wqcb->callback)(wqcb->arg, cqe, status);
+}
+
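+/*
+ * Handle an XRI_ABORTED completion: look up the HW IO by XRI, clear its
+ * exchange-busy state, deliver any latched completion callback, and move an
+ * IO that was parked on io_wait_free back onto the free list.
+ */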
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid)
+{
+ /* search IOs wait free list */
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ io = efct_hw_io_lookup(hw, rid);
+ if (!io) {
+ /* IO lookup failure should never happen */
+ efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
+ return;
+ }
+
+ if (!io->xbusy)
+ efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
+ else
+ /* mark IO as no longer busy */
+ io->xbusy = false;
+
+ /*
+ * For IOs that were aborted internally, we need to issue any pending
+ * callback here.
+ */
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ /*
+ * Use latched status as this is always saved for an internal
+ * abort
+ */
+ int status = io->saved_status;
+ u32 len = io->saved_len;
+ u32 ext = io->saved_ext;
+
+ io->done = NULL;
+ io->status_saved = false;
+
+ done(io, len, status, ext, arg);
+ }
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ if (io->state == EFCT_HW_IO_STATE_INUSE ||
+ io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ /* if on wait_free list, caller has already freed IO;
+ * remove from wait_free list and add to free list.
+ * if on in-use list, already marked as no longer busy;
+ * just leave there and wait for caller to free.
+ */
+ if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ io->state = EFCT_HW_IO_STATE_FREE;
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+static int
+efct_hw_flush(struct efct_hw *hw)
+{
+ u32 i = 0;
+
+ /* Process any remaining completions */
+ for (i = 0; i < hw->eq_count; i++)
+ efct_hw_process(hw, i, ~0);
+
+ return 0;
+}
+
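+/*
+ * Queue a WQE for transmission: if nothing is pending and a WQ credit is
+ * available, the WQE is written directly; otherwise it is appended to the
+ * pending list, which is then drained in order while credits remain.
+ */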
+int
+efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int rc = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+ if (list_empty(&wq->pending_list)) {
+ if (wq->free_count > 0) {
+ rc = _efct_hw_wq_write(wq, wqe);
+ } else {
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ while (wq->free_count > 0) {
+ wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
+ list_entry);
+ if (!wqe)
+ break;
+
+ list_del_init(&wqe->list_entry);
+ rc = _efct_hw_wq_write(wq, wqe);
+ if (rc)
+ break;
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+
+ return rc;
+}
+
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_bls_send(efct, type, bls, NULL, NULL);
+}
+
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg)
+{
+ struct efct_hw *hw = &efct->hw;
+ struct efct_hw_io *hio;
+ struct sli_bls_payload bls;
+ int rc;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os,
+ "cannot send BLS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ efc_log_err(hw->os, "HIO allocation failed\n");
+ return -EIO;
+ }
+
+ hio->done = cb;
+ hio->arg = arg;
+
+ bls_params->xri = hio->indicator;
+ bls_params->tag = hio->reqtag;
+
+ if (type == FC_RCTL_BA_ACC) {
+ hio->type = EFCT_HW_BLS_ACC;
+ bls.type = SLI4_SLI_BLS_ACC;
+ memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
+ } else {
+ hio->type = EFCT_HW_BLS_RJT;
+ bls.type = SLI4_SLI_BLS_RJT;
+ memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
+ }
+
+ bls.ox_id = cpu_to_le16(bls_params->ox_id);
+ bls.rx_id = cpu_to_le16(bls_params->rx_id);
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
+ &bls, bls_params)) {
+ efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+ return -EIO;
+ }
+
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+
+ return rc;
+}
+
+static int
+efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *arg)
+{
+ struct efc_disc_io *io = arg;
+
+ efc_disc_io_complete(io, length, status, ext_status);
+ return 0;
+}
+
+static inline void
+efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
+{
+ u8 *cmd = io->req.virt;
+
+ params->cmd = *cmd;
+ params->s_id = io->s_id;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.els.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.els.timeout;
+}
+
+static inline void
+efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
+{
+ params->r_ctl = io->iparam.ct.r_ctl;
+ params->type = io->iparam.ct.type;
+ params->df_ctl = io->iparam.ct.df_ctl;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.ct.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.ct.timeout;
+}
+
+/**
+ * efct_els_hw_srrs_send() - Send a single request and response cmd.
+ * @efc: efc library structure
+ * @io: Discovery IO used to hold els and ct cmd context.
+ *
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ * - Sending an ELS request.
+ * - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
+ *
+ * Return: Status of the request.
+ */
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
+{
+ struct efct *efct = efc->base;
+ struct efct_hw_io *hio;
+ struct efct_hw *hw = &efct->hw;
+ struct efc_dma *send = &io->req;
+ struct efc_dma *receive = &io->rsp;
+ struct sli4_sge *sge = NULL;
+ int rc = 0;
+ u32 len = io->xmit_len;
+ u32 sge0_flags;
+ u32 sge1_flags;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_debug(hw->os,
+ "cannot send SRRS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ pr_err("HIO alloc failed\n");
+ return -EIO;
+ }
+
+ hio->done = efct_els_ssrs_send_cb;
+ hio->arg = io;
+
+ sge = hio->sgl->virt;
+
+ /* clear both SGE */
+ memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+
+ sge0_flags = le32_to_cpu(sge[0].dw2_flags);
+ sge1_flags = le32_to_cpu(sge[1].dw2_flags);
+ if (send->size) {
+ sge[0].buffer_address_high =
+ cpu_to_le32(upper_32_bits(send->phys));
+ sge[0].buffer_address_low =
+ cpu_to_le32(lower_32_bits(send->phys));
+
+ sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+
+ sge[0].buffer_length = cpu_to_le32(len);
+ }
+
+ if (io->io_type == EFC_DISC_IO_ELS_REQ ||
+ io->io_type == EFC_DISC_IO_CT_REQ) {
+ sge[1].buffer_address_high =
+ cpu_to_le32(upper_32_bits(receive->phys));
+ sge[1].buffer_address_low =
+ cpu_to_le32(lower_32_bits(receive->phys));
+
+ sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ sge1_flags |= SLI4_SGE_LAST;
+
+ sge[1].buffer_length = cpu_to_le32(receive->size);
+ } else {
+ sge0_flags |= SLI4_SGE_LAST;
+ }
+
+ sge[0].dw2_flags = cpu_to_le32(sge0_flags);
+ sge[1].dw2_flags = cpu_to_le32(sge1_flags);
+
+ switch (io->io_type) {
+ case EFC_DISC_IO_ELS_REQ: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_REQ;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+
+ if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &els_params)) {
+ efc_log_err(hw->os, "REQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_ELS_RESP: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_RSP;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+ if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
+ &els_params)){
+ efc_log_err(hw->os, "RSP WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_REQ: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "GEN WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_RESP: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT_RSP;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "XMIT SEQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
+ rc = -EIO;
+ }
+
+ if (rc == 0) {
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+ }
+
+ return rc;
+}
+
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg)
+{
+ int rc = 0;
+ bool send_wqe = true;
+
+ if (!io) {
+ pr_err("bad parm hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ /*
+ * Save state needed during later stages
+ */
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ /*
+ * Format the work queue entry used to send the IO
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+ struct fcp_txrdy *xfer = io->xfer_rdy.virt;
+
+ /*
+ * Fill in the XFER_RDY for IF_TYPE 0 devices
+ */
+ xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
+ xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_READ: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TSEND WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_RSP: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, SLI4_CQ_DEFAULT,
+ 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRSP WQE error\n");
+ rc = -EIO;
+ }
+
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ rc = -EIO;
+ }
+
+ if (send_wqe && rc == 0) {
+ io->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hw->tcmd_wq_submit[io->wq->instance]++;
+ io->wq->use_count++;
+ rc = efct_hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ io->xbusy = false;
+ }
+ }
+
+ return rc;
+}
+
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ int rc;
+ struct efct_hw_wqe *wqe;
+ u32 xri;
+ struct hw_wq *wq;
+
+ wqe = &ctx->wqe;
+
+ /* populate the callback object */
+ ctx->hw = hw;
+
+ /* Fetch and populate request tag */
+ ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
+ if (!ctx->wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ wq = hw->hw_wq[0];
+
+ /* Set XRI and RX_ID in the header based on which WQ, and which
+ * send_frame_io we are using
+ */
+ xri = wq->send_frame_io->indicator;
+
+ /* Build the send frame WQE */
+ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
+ sof, eof, (u32 *)hdr, payload, payload->len,
+ EFCT_HW_SEND_FRAME_TIMEOUT, xri,
+ ctx->wqcb->instance_index);
+ if (rc) {
+ efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Write to WQ */
+ rc = efct_hw_wq_write(wq, wqe);
+ if (rc) {
+ efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
+ return -EIO;
+ }
+
+ wq->use_count++;
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link_stat(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_link_stats *mbox_rsp;
+ struct efct_hw_link_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
+ u32 num_counters, i;
+ u32 mbox_rsp_flags = 0;
+
+ mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
+ mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
+ num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
+ memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
+ EFCT_HW_LINK_STAT_MAX);
+
+ /* Fill overflow counts; the mask starts from SLI4_READ_LNKSTAT_W02OF */
+ for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
+ counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
+
+ counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->linkfail_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssync_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssignal_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_errcnt);
+ counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
+ le32_to_cpu(mbox_rsp->inval_txword_errcnt);
+ counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->crc_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
+ le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
+ counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofa_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofni_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_soff_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
+ u8 clear_overflow_flags, u8 clear_all_counters,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_link_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command */
+ if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
+ clear_overflow_flags, clear_all_counters))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_link_stat, cb_arg);
+
+ if (rc)
+ kfree(cb_arg);
+
+ return rc;
+}
+
+static int
+efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_status *mbox_rsp =
+ (struct sli4_cmd_read_status *)mqe;
+ struct efct_hw_host_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
+ u32 num_counters = EFCT_HW_HOST_STAT_MAX;
+
+ memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
+ EFCT_HW_HOST_STAT_MAX);
+
+ counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_orig);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_resp);
+ counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_host_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command to get the host stats */
+ if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_host_stat, cb_arg);
+
+ if (rc) {
+ efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+struct efct_hw_async_call_ctx {
+ efct_hw_async_cb_t callback;
+ void *arg;
+ u8 cmd[SLI4_BMBX_SIZE];
+};
+
+static void
+efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw, status, mqe, ctx->arg);
+
+ kfree(ctx);
+ }
+}
+
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer);
+ * it must be persistent because the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->callback = callback;
+ ctx->arg = arg;
+
+ /* Build and send a NOP mailbox command */
+ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
+ efc_log_err(hw->os, "COMMON_NOP format failure\n");
+ kfree(ctx);
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
+ ctx);
+ if (rc) {
+ efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
+ kfree(ctx);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_sli_config *mbox_rsp =
+ (struct sli4_cmd_sli_config *)mqe;
+ struct sli4_rsp_cmn_write_object *wr_obj_rsp;
+ struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
+ u32 bytes_written;
+ u16 mbox_status;
+ u32 change_status;
+
+ wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
+ &mbox_rsp->payload.embed;
+ bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
+ mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
+ change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
+ RSP_CHANGE_STATUS);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (!status && mbox_status)
+ status = mbox_status;
+ cb_arg->cb(status, bytes_written, change_status,
+ cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
+ u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+ struct efct_hw_fw_wr_cb_arg *cb_arg;
+ int noc = 0;
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Write a portion of a firmware image to the device */
+ if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
+ noc, last, size, offset, "/prg/",
+ dma))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_fw_write, cb_arg);
+
+ if (rc != 0) {
+ efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
+ void *arg)
+{
+ return 0;
+}
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 link[SLI4_BMBX_SIZE];
+ u32 speed = 0;
+ u8 reset_alpa = 0;
+
+ switch (ctrl) {
+ case EFCT_HW_PORT_INIT:
+ if (!sli_cmd_config_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "CONFIG_LINK failed\n");
+ break;
+ }
+ speed = hw->config.speed;
+ reset_alpa = (u8)(value & 0xff);
+
+ rc = -EIO;
+ if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "INIT_LINK failed\n");
+ break;
+
+ case EFCT_HW_PORT_SHUTDOWN:
+ if (!sli_cmd_down_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "DOWN_LINK failed\n");
+ break;
+
+ default:
+ efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
+ break;
+ }
+
+ return rc;
+}
+
+void
+efct_hw_teardown(struct efct_hw *hw)
+{
+ u32 i = 0;
+ u32 destroy_queues;
+ u32 free_memory;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
+ free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
+
+ /* Cancel Sliport Healthcheck */
+ if (hw->sliport_healthcheck) {
+ hw->sliport_healthcheck = 0;
+ efct_hw_config_sli_port_health_check(hw, 0, 0);
+ }
+
+ if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_debug(hw->os,
+ "Some cmds still pending on MQ queue\n");
+
+ /* Cancel any remaining commands */
+ efct_hw_command_cancel(hw);
+ } else {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+ }
+
+ dma_free_coherent(&efct->pci->dev,
+ hw->rnode_mem.size, hw->rnode_mem.virt,
+ hw->rnode_mem.phys);
+ memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
+
+ if (hw->io) {
+ for (i = 0; i < hw->config.n_io; i++) {
+ if (hw->io[i] && hw->io[i]->sgl &&
+ hw->io[i]->sgl->virt) {
+ dma_free_coherent(&efct->pci->dev,
+ hw->io[i]->sgl->size,
+ hw->io[i]->sgl->virt,
+ hw->io[i]->sgl->phys);
+ }
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+ kfree(hw->io);
+ hw->io = NULL;
+ kfree(hw->wqe_buffs);
+ hw->wqe_buffs = NULL;
+ }
+
+ dma = &hw->xfer_rdy;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ dma = &hw->loop_map;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ for (i = 0; i < hw->wq_count; i++)
+ sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->rq_count; i++)
+ sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->mq_count; i++)
+ sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->cq_count; i++)
+ sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
+ free_memory);
+
+ /* Free rq buffers */
+ efct_hw_rx_free(hw);
+
+ efct_hw_queue_teardown(hw);
+
+ kfree(hw->wq_cpu_array);
+
+ sli_teardown(&hw->sli);
+
+ /* record the fact that the queues are non-functional */
+ hw->state = EFCT_HW_STATE_UNINITIALIZED;
+
+ /* free sequence free pool */
+ kfree(hw->seq_pool);
+ hw->seq_pool = NULL;
+
+ /* free hw_wq_callback pool */
+ efct_hw_reqtag_pool_free(hw);
+
+ mempool_destroy(hw->cmd_ctx_pool);
+ mempool_destroy(hw->mbox_rqst_pool);
+
+ /* Mark HW setup as not having been called */
+ hw->hw_setup_called = false;
+}
+
+static int
+efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
+ enum efct_hw_state prev_state)
+{
+ int rc = 0;
+
+ switch (reset) {
+ case EFCT_HW_RESET_FUNCTION:
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ case EFCT_HW_RESET_FIRMWARE:
+ efc_log_debug(hw->os, "issuing firmware reset\n");
+ if (sli_fw_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_soft_reset failed\n");
+ rc = -EIO;
+ }
+ /*
+ * Because the FW reset leaves the FW in a non-running state,
+ * follow that with a regular reset.
+ */
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ default:
+ efc_log_err(hw->os, "unknown type - no reset performed\n");
+ hw->state = prev_state;
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
+{
+ int rc = 0;
+ enum efct_hw_state prev_state = hw->state;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE)
+ efc_log_debug(hw->os,
+ "HW state %d is not active\n", hw->state);
+
+ hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
+
+ /*
+ * If the prev_state is already reset/teardown in progress,
+ * don't continue further
+ */
+ if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
+ prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
+ return efct_hw_sli_reset(hw, reset, prev_state);
+
+ if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_err(hw->os,
+ "Some commands still pending on MQ queue\n");
+ }
+
+ /* Reset the chip */
+ rc = efct_hw_sli_reset(hw, reset, prev_state);
+ if (rc == -EINVAL)
+ return -EIO;
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
new file mode 100644
index 000000000..f3f4aa78d
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef _EFCT_HW_H
+#define _EFCT_HW_H
+
+#include "../libefc_sli/sli4.h"
+
+/*
+ * EFCT PCI IDs
+ */
+#define EFCT_VENDOR_ID 0x10df
+/* LightPulse 16Gb x 4 FC (lancer-g6) */
+#define EFCT_DEVICE_LANCER_G6 0xe307
+/* LightPulse 32Gb x 4 FC (lancer-g7) */
+#define EFCT_DEVICE_LANCER_G7 0xf407
+
+/* Default RQ entry counts used by the driver */
+#define EFCT_HW_RQ_ENTRIES_MIN 512
+#define EFCT_HW_RQ_ENTRIES_DEF 1024
+#define EFCT_HW_RQ_ENTRIES_MAX 4096
+
+/* Defines the size of the RQ buffers used for each RQ */
+#define EFCT_HW_RQ_SIZE_HDR 128
+#define EFCT_HW_RQ_SIZE_PAYLOAD 1024
+
+/* Define the maximum number of multi-receive queues */
+#define EFCT_HW_MAX_MRQS 8
+
+/*
+ * Defines how often the WQEC bit is set in a submitted
+ * WQE, causing a consumed/released completion to be posted.
+ */
+#define EFCT_HW_WQEC_SET_COUNT 32
+
+/* Send frame timeout in seconds */
+#define EFCT_HW_SEND_FRAME_TIMEOUT 10
+
+/*
+ * FDT Transfer Hint value: reads greater than this value
+ * will be segmented to implement fairness. A value of zero disables
+ * the feature.
+ */
+#define EFCT_HW_FDT_XFER_HINT 8192
+
+#define EFCT_HW_TIMECHECK_ITERATIONS 100
+#define EFCT_HW_MAX_NUM_MQ 1
+#define EFCT_HW_MAX_NUM_RQ 32
+#define EFCT_HW_MAX_NUM_EQ 16
+#define EFCT_HW_MAX_NUM_WQ 32
+#define EFCT_HW_DEF_NUM_EQ 1
+
+#define OCE_HW_MAX_NUM_MRQ_PAIRS 16
+
+#define EFCT_HW_MQ_DEPTH 128
+#define EFCT_HW_EQ_DEPTH 1024
+
+/*
+ * A CQ will be assigned to each WQ
+ * (the CQ must have 2X the entries of the WQ for abort
+ * processing), plus a separate one for each RQ pair and one for the MQ
+ */
+#define EFCT_HW_MAX_NUM_CQ \
+ ((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
+
+#define EFCT_HW_Q_HASH_SIZE 128
+#define EFCT_HW_RQ_HEADER_SIZE 128
+#define EFCT_HW_RQ_HEADER_INDEX 0
+
+#define EFCT_HW_REQUE_XRI_REGTAG 65534
+
+/* Options for efct_hw_command() */
+enum efct_cmd_opts {
+ /* command executes synchronously and busy-waits for completion */
+ EFCT_CMD_POLL,
+ /* command executes asynchronously. Uses callback */
+ EFCT_CMD_NOWAIT,
+};
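+
+/*
+ * EFCT_CMD_POLL copies the command into the bootstrap mailbox and busy-waits
+ * for the completion; EFCT_CMD_NOWAIT queues the command and completes
+ * through the supplied callback, as efct_hw_async_call() does:
+ *
+ *   efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb, ctx);
+ */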
+
+enum efct_hw_reset {
+ EFCT_HW_RESET_FUNCTION,
+ EFCT_HW_RESET_FIRMWARE,
+ EFCT_HW_RESET_MAX
+};
+
+enum efct_hw_topo {
+ EFCT_HW_TOPOLOGY_AUTO,
+ EFCT_HW_TOPOLOGY_NPORT,
+ EFCT_HW_TOPOLOGY_LOOP,
+ EFCT_HW_TOPOLOGY_NONE,
+ EFCT_HW_TOPOLOGY_MAX
+};
+
+/* pack fw revision values into a single uint64_t */
+#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
+ | ((uint64_t)(c) << 16) | ((uint64_t)(d)))
+
+#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
+
+enum efct_hw_io_type {
+ EFCT_HW_ELS_REQ,
+ EFCT_HW_ELS_RSP,
+ EFCT_HW_FC_CT,
+ EFCT_HW_FC_CT_RSP,
+ EFCT_HW_BLS_ACC,
+ EFCT_HW_BLS_RJT,
+ EFCT_HW_IO_TARGET_READ,
+ EFCT_HW_IO_TARGET_WRITE,
+ EFCT_HW_IO_TARGET_RSP,
+ EFCT_HW_IO_DNRX_REQUEUE,
+ EFCT_HW_IO_MAX,
+};
+
+enum efct_hw_io_state {
+ EFCT_HW_IO_STATE_FREE,
+ EFCT_HW_IO_STATE_INUSE,
+ EFCT_HW_IO_STATE_WAIT_FREE,
+ EFCT_HW_IO_STATE_WAIT_SEC_HIO,
+};
+
+#define EFCT_TARGET_WRITE_SKIPS 1
+#define EFCT_TARGET_READ_SKIPS 2
+
+struct efct_hw;
+struct efct_io;
+
+#define EFCT_CMD_CTX_POOL_SZ 32
+/**
+ * HW command context.
+ * Stores the state for the asynchronous commands sent to the hardware.
+ */
+struct efct_command_ctx {
+ struct list_head list_entry;
+ int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
+ void *arg; /* Argument for callback */
+ /* buffer holding command / results */
+ u8 buf[SLI4_BMBX_SIZE];
+ void *ctx; /* upper layer context */
+};
+
+struct efct_hw_sgl {
+ uintptr_t addr;
+ size_t len;
+};
+
+union efct_hw_io_param_u {
+ struct sli_bls_params bls;
+ struct sli_els_params els;
+ struct sli_ct_params fc_ct;
+ struct sli_fcp_tgt_params fcp_tgt;
+};
+
+/* WQ steering mode */
+enum efct_hw_wq_steering {
+ EFCT_HW_WQ_STEERING_CLASS,
+ EFCT_HW_WQ_STEERING_REQUEST,
+ EFCT_HW_WQ_STEERING_CPU,
+};
+
+/* HW wqe object */
+struct efct_hw_wqe {
+ struct list_head list_entry;
+ bool abort_wqe_submit_needed;
+ bool send_abts;
+ u32 id;
+ u32 abort_reqtag;
+ u8 *wqebuf;
+};
+
+struct efct_hw_io;
+/* Typedef for HW "done" callback */
+typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
+ u32 ext, void *ul_arg);
+
+/**
+ * HW IO object.
+ *
+ * Stores the per-IO information necessary
+ * for both SLI and efct.
+ * @ref: reference counter for hw io object
+ * @state: state of IO: free, busy, wait_free
+ * @list_entry: used for busy, wait_free, free lists
+ * @wqe: Work queue object, with link for pending
+ * @hw: pointer back to hardware context
+ * @xfer_rdy: transfer ready data
+ * @type: IO type
+ * @xbusy: Exchange is active in FW
+ * @abort_in_progress: if TRUE, abort is in progress
+ * @status_saved: if TRUE, latched status should be returned
+ * @wq_class: WQ class if steering mode is Class
+ * @reqtag: request tag for this HW IO
+ * @wq: WQ assigned to the exchange
+ * @done: Function called on IO completion
+ * @arg: argument passed to IO done callback
+ * @abort_done: Function called on abort completion
+ * @abort_arg: argument passed to abort done callback
+ * @wq_steering: WQ steering mode request
+ * @saved_status: Saved status
+ * @saved_len: Status length
+ * @saved_ext: Saved extended status
+ * @eq: EQ on which this HIO came up
+ * @sge_offset: SGE data offset
+ * @def_sgl_count: Count of SGEs in default SGL
+ * @abort_reqtag: request tag for an abort of this HW IO
+ * @indicator: Exchange indicator
+ * @def_sgl: default SGL
+ * @sgl: pointer to current active SGL
+ * @sgl_count: count of SGEs in io->sgl
+ * @first_data_sge: index of first data SGE
+ * @n_sge: number of active SGEs
+ */
+struct efct_hw_io {
+ struct kref ref;
+ enum efct_hw_io_state state;
+ void (*release)(struct kref *arg);
+ struct list_head list_entry;
+ struct efct_hw_wqe wqe;
+
+ struct efct_hw *hw;
+ struct efc_dma xfer_rdy;
+ u16 type;
+ bool xbusy;
+ int abort_in_progress;
+ bool status_saved;
+ u8 wq_class;
+ u16 reqtag;
+
+ struct hw_wq *wq;
+ efct_hw_done_t done;
+ void *arg;
+ efct_hw_done_t abort_done;
+ void *abort_arg;
+
+ enum efct_hw_wq_steering wq_steering;
+
+ u32 saved_status;
+ u32 saved_len;
+ u32 saved_ext;
+
+ struct hw_eq *eq;
+ u32 sge_offset;
+ u32 def_sgl_count;
+ u32 abort_reqtag;
+ u32 indicator;
+ struct efc_dma def_sgl;
+ struct efc_dma *sgl;
+ u32 sgl_count;
+ u32 first_data_sge;
+ u32 n_sge;
+};
+
+enum efct_hw_port {
+ EFCT_HW_PORT_INIT,
+ EFCT_HW_PORT_SHUTDOWN,
+};
+
+/* Node group rpi reference */
+struct efct_hw_rpi_ref {
+ atomic_t rpi_count;
+ atomic_t rpi_attached;
+};
+
+enum efct_hw_link_stat {
+ EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
+ EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
+ EFCT_HW_LINK_STAT_CRC_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
+ EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
+ EFCT_HW_LINK_STAT_MAX,
+};
+
+enum efct_hw_host_stat {
+ EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
+ EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
+ EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
+ EFCT_HW_HOST_STAT_MAX,
+};
+
+enum efct_hw_state {
+ EFCT_HW_STATE_UNINITIALIZED,
+ EFCT_HW_STATE_QUEUES_ALLOCATED,
+ EFCT_HW_STATE_ACTIVE,
+ EFCT_HW_STATE_RESET_IN_PROGRESS,
+ EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
+};
+
+struct efct_hw_link_stat_counts {
+ u8 overflow;
+ u32 counter;
+};
+
+struct efct_hw_host_stat_counts {
+ u32 counter;
+};
+
+/* Structure used for the hash lookup of queue IDs */
+struct efct_queue_hash {
+ bool in_use;
+ u16 id;
+ u16 index;
+};
+
+/* WQ callback object */
+struct hw_wq_callback {
+ u16 instance_index; /* use for request tag */
+ void (*callback)(void *arg, u8 *cqe, int status);
+ void *arg;
+ struct list_head list_entry;
+};
+
+struct reqtag_pool {
+ spinlock_t lock; /* pool lock */
+ struct hw_wq_callback *tags[U16_MAX];
+ struct list_head freelist;
+};
+
+struct efct_hw_config {
+ u32 n_eq;
+ u32 n_cq;
+ u32 n_mq;
+ u32 n_rq;
+ u32 n_wq;
+ u32 n_io;
+ u32 n_sgl;
+ u32 speed;
+ u32 topology;
+ /* size of the buffers for first burst */
+ u32 rq_default_buffer_size;
+ u8 esoc;
+ /* MRQ RQ selection policy */
+ u8 rq_selection_policy;
+ /* RQ quanta if rq_selection_policy == 2 */
+ u8 rr_quanta;
+ u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+};
+
+struct efct_hw {
+ struct efct *os;
+ struct sli4 sli;
+ u16 ulp_start;
+ u16 ulp_max;
+ u32 dump_size;
+ enum efct_hw_state state;
+ bool hw_setup_called;
+ u8 sliport_healthcheck;
+ u16 fcf_indicator;
+
+ /* HW configuration */
+ struct efct_hw_config config;
+
+ /* calculated queue sizes for each type */
+ u32 num_qentries[SLI4_QTYPE_MAX];
+
+ /* Storage for SLI queue objects */
+ struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ];
+ struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
+ u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
+ struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ];
+ struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ];
+ struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ];
+
+ /* HW queue */
+ u32 eq_count;
+ u32 cq_count;
+ u32 mq_count;
+ u32 wq_count;
+ u32 rq_count;
+ u32 cmd_head_count;
+ struct list_head eq_list;
+
+ struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE];
+
+ /* Storage for HW queue objects */
+ struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ];
+ struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ];
+ struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ];
+ struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ];
+ struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ];
+ /* count of hw_rq[] entries */
+ u32 hw_rq_count;
+ /* count of multirq RQs */
+ u32 hw_mrq_count;
+
+ struct hw_wq **wq_cpu_array;
+
+ /* Sequence objects used in incoming frame processing */
+ struct efc_hw_sequence *seq_pool;
+
+ /* Maintain an ordered, linked list of outstanding HW commands. */
+ struct mutex bmbx_lock;
+ spinlock_t cmd_lock;
+ struct list_head cmd_head;
+ struct list_head cmd_pending;
+ mempool_t *cmd_ctx_pool;
+ mempool_t *mbox_rqst_pool;
+
+ struct sli4_link_event link;
+
+ /* pointer array of IO objects */
+ struct efct_hw_io **io;
+ /* array of WQE buffs mapped to IO objects */
+ u8 *wqe_buffs;
+
+ /* IO lock to synchronize list access */
+ spinlock_t io_lock;
+ /* List of IO objects in use */
+ struct list_head io_inuse;
+ /* List of IO objects waiting to be freed */
+ struct list_head io_wait_free;
+ /* List of IO objects available for allocation */
+ struct list_head io_free;
+
+ struct efc_dma loop_map;
+
+ struct efc_dma xfer_rdy;
+
+ struct efc_dma rnode_mem;
+
+ atomic_t io_alloc_failed_count;
+
+	/* stat: wq submit count */
+ u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
+ /* stat: wq complete count */
+ u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
+
+ atomic_t send_frame_seq_id;
+ struct reqtag_pool *wq_reqtag_pool;
+};
+
+enum efct_hw_io_count_type {
+ EFCT_HW_IO_INUSE_COUNT,
+ EFCT_HW_IO_FREE_COUNT,
+ EFCT_HW_IO_WAIT_FREE_COUNT,
+ EFCT_HW_IO_N_TOTAL_IO_COUNT,
+};
+
+/* HW queue data structures */
+struct hw_eq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct efct_hw *hw;
+ struct sli4_queue *queue;
+ struct list_head cq_list;
+ u32 use_count;
+};
+
+struct hw_cq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_eq *eq;
+ struct sli4_queue *queue;
+ struct list_head q_list;
+ u32 use_count;
+};
+
+struct hw_q {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+};
+
+struct hw_mq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+
+ u32 use_count;
+};
+
+struct hw_wq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ struct efct_hw *hw;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+ u32 class;
+
+ /* WQ consumed */
+ u32 wqec_set_count;
+ u32 wqec_count;
+ u32 free_count;
+ u32 total_submit_count;
+ struct list_head pending_list;
+
+ /* HW IO allocated for use with Send Frame */
+ struct efct_hw_io *send_frame_io;
+
+ /* Stats */
+ u32 use_count;
+ u32 wq_pending_count;
+};
+
+struct hw_rq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 use_count;
+ u32 hdr_entry_size;
+ u32 first_burst_entry_size;
+ u32 data_entry_size;
+ bool is_mrq;
+ u32 base_mrq_id;
+
+ struct hw_cq *cq;
+
+ u8 filter_mask;
+ struct sli4_queue *hdr;
+ struct sli4_queue *first_burst;
+ struct sli4_queue *data;
+
+ struct efc_hw_rq_buffer *hdr_buf;
+ struct efc_hw_rq_buffer *fb_buf;
+ struct efc_hw_rq_buffer *payload_buf;
+ /* RQ tracker for this RQ */
+ struct efc_hw_sequence **rq_tracker;
+};
+
+struct efct_hw_send_frame_context {
+ struct efct_hw *hw;
+ struct hw_wq_callback *wqcb;
+ struct efct_hw_wqe wqe;
+ void (*callback)(int status, void *arg);
+ void *arg;
+
+ /* General purpose elements */
+ struct efc_hw_sequence *seq;
+ struct efc_dma payload;
+};
+
+struct efct_hw_grp_hdr {
+ u32 size;
+ __be32 magic_number;
+ u32 word2;
+ u8 rev_name[128];
+ u8 date[12];
+ u8 revision[32];
+};
+
+static inline int
+efct_hw_get_link_speed(struct efct_hw *hw) {
+ return hw->link.speed;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
+int efct_hw_init(struct efct_hw *hw);
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value);
+int
+efct_hw_init_queues(struct efct_hw *hw);
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw);
+uint64_t
+efct_get_wwnn(struct efct_hw *hw);
+uint64_t
+efct_get_wwpn(struct efct_hw *hw);
+
+int efct_hw_rx_allocate(struct efct_hw *hw);
+int efct_hw_rx_post(struct efct_hw *hw);
+void efct_hw_rx_free(struct efct_hw *hw);
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
+ void *arg);
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
+
+struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
+int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
+u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg);
+int
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count);
+int
+efct_hw_io_init_sges(struct efct_hw *hw,
+ struct efct_hw_io *io, enum efct_hw_io_type type);
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length);
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg);
+u32
+efct_hw_io_get_count(struct efct_hw *hw,
+ enum efct_hw_io_count_type io_count_type);
+struct efct_hw_io
+*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
+void efct_hw_io_abort_all(struct efct_hw *hw);
+void efct_hw_io_free_internal(struct kref *arg);
+
+/* HW WQ request tag API */
+struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
+void efct_hw_reqtag_pool_free(struct efct_hw *hw);
+struct hw_wq_callback
+*efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe,
+ int status), void *arg);
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
+struct hw_wq_callback
+*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
+
+/* RQ completion handlers for RQ pair mode */
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw,
+ struct hw_cq *cq, u8 *cqe);
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
+static inline void
+efct_hw_sequence_copy(struct efc_hw_sequence *dst,
+ struct efc_hw_sequence *src)
+{
+	/* Copy src to dst (plain structure assignment) */
+ *dst = *src;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);
+
+static inline int
+efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ /* Only RQ pair mode is supported */
+ return efct_hw_rqpair_sequence_free(hw, seq);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec);
+void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid);
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid);
+int
+efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
+int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg);
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg);
+
+/* Function for retrieving link statistics */
+int
+efct_hw_get_link_stats(struct efct_hw *hw,
+ u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters,
+ void (*efct_hw_link_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg),
+ void *arg);
+/* Function for retrieving host statistics */
+int
+efct_hw_get_host_stats(struct efct_hw *hw,
+ u8 cc,
+ void (*efct_hw_host_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg),
+ void *arg);
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
+ u32 size, u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg);
+typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg);
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);
+
+struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
+struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count);
+struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
+struct hw_wq
+*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count);
+void efct_hw_del_eq(struct hw_eq *eq);
+void efct_hw_del_cq(struct hw_cq *cq);
+void efct_hw_del_mq(struct hw_mq *mq);
+void efct_hw_del_wq(struct hw_wq *wq);
+void efct_hw_del_rq(struct hw_rq *rq);
+void efct_hw_queue_teardown(struct efct_hw *hw);
+void efct_hw_teardown(struct efct_hw *hw);
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg);
+
+#endif /* _EFCT_HW_H */
diff --git a/drivers/scsi/elx/efct/efct_hw_queues.c b/drivers/scsi/elx/efct/efct_hw_queues.c
new file mode 100644
index 000000000..3a1d1a586
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw_queues.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+int
+efct_hw_init_queues(struct efct_hw *hw)
+{
+ struct hw_eq *eq = NULL;
+ struct hw_cq *cq = NULL;
+ struct hw_wq *wq = NULL;
+ struct hw_mq *mq = NULL;
+
+ struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
+ u32 i = 0, j;
+
+ hw->eq_count = 0;
+ hw->cq_count = 0;
+ hw->mq_count = 0;
+ hw->wq_count = 0;
+ hw->rq_count = 0;
+ hw->hw_rq_count = 0;
+ INIT_LIST_HEAD(&hw->eq_list);
+
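+	/*
+	 * Layout built below (derived from the code that follows): each EQ
+	 * gets a CQ + WQ pair, EQ 0 additionally gets a CQ + MQ pair, and a
+	 * CQ set plus a matching RQ-pair set are then created across all EQs.
+	 */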
+ for (i = 0; i < hw->config.n_eq; i++) {
+ /* Create EQ */
+ eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
+ if (!eq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ eqs[i] = eq;
+
+ /* Create one MQ */
+ if (!i) {
+ cq = efct_hw_new_cq(eq,
+ hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
+ if (!mq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create WQ */
+ cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
+ if (!wq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create CQ set */
+ if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ /* Create RQ set */
+ if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ for (j = 0; j < i ; j++) {
+ rqs[j]->filter_mask = 0;
+ rqs[j]->is_mrq = true;
+ rqs[j]->base_mrq_id = rqs[0]->hdr->id;
+ }
+
+ hw->hw_mrq_count = i;
+
+ return 0;
+}
+
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 cpu = 0, i;
+
+ /* Init cpu_map array */
+ hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
+ GFP_KERNEL);
+ if (!hw->wq_cpu_array)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->config.n_eq; i++) {
+ const struct cpumask *maskp;
+
+ /* Get a CPU mask for all CPUs affinitized to this vector */
+ maskp = pci_irq_get_affinity(efct->pci, i);
+ if (!maskp) {
+ efc_log_debug(efct, "maskp null for vector:%d\n", i);
+ continue;
+ }
+
+ /* Loop through all CPUs associated with vector idx */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
+ hw->wq_cpu_array[cpu] = hw->hw_wq[i];
+ }
+ }
+
+ return 0;
+}
+
+struct hw_eq *
+efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
+{
+ struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+ if (!eq)
+ return NULL;
+
+ eq->type = SLI4_QTYPE_EQ;
+ eq->hw = hw;
+ eq->entry_count = entry_count;
+ eq->instance = hw->eq_count++;
+ eq->queue = &hw->eq[eq->instance];
+ INIT_LIST_HEAD(&eq->cq_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
+ NULL)) {
+ efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
+ kfree(eq);
+ return NULL;
+ }
+
+ sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
+ hw->hw_eq[eq->instance] = eq;
+ INIT_LIST_HEAD(&eq->list_entry);
+ list_add_tail(&eq->list_entry, &hw->eq_list);
+ efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
+ eq->queue->id, eq->entry_count);
+ return eq;
+}
+
+struct hw_cq *
+efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
+{
+ struct efct_hw *hw = eq->hw;
+ struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+
+ if (!cq)
+ return NULL;
+
+ cq->eq = eq;
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = eq->hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+
+ INIT_LIST_HEAD(&cq->q_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
+ cq->entry_count, eq->queue)) {
+		efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
+			    cq->instance, cq->entry_count);
+ kfree(cq);
+ return NULL;
+ }
+
+ hw->hw_cq[cq->instance] = cq;
+ INIT_LIST_HEAD(&cq->list_entry);
+ list_add_tail(&cq->list_entry, &eq->cq_list);
+ efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
+ cq->queue->id, cq->entry_count);
+ return cq;
+}
+
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count)
+{
+ u32 i;
+ struct efct_hw *hw = eqs[0]->hw;
+ struct sli4 *sli4 = &hw->sli;
+ struct hw_cq *cq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
+ struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT];
+
+ /* Initialise CQS pointers to NULL */
+ for (i = 0; i < num_cqs; i++)
+ cqs[i] = NULL;
+
+ for (i = 0; i < num_cqs; i++) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ goto error;
+
+ cqs[i] = cq;
+ cq->eq = eqs[i];
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+ qs[i] = cq->queue;
+ assefct[i] = eqs[i]->queue;
+ INIT_LIST_HEAD(&cq->q_list);
+ }
+
+ if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {
+ efc_log_err(hw->os, "Failed to create CQ Set.\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ hw->hw_cq[cqs[i]->instance] = cqs[i];
+ INIT_LIST_HEAD(&cqs[i]->list_entry);
+ list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ kfree(cqs[i]);
+ cqs[i] = NULL;
+ }
+ return -EIO;
+}
+
+struct hw_mq *
+efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ mq->cq = cq;
+ mq->type = SLI4_QTYPE_MQ;
+ mq->instance = cq->eq->hw->mq_count++;
+ mq->entry_count = entry_count;
+ mq->entry_size = EFCT_HW_MQ_DEPTH;
+ mq->queue = &hw->mq[mq->instance];
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
+ cq->queue)) {
+ efc_log_err(hw->os, "MQ allocation failure\n");
+ kfree(mq);
+ return NULL;
+ }
+
+ hw->hw_mq[mq->instance] = mq;
+ INIT_LIST_HEAD(&mq->list_entry);
+ list_add_tail(&mq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
+ mq->queue->id, mq->entry_count);
+ return mq;
+}
+
+struct hw_wq *
+efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+
+ if (!wq)
+ return NULL;
+
+ wq->hw = cq->eq->hw;
+ wq->cq = cq;
+ wq->type = SLI4_QTYPE_WQ;
+ wq->instance = cq->eq->hw->wq_count++;
+ wq->entry_count = entry_count;
+ wq->queue = &hw->wq[wq->instance];
+ wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
+ wq->wqec_count = wq->wqec_set_count;
+ wq->free_count = wq->entry_count - 1;
+ INIT_LIST_HEAD(&wq->pending_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
+ wq->entry_count, cq->queue)) {
+ efc_log_err(hw->os, "WQ allocation failure\n");
+ kfree(wq);
+ return NULL;
+ }
+
+ hw->hw_wq[wq->instance] = wq;
+ INIT_LIST_HEAD(&wq->list_entry);
+ list_add_tail(&wq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
+ wq->instance, wq->queue->id, wq->entry_count, wq->class);
+ return wq;
+}
+
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count)
+{
+ struct efct_hw *hw = cqs[0]->eq->hw;
+ struct hw_rq *rq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
+ u32 i, q_count, size;
+
+ /* Initialise RQS pointers */
+ for (i = 0; i < num_rq_pairs; i++)
+ rqs[i] = NULL;
+
+ /*
+	 * Allocate an RQ object set, where each element in the set
+	 * encapsulates 2 SLI queues (an RQ pair)
+ */
+ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ goto error;
+
+ rqs[i] = rq;
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cqs[i];
+ rq->type = SLI4_QTYPE_RQ;
+ rq->entry_count = entry_count;
+
+ /* Header RQ */
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count] = rq->hdr;
+
+ /* Data RQ */
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count + 1] = rq->data;
+
+ rq->rq_tracker = NULL;
+ }
+
+ if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
+ cqs[0]->queue->id,
+ rqs[0]->entry_count,
+ rqs[0]->hdr_entry_size,
+ rqs[0]->data_entry_size)) {
+ efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
+ cqs[0]->queue->id);
+ goto error;
+ }
+
+ for (i = 0; i < num_rq_pairs; i++) {
+ hw->hw_rq[rqs[i]->instance] = rqs[i];
+ INIT_LIST_HEAD(&rqs[i]->list_entry);
+ list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
+ size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
+ rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
+ if (!rqs[i]->rq_tracker)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rq_pairs; i++) {
+ if (rqs[i]) {
+ kfree(rqs[i]->rq_tracker);
+ kfree(rqs[i]);
+ }
+ }
+
+ return -EIO;
+}
+
+void
+efct_hw_del_eq(struct hw_eq *eq)
+{
+ struct hw_cq *cq;
+ struct hw_cq *cq_next;
+
+ if (!eq)
+ return;
+
+ list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
+ efct_hw_del_cq(cq);
+ list_del(&eq->list_entry);
+ eq->hw->hw_eq[eq->instance] = NULL;
+ kfree(eq);
+}
+
+void
+efct_hw_del_cq(struct hw_cq *cq)
+{
+ struct hw_q *q;
+ struct hw_q *q_next;
+
+ if (!cq)
+ return;
+
+ list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
+ switch (q->type) {
+ case SLI4_QTYPE_MQ:
+ efct_hw_del_mq((struct hw_mq *)q);
+ break;
+ case SLI4_QTYPE_WQ:
+ efct_hw_del_wq((struct hw_wq *)q);
+ break;
+ case SLI4_QTYPE_RQ:
+ efct_hw_del_rq((struct hw_rq *)q);
+ break;
+ default:
+ break;
+ }
+ }
+ list_del(&cq->list_entry);
+ cq->eq->hw->hw_cq[cq->instance] = NULL;
+ kfree(cq);
+}
+
+void
+efct_hw_del_mq(struct hw_mq *mq)
+{
+ if (!mq)
+ return;
+
+ list_del(&mq->list_entry);
+ mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
+ kfree(mq);
+}
+
+void
+efct_hw_del_wq(struct hw_wq *wq)
+{
+ if (!wq)
+ return;
+
+ list_del(&wq->list_entry);
+ wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
+ kfree(wq);
+}
+
+void
+efct_hw_del_rq(struct hw_rq *rq)
+{
+ struct efct_hw *hw = NULL;
+
+ if (!rq)
+ return;
+ /* Free RQ tracker */
+ kfree(rq->rq_tracker);
+ rq->rq_tracker = NULL;
+ list_del(&rq->list_entry);
+ hw = rq->cq->eq->hw;
+ hw->hw_rq[rq->instance] = NULL;
+ kfree(rq);
+}
+
+void
+efct_hw_queue_teardown(struct efct_hw *hw)
+{
+ struct hw_eq *eq;
+ struct hw_eq *eq_next;
+
+ if (!hw->eq_list.next)
+ return;
+
+ list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
+ efct_hw_del_eq(eq);
+}
+
+static inline int
+efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
+{
+ return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
+}
+
+static struct efc_hw_sequence *
+efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[rqindex];
+ struct efc_hw_sequence *seq = NULL;
+ struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ unsigned long flags = 0;
+
+ if (bufindex >= rq_hdr->length) {
+ efc_log_err(hw->os,
+ "RQidx %d bufidx %d exceed ring len %d for id %d\n",
+ rqindex, bufindex, rq_hdr->length, rq_hdr->id);
+ return NULL;
+ }
+
+ /* rq_hdr lock also covers rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ seq = rq->rq_tracker[bufindex];
+ rq->rq_tracker[bufindex] = NULL;
+
+ if (!seq) {
+ efc_log_err(hw->os,
+ "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
+ rqindex, bufindex, rq_hdr->index);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return seq;
+}
+
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe)
+{
+ u16 rq_id;
+ u32 index;
+ int rqindex;
+ int rq_status;
+ u32 h_len;
+ u32 p_len;
+ struct efc_hw_sequence *seq;
+ struct hw_rq *rq;
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
+ &rq_id, &index);
+ if (rq_status != 0) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os,
+ "status=%#x: lookup fail id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (efct_hw_rqpair_sequence_free(hw, seq)) {
+ efc_log_debug(hw->os,
+ "status=%#x,fail rtrn buf to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+ /*
+			 * Since the RQ buffers were not consumed, they
+			 * cannot be returned to the chip.
+ */
+ efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
+ rq_status);
+ fallthrough;
+ default:
+ break;
+ }
+ return -EIO;
+ }
+
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
+ rq_id);
+ return -EIO;
+ }
+
+ rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ rq->use_count++;
+
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+ if (WARN_ON(!seq))
+ return -EIO;
+
+ seq->hw = hw;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ efct_unsolicited_cb(hw->os, seq);
+
+ return 0;
+}
+
+static int
+efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
+ struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
+ u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
+ struct hw_rq *rq = hw->hw_rq[hw_rq_index];
+ u32 phys_hdr[2];
+ u32 phys_payload[2];
+ int qindex_hdr;
+ int qindex_payload;
+ unsigned long flags = 0;
+
+ /* Update the RQ verification lookup tables */
+ phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
+ phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
+ phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
+ phys_payload[1] = lower_32_bits(seq->payload->dma.phys);
+
+ /* rq_hdr lock also covers payload / header->rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ /*
+ * Note: The header must be posted last for buffer pair mode because
+ * posting on the header queue posts the payload queue as well.
+ * We do not ring the payload queue independently in RQ pair mode.
+ */
+ qindex_payload = sli_rq_write(&hw->sli, rq_payload,
+ (void *)phys_payload);
+ qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
+ if (qindex_hdr < 0 ||
+ qindex_payload < 0) {
+ efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return -EIO;
+ }
+
+ /* ensure the indexes are the same */
+ WARN_ON(qindex_hdr != qindex_payload);
+
+ /* Update the lookup table */
+ if (!rq->rq_tracker[qindex_hdr]) {
+ rq->rq_tracker[qindex_hdr] = seq;
+ } else {
+ efc_log_debug(hw->os,
+ "expected rq_tracker[%d][%d] buffer to be NULL\n",
+ hw_rq_index, qindex_hdr);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return 0;
+}
+
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ int rc = 0;
+
+ /*
+	 * Post the data buffer first: in RQ pair mode, ringing the
+	 * doorbell of the header ring will post the data buffer as well.
+ */
+ if (efct_hw_rqpair_put(hw, seq)) {
+ efc_log_err(hw->os, "error writing buffers\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_rqpair_sequence_free(&efct->hw, seq);
+}
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
new file mode 100644
index 000000000..c612f0a48
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+
+struct efct_io_pool {
+ struct efct *efct;
+ spinlock_t lock; /* IO pool lock */
+ u32 io_num_ios; /* Total IOs allocated */
+ struct efct_io *ios[EFCT_NUM_SCSI_IOS];
+ struct list_head freelist;
+
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl)
+{
+ u32 i = 0;
+ struct efct_io_pool *io_pool;
+ struct efct_io *io;
+
+ /* Allocate the IO pool */
+ io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
+ if (!io_pool)
+ return NULL;
+
+ io_pool->efct = efct;
+ INIT_LIST_HEAD(&io_pool->freelist);
+ /* initialize IO pool lock */
+ spin_lock_init(&io_pool->lock);
+
+ for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ break;
+
+ io_pool->io_num_ios++;
+ io_pool->ios[i] = io;
+ io->tag = i;
+ io->instance_index = i;
+
+ /* Allocate a response buffer */
+ io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
+ io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
+ io->rspbuf.size,
+ &io->rspbuf.phys, GFP_KERNEL);
+ if (!io->rspbuf.virt) {
+ efc_log_err(efct, "dma_alloc rspbuf failed\n");
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ /* Allocate SGL */
+ io->sgl = kzalloc(sizeof(*io->sgl) * num_sgl, GFP_KERNEL);
+ if (!io->sgl) {
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ io->sgl_allocated = num_sgl;
+ io->sgl_count = 0;
+
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &io_pool->freelist);
+ }
+
+ return io_pool;
+}
+
+int
+efct_io_pool_free(struct efct_io_pool *io_pool)
+{
+ struct efct *efct;
+ u32 i;
+ struct efct_io *io;
+
+ if (io_pool) {
+ efct = io_pool->efct;
+
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ io = io_pool->ios[i];
+ if (!io)
+ continue;
+
+ kfree(io->sgl);
+ dma_free_coherent(&efct->pci->dev,
+ io->rspbuf.size, io->rspbuf.virt,
+ io->rspbuf.phys);
+ memset(&io->rspbuf, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(io_pool);
+ efct->xport->io_pool = NULL;
+ }
+
+ return 0;
+}
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
+{
+ struct efct_io *io = NULL;
+ struct efct *efct;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+
+ if (!list_empty(&io_pool->freelist)) {
+ io = list_first_entry(&io_pool->freelist, struct efct_io,
+ list_entry);
+ list_del_init(&io->list_entry);
+ }
+
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (!io)
+ return NULL;
+
+ io->io_type = EFCT_IO_TYPE_MAX;
+ io->hio_type = EFCT_HW_IO_MAX;
+ io->hio = NULL;
+ io->transferred = 0;
+ io->efct = efct;
+ io->timeout = 0;
+ io->sgl_count = 0;
+ io->tgt_task_tag = 0;
+ io->init_task_tag = 0;
+ io->hw_tag = 0;
+ io->display_name = "pending";
+ io->seq_init = 0;
+ io->io_free = 0;
+ io->release = NULL;
+ atomic_add_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_alloc);
+ return io;
+}
+
+/* Free an object used to track an IO */
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
+{
+ struct efct *efct;
+ struct efct_hw_io *hio = NULL;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+ hio = io->hio;
+ io->hio = NULL;
+ io->io_free = 1;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &io_pool->freelist);
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (hio)
+ efct_hw_io_free(&efct->hw, hio);
+
+ atomic_sub_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_free);
+}
+
+/* Find an I/O given its node and ox_id */
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id)
+{
+ struct efct_io *io = NULL;
+ unsigned long flags = 0;
+ u8 found = false;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_for_each_entry(io, &node->active_ios, list_entry) {
+ if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
+ (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
+ if (kref_get_unless_zero(&io->ref))
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return found ? io : NULL;
+}
diff --git a/drivers/scsi/elx/efct/efct_io.h b/drivers/scsi/elx/efct/efct_io.h
new file mode 100644
index 000000000..bb0f51811
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_IO_H__)
+#define __EFCT_IO_H__
+
+#include "efct_lio.h"
+
+#define EFCT_LOG_ENABLE_IO_ERRORS(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0)
+
+#define io_error_log(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \
+ efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \
+ } while (0)
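+
+/*
+ * Illustrative use (the message and rc are hypothetical): logging only
+ * happens when bit 6 of efct->logmask is set, e.g.
+ *
+ *   io_error_log(io, "sgl map failed: %d\n", rc);
+ */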
+
+#define SCSI_CMD_BUF_LENGTH 48
+#define SCSI_RSP_BUF_LENGTH (FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE)
+#define EFCT_NUM_SCSI_IOS 8192
+
+enum efct_io_type {
+ EFCT_IO_TYPE_IO = 0,
+ EFCT_IO_TYPE_ELS,
+ EFCT_IO_TYPE_CT,
+ EFCT_IO_TYPE_CT_RESP,
+ EFCT_IO_TYPE_BLS_RESP,
+ EFCT_IO_TYPE_ABORT,
+
+ EFCT_IO_TYPE_MAX,
+};
+
+enum efct_els_state {
+ EFCT_ELS_REQUEST = 0,
+ EFCT_ELS_REQUEST_DELAYED,
+ EFCT_ELS_REQUEST_DELAY_ABORT,
+ EFCT_ELS_REQ_ABORT,
+ EFCT_ELS_REQ_ABORTED,
+ EFCT_ELS_ABORT_IO_COMPL,
+};
+
+/**
+ * Scsi target IO object
+ * @efct: pointer back to efct
+ * @instance_index: unique instance index value
+ * @display_name: IO display name
+ * @node: pointer to node
+ * @list_entry: io list entry
+ * @io_pending_link: io pending list entry
+ * @ref: reference counter
+ * @release: release callback function
+ * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging
+ * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging
+ * @hw_tag: HW layer unique IO id
+ * @tag: unique IO identifier
+ * @sgl: SGL
+ * @sgl_allocated: Number of allocated SGEs
+ * @sgl_count: Number of SGEs in this SGL
+ * @tgt_io: backend target private IO data
+ * @exp_xfer_len: expected data transfer length, based on FC header
+ * @hw_priv: Declarations private to HW/SLI
+ * @io_type: indicates what this struct efct_io structure is used for
+ * @hio: hw io object
+ * @transferred: Number of bytes transferred
+ * @auto_resp: set if auto_trsp was set
+ * @low_latency: set if low latency request
+ * @wq_steering: selected WQ steering request
+ * @wq_class: selected WQ class if steering is class
+ * @xfer_req: transfer size for current request
+ * @scsi_tgt_cb: target callback function
+ * @scsi_tgt_cb_arg: target callback function argument
+ * @abort_cb: abort callback function
+ * @abort_cb_arg: abort callback function argument
+ * @bls_cb: BLS callback function
+ * @bls_cb_arg: BLS callback function argument
+ * @tmf_cmd: TMF command being processed
+ * @abort_rx_id: rx_id from the ABTS that initiated the command abort
+ * @cmd_tgt: True if this is a Target command
+ * @send_abts: when aborting, indicates ABTS is to be sent
+ * @cmd_ini: True if this is an Initiator command
+ * @seq_init: True if local node has sequence initiative
+ * @iparam: iparams for hw io send call
+ * @hio_type: HW IO type
+ * @wire_len: wire length
+ * @hw_cb: saved HW callback
+ * @io_to_abort: for abort handling, pointer to IO to abort
+ * @rspbuf: SCSI Response buffer
+ * @timeout: Timeout value in seconds for this IO
+ * @cs_ctl: CS_CTL priority for this IO
+ * @io_free: Is io object in freelist
+ * @app_id: application id
+ */
+struct efct_io {
+ struct efct *efct;
+ u32 instance_index;
+ const char *display_name;
+ struct efct_node *node;
+
+ struct list_head list_entry;
+ struct list_head io_pending_link;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ u32 init_task_tag;
+ u32 tgt_task_tag;
+ u32 hw_tag;
+ u32 tag;
+ struct efct_scsi_sgl *sgl;
+ u32 sgl_allocated;
+ u32 sgl_count;
+ struct efct_scsi_tgt_io tgt_io;
+ u32 exp_xfer_len;
+
+ void *hw_priv;
+
+ enum efct_io_type io_type;
+ struct efct_hw_io *hio;
+ size_t transferred;
+
+ bool auto_resp;
+ bool low_latency;
+ u8 wq_steering;
+ u8 wq_class;
+ u64 xfer_req;
+ efct_scsi_io_cb_t scsi_tgt_cb;
+ void *scsi_tgt_cb_arg;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+ efct_scsi_io_cb_t bls_cb;
+ void *bls_cb_arg;
+ enum efct_scsi_tmf_cmd tmf_cmd;
+ u16 abort_rx_id;
+
+ bool cmd_tgt;
+ bool send_abts;
+ bool cmd_ini;
+ bool seq_init;
+ union efct_hw_io_param_u iparam;
+ enum efct_hw_io_type hio_type;
+ u64 wire_len;
+ void *hw_cb;
+
+ struct efct_io *io_to_abort;
+
+ struct efc_dma rspbuf;
+ u32 timeout;
+ u8 cs_ctl;
+ u8 io_free;
+ u32 app_id;
+};
+
+struct efct_io_cb_arg {
+ int status;
+ int ext_status;
+ void *app;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl);
+int
+efct_io_pool_free(struct efct_io_pool *io_pool);
+u32
+efct_io_pool_allocated(struct efct_io_pool *io_pool);
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool);
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io);
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id);
+#endif /* __EFCT_IO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
new file mode 100644
index 000000000..be4b5c1ee
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -0,0 +1,1695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "efct_driver.h"
+#include "efct_lio.h"
+
+/*
+ * lio_wq is used to call into the LIO backend during creation or deletion of
+ * sessions. Because it is a single-threaded workqueue, it serializes session
+ * management.
+ */
+static struct workqueue_struct *lio_wq;
+
+static int
+efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
+{
+ u8 a[8];
+
+ put_unaligned_be64(wwn, a);
+ return snprintf(str, len, "%s%8phC", pre, a);
+}
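+
+/*
+ * For illustration (arbitrary value): efct_format_wwn(buf, sizeof(buf), "",
+ * 0x2000000000000001ULL) yields "20:00:00:00:00:00:00:01", since %8phC
+ * prints the eight big-endian bytes colon-separated.
+ */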
+
+static int
+efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
+{
+ int num;
+ u8 b[8];
+
+ if (npiv) {
+ num = sscanf(name,
+ "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ } else {
+ num = sscanf(name,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ }
+
+ if (num != 8)
+ return -EINVAL;
+
+ *wwp = get_unaligned_be64(b);
+ return 0;
+}
+
+static int
+efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
+{
+ unsigned int cnt = size;
+ int rc;
+
+ *wwpn = *wwnn = 0;
+ if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
+ if (rc)
+ return rc;
+
+ rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
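+
+/*
+ * For illustration (arbitrary values): the NPIV name parsed above is
+ * expected as "<16 hex WWPN>:<16 hex WWNN>", e.g.
+ * "2000000000000001:2000000000000002", while the non-NPIV path of
+ * efct_lio_parse_wwn() accepts the colon-separated form, e.g.
+ * "20:00:00:00:00:00:00:01".
+ */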
+
+static ssize_t
+efct_lio_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (!tpg->nport || !tpg->nport->efct) {
+ pr_err("%s: Unable to find EFCT device\n", __func__);
+ return -EINVAL;
+ }
+
+ efct = tpg->nport->efct;
+ efc = efct->efcport;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (op == 1) {
+ int ret;
+
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
+ if (ret) {
+ efct->tgt_efct.lio_nport = NULL;
+ efc_log_debug(efct, "cannot bring port online\n");
+ return ret;
+ }
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain && efc->domain->nport)
+ efct_scsi_tgt_del_nport(efc, efc->domain->nport);
+
+ tpg->enabled = false;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct_lio_vport *lio_vport = tpg->vport;
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (!lio_vport) {
+ pr_err("Unable to find vport\n");
+ return -EINVAL;
+ }
+
+ efct = lio_vport->efct;
+ efc = efct->efcport;
+
+ if (op == 1) {
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain) {
+ int ret;
+
+ ret = efc_nport_vport_new(efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn,
+ U32_MAX, false, true,
+ NULL, NULL);
+ if (ret != 0) {
+ efc_log_err(efct, "Failed to create Vport\n");
+ return ret;
+ }
+ return count;
+ }
+
+ if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
+ lio_vport->npiv_wwpn, U32_MAX,
+ false, true, NULL, NULL)))
+ return -ENOMEM;
+
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ tpg->enabled = false;
+ /* only physical nport should exist, free lio_nport
+ * allocated in efct_lio_make_nport
+ */
+ if (efc->domain) {
+ efc_nport_vport_del(efct->efcport, efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn);
+ return count;
+ }
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->nport->wwpn_str;
+}
+
+static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->vport->wwpn_str;
+}
+
+static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
+ return target_put_sess_cmd(se_cmd);
+}
+
+static int
+efct_lio_abort_tgt_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
+ return 0;
+}
+
+static void
+efct_lio_aborted_task(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
+
+ if (ocp->rsp_sent)
+ return;
+
+ /* command has been aborted, cleanup here */
+ ocp->aborting = true;
+ ocp->err = EFCT_SCSI_STATUS_ABORTED;
+ /* terminate the exchange */
+ efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
+}
+
+static void efct_lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct *efct = io->efct;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
+ efct_scsi_io_complete(io);
+ atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
+}
+
+static void efct_lio_close_session(struct se_session *se_sess)
+{
+ struct efc_node *node = se_sess->fabric_sess_ptr;
+
+ pr_debug("se_sess=%p node=%p", se_sess, node);
+
+ if (!node) {
+ pr_debug("node is NULL");
+ return;
+ }
+
+ efc_node_post_shutdown(node, NULL);
+}
+
+static u32 efct_lio_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int efct_lio_get_cmd_state(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ return io->tgt_io.state;
+}
+
+static int
+efct_lio_sg_map(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
+ cmd->t_data_nents, cmd->data_direction);
+ if (ocp->seg_map_cnt == 0)
+ return -EFAULT;
+ return 0;
+}
+
+static void
+efct_lio_sg_unmap(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
+ return;
+
+ dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
+ ocp->seg_map_cnt, cmd->data_direction);
+ ocp->seg_map_cnt = 0;
+}
+
+static int
+efct_lio_status_done(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ if (ocp->seg_map_cnt)
+ efct_lio_sg_unmap(io);
+
+ efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
+ scsi_status, ocp->err, flags, ocp->ddir);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg);
+
+static int
+efct_lio_write_pending(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg;
+ u32 flags = 0, cnt, curcnt;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
+ efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
+ cmd->transport_state, cmd->se_cmd_flags);
+
+ if (ocp->seg_cnt == 0) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
+ return -EFAULT;
+ }
+ }
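+	/*
+	 * Feed at most sgl_allocated entries per data phase; the datamove
+	 * completion callback re-enters this function until cur_seg reaches
+	 * seg_cnt.
+	 */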
+ curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
+ curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
+ /* find current sg */
+ for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
+ sg = sg_next(sg))
+ ;/* do nothing */
+
+ for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ sgl[cnt].len = sg_dma_len(sg);
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ }
+
+ if (ocp->cur_seg == ocp->seg_cnt)
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+
+ return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static int
+efct_lio_queue_data_in(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg = NULL;
+ uint flags = 0, cnt = 0, curcnt = 0;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);
+
+ if (ocp->seg_cnt == 0) {
+ if (cmd->data_length) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io,
+ "efct_lio_sg_map failed\n");
+ return -EAGAIN;
+ }
+ } else {
+ /* If command length is 0, send the response status */
+ struct efct_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ efct_lio_io_printf(io,
+ "cmd : %p length 0, send status\n",
+ cmd);
+ return efct_scsi_send_resp(io, 0, &rsp,
+ efct_lio_status_done, NULL);
+ }
+ }
+ curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
+
+ while (cnt < curcnt) {
+ sg = &cmd->t_data_sg[ocp->cur_seg];
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
+ sgl[cnt].len = cmd->data_length - ocp->transferred_len;
+ else
+ sgl[cnt].len = sg_dma_len(sg);
+
+ ocp->transferred_len += sgl[cnt].len;
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ cnt++;
+ if (ocp->transferred_len == cmd->data_length)
+ break;
+ }
+
+ if (ocp->transferred_len == cmd->data_length) {
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+ ocp->seg_cnt = ocp->cur_seg;
+ }
+
+ /* If there is residual, disable Auto Good Response */
+ if (cmd->residual_count)
+ flags |= EFCT_SCSI_NO_AUTO_RESPONSE;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);
+
+ return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static void
+efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &io->tgt_io.cmd;
+ int rc;
+
+ if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
+ ocp->rsp_sent = true;
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return;
+ }
+
+ /* send check condition if an error occurred */
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc != 0) {
+ efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ } else {
+ ocp->rsp_sent = true;
+ }
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
+ if (ocp->seg_map_cnt) {
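+ /* If no error and mapped segments remain, start the next data
+ * phase; unmap only once the transfer finishes or fails
+ */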
+ if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
+ ocp->cur_seg < ocp->seg_cnt) {
+ int rc;
+
+ efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
+ ocp->cur_seg);
+ if (ocp->ddir == DMA_TO_DEVICE)
+ rc = efct_lio_write_pending(&ocp->cmd);
+ else
+ rc = efct_lio_queue_data_in(&ocp->cmd);
+ if (!rc)
+ return 0;
+
+ ocp->err = EFCT_SCSI_STATUS_ERROR;
+ efct_lio_io_printf(io, "could not continue command\n");
+ }
+ efct_lio_sg_unmap(io);
+ }
+
+ if (io->tgt_io.aborting) {
+ efct_lio_io_printf(io, "IO done aborted\n");
+ return 0;
+ }
+
+ if (ocp->ddir == DMA_TO_DEVICE) {
+ efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
+ io->tgt_io.cmd.transport_state);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ transport_generic_request_failure(&io->tgt_io.cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
+ } else {
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_EXECUTE_CMD);
+ target_execute_cmd(&io->tgt_io.cmd);
+ }
+ } else {
+ efct_lio_send_resp(io, scsi_status, flags);
+ }
+ return 0;
+}
+
+static int
+efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
+ &io->tgt_io.cmd, scsi_status, flags);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_null_tmf_done(struct efct_io *tmfio,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
+ &tmfio->tgt_io.cmd, scsi_status, flags);
+
+ /* free struct efct_io only, no active se_cmd */
+ efct_scsi_io_complete(tmfio);
+ return 0;
+}
+
+static int
+efct_lio_queue_status(struct se_cmd *cmd)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ int rc = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
+ efct_lio_io_printf(io,
+ "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
+ cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
+ cmd->scsi_sense_length);
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun; overrun is recorded as a
+ * negative residual so the response path can tell the two apart
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc == 0)
+ ocp->rsp_sent = true;
+ return rc;
+}
+
+static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
+ struct se_tmr_req *se_tmr = cmd->se_tmr_req;
+ u8 rspcode;
+
+ efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
+ cmd, se_tmr->function, se_tmr->response);
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
+ break;
+ }
+ efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
+}
+
+static struct efct *efct_find_wwpn(u64 wwpn)
+{
+ struct efct *efct;
+
+ /* Search for the HBA that has this WWPN */
+ list_for_each_entry(efct, &efct_devices, list_entry) {
+ if (wwpn == efct_get_wwpn(&efct->hw))
+ return efct;
+ }
+
+ return NULL;
+}
+
+static struct se_wwn *
+efct_lio_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_nport *lio_nport;
+ struct efct *efct;
+ int ret;
+ u64 wwpn;
+
+ ret = efct_lio_parse_wwn(name, &wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
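+ /* One lio_nport per physical port; freed in efct_lio_drop_nport() */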
+ lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
+ if (!lio_nport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_nport->efct = efct;
+ lio_nport->wwpn = wwpn;
+ efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
+ "naa.", wwpn);
+ efct->tgt_efct.lio_nport = lio_nport;
+
+ return &lio_nport->nport_wwn;
+}
+
+static struct se_wwn *
+efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_vport *lio_vport;
+ struct efct *efct;
+ int ret;
+ u64 p_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, *pbuf, tmp[128];
+ struct efct_lio_vport_list_t *vport_list;
+ struct fc_vport *new_fc_vport;
+ struct fc_vport_identifiers vport_id;
+ unsigned long flags = 0;
+
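+ /* The configfs name carries the physical WWPN and the NPIV
+ * WWPN/WWNN, separated by '@'
+ */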
+ snprintf(tmp, sizeof(tmp), "%s", name);
+ pbuf = &tmp[0];
+
+ p = strsep(&pbuf, "@");
+
+ if (!p || !pbuf) {
+ pr_err("Unable to find separator operator(@)\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
+ &npiv_wwnn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(p_wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
+ if (!lio_vport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_vport->efct = efct;
+ lio_vport->wwpn = p_wwpn;
+ lio_vport->npiv_wwpn = npiv_wwpn;
+ lio_vport->npiv_wwnn = npiv_wwnn;
+
+ efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
+ "naa.", npiv_wwpn);
+
+ vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
+ if (!vport_list) {
+ kfree(lio_vport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vport_list->lio_vport = lio_vport;
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
+ if (!new_fc_vport) {
+ efc_log_err(efct, "fc_vport_create failed\n");
+ kfree(lio_vport);
+ kfree(vport_list);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lio_vport->fc_vport = new_fc_vport;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ INIT_LIST_HEAD(&vport_list->list_entry);
+ list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+
+ return &lio_vport->vport_wwn;
+}
+
+static void
+efct_lio_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct *efct = lio_nport->efct;
+
+ /* Only the physical nport should exist; free the lio_nport
+ * allocated in efct_lio_make_nport().
+ */
+ kfree(efct->tgt_efct.lio_nport);
+ efct->tgt_efct.lio_nport = NULL;
+}
+
+static void
+efct_lio_npiv_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_vport_list_t *vport, *next_vport;
+ struct efct *efct = lio_vport->efct;
+ unsigned long flags = 0;
+
+ if (lio_vport->fc_vport)
+ fc_vport_terminate(lio_vport->fc_vport);
+
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
+ list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
+ list_entry) {
+ if (vport->lio_vport == lio_vport) {
+ list_del(&vport->list_entry);
+ kfree(vport->lio_vport);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+}
+
+static struct se_portal_group *
+efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
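+ /* The configfs group name must be "tpgt_<n>" with n <= USHRT_MAX */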
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->nport = lio_nport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ efct = lio_nport->efct;
+ efct->tgt_efct.tpg = tpg;
+ efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
+
+ xa_init(&efct->lookup);
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ struct efct *efct = tpg->nport->efct;
+
+ efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
+ tpg->nport->efct->tgt_efct.tpg = NULL;
+ core_tpg_deregister(se_tpg);
+ xa_destroy(&efct->lookup);
+ kfree(tpg);
+}
+
+static struct se_portal_group *
+efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ efct = lio_vport->efct;
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (n != 1) {
+ efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->vport = lio_vport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ lio_vport->tpg = tpg;
+ efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
+
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
+ tpg->tpgt);
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static int
+efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+ struct efct_lio_nacl *nacl;
+ u64 wwnn;
+
+ if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
+ return -EINVAL;
+
+ nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+
+ efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
+ return 0;
+}
+
+static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int
+efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static struct efct_lio_tpg *
+efct_get_vport_tpg(struct efc_node *node)
+{
+ struct efct *efct;
+ u64 wwpn = node->nport->wwpn;
+ struct efct_lio_vport_list_t *vport, *next;
+ struct efct_lio_vport *lio_vport = NULL;
+ struct efct_lio_tpg *tpg = NULL;
+ unsigned long flags = 0;
+
+ efct = node->efc->base;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
+ list_entry) {
+ lio_vport = vport->lio_vport;
+ if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
+ efc_log_debug(efct, "found tpg on vport\n");
+ tpg = lio_vport->tpg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+ return tpg;
+}
+
+static void
+_efct_tgt_node_free(struct kref *arg)
+{
+ struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
+ struct efc_node *node = tgt_node->node;
+
+ efc_scsi_del_initiator_complete(node->efc, node);
+ kfree(tgt_node);
+}
+
+static int efct_session_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *private)
+{
+ struct efc_node *node = private;
+ struct efct_node *tgt_node;
+ struct efct *efct = node->efc->base;
+
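+ /* Invoked from target_setup_session(): allocate the efct_node that
+ * backs this session and cache the FC IDs used for IO lookup
+ */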
+ tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
+ if (!tgt_node)
+ return -ENOMEM;
+
+ kref_init(&tgt_node->ref);
+ tgt_node->release = _efct_tgt_node_free;
+
+ tgt_node->session = se_sess;
+ node->tgt_node = tgt_node;
+ tgt_node->efct = efct;
+
+ tgt_node->node = node;
+
+ tgt_node->node_fc_id = node->rnode.fc_id;
+ tgt_node->port_fc_id = node->nport->fc_id;
+ tgt_node->vpi = node->nport->indicator;
+ tgt_node->rpi = node->rnode.indicator;
+
+ spin_lock_init(&tgt_node->active_ios_lock);
+ INIT_LIST_HEAD(&tgt_node->active_ios);
+
+ return 0;
+}
+
+int efct_scsi_tgt_new_device(struct efct *efct)
+{
+ u32 total_ios;
+
+ /* Get the max settings */
+ efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
+ efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
+
+ /* initialize IO watermark fields */
+ atomic_set(&efct->tgt_efct.ios_in_use, 0);
+ total_ios = efct->hw.config.n_io;
+ efc_log_debug(efct, "total_ios=%d\n", total_ios);
+ efct->tgt_efct.watermark_min =
+ (total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
+ efct->tgt_efct.watermark_max =
+ (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
+ atomic_set(&efct->tgt_efct.io_high_watermark,
+ efct->tgt_efct.watermark_max);
+ atomic_set(&efct->tgt_efct.watermark_hit, 0);
+ atomic_set(&efct->tgt_efct.initiator_count, 0);
+
+ lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ if (!lio_wq) {
+ efc_log_err(efct, "workqueue create failed\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&efct->tgt_efct.efct_lio_lock);
+ INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
+
+ return 0;
+}
+
+int efct_scsi_tgt_del_device(struct efct *efct)
+{
+ flush_workqueue(lio_wq);
+
+ return 0;
+}
+
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
+{
+ struct efct *efct = nport->efc->base;
+
+ efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
+ efct->tgt_efct.lio_nport->wwpn_str);
+
+ return 0;
+}
+
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
+{
+ efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
+}
+
+static void efct_lio_setup_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ char wwpn[WWN_NAME_LEN];
+ struct efct_lio_tpg *tpg;
+ struct efct_node *tgt_node;
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ /* Check whether the node belongs to a vport;
+ * if not, use the physical port
+ */
+ tpg = efct_get_vport_tpg(node);
+ if (tpg) {
+ se_tpg = &tpg->tpg;
+ } else if (efct->tgt_efct.tpg) {
+ tpg = efct->tgt_efct.tpg;
+ se_tpg = &tpg->tpg;
+ } else {
+ efc_log_err(efct, "failed to init session\n");
+ return;
+ }
+
+ /*
+ * Format the FCP Initiator port_name into colon
+ * separated values to match the format used by our
+ * explicit ConfigFS NodeACLs.
+ */
+ efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
+
+ se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
+ node, efct_session_cb);
+ if (IS_ERR(se_sess)) {
+ efc_log_err(efct, "failed to setup session\n");
+ kfree(wq_data);
+ efc_scsi_sess_reg_complete(node, -EIO);
+ return;
+ }
+
+ tgt_node = node->tgt_node;
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+
+ efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
+ se_sess, node, id);
+
+ if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
+ efc_log_err(efct, "Node lookup store failed\n");
+
+ efc_scsi_sess_reg_complete(node, 0);
+
+ /* update IO watermark: increment initiator count */
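+ /* Each logged-in initiator reserves EFCT_IO_WATERMARK_PER_INITIATOR
+ * IOs; the watermark never drops below watermark_min
+ */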
+ ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_lio_wq_data *wq_data;
+
+ /*
+ * Since LIO only supports initiator validation at thread level,
+ * we are open minded and accept all callers.
+ */
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_setup_session);
+ queue_work(lio_wq, &wq_data->work);
+ return EFC_SCSI_CALL_ASYNC;
+}
+
+static void efct_lio_remove_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+
+ tgt_node = node->tgt_node;
+ if (!tgt_node) {
+ /* base driver has sent back-to-back requests
+ * to unreg session with no intervening
+ * register
+ */
+ efc_log_err(efct, "unreg session for NULL session\n");
+ efc_scsi_del_initiator_complete(node->efc, node);
+ return;
+ }
+
+ se_sess = tgt_node->session;
+ efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
+ se_sess, node);
+
+ /* first flag all session commands to complete */
+ target_stop_session(se_sess);
+
+ /* now wait for session commands to complete */
+ target_wait_for_sess_cmds(se_sess);
+ target_remove_session(se_sess);
+ tgt_node->session = NULL;
+ node->tgt_node = NULL;
+ kref_put(&tgt_node->ref, tgt_node->release);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_node *tgt_node = node->tgt_node;
+ struct efct_lio_wq_data *wq_data;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ if (reason == EFCT_SCSI_INITIATOR_MISSING)
+ return EFC_SCSI_CALL_COMPLETE;
+
+ if (!tgt_node) {
+ efc_log_err(efct, "tgt_node is NULL\n");
+ return -EIO;
+ }
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+ xa_erase(&efct->lookup, id);
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_remove_session);
+ queue_work(lio_wq, &wq_data->work);
+
+ /*
+ * update IO watermark: decrement initiator count
+ */
+ ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
+
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ return EFC_SCSI_CALL_ASYNC;
+}
+
+void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
+ u32 cdb_len, u32 flags)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *se_cmd = &io->tgt_io.cmd;
+ struct efct *efct = io->efct;
+ char *ddir;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc = 0;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+
+ /* set target timeout */
+ io->timeout = efct->target_io_timer_sec;
+
+ if (flags & EFCT_SCSI_CMD_SIMPLE)
+ ocp->task_attr = TCM_SIMPLE_TAG;
+ else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
+ ocp->task_attr = TCM_HEAD_TAG;
+ else if (flags & EFCT_SCSI_CMD_ORDERED)
+ ocp->task_attr = TCM_ORDERED_TAG;
+ else if (flags & EFCT_SCSI_CMD_ACA)
+ ocp->task_attr = TCM_ACA_TAG;
+
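+ /* EFCT_SCSI_CMD_DIR_IN means data arrives from the initiator (a
+ * write), so it maps to DMA_TO_DEVICE; DIR_OUT is data returned to
+ * the initiator and maps to DMA_FROM_DEVICE
+ */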
+ switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
+ case EFCT_SCSI_CMD_DIR_IN:
+ ddir = "FROM_INITIATOR";
+ ocp->ddir = DMA_TO_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "TO_INITIATOR";
+ ocp->ddir = DMA_FROM_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "BIDIR";
+ ocp->ddir = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ ddir = "NONE";
+ ocp->ddir = DMA_NONE;
+ break;
+ }
+
+ ocp->lun = lun;
+ efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
+ cdb[0], ddir, io->exp_xfer_len);
+
+ tgt_node = io->node;
+ se_sess = tgt_node->session;
+ if (!se_sess) {
+ efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
+ &ocp->cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
+ rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
+ ocp->lun, io->exp_xfer_len, ocp->task_attr,
+ ocp->ddir, TARGET_SCF_ACK_KREF);
+ if (rc) {
+ efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_ATOMIC))
+ return;
+
+ target_submit(se_cmd);
+}
+
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *io_to_abort, u32 flags)
+{
+ unsigned char tmr_func;
+ struct efct *efct = tmfio->efct;
+ struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+ efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
+ tmfio->display_name, cmd, lun);
+
+ switch (cmd) {
+ case EFCT_SCSI_TMF_ABORT_TASK:
+ tmr_func = TMR_ABORT_TASK;
+ break;
+ case EFCT_SCSI_TMF_ABORT_TASK_SET:
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_TASK_SET:
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
+ tmr_func = TMR_LUN_RESET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_ACA:
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+ case EFCT_SCSI_TMF_TARGET_RESET:
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ case EFCT_SCSI_TMF_QUERY_TASK_SET:
+ default:
+ goto tmf_fail;
+ }
+
+ tmfio->tgt_io.tmf = tmr_func;
+ tmfio->tgt_io.lun = lun;
+ tmfio->tgt_io.io_to_abort = io_to_abort;
+
+ tgt_node = tmfio->node;
+
+ se_sess = tgt_node->session;
+ if (!se_sess)
+ return 0;
+
+ rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
+ GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);
+
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
+ if (rc)
+ goto tmf_fail;
+
+ return 0;
+
+tmf_fail:
+ efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_lio_null_tmf_done, NULL);
+ return 0;
+}
+
+/* Start items for efct_lio_tpg_attrib_cit */
+
+#define DEF_EFCT_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
+
+DEF_EFCT_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
+ &efct_lio_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+#define DEF_EFCT_NPIV_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
+
+DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
+ &efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+CONFIGFS_ATTR(efct_lio_tpg_, enable);
+static struct configfs_attribute *efct_lio_tpg_attrs[] = {
+ &efct_lio_tpg_attr_enable, NULL };
+CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
+static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
+ &efct_lio_npiv_tpg_attr_enable, NULL };
+
+static const struct target_core_fabric_ops efct_lio_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_make_nport,
+ .fabric_drop_wwn = efct_lio_drop_nport,
+ .fabric_make_tpg = efct_lio_make_tpg,
+ .fabric_drop_tpg = efct_lio_drop_tpg,
+ .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops efct_lio_npiv_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct_npiv",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_npiv_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ efct_lio_npiv_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ efct_lio_npiv_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_npiv_make_nport,
+ .fabric_drop_wwn = efct_lio_npiv_drop_nport,
+ .fabric_make_tpg = efct_lio_npiv_make_tpg,
+ .fabric_drop_tpg = efct_lio_npiv_drop_tpg,
+ .tpg_check_demo_mode_login_only =
+ efct_lio_npiv_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
+};
+
+int efct_scsi_tgt_driver_init(void)
+{
+ int rc;
+
+ /* Register the top level struct config_item_type with TCM core */
+ rc = target_register_template(&efct_lio_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ return rc;
+ }
+ rc = target_register_template(&efct_lio_npiv_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ target_unregister_template(&efct_lio_ops);
+ return rc;
+ }
+ return 0;
+}
+
+int efct_scsi_tgt_driver_exit(void)
+{
+ target_unregister_template(&efct_lio_ops);
+ target_unregister_template(&efct_lio_npiv_ops);
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_lio.h b/drivers/scsi/elx/efct/efct_lio.h
new file mode 100644
index 000000000..569a0d4b1
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCT_LIO_H__
+#define __EFCT_LIO_H__
+
+#include "efct_scsi.h"
+#include <target/target_core_base.h>
+
+#define efct_lio_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ ##__VA_ARGS__)
+
+#define efct_lio_tmfio_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ io->tgt_io.tmf, ##__VA_ARGS__)
+
+#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value)
+
+struct efct_lio_wq_data {
+ struct efct *efct;
+ void *ptr;
+ struct work_struct work;
+};
+
+/* Target private efct structure */
+struct efct_scsi_tgt {
+ u32 max_sge;
+ u32 max_sgl;
+
+ /*
+ * Variables used to send task set full. We are using a high watermark
+ * method to send task set full. We will reserve a fixed number of IOs
+ * per initiator plus a fudge factor. Once we reach this number,
+ * then the target will start sending task set full/busy responses.
+ */
+ atomic_t initiator_count;
+ atomic_t ios_in_use;
+ atomic_t io_high_watermark;
+
+ atomic_t watermark_hit;
+ int watermark_min;
+ int watermark_max;
+
+ struct efct_lio_nport *lio_nport;
+ struct efct_lio_tpg *tpg;
+
+ struct list_head vport_list;
+ /* Protects vport list */
+ spinlock_t efct_lio_lock;
+
+ u64 wwnn;
+};
+
+struct efct_scsi_tgt_nport {
+ struct efct_lio_nport *lio_nport;
+};
+
+struct efct_node {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efct *efct;
+ struct efc_node *node;
+ struct se_session *session;
+ spinlock_t active_ios_lock;
+ struct list_head active_ios;
+ char display_name[EFC_NAME_LENGTH];
+ u32 port_fc_id;
+ u32 node_fc_id;
+ u32 vpi;
+ u32 rpi;
+ u32 abort_cnt;
+};
+
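+/* IO state trace bits: efct_set_lio_io_state() ORs these into tgt_io.state
+ * to record the path a command takes through the LIO and SCSI layers.
+ */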
+#define EFCT_LIO_STATE_SCSI_RECV_CMD (1 << 0)
+#define EFCT_LIO_STATE_TGT_SUBMIT_CMD (1 << 1)
+#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN (1 << 2)
+#define EFCT_LIO_STATE_TFO_WRITE_PENDING (1 << 3)
+#define EFCT_LIO_STATE_TGT_EXECUTE_CMD (1 << 4)
+#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA (1 << 5)
+#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE (1 << 6)
+#define EFCT_LIO_STATE_SCSI_DATA_DONE (1 << 7)
+#define EFCT_LIO_STATE_TFO_QUEUE_STATUS (1 << 8)
+#define EFCT_LIO_STATE_SCSI_SEND_RSP (1 << 9)
+#define EFCT_LIO_STATE_SCSI_RSP_DONE (1 << 10)
+#define EFCT_LIO_STATE_TGT_GENERIC_FREE (1 << 11)
+#define EFCT_LIO_STATE_SCSI_RECV_TMF (1 << 12)
+#define EFCT_LIO_STATE_TGT_SUBMIT_TMR (1 << 13)
+#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS (1 << 14)
+#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE (1 << 15)
+
+#define EFCT_LIO_STATE_TFO_ABORTED_TASK (1 << 29)
+#define EFCT_LIO_STATE_TFO_RELEASE_CMD (1 << 30)
+#define EFCT_LIO_STATE_SCSI_CMPL_CMD (1u << 31)
+
+struct efct_scsi_tgt_io {
+ struct se_cmd cmd;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ enum dma_data_direction ddir;
+ int task_attr;
+ u64 lun;
+
+ u32 state;
+ u8 tmf;
+ struct efct_io *io_to_abort;
+ u32 seg_map_cnt;
+ u32 seg_cnt;
+ u32 cur_seg;
+ enum efct_scsi_io_status err;
+ bool aborting;
+ bool rsp_sent;
+ u32 transferred_len;
+};
+
+/* Handler return codes */
+enum {
+ SCSI_HANDLER_DATAPHASE_STARTED = 1,
+ SCSI_HANDLER_RESP_STARTED,
+ SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED,
+ SCSI_CMD_NOT_SUPPORTED,
+};
+
+#define WWN_NAME_LEN 32
+struct efct_lio_vport {
+ u64 wwpn;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn vport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ atomic_t enable;
+};
+
+struct efct_lio_nport {
+ u64 wwpn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn nport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ atomic_t enable;
+};
+
+struct efct_lio_tpg_attrib {
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_login_only;
+ bool session_deletion_wait;
+};
+
+struct efct_lio_tpg {
+ struct se_portal_group tpg;
+ struct efct_lio_nport *nport;
+ struct efct_lio_vport *vport;
+ struct efct_lio_tpg_attrib tpg_attrib;
+ unsigned short tpgt;
+ bool enabled;
+};
+
+struct efct_lio_nacl {
+ u64 nport_wwnn;
+ char nport_name[WWN_NAME_LEN];
+ struct se_session *session;
+ struct se_node_acl se_node_acl;
+};
+
+struct efct_lio_vport_list_t {
+ struct list_head list_entry;
+ struct efct_lio_vport *lio_vport;
+};
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+
+#endif /*__EFCT_LIO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
new file mode 100644
index 000000000..afb154992
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+
+#define enable_tsend_auto_resp(efct) 1
+#define enable_treceive_auto_resp(efct) 0
+
+#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, io->instance_index,\
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)
+
+#define scsi_io_trace(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
+ scsi_io_printf(io, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ struct efct_io *io;
+ unsigned long flags;
+
+ efct = node->efct;
+
+ xport = efct->xport;
+
+ io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!io) {
+ efc_log_err(efct, "IO alloc Failed\n");
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&io->ref);
+ io->release = _efct_scsi_io_free;
+
+ /* set generic fields */
+ io->efct = efct;
+ io->node = node;
+ kref_get(&node->ref);
+
+ /* set type and name */
+ io->io_type = EFCT_IO_TYPE_IO;
+ io->display_name = "scsi_io";
+
+ io->cmd_ini = false;
+ io->cmd_tgt = true;
+
+ /* Add to node's active_ios list */
+ INIT_LIST_HEAD(&io->list_entry);
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_add(&io->list_entry, &node->active_ios);
+
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ return io;
+}
+
+void
+_efct_scsi_io_free(struct kref *arg)
+{
+ struct efct_io *io = container_of(arg, struct efct_io, ref);
+ struct efct *efct = io->efct;
+ struct efct_node *node = io->node;
+ unsigned long flags = 0;
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+
+ if (io->io_free) {
+ efc_log_err(efct, "IO already freed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_del_init(&io->list_entry);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ kref_put(&node->ref, node->release);
+ io->node = NULL;
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+}
+
+void
+efct_scsi_io_free(struct efct_io *io)
+{
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ WARN_ON(!refcount_read(&io->ref.refcount));
+ kref_put(&io->ref, io->release);
+}
+
+static void
+efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ u32 flags = 0;
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ efct_scsi_io_cb_t cb;
+
+ if (!io || !io->efct) {
+ pr_err("%s: IO can not be NULL\n", __func__);
+ return;
+ }
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ efct = io->efct;
+
+ io->transferred += length;
+
+ if (!io->scsi_tgt_cb) {
+ efct_scsi_check_pending(efct);
+ return;
+ }
+
+ /* Call target server completion */
+ cb = io->scsi_tgt_cb;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_tgt_cb = NULL;
+
+ /* If status was good and auto-good-response was set, call back
+ * the target server with IO_CMPL_RSP_SENT;
+ * otherwise send IO_CMPL
+ */
+ if (status == 0 && io->auto_resp)
+ flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
+ else
+ flags |= EFCT_SCSI_IO_CMPL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & SLI4_FC_DI_ERROR_GE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
+ else if (ext_status & SLI4_FC_DI_ERROR_AE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ else if (ext_status & SLI4_FC_DI_ERROR_RE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ else
+ scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ scsi_stat = EFCT_SCSI_STATUS_ABORTED;
+ break;
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
+ scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
+ break;
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_stat = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ default:
+ /* we have seen 0x0d (TX_DMA_FAILED err) */
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
+ /* target IO timed out */
+ scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
+ break;
+
+ case SLI4_FC_WCQE_STATUS_SHUTDOWN:
+ /* Target IO cancelled by HW */
+ scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
+ break;
+
+ default:
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
+
+ efct_scsi_check_pending(efct);
+}
+
+static int
+efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
+ struct efct_scsi_sgl *sgl, u32 sgl_count,
+ enum efct_hw_io_type type)
+{
+ int rc;
+ u32 i;
+ struct efct *efct = hw->os;
+
+ /* Initialize HW SGL */
+ rc = efct_hw_io_init_sges(hw, hio, type);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
+ return -EIO;
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ /* Add data SGE */
+ rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void efc_log_sgl(struct efct_io *io)
+{
+ struct efct_hw_io *hio = io->hio;
+ struct sli4_sge *data = NULL;
+ u32 *dword = NULL;
+ u32 i;
+ u32 n_sge;
+
+ scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
+ upper_32_bits(hio->def_sgl.phys),
+ lower_32_bits(hio->def_sgl.phys));
+ n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
+ for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
+ dword = (u32 *)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+
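+ /* bit 31 of SGE word 2 marks the last SGE; stop there */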
+ if (dword[2] & (1U << 31))
+ break;
+ }
+}
+
+static void
+efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct efct_io *io = arg;
+
+ if (io) {
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (!io->hw_cb)
+ return;
+
+ io->hw_cb = NULL;
+ (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
+ }
+}
+
+static int
+efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
+{
+ int rc = 0;
+ struct efct *efct = io->efct;
+
+ /* Got a HW IO;
+ * update ini/tgt_task_tag with HW IO info and dispatch
+ */
+ io->hio = hio;
+ if (io->cmd_tgt)
+ io->tgt_task_tag = hio->indicator;
+ else if (io->cmd_ini)
+ io->init_task_tag = hio->indicator;
+ io->hw_tag = hio->reqtag;
+
+ hio->eq = io->hw_priv;
+
+ /* Copy WQ steering */
+ switch (io->wq_steering) {
+ case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
+ break;
+ case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
+ break;
+ case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
+ break;
+ }
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_IO:
+ rc = efct_scsi_build_sgls(&efct->hw, io->hio,
+ io->sgl, io->sgl_count, io->hio_type);
+ if (rc)
+ break;
+
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
+ efc_log_sgl(io);
+
+ if (io->app_id)
+ io->iparam.fcp_tgt.app_id = io->app_id;
+
+ io->iparam.fcp_tgt.vpi = io->node->vpi;
+ io->iparam.fcp_tgt.rpi = io->node->rpi;
+ io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
+ io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
+ io->iparam.fcp_tgt.xmit_len = io->wire_len;
+
+ rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
+ &io->iparam, io->hw_cb, io);
+ break;
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static int
+efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
+{
+ int rc;
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_ABORT: {
+ struct efct_hw_io *hio_to_abort = NULL;
+
+ hio_to_abort = io->io_to_abort->hio;
+
+ if (!hio_to_abort) {
+ /*
+ * If "IO to abort" does not have an
+ * associated HW IO, immediately make callback with
+ * success. The command must have been sent to
+ * the backend, but the data phase has not yet
+ * started, so we don't have a HW IO.
+ *
+ * Note: since the backend shims should be
+ * taking a reference on io_to_abort, it should not
+ * be possible to have been completed and freed by
+ * the backend before the abort got here.
+ */
+ scsi_io_printf(io, "IO: not active\n");
+ ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
+ SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
+ rc = 0;
+ break;
+ }
+
+ /* HW IO is valid, abort it */
+ scsi_io_printf(io, "aborting\n");
+ rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
+ io->send_abts, io->hw_cb, io);
+ if (rc) {
+ int status = SLI4_FC_WCQE_STATUS_SUCCESS;
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (rc != -ENOENT && rc != -EINPROGRESS) {
+ status = -1;
+ scsi_io_printf(io, "Failed to abort IO rc=%d\n",
+ rc);
+ }
+ cb(io->hio, 0, status, 0, io);
+ rc = 0;
+ }
+
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static struct efct_io *
+efct_scsi_dispatch_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ struct efct_hw_io *hio;
+ unsigned long flags = 0;
+ int status;
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ if (!list_empty(&xport->io_pending_list)) {
+ io = list_first_entry(&xport->io_pending_list, struct efct_io,
+ io_pending_link);
+ list_del_init(&io->io_pending_link);
+ }
+
+ if (!io) {
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ return NULL;
+ }
+
+ if (io->io_type == EFCT_IO_TYPE_ABORT) {
+ hio = NULL;
+ } else {
+ hio = efct_hw_io_alloc(&efct->hw);
+ if (!hio) {
+ /*
+ * No HW IO available. Put the IO back on
+ * the front of the pending list
+ */
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ io = NULL;
+ } else {
+ hio->eq = io->hw_priv;
+ }
+ }
+
+ /* Must drop the lock before dispatching the IO */
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (!io)
+ return NULL;
+
+ /*
+ * We pulled an IO off the pending list,
+ * and either got an HW IO or don't need one
+ */
+ atomic_sub_return(1, &xport->io_pending_count);
+ if (!hio)
+ status = efct_scsi_io_dispatch_no_hw_io(io);
+ else
+ status = efct_scsi_io_dispatch_hw_io(io, hio);
+ if (status) {
+ /*
+ * Invoke the HW callback, but do so in a separate
+ * execution context, provided by the NOP mailbox
+ * completion processing context, by using
+ * efct_hw_async_call()
+ */
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "call hw async failed\n");
+ }
+ }
+
+ return io;
+}
+
+void
+efct_scsi_check_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ int count = 0;
+ unsigned long flags = 0;
+ int dispatch = 0;
+
+ /* Guard against recursion */
+ if (atomic_add_return(1, &xport->io_pending_recursing) > 1) {
+ /* This function is already running. Decrement and return. */
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
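+ /* Dispatch pending IOs until the list is empty or HW IOs run out */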
+ while (efct_scsi_dispatch_pending(efct))
+ count++;
+
+ if (count) {
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ /*
+ * If nothing was removed from the list,
+ * we might be in a case where we need to abort an
+ * active IO and the abort is on the pending list.
+ * Look for an abort we can dispatch.
+ */
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
+ if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
+ /* This IO has a HW IO, so it is
+ * active. Dispatch the abort.
+ */
+ dispatch = 1;
+ list_del_init(&io->io_pending_link);
+ atomic_sub_return(1, &xport->io_pending_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (dispatch) {
+ if (efct_scsi_io_dispatch_no_hw_io(io)) {
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "hw async failed\n");
+ }
+ }
+ }
+
+ atomic_sub_return(1, &xport->io_pending_recursing);
+}
+
+int
+efct_scsi_io_dispatch(struct efct_io *io, void *cb)
+{
+ struct efct_hw_io *hio;
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * If this IO already has a HW IO, this is not the first
+ * phase of the IO. Send it to the HW.
+ */
+ if (io->hio)
+ return efct_scsi_io_dispatch_hw_io(io, io->hio);
+
+ /*
+ * We don't already have a HW IO associated with the IO. First check
+ * the pending list. If not empty, add IO to the tail and process the
+ * pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ /*
+ * If this is a low latency request, put it at the front
+ * of the IO pending queue, otherwise put it at the end.
+ */
+ if (io->low_latency) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ } else {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link,
+ &xport->io_pending_list);
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /*
+ * We don't have a HW IO associated with the IO and there's nothing
+ * on the pending list. Attempt to allocate a HW IO and dispatch it.
+ */
+ hio = efct_hw_io_alloc(&io->efct->hw);
+ if (!hio) {
+ /* Couldn't get a HW IO. Save this IO on the pending list */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ atomic_add_return(1, &xport->io_total_pending);
+ atomic_add_return(1, &xport->io_pending_count);
+ return 0;
+ }
+
+ /* We successfully allocated a HW IO; dispatch to HW */
+ return efct_scsi_io_dispatch_hw_io(io, hio);
+}
+
+int
+efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
+{
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * For aborts, we don't need a HW IO, but we still want
+ * to pass through the pending list to preserve ordering.
+ * Thus, if the pending list is not empty, add this abort
+ * to the pending list and process the pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /* nothing on pending list, dispatch abort */
+ return efct_scsi_io_dispatch_no_hw_io(io);
+}
+
+static inline int
+efct_scsi_xfer_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
+ enum efct_hw_io_type type, int enable_ar,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ size_t residual = 0;
+
+ io->sgl_count = sgl_count;
+
+ efct = io->efct;
+
+ scsi_io_trace(io, "%s wire_len %llu\n",
+ (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
+ xwire_len);
+
+ io->hio_type = type;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ residual = io->exp_xfer_len - io->transferred;
+ io->wire_len = (xwire_len < residual) ? xwire_len : residual;
+ residual = (xwire_len - io->wire_len);
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = io->transferred;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* if this is the last data phase and there is no residual, enable
+ * auto-good-response
+ */
+ if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
+ ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
+ (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ io->auto_resp = true;
+ } else {
+ io->auto_resp = false;
+ }
+
+ /* save this transfer length */
+ io->xfer_req = io->wire_len;
+
+ /* Adjust the transferred count to account for overrun
+ * when the residual is calculated in efct_scsi_send_resp
+ */
+ io->transferred += residual;
+
+ /* Adjust the SGL size if there is overrun */
+
+ if (residual) {
+ struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
+
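+ /* Walk the SGL backwards, trimming entries until the
+ * overrun has been consumed
+ */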
+ while (residual) {
+ size_t len = sgl_ptr->len;
+
+ if (len > residual) {
+ sgl_ptr->len = len - residual;
+ residual = 0;
+ } else {
+ sgl_ptr->len = 0;
+ residual -= len;
+ io->sgl_count--;
+ }
+ sgl_ptr--;
+ }
+ }
+
+ /* Set latency and WQ steering */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (efct->xport) {
+ struct efct_xport *xport = efct->xport;
+
+ if (type == EFCT_HW_IO_TARGET_READ) {
+ xport->fcp_stats.input_requests++;
+ xport->fcp_stats.input_bytes += xwire_len;
+ } else if (type == EFCT_HW_IO_TARGET_WRITE) {
+ xport->fcp_stats.output_requests++;
+ xport->fcp_stats.output_bytes += xwire_len;
+ }
+ }
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
+ len, EFCT_HW_IO_TARGET_READ,
+ enable_tsend_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
+ EFCT_HW_IO_TARGET_WRITE,
+ enable_treceive_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ int residual;
+ /* Always try auto resp */
+ bool auto_resp = true;
+ u8 scsi_status = 0;
+ u16 scsi_status_qualifier = 0;
+ u8 *sense_data = NULL;
+ u32 sense_data_length = 0;
+
+ efct = io->efct;
+
+ if (rsp) {
+ scsi_status = rsp->scsi_status;
+ scsi_status_qualifier = rsp->scsi_status_qualifier;
+ sense_data = rsp->sense_data;
+ sense_data_length = rsp->sense_data_length;
+ residual = rsp->residual;
+ } else {
+ residual = io->exp_xfer_len - io->transferred;
+ }
+
+ io->wire_len = 0;
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* Set low latency queueing request */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
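+ /* Anything other than a clean GOOD status needs an explicit FCP
+ * response payload rather than an auto good response
+ */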
+ if (scsi_status != 0 || residual || sense_data_length) {
+ struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
+ u8 *sns_data;
+
+ if (!fcprsp) {
+ efc_log_err(efct, "NULL response buffer\n");
+ return -EIO;
+ }
+
+ sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);
+
+ auto_resp = false;
+
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ io->wire_len += sizeof(*fcprsp);
+
+ fcprsp->resp.fr_status = scsi_status;
+ fcprsp->resp.fr_retry_delay =
+ cpu_to_be16(scsi_status_qualifier);
+
+ /* set residual status if necessary */
+ if (residual != 0) {
+ /* FCP: if the data transferred is less than the
+ * amount expected, this is an underflow. If the data
+ * transferred would have been greater than the amount
+ * expected, this is an overflow.
+ */
+ if (residual > 0) {
+ fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
+ fcprsp->ext.fr_resid = cpu_to_be32(residual);
+ } else {
+ fcprsp->resp.fr_flags |= FCP_RESID_OVER;
+ fcprsp->ext.fr_resid = cpu_to_be32(-residual);
+ }
+ }
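+
+		/*
+		 * Illustrative example: an exp_xfer_len of 8192 with only
+		 * 4096 bytes transferred gives a residual of 4096, so
+		 * FCP_RESID_UNDER is set with fr_resid = 4096; a negative
+		 * residual would instead set FCP_RESID_OVER with the
+		 * magnitude of the overrun.
+		 */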
+
+ if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
+ efc_log_err(efct, "Sense exceeds max size.\n");
+ return -EIO;
+ }
+
+ fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+ memcpy(sns_data, sense_data, sense_data_length);
+ fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
+ io->wire_len += sense_data_length;
+ }
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+ }
+
+ if (auto_resp)
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+static int
+efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status bls_status;
+
+ efct = io->efct;
+
+ /* BLS isn't really a "SCSI" concept, but use SCSI status */
+ if (status) {
+ io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
+ bls_status = EFCT_SCSI_STATUS_ERROR;
+ } else {
+ bls_status = EFCT_SCSI_STATUS_GOOD;
+ }
+
+ if (io->bls_cb) {
+ efct_scsi_io_cb_t bls_cb = io->bls_cb;
+ void *bls_cb_arg = io->bls_cb_arg;
+
+ io->bls_cb = NULL;
+ io->bls_cb_arg = NULL;
+
+ /* invoke callback */
+ bls_cb(io, bls_status, 0, bls_cb_arg);
+ }
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+static int
+efct_target_send_bls_resp(struct efct_io *io,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_acc *acc;
+ int rc;
+
+ /* fill out IO structure with everything needed to send BA_ACC */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = io->init_task_tag;
+ bls->rx_id = io->abort_rx_id;
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->ba_ox_id = cpu_to_be16(bls->ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls->rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ /* generic io fields have already been populated */
+
+ /* set type and BLS-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "bls_rsp";
+ io->hio_type = EFCT_HW_BLS_ACC;
+ io->bls_cb = cb;
+ io->bls_cb_arg = arg;
+
+ /* dispatch IO */
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
+ efct_target_bls_resp_cb, io);
+ return rc;
+}
+
+static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_rjt *acc;
+ int rc;
+
+ /* fill out BLS Response-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "ba_rjt";
+ io->hio_type = EFCT_HW_BLS_RJT;
+ io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);
+
+ /* fill out iparam fields */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->br_reason = ELS_RJT_UNAB;
+ acc->br_explan = ELS_EXPL_NONE;
+
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
+ io);
+ if (rc) {
+		efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
+ efct_scsi_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
+int
+efct_scsi_send_tmf_resp(struct efct_io *io,
+ enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3],
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ int rc;
+ struct {
+ struct fcp_resp_with_ext rsp_ext;
+ struct fcp_resp_rsp_info info;
+ } *fcprsp;
+ u8 fcp_rspcode;
+
+ io->wire_len = 0;
+
+ switch (rspcode) {
+ case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
+ case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_REJECTED:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
+ fcp_rspcode = FCP_TMF_INVALID_LUN;
+ break;
+ case EFCT_SCSI_TMF_SERVICE_DELIVERY:
+ fcp_rspcode = FCP_TMF_FAILED;
+ break;
+ default:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ }
+
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
+ rc = efct_target_send_bls_resp(io, cb, arg);
+ return rc;
+ }
+
+ /* populate the FCP TMF response */
+ fcprsp = io->rspbuf.virt;
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ fcprsp->rsp_ext.resp.fr_flags |= FCP_SNS_LEN_VAL;
+
+ if (addl_rsp_info) {
+ memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
+ sizeof(fcprsp->info._fr_resvd));
+ }
+ fcprsp->info.rsp_code = fcp_rspcode;
+
+ io->wire_len = sizeof(*fcprsp);
+
+ fcprsp->rsp_ext.ext.fr_rsp_len =
+ cpu_to_be32(sizeof(struct fcp_resp_rsp_info));
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
+
+ return rc;
+}
+
+static int
+efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_status;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+
+ efct = io->efct;
+
+ if (!io->abort_cb)
+ goto done;
+
+ abort_cb = io->abort_cb;
+ abort_cb_arg = io->abort_cb_arg;
+
+ io->abort_cb = NULL;
+ io->abort_cb_arg = NULL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
+ scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
+ break;
+ default:
+			/* we have seen 0x15 (abort in progress) */
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
+ break;
+ default:
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ /* invoke callback */
+ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
+
+done:
+	/* done with IO to abort, efct_ref_get(): efct_scsi_tgt_abort_io() */
+ kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
+
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ int rc;
+ struct efct_io *abort_io = NULL;
+
+ efct = io->efct;
+ xport = efct->xport;
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io->ref) == 0) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -EIO;
+ }
+
+	/*
+	 * Allocate a new IO to send the abort request. Use
+	 * efct_io_pool_io_alloc() directly, as we need an IO object that
+	 * will not fail allocation when allocations are disabled (as they
+	 * can be in efct_scsi_io_alloc()).
+	 */
+ abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!abort_io) {
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ kref_put(&io->ref, io->release);
+ return -EIO;
+ }
+
+ /* Save the target server callback and argument */
+ /* set generic fields */
+ abort_io->cmd_tgt = true;
+ abort_io->node = io->node;
+
+ /* set type and abort-specific fields */
+ abort_io->io_type = EFCT_IO_TYPE_ABORT;
+ abort_io->display_name = "tgt_abort";
+ abort_io->io_to_abort = io;
+ abort_io->send_abts = false;
+ abort_io->abort_cb = cb;
+ abort_io->abort_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
+ if (rc)
+ kref_put(&io->ref, io->release);
+ return rc;
+}
+
+void
+efct_scsi_io_complete(struct efct_io *io)
+{
+ if (io->io_free) {
+ efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
+ io->tag);
+ return;
+ }
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ kref_put(&io->ref, io->release);
+}
diff --git a/drivers/scsi/elx/efct/efct_scsi.h b/drivers/scsi/elx/efct/efct_scsi.h
new file mode 100644
index 000000000..b04faffa3
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_SCSI_H__)
+#define __EFCT_SCSI_H__
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+/* efct_scsi_recv_cmd() / efct_scsi_recv_tmf() flags */
+#define EFCT_SCSI_CMD_DIR_IN (1 << 0)
+#define EFCT_SCSI_CMD_DIR_OUT (1 << 1)
+#define EFCT_SCSI_CMD_SIMPLE (1 << 2)
+#define EFCT_SCSI_CMD_HEAD_OF_QUEUE (1 << 3)
+#define EFCT_SCSI_CMD_ORDERED (1 << 4)
+#define EFCT_SCSI_CMD_UNTAGGED (1 << 5)
+#define EFCT_SCSI_CMD_ACA (1 << 6)
+#define EFCT_SCSI_FIRST_BURST_ERR (1 << 7)
+#define EFCT_SCSI_FIRST_BURST_ABORTED (1 << 8)
+
+/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */
+#define EFCT_SCSI_LAST_DATAPHASE (1 << 0)
+#define EFCT_SCSI_NO_AUTO_RESPONSE (1 << 1)
+#define EFCT_SCSI_LOW_LATENCY (1 << 2)
+
+#define EFCT_SCSI_SNS_BUF_VALID(sense) ((sense) && \
+ (0x70 == (((const u8 *)(sense))[0] & 0x70)))
+
+#define EFCT_SCSI_WQ_STEERING_SHIFT 16
+#define EFCT_SCSI_WQ_STEERING_MASK (0xf << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CLASS (0 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_REQUEST (1 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CPU (2 << EFCT_SCSI_WQ_STEERING_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_SHIFT (20)
+#define EFCT_SCSI_WQ_CLASS_MASK (0xf << EFCT_SCSI_WQ_CLASS_SHIFT)
+#define EFCT_SCSI_WQ_CLASS(x) ((x & EFCT_SCSI_WQ_CLASS_MASK) << \
+ EFCT_SCSI_WQ_CLASS_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY 1
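+
+/*
+ * Example flags composition (hypothetical caller, values illustrative):
+ *
+ *	flags = EFCT_SCSI_LOW_LATENCY | EFCT_SCSI_WQ_STEERING_CPU |
+ *		(EFCT_SCSI_WQ_CLASS_LOW_LATENCY << EFCT_SCSI_WQ_CLASS_SHIFT);
+ *
+ * The driver recovers the steering and class fields from a flags word
+ * using the masks and shifts above.
+ */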
+
+struct efct_scsi_cmd_resp {
+ u8 scsi_status;
+ u16 scsi_status_qualifier;
+ u8 *response_data;
+ u32 response_data_length;
+ u8 *sense_data;
+ u32 sense_data_length;
+ int residual;
+ u32 response_wire_length;
+};
+
+struct efct_vport {
+ struct efct *efct;
+ bool is_vport;
+ struct fc_host_statistics fc_host_stats;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+};
+
+/* Status values returned by IO callbacks */
+enum efct_scsi_io_status {
+ EFCT_SCSI_STATUS_GOOD = 0,
+ EFCT_SCSI_STATUS_ABORTED,
+ EFCT_SCSI_STATUS_ERROR,
+ EFCT_SCSI_STATUS_DIF_GUARD_ERR,
+ EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR,
+ EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR,
+ EFCT_SCSI_STATUS_NO_IO,
+ EFCT_SCSI_STATUS_ABORT_IN_PROGRESS,
+ EFCT_SCSI_STATUS_CHECK_RESPONSE,
+ EFCT_SCSI_STATUS_COMMAND_TIMEOUT,
+ EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
+ EFCT_SCSI_STATUS_SHUTDOWN,
+ EFCT_SCSI_STATUS_NEXUS_LOST,
+};
+
+struct efct_node;
+struct efct_io;
+struct efc_node;
+struct efc_nport;
+
+/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
+typedef int (*efct_scsi_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ u32 flags, void *arg);
+
+/* Callback used by send_rd_io(), send_wr_io() */
+typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ struct efct_scsi_cmd_resp *rsp,
+ u32 flags, void *arg);
+
+/* efct_scsi_io_cb_t flags */
+#define EFCT_SCSI_IO_CMPL (1 << 0)
+/* IO completed, response sent */
+#define EFCT_SCSI_IO_CMPL_RSP_SENT (1 << 1)
+#define EFCT_SCSI_IO_ABORTED (1 << 2)
+
+/* efct_scsi_recv_tmf() request values */
+enum efct_scsi_tmf_cmd {
+ EFCT_SCSI_TMF_ABORT_TASK = 1,
+ EFCT_SCSI_TMF_QUERY_TASK_SET,
+ EFCT_SCSI_TMF_ABORT_TASK_SET,
+ EFCT_SCSI_TMF_CLEAR_TASK_SET,
+ EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
+ EFCT_SCSI_TMF_LOGICAL_UNIT_RESET,
+ EFCT_SCSI_TMF_CLEAR_ACA,
+ EFCT_SCSI_TMF_TARGET_RESET,
+};
+
+/* efct_scsi_send_tmf_resp() response values */
+enum efct_scsi_tmf_resp {
+ EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1,
+ EFCT_SCSI_TMF_FUNCTION_SUCCEEDED,
+ EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
+ EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
+ EFCT_SCSI_TMF_SERVICE_DELIVERY,
+};
+
+struct efct_scsi_sgl {
+ uintptr_t addr;
+ uintptr_t dif_addr;
+ size_t len;
+};
+
+enum efct_scsi_io_role {
+ EFCT_SCSI_IO_ROLE_ORIGINATOR,
+ EFCT_SCSI_IO_ROLE_RESPONDER,
+};
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node);
+void efct_scsi_io_free(struct efct_io *io);
+struct efct_io *efct_io_get_instance(struct efct *efct, u32 index);
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+int efct_scsi_tgt_new_device(struct efct *efct);
+int efct_scsi_tgt_del_device(struct efct *efct);
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport);
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport);
+
+int
+efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);
+
+enum efct_scsi_del_initiator_reason {
+ EFCT_SCSI_INITIATOR_DELETED,
+ EFCT_SCSI_INITIATOR_MISSING,
+};
+
+int
+efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
+void
+efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len,
+ u32 flags);
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *abortio, u32 flags);
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);
+
+void efct_scsi_io_complete(struct efct_io *io);
+
+int efct_scsi_reg_fc_transport(void);
+void efct_scsi_release_fc_transport(void);
+int efct_scsi_new_device(struct efct *efct);
+void efct_scsi_del_device(struct efct *efct);
+void _efct_scsi_io_free(struct kref *arg);
+
+int
+efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost);
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev);
+
+int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
+int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
+void efct_scsi_check_pending(struct efct *efct);
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr);
+
+#endif /* __EFCT_SCSI_H__ */
diff --git a/drivers/scsi/elx/efct/efct_unsol.c b/drivers/scsi/elx/efct/efct_unsol.c
new file mode 100644
index 000000000..e6addab66
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+#define frame_printf(efct, hdr, fmt, ...) \
+ do { \
+ char s_id_text[16]; \
+ efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
+ s_id_text, sizeof(s_id_text)); \
+ efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
+ ntoh24((hdr)->fh_d_id), s_id_text, \
+ (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
+ be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
+ } while (0)
+
+static struct efct_node *
+efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
+{
+ struct efct_node *node;
+ u64 id = (u64)port_id << 32 | node_id;
+
+	/*
+	 * Find a target node, given s_id and d_id. During node shutdown,
+	 * the lookup entry is removed before the backend is notified, so
+	 * no new IOs will be allowed.
+	 */
+ node = xa_load(&efct->lookup, id);
+ if (node)
+ kref_get(&node->ref);
+
+ return node;
+}
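+
+/*
+ * For example, a frame addressed to d_id 0x010200 from s_id 0x010300 is
+ * looked up with id 0x0001020000010300 (port_id in the upper 32 bits,
+ * node_id in the lower 32), the same composite key under which nodes are
+ * stored in efct->lookup.
+ */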
+
+static int
+efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ struct efct_node *node;
+ struct fc_frame_header *hdr;
+ u32 s_id, d_id;
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
+ return -EIO;
+
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct,
+ "Node not found, drop cmd d_id:%x s_id:%x\n",
+ d_id, s_id);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+ }
+
+ efct_dispatch_fcp_cmd(node, seq);
+ } else {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
+ d_id, s_id);
+ return -EIO;
+ }
+
+ efc_log_err(efct, "Received ABTS for Node:%p\n", node);
+ efct_node_recv_abts_frame(node, seq);
+ }
+
+ kref_put(&node->ref, node->release);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+}
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = arg;
+
+	/* Process FCP command or BLS (ABTS) frame */
+ if (!efct_dispatch_frame(efct, seq))
+ return 0;
+
+ /* Forward frame to discovery lib */
+ efc_dispatch_frame(efct->efcport, seq);
+ return 0;
+}
+
+static int
+efct_fc_tmf_rejected_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+static void
+efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
+{
+ u32 i;
+ struct {
+ u32 mask;
+ enum efct_scsi_tmf_cmd cmd;
+ } tmflist[] = {
+ {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
+ {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
+ {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
+ {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
+ {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };
+
+ io->exp_xfer_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
+ if (tmflist[i].mask & tm_flags) {
+ io->tmf_cmd = tmflist[i].cmd;
+ efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(tmflist)) {
+ /* Not handled */
+ efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
+ efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_fc_tmf_rejected_cb, NULL);
+ }
+}
+
+static int
+efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
+{
+	/*
+	 * If we received fewer than FCP_CMND_IU bytes, assume the frame is
+	 * corrupted in some way and drop it. This has been seen when the
+	 * FCTL fill bytes field is jammed.
+	 */
+ if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+
+ efc_log_debug(efct,
+ "drop ox_id %04x payload (%zd) less than (%zd)\n",
+ be16_to_cpu(fchdr->fh_ox_id),
+ seq->payload->dma.len, sizeof(struct fcp_cmnd));
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
+ struct fc_frame_header *fchdr, bool sit)
+{
+ io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
+ /* note, tgt_task_tag, hw_tag set when HW io is allocated */
+ io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
+ io->transferred = 0;
+
+	/* The upper 7 bits of CS_CTL are the frame priority through the SAN.
+	 * Our assertion here is that the priority given to a frame containing
+	 * the FCP cmd should be the priority given to ALL frames contained
+	 * in that IO. Thus we need to save the incoming CS_CTL here.
+	 */
+ if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
+ io->cs_ctl = fchdr->fh_cs_ctl;
+ else
+ io->cs_ctl = 0;
+
+ io->seq_init = sit;
+}
+
+static u32
+efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
+{
+ u32 flags = 0;
+
+ switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_SIMPLE:
+ flags |= EFCT_SCSI_CMD_SIMPLE;
+ break;
+ case FCP_PTA_HEADQ:
+ flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
+ break;
+ case FCP_PTA_ORDERED:
+ flags |= EFCT_SCSI_CMD_ORDERED;
+ break;
+ case FCP_PTA_ACA:
+ flags |= EFCT_SCSI_CMD_ACA;
+ break;
+ }
+ if (cmnd->fc_flags & FCP_CFL_WRDATA)
+ flags |= EFCT_SCSI_CMD_DIR_IN;
+ if (cmnd->fc_flags & FCP_CFL_RDDATA)
+ flags |= EFCT_SCSI_CMD_DIR_OUT;
+
+ return flags;
+}
+
+static void
+efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_send_frame_context *ctx = arg;
+ struct efct_hw *hw = ctx->hw;
+
+ /* Free WQ completion callback */
+ efct_hw_reqtag_free(hw, ctx->wqcb);
+
+ /* Free sequence */
+ efct_hw_sequence_free(hw, ctx->seq);
+}
+
+static int
+efct_sframe_common_send(struct efct_node *node,
+ struct efc_hw_sequence *seq,
+ enum fc_rctl r_ctl, u32 f_ctl,
+ u8 type, void *payload, u32 payload_len)
+{
+ struct efct *efct = node->efct;
+ struct efct_hw *hw = &efct->hw;
+ int rc = 0;
+ struct fc_frame_header *req_hdr = seq->header->dma.virt;
+ struct fc_frame_header hdr;
+ struct efct_hw_send_frame_context *ctx;
+
+ u32 heap_size = seq->payload->dma.size;
+ uintptr_t heap_phys_base = seq->payload->dma.phys;
+ u8 *heap_virt_base = seq->payload->dma.virt;
+ u32 heap_offset = 0;
+
+	/* Build the FC header; addressing fields come from the received RQ header */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.fh_r_ctl = r_ctl;
+	/* send it back to whoever sent it to us */
+ memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
+ memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
+ hdr.fh_type = type;
+ hton24(hdr.fh_f_ctl, f_ctl);
+ hdr.fh_ox_id = req_hdr->fh_ox_id;
+ hdr.fh_rx_id = req_hdr->fh_rx_id;
+ hdr.fh_cs_ctl = 0;
+ hdr.fh_df_ctl = 0;
+ hdr.fh_seq_cnt = 0;
+ hdr.fh_parm_offset = 0;
+
+	/*
+	 * send_frame_seq_id is an atomic; just let it increment, storing
+	 * only the low 8 bits of the pre-increment value in fh_seq_id
+	 */
+ hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
+ hdr.fh_seq_id--;
+
+ /* Allocate and fill in the send frame request context */
+ ctx = (void *)(heap_virt_base + heap_offset);
+ heap_offset += sizeof(*ctx);
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Save sequence */
+ ctx->seq = seq;
+
+ /* Allocate a response payload DMA buffer from the heap */
+ ctx->payload.phys = heap_phys_base + heap_offset;
+ ctx->payload.virt = heap_virt_base + heap_offset;
+ ctx->payload.size = payload_len;
+ ctx->payload.len = payload_len;
+ heap_offset += payload_len;
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ /* Copy the payload in */
+ memcpy(ctx->payload.virt, payload, payload_len);
+
+ /* Send */
+ rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
+ FC_EOF_T, &ctx->payload, ctx,
+ efct_sframe_common_send_cb, ctx);
+ if (rc)
+ efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);
+
+ return rc;
+}
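+
+/*
+ * Buffer layout sketch (sizes illustrative): the received sequence's
+ * payload DMA buffer is reused as a small heap, with the send-frame
+ * context carved out at offset 0 and the response payload placed
+ * immediately after it; each carve-out is bounds-checked against
+ * heap_size before it is used.
+ */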
+
+static int
+efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
+ void *rsp, u32 rsp_len)
+{
+ return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
+ FC_FC_EX_CTX |
+ FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT,
+ FC_TYPE_FCP,
+ rsp, rsp_len);
+}
+
+static int
+efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fcp_resp_with_ext fcprsp;
+ struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
+ int rc = 0;
+ unsigned long flags = 0;
+ struct efct *efct = node->efct;
+
+ /* construct task set full or busy response */
+ memset(&fcprsp, 0, sizeof(fcprsp));
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
+ SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ *((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);
+
+ /* send it using send_frame */
+ rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
+ if (rc)
+ efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);
+
+ return rc;
+}
+
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+ struct fcp_cmnd *cmnd = NULL;
+ struct efct_io *io = NULL;
+ u32 lun;
+
+ if (!seq->payload) {
+ efc_log_err(efct, "Sequence payload is NULL.\n");
+ return -EIO;
+ }
+
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (efct_validate_fcp_cmd(efct, seq))
+ return -EIO;
+
+ lun = scsilun_to_int(&cmnd->fc_lun);
+ if (lun == U32_MAX)
+ return -EIO;
+
+ io = efct_scsi_io_alloc(node);
+ if (!io) {
+ int rc;
+
+ /* Use SEND_FRAME to send task set full or busy */
+ rc = efct_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc)
+ efc_log_err(efct, "Failed to send busy task: %d\n", rc);
+
+ return rc;
+ }
+
+ io->hw_priv = seq->hw_priv;
+
+ io->app_id = 0;
+
+ /* RQ pair, if we got here, SIT=1 */
+ efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);
+
+ if (cmnd->fc_tm_flags) {
+ efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
+ } else {
+ u32 flags = efct_get_flags_fcp_cmd(cmnd);
+
+ if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
+ efc_log_err(efct, "Additional CDB not supported\n");
+ return -EIO;
+ }
+ /*
+ * Can return failure for things like task set full and UAs,
+ * no need to treat as a dropped frame if rc != 0
+ */
+ efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
+ sizeof(cmnd->fc_cdb), flags);
+ }
+
+ return 0;
+}
+
+static int
+efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct efct *efct = io->efct;
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+ u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
+ struct efct_io *abortio;
+
+ /* Find IO and attempt to take a reference on it */
+ abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);
+
+ if (abortio) {
+ /* Got a reference on the IO. Hold it until backend
+ * is notified below
+ */
+ efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
+ ox_id, rx_id);
+
+		/*
+		 * Save the ox_id for the ABTS as the init_task_tag in our
+		 * manufactured TMF IO object.
+		 */
+ io->display_name = "abts";
+ io->init_task_tag = ox_id;
+ /* don't set tgt_task_tag, don't want to confuse with XRI */
+
+		/*
+		 * Save the rx_id from the ABTS, as it is needed for the BLS
+		 * response regardless of the IO context's rx_id.
+		 */
+ io->abort_rx_id = rx_id;
+
+ /* Call target server command abort */
+ io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
+ efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
+ EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);
+
+		/*
+		 * The backend will have taken an additional reference on the
+		 * IO if needed; we are done with the current reference.
+		 */
+ kref_put(&abortio->ref, abortio->release);
+ } else {
+		/*
+		 * Either the IO was not found or it was freed between
+		 * finding it and attempting to take the reference.
+		 */
+ efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
+ ox_id);
+
+ /* Send a BA_RJT */
+ efct_bls_send_rjt(io, hdr);
+ }
+ return 0;
+}
+
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efct_io *io = NULL;
+
+ node->abort_cnt++;
+ io = efct_scsi_io_alloc(node);
+ if (io) {
+ io->hw_priv = seq->hw_priv;
+ /* If we got this far, SIT=1 */
+ io->seq_init = 1;
+
+ /* fill out generic fields */
+ io->efct = efct;
+ io->node = node;
+ io->cmd_tgt = true;
+
+ efct_process_abts(io, seq->header->dma.virt);
+ } else {
+ efc_log_err(efct,
+ "SCSI IO allocation failed for ABTS received ");
+ efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
+ be16_to_cpu(hdr->fh_ox_id),
+ be16_to_cpu(hdr->fh_rx_id));
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_unsol.h b/drivers/scsi/elx/efct/efct_unsol.h
new file mode 100644
index 000000000..16d1e3ba1
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__OSC_UNSOL_H__)
+#define __OSC_UNSOL_H__
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq);
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __OSC_UNSOL_H__ */
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
new file mode 100644
index 000000000..9495cedcc
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+static struct dentry *efct_debugfs_root;
+static atomic_t efct_debugfs_count;
+
+static struct scsi_host_template efct_template = {
+ .module = THIS_MODULE,
+ .name = EFCT_DRIVER_NAME,
+ .supported_mode = MODE_TARGET,
+};
+
+/* globals */
+static struct fc_function_template efct_xport_functions;
+static struct fc_function_template efct_vport_functions;
+
+static struct scsi_transport_template *efct_xport_fc_tt;
+static struct scsi_transport_template *efct_vport_fc_tt;
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct)
+{
+ struct efct_xport *xport;
+
+ xport = kzalloc(sizeof(*xport), GFP_KERNEL);
+ if (!xport)
+ return xport;
+
+ xport->efct = efct;
+ return xport;
+}
+
+static int
+efct_xport_init_debugfs(struct efct *efct)
+{
+ /* Setup efct debugfs root directory */
+ if (!efct_debugfs_root) {
+ efct_debugfs_root = debugfs_create_dir("efct", NULL);
+ atomic_set(&efct_debugfs_count, 0);
+ }
+
+ /* Create a directory for sessions in root */
+ if (!efct->sess_debugfs_dir) {
+ efct->sess_debugfs_dir = debugfs_create_dir("sessions",
+ efct_debugfs_root);
+ if (IS_ERR(efct->sess_debugfs_dir)) {
+ efc_log_err(efct,
+ "failed to create debugfs entry for sessions\n");
+ goto debugfs_fail;
+ }
+ atomic_inc(&efct_debugfs_count);
+ }
+
+ return 0;
+
+debugfs_fail:
+ return -EIO;
+}
+
+static void efct_xport_delete_debugfs(struct efct *efct)
+{
+ /* Remove session debugfs directory */
+ debugfs_remove(efct->sess_debugfs_dir);
+ efct->sess_debugfs_dir = NULL;
+ atomic_dec(&efct_debugfs_count);
+
+ if (atomic_read(&efct_debugfs_count) == 0) {
+ /* remove root debugfs directory */
+ debugfs_remove(efct_debugfs_root);
+ efct_debugfs_root = NULL;
+ }
+}
+
+int
+efct_xport_attach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc) {
+ efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc);
+ return rc;
+ }
+
+ efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def);
+
+ xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl);
+ if (!xport->io_pool) {
+ efc_log_err(efct, "Can't allocate IO pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+efct_xport_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_async_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+}
+
+static void
+efct_xport_async_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct);
+
+static void
+efct_xport_stats_timer_cb(struct timer_list *t)
+{
+ struct efct_xport *xport = from_timer(xport, t, stats_timer);
+ struct efct *efct = xport->efct;
+
+ efct_xport_config_stats_timer(efct);
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct)
+{
+ u32 timeout = 3 * 1000;
+ struct efct_xport *xport = NULL;
+
+ if (!efct) {
+ pr_err("%s: failed to locate EFCT device\n", __func__);
+ return;
+ }
+
+ xport = efct->xport;
+ efct_hw_get_link_stats(&efct->hw, 0, 0, 0,
+ efct_xport_async_link_stats_cb,
+ &xport->fc_xport_stats);
+ efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb,
+ &xport->fc_xport_stats);
+
+ timer_setup(&xport->stats_timer,
+ &efct_xport_stats_timer_cb, 0);
+ mod_timer(&xport->stats_timer,
+ jiffies + msecs_to_jiffies(timeout));
+}
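+
+/*
+ * Note: efct_xport_stats_timer_cb() calls back into
+ * efct_xport_config_stats_timer(), so the statistics are refreshed and
+ * the timer re-armed roughly every 3 seconds (the timeout above) until
+ * the timer is deleted in efct_xport_detach().
+ */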
+
+int
+efct_xport_initialize(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc = 0;
+
+ /* Initialize io lists */
+ spin_lock_init(&xport->io_pending_lock);
+ INIT_LIST_HEAD(&xport->io_pending_list);
+ atomic_set(&xport->io_active_count, 0);
+ atomic_set(&xport->io_pending_count, 0);
+ atomic_set(&xport->io_total_free, 0);
+ atomic_set(&xport->io_total_pending, 0);
+ atomic_set(&xport->io_alloc_failed_count, 0);
+ atomic_set(&xport->io_pending_recursing, 0);
+
+ rc = efct_hw_init(&efct->hw);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_init failure\n");
+ goto out;
+ }
+
+ rc = efct_scsi_tgt_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize target\n");
+ goto hw_init_out;
+ }
+
+ rc = efct_scsi_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize initiator\n");
+ goto tgt_dev_out;
+ }
+
+	/* Get FC link and host statistics periodically */
+ efct_xport_config_stats_timer(efct);
+
+ efct_xport_init_debugfs(efct);
+
+ return rc;
+
+tgt_dev_out:
+ efct_scsi_tgt_del_device(efct);
+
+hw_init_out:
+ efct_hw_teardown(&efct->hw);
+out:
+ return rc;
+}
+
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result)
+{
+ int rc = 0;
+ struct efct *efct = NULL;
+ union efct_xport_stats_u value;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_CONFIG_PORT_STATUS:
+ if (xport->configured_link_state == 0) {
+ /*
+ * Initial state is offline. configured_link_state is
+ * set to online explicitly when port is brought online
+ */
+ xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE;
+ }
+ result->value = xport->configured_link_state;
+ break;
+
+ case EFCT_XPORT_PORT_STATUS:
+ /* Determine port status based on link speed. */
+ value.value = efct_hw_get_link_speed(&efct->hw);
+ if (value.value == 0)
+ result->value = EFCT_XPORT_PORT_OFFLINE;
+ else
+ result->value = EFCT_XPORT_PORT_ONLINE;
+ break;
+
+ case EFCT_XPORT_LINK_SPEED:
+ result->value = efct_hw_get_link_speed(&efct->hw);
+ break;
+
+ case EFCT_XPORT_LINK_STATISTICS:
+ memcpy((void *)result, &efct->xport->fc_xport_stats,
+ sizeof(union efct_xport_stats_u));
+ break;
+ case EFCT_XPORT_LINK_STAT_RESET: {
+ /* Create a completion to synchronize the stat reset process */
+ init_completion(&result->stats.done);
+
+ /* First reset the link stats */
+ rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1,
+ efct_xport_link_stats_cb, result);
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ /* Next reset the host stats */
+ rc = efct_hw_get_host_stats(&efct->hw, 1,
+ efct_xport_host_stats_cb, result);
+
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+ break;
+ }
+ default:
+ rc = -EIO;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+efct_get_link_supported_speeds(struct efct *efct)
+{
+ u32 supported_speeds = 0;
+ u32 link_module_type, i;
+ struct {
+ u32 lmt_speed;
+ u32 speed;
+ } supported_speed_list[] = {
+ {SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT},
+ {SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT},
+ {SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT},
+ {SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT},
+ {SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT},
+ {SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT},
+ {SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT},
+ {SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT},
+ };
+
+ link_module_type = sli_get_lmt(&efct->hw.sli);
+
+ /* populate link supported speeds */
+ for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) {
+ if (link_module_type & supported_speed_list[i].lmt_speed)
+ supported_speeds |= supported_speed_list[i].speed;
+ }
+
+ return supported_speeds;
+}
+
+int
+efct_scsi_new_device(struct efct *efct)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return -ENOMEM;
+ }
+
+ /* save shost to initiator-client context */
+ efct->shost = shost;
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+
+	/*
+	 * Set initial can_queue value to the max SCSI IOs. This is the
+	 * maximum global queue depth (as opposed to the per-LUN queue depth,
+	 * .cmd_per_lun). This may need to be adjusted for I+T mode.
+	 */
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /*
+ * can only accept (from mid-layer) as many SGEs as we've
+ * pre-registered
+ */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_xport_fc_tt;
+ efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, &efct->pci->dev,
+ &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return -EIO;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model,
+ efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+
+ fc_host_node_name(shost) = efct_get_wwnn(&efct->hw);
+ fc_host_port_name(shost) = efct_get_wwpn(&efct->hw);
+ fc_host_max_npiv_vports(shost) = 128;
+
+ return 0;
+}
+
+struct scsi_transport_template *
+efct_attach_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_xport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+struct scsi_transport_template *
+efct_attach_vport_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_vport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+int
+efct_scsi_reg_fc_transport(void)
+{
+	/* attach to the appropriate scsi_transport_* module */
+ efct_xport_fc_tt = efct_attach_fc_transport();
+ if (!efct_xport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ return -EIO;
+ }
+
+ efct_vport_fc_tt = efct_attach_vport_fc_transport();
+ if (!efct_vport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_scsi_release_fc_transport(void)
+{
+ /* detach from scsi_transport_* */
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ if (efct_vport_fc_tt)
+ efct_release_fc_transport(efct_vport_fc_tt);
+
+ efct_vport_fc_tt = NULL;
+}
+
+void
+efct_xport_detach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+
+ /* free resources associated with target-server and initiator-client */
+ efct_scsi_tgt_del_device(efct);
+
+ efct_scsi_del_device(efct);
+
+	/* Shutdown FC statistics timer */
+ if (timer_pending(&xport->stats_timer))
+ del_timer(&xport->stats_timer);
+
+ efct_hw_teardown(&efct->hw);
+
+ efct_xport_delete_debugfs(efct);
+}
+
+static void
+efct_xport_domain_free_cb(struct efc *efc, void *arg)
+{
+ struct completion *done = arg;
+
+ complete(done);
+}
+
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...)
+{
+ u32 rc = 0;
+ struct efct *efct = NULL;
+ va_list argp;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_PORT_ONLINE: {
+ /* Bring the port on-line */
+ rc = efct_hw_port_control(&efct->hw, EFCT_HW_PORT_INIT, 0,
+ NULL, NULL);
+ if (rc)
+ efc_log_err(efct,
+ "%s: Can't init port\n", efct->desc);
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+ case EFCT_XPORT_PORT_OFFLINE: {
+ if (efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, 0,
+ NULL, NULL))
+ efc_log_err(efct, "port shutdown failed\n");
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+
+ case EFCT_XPORT_SHUTDOWN: {
+ struct completion done;
+ unsigned long timeout;
+
+		/* If a PHYSDEV reset was performed (e.g. hw dump), it affects
+		 * all PCI functions; an orderly shutdown won't work, so just
+		 * force-free.
+		 */
+ if (sli_reset_required(&efct->hw.sli)) {
+ struct efc_domain *domain = efct->efcport->domain;
+
+ if (domain)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST,
+ domain);
+ } else {
+ efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN,
+ 0, NULL, NULL);
+ }
+
+ init_completion(&done);
+
+ efc_register_domain_free_cb(efct->efcport,
+ efct_xport_domain_free_cb, &done);
+
+ efc_log_debug(efct, "Waiting %d seconds for domain shutdown\n",
+ (EFC_SHUTDOWN_TIMEOUT_USEC / 1000000));
+
+ timeout = usecs_to_jiffies(EFC_SHUTDOWN_TIMEOUT_USEC);
+ if (!wait_for_completion_timeout(&done, timeout)) {
+ efc_log_err(efct, "Domain shutdown timed out!!\n");
+ WARN_ON(1);
+ }
+
+ efc_register_domain_free_cb(efct->efcport, NULL, NULL);
+
+ /* Free up any saved virtual ports */
+ efc_vport_del_all(efct->efcport);
+ break;
+ }
+
+ /*
+ * Set wwnn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWNN_SET: {
+ u64 wwnn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwnn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWNN %016llx\n", wwnn);
+ xport->req_wwnn = wwnn;
+
+ break;
+ }
+ /*
+ * Set wwpn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWPN_SET: {
+ u64 wwpn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwpn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWPN %016llx\n", wwpn);
+ xport->req_wwpn = wwpn;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ return rc;
+}
+
+void
+efct_xport_free(struct efct_xport *xport)
+{
+ if (xport) {
+ efct_io_pool_free(xport->io_pool);
+
+ kfree(xport);
+ }
+}
+
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template)
+{
+ if (transport_template)
+ pr_err("releasing transport layer\n");
+
+ /* Releasing FC transport */
+ fc_release_transport(transport_template);
+}
+
+static void
+efct_xport_remove_host(struct Scsi_Host *shost)
+{
+ fc_remove_host(shost);
+}
+
+void
+efct_scsi_del_device(struct efct *efct)
+{
+ if (!efct->shost)
+ return;
+
+ efc_log_debug(efct, "Unregistering with Transport Layer\n");
+ efct_xport_remove_host(efct->shost);
+ efc_log_debug(efct, "Unregistering with SCSI Midlayer\n");
+ scsi_remove_host(efct->shost);
+ scsi_host_put(efct->shost);
+ efct->shost = NULL;
+}
+
+static void
+efct_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ struct efc_nport *nport;
+
+ if (efc->domain && efc->domain->nport) {
+ nport = efc->domain->nport;
+ fc_host_port_id(shost) = nport->fc_id;
+ }
+}
+
+static void
+efct_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ int type = FC_PORTTYPE_UNKNOWN;
+
+ if (efc->domain && efc->domain->nport) {
+ if (efc->domain->is_loop) {
+ type = FC_PORTTYPE_LPORT;
+ } else {
+ struct efc_nport *nport = efc->domain->nport;
+
+ if (nport->is_vport)
+ type = FC_PORTTYPE_NPIV;
+ else if (nport->topology == EFC_NPORT_TOPO_P2P)
+ type = FC_PORTTYPE_PTP;
+ else if (nport->topology == EFC_NPORT_TOPO_UNKNOWN)
+ type = FC_PORTTYPE_UNKNOWN;
+ else
+ type = FC_PORTTYPE_NPORT;
+ }
+ }
+ fc_host_port_type(shost) = type;
+}
+
+static void
+efct_get_host_vport_type(struct Scsi_Host *shost)
+{
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+}
+
+static void
+efct_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u status;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_PORT_STATUS, &status);
+ if ((!rc) && (status.value == EFCT_XPORT_PORT_ONLINE))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+
+static void
+efct_get_host_speed(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ union efct_xport_stats_u speed;
+ u32 fc_speed = FC_PORTSPEED_UNKNOWN;
+ int rc;
+
+ if (!efc->domain || !efc->domain->nport) {
+ fc_host_speed(shost) = fc_speed;
+ return;
+ }
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_SPEED, &speed);
+ if (!rc) {
+ switch (speed.value) {
+ case 1000:
+ fc_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case 2000:
+ fc_speed = FC_PORTSPEED_2GBIT;
+ break;
+ case 4000:
+ fc_speed = FC_PORTSPEED_4GBIT;
+ break;
+ case 8000:
+ fc_speed = FC_PORTSPEED_8GBIT;
+ break;
+ case 10000:
+ fc_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 16000:
+ fc_speed = FC_PORTSPEED_16GBIT;
+ break;
+ case 32000:
+ fc_speed = FC_PORTSPEED_32GBIT;
+ break;
+ case 64000:
+ fc_speed = FC_PORTSPEED_64GBIT;
+ break;
+ case 128000:
+ fc_speed = FC_PORTSPEED_128GBIT;
+ break;
+ }
+ }
+
+ fc_host_speed(shost) = fc_speed;
+}
+
+static void
+efct_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+
+ if (efc->domain) {
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)
+ efc->domain->flogi_service_params;
+
+ fc_host_fabric_name(shost) = be64_to_cpu(sp->fl_wwnn);
+ }
+}
+
+static struct fc_host_statistics *
+efct_get_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u stats;
+ struct efct_xport *xport = efct->xport;
+ int rc = 0;
+
+ rc = efct_xport_status(xport, EFCT_XPORT_LINK_STATISTICS, &stats);
+ if (rc) {
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+ return NULL;
+ }
+
+ vport->fc_host_stats.loss_of_sync_count =
+ stats.stats.link_stats.loss_of_sync_error_count;
+ vport->fc_host_stats.link_failure_count =
+ stats.stats.link_stats.link_failure_error_count;
+ vport->fc_host_stats.prim_seq_protocol_err_count =
+ stats.stats.link_stats.primitive_sequence_error_count;
+ vport->fc_host_stats.invalid_tx_word_count =
+ stats.stats.link_stats.invalid_transmission_word_error_count;
+ vport->fc_host_stats.invalid_crc_count =
+ stats.stats.link_stats.crc_error_count;
+	/* mbox returns a kbyte count; convert to 4-byte FC words (x256) */
+ vport->fc_host_stats.tx_words =
+ stats.stats.host_stats.transmit_kbyte_count * 256;
+	/* likewise, convert the receive kbyte count to words */
+ vport->fc_host_stats.rx_words =
+ stats.stats.host_stats.receive_kbyte_count * 256;
+ vport->fc_host_stats.tx_frames =
+ stats.stats.host_stats.transmit_frame_count;
+ vport->fc_host_stats.rx_frames =
+ stats.stats.host_stats.receive_frame_count;
+
+ vport->fc_host_stats.fcp_input_requests =
+ xport->fcp_stats.input_requests;
+ vport->fc_host_stats.fcp_output_requests =
+ xport->fcp_stats.output_requests;
+ vport->fc_host_stats.fcp_output_megabytes =
+ xport->fcp_stats.output_bytes >> 20;
+ vport->fc_host_stats.fcp_input_megabytes =
+ xport->fcp_stats.input_bytes >> 20;
+ vport->fc_host_stats.fcp_control_requests =
+ xport->fcp_stats.control_requests;
+
+ return &vport->fc_host_stats;
+}
+
+static void
+efct_reset_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ /* argument has no purpose for this action */
+ union efct_xport_stats_u dummy;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_STAT_RESET, &dummy);
+ if (rc)
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+}
+
+static int
+efct_issue_lip(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport =
+ shost ? (struct efct_vport *)shost->hostdata : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+
+ if (!shost || !vport || !efct) {
+ pr_err("%s: shost=%p vport=%p efct=%p\n", __func__,
+ shost, vport, efct);
+ return -EPERM;
+ }
+
+ /*
+ * Bring the link down gracefully then re-init the link.
+ * The firmware will re-initialize the Fibre Channel interface as
+ * required. It does not issue a LIP.
+ */
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_OFFLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_OFFLINE failed\n");
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_ONLINE failed\n");
+
+ return 0;
+}
+
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return NULL;
+ }
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+ vport->is_vport = true;
+
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+	/* can only accept (from mid-layer) as many SGEs as we've pre-registered */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_vport_fc_tt;
+ efc_log_debug(efct, "vport transport template=%p\n",
+ efct_vport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, dev, &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return NULL;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model, efct->hw.sli.fw_name[0],
+ EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+ vport->shost = shost;
+
+ return vport;
+}
+
+int efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost)
+{
+ if (shost) {
+ efc_log_debug(efct,
+ "Unregistering vport with Transport Layer\n");
+ efct_xport_remove_host(shost);
+ efc_log_debug(efct, "Unregistering vport with SCSI Midlayer\n");
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+ return 0;
+ }
+ return -EIO;
+}
+
+static int
+efct_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport ? fc_vport->shost : NULL;
+ struct efct_vport *pport = shost ?
+ (struct efct_vport *)shost->hostdata :
+ NULL;
+ struct efct *efct = pport ? pport->efct : NULL;
+ struct efct_vport *vport = NULL;
+
+ if (!fc_vport || !shost || !efct)
+ goto fail;
+
+ vport = efct_scsi_new_vport(efct, &fc_vport->dev);
+ if (!vport) {
+ efc_log_err(efct, "failed to create vport\n");
+ goto fail;
+ }
+
+ vport->fc_vport = fc_vport;
+ vport->npiv_wwpn = fc_vport->port_name;
+ vport->npiv_wwnn = fc_vport->node_name;
+ fc_host_node_name(vport->shost) = vport->npiv_wwnn;
+ fc_host_port_name(vport->shost) = vport->npiv_wwpn;
+ *(struct efct_vport **)fc_vport->dd_data = vport;
+
+ return 0;
+
+fail:
+ return -EIO;
+}
+
+static int
+efct_vport_delete(struct fc_vport *fc_vport)
+{
+ struct efct_vport *vport = *(struct efct_vport **)fc_vport->dd_data;
+ struct Scsi_Host *shost = vport ? vport->shost : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+ int rc;
+
+ rc = efct_scsi_del_vport(efct, shost);
+
+ if (rc)
+ pr_err("%s: vport delete failed\n", __func__);
+
+ return rc;
+}
+
+static int
+efct_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ return 0;
+}
+
+static struct fc_function_template efct_xport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_port_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ .vport_disable = efct_vport_disable,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+ .vport_create = efct_vport_create,
+ .vport_delete = efct_vport_delete,
+};
+
+static struct fc_function_template efct_vport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_vport_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+};
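
These two templates are what the driver hands to the FC transport class: efct_xport_functions for the physical port (only it carries vport_create/vport_delete, so only the physical host can spawn NPIV ports) and efct_vport_functions for the NPIV hosts themselves. The attach/release helpers declared in efct_xport.h below presumably wrap fc_attach_transport()/fc_release_transport(); this is a hedged sketch of that shape, not the actual bodies, which are elsewhere in the driver and not part of this hunk:

	#include <scsi/scsi_transport_fc.h>

	/* Sketch only: assumed shape of the helpers declared in efct_xport.h. */
	struct scsi_transport_template *efct_attach_fc_transport(void)
	{
		/* register the physical-port template with the FC transport class */
		return fc_attach_transport(&efct_xport_functions);
	}

	struct scsi_transport_template *efct_attach_vport_fc_transport(void)
	{
		/* NPIV hosts get the template without vport_create/vport_delete */
		return fc_attach_transport(&efct_vport_functions);
	}

	void efct_release_fc_transport(struct scsi_transport_template *tt)
	{
		/* balances fc_attach_transport() at driver unload */
		if (tt)
			fc_release_transport(tt);
	}
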
diff --git a/drivers/scsi/elx/efct/efct_xport.h b/drivers/scsi/elx/efct/efct_xport.h
new file mode 100644
index 000000000..89f3c20ec
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_XPORT_H__)
+#define __EFCT_XPORT_H__
+
+enum efct_xport_ctrl {
+ EFCT_XPORT_PORT_ONLINE = 1,
+ EFCT_XPORT_PORT_OFFLINE,
+ EFCT_XPORT_SHUTDOWN,
+ EFCT_XPORT_POST_NODE_EVENT,
+ EFCT_XPORT_WWNN_SET,
+ EFCT_XPORT_WWPN_SET,
+};
+
+enum efct_xport_status {
+ EFCT_XPORT_PORT_STATUS,
+ EFCT_XPORT_CONFIG_PORT_STATUS,
+ EFCT_XPORT_LINK_SPEED,
+ EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
+ EFCT_XPORT_LINK_STATISTICS,
+ EFCT_XPORT_LINK_STAT_RESET,
+ EFCT_XPORT_IS_QUIESCED
+};
+
+struct efct_xport_link_stats {
+ bool rec;
+ bool gec;
+ bool w02of;
+ bool w03of;
+ bool w04of;
+ bool w05of;
+ bool w06of;
+ bool w07of;
+ bool w08of;
+ bool w09of;
+ bool w10of;
+ bool w11of;
+ bool w12of;
+ bool w13of;
+ bool w14of;
+ bool w15of;
+ bool w16of;
+ bool w17of;
+ bool w18of;
+ bool w19of;
+ bool w20of;
+ bool w21of;
+ bool clrc;
+ bool clof1;
+ u32 link_failure_error_count;
+ u32 loss_of_sync_error_count;
+ u32 loss_of_signal_error_count;
+ u32 primitive_sequence_error_count;
+ u32 invalid_transmission_word_error_count;
+ u32 crc_error_count;
+ u32 primitive_sequence_event_timeout_count;
+ u32 elastic_buffer_overrun_error_count;
+ u32 arbitration_fc_al_timeout_count;
+ u32 advertised_receive_bufftor_to_buffer_credit;
+ u32 current_receive_buffer_to_buffer_credit;
+ u32 advertised_transmit_buffer_to_buffer_credit;
+ u32 current_transmit_buffer_to_buffer_credit;
+ u32 received_eofa_count;
+ u32 received_eofdti_count;
+ u32 received_eofni_count;
+ u32 received_soff_count;
+ u32 received_dropped_no_aer_count;
+ u32 received_dropped_no_available_rpi_resources_count;
+ u32 received_dropped_no_available_xri_resources_count;
+};
+
+struct efct_xport_host_stats {
+ bool cc;
+ u32 transmit_kbyte_count;
+ u32 receive_kbyte_count;
+ u32 transmit_frame_count;
+ u32 receive_frame_count;
+ u32 transmit_sequence_count;
+ u32 receive_sequence_count;
+ u32 total_exchanges_originator;
+ u32 total_exchanges_responder;
+ u32 receive_p_bsy_count;
+ u32 receive_f_bsy_count;
+ u32 dropped_frames_due_to_no_rq_buffer_count;
+ u32 empty_rq_timeout_count;
+ u32 dropped_frames_due_to_no_xri_count;
+ u32 empty_xri_pool_count;
+};
+
+struct efct_xport_host_statistics {
+ struct completion done;
+ struct efct_xport_link_stats link_stats;
+ struct efct_xport_host_stats host_stats;
+};
+
+union efct_xport_stats_u {
+ u32 value;
+ struct efct_xport_host_statistics stats;
+};
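
The two arms of this union match the two kinds of answers efct_xport_status() (declared at the bottom of this header) can produce: scalar queries such as EFCT_XPORT_LINK_SPEED fill .value, while EFCT_XPORT_LINK_STATISTICS fills .stats, whose embedded completion suggests the numbers are gathered asynchronously from the hardware. A minimal sketch of a scalar query, assuming a valid xport and that a zero return means success:

	/* Illustrative only: read the current link speed via the status API. */
	static u32 efct_query_link_speed(struct efct_xport *xport)
	{
		union efct_xport_stats_u speed = { .value = 0 };

		if (efct_xport_status(xport, EFCT_XPORT_LINK_SPEED, &speed))
			return 0;	/* treat errors as "speed unknown" */

		return speed.value;
	}
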
+
+struct efct_xport_fcp_stats {
+ u64 input_bytes;
+ u64 output_bytes;
+ u64 input_requests;
+ u64 output_requests;
+ u64 control_requests;
+};
+
+struct efct_xport {
+ struct efct *efct;
+ /* wwpn requested by user for primary nport */
+ u64 req_wwpn;
+ /* wwnn requested by user for primary nport */
+ u64 req_wwnn;
+
+ /* Nodes */
+ /* number of allocated nodes */
+ u32 nodes_count;
+ /* used to track how often IO pool is empty */
+ atomic_t io_alloc_failed_count;
+ /* array of pointers to nodes */
+ struct efc_node **nodes;
+
+ /* IO pool and counts */
+ /* pointer to IO pool */
+ struct efct_io_pool *io_pool;
+ /* lock for io_pending_list */
+ spinlock_t io_pending_lock;
+ /* list of IOs waiting for HW resources
+ * lock: xport->io_pending_lock
+ * link: efct_io_s->io_pending_link
+ */
+ struct list_head io_pending_list;
+ /* count of total IOs allocated */
+ atomic_t io_total_alloc;
+ /* count of total IOs freed */
+ atomic_t io_total_free;
+ /* count of total IOs that were pended */
+ atomic_t io_total_pending;
+ /* count of active IOs */
+ atomic_t io_active_count;
+ /* count of pending IOs */
+ atomic_t io_pending_count;
+ /* non-zero if efct_scsi_check_pending is executing */
+ atomic_t io_pending_recursing;
+
+ /* Port */
+ /* requested link state */
+ u32 configured_link_state;
+
+ /* Timer for Statistics */
+ struct timer_list stats_timer;
+ union efct_xport_stats_u fc_xport_stats;
+ struct efct_xport_fcp_stats fcp_stats;
+};
+
+struct efct_rport_data {
+ struct efc_node *node;
+};
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct);
+int
+efct_xport_attach(struct efct_xport *xport);
+int
+efct_xport_initialize(struct efct_xport *xport);
+void
+efct_xport_detach(struct efct_xport *xport);
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result);
+void
+efct_xport_free(struct efct_xport *xport);
+
+struct scsi_transport_template *efct_attach_fc_transport(void);
+struct scsi_transport_template *efct_attach_vport_fc_transport(void);
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template);
+
+#endif /* __EFCT_XPORT_H__ */
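
Taken together, the declarations above imply a straightforward lifecycle for the transport object: allocate, attach, initialize, then detach and free on teardown, with efct_xport_control() driving link state in between. A hedged sketch of that ordering, using the hypothetical wrapper name efct_xport_bringup(); the real probe/remove paths elsewhere in the driver are authoritative:

	/* Sketch only: one plausible ordering of the xport entry points. */
	static int efct_xport_bringup(struct efct *efct)
	{
		struct efct_xport *xport;
		int rc;

		xport = efct_xport_alloc(efct);
		if (!xport)
			return -ENOMEM;

		rc = efct_xport_attach(xport);
		if (rc)
			goto out_free;

		rc = efct_xport_initialize(xport);
		if (rc)
			goto out_detach;

		/* efct_xport_control() is variadic; PORT_ONLINE is assumed to
		 * need no extra arguments in this sketch.
		 */
		rc = efct_xport_control(xport, EFCT_XPORT_PORT_ONLINE);
		if (rc)
			goto out_detach;

		return 0;

	out_detach:
		efct_xport_detach(xport);
	out_free:
		efct_xport_free(xport);
		return rc;
	}
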