Diffstat (limited to 'drivers/scsi/csiostor')
22 files changed, 19640 insertions, 0 deletions
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig new file mode 100644 index 000000000..c6c03f9e3 --- /dev/null +++ b/drivers/scsi/csiostor/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_CHELSIO_FCOE + tristate "Chelsio Communications FCoE support" + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + select FW_LOADER + help + This driver supports FCoE Offload functionality over + Chelsio T4-based 10Gb Converged Network Adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://www.chelsio.com/support.html>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module choose M here; the module + will be called csiostor. diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile new file mode 100644 index 000000000..d047e22ea --- /dev/null +++ b/drivers/scsi/csiostor/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +## Chelsio FCoE driver +# +## + +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 + +obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o + +csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ + csio_hw.o csio_hw_t5.o csio_isr.o \ + csio_mb.o csio_rnode.o csio_wr.o diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c new file mode 100644 index 000000000..200e50089 --- /dev/null +++ b/drivers/scsi/csiostor/csio_attr.c @@ -0,0 +1,805 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/mm.h> +#include <linux/jiffies.h> +#include <scsi/fc/fc_fs.h> + +#include "csio_init.h" + +static void +csio_vport_set_state(struct csio_lnode *ln); + +/* + * csio_reg_rnode - Register a remote port with FC transport. + * @rn: Rnode representing remote port. 
+ * + * Call fc_remote_port_add() to register this remote port with FC transport. + * If remote port is Initiator OR Target OR both, change the role appropriately. + * + */ +void +csio_reg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct fc_rport_identifiers ids; + struct fc_rport *rport; + struct csio_service_parms *sp; + + ids.node_name = wwn_to_u64(csio_rn_wwnn(rn)); + ids.port_name = wwn_to_u64(csio_rn_wwpn(rn)); + ids.port_id = rn->nport_id; + ids.roles = FC_RPORT_ROLE_UNKNOWN; + + if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) { + rport = rn->rport; + CSIO_ASSERT(rport != NULL); + goto update_role; + } + + rn->rport = fc_remote_port_add(shost, 0, &ids); + if (!rn->rport) { + csio_ln_err(ln, "Failed to register rport = 0x%x.\n", + rn->nport_id); + return; + } + + ln->num_reg_rnodes++; + rport = rn->rport; + spin_lock_irq(shost->host_lock); + *((struct csio_rnode **)rport->dd_data) = rn; + spin_unlock_irq(shost->host_lock); + + sp = &rn->rn_sparm; + rport->maxframe_size = ntohs(sp->csp.sp_bb_data); + if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID) + rport->supported_classes = FC_COS_CLASS3; + else + rport->supported_classes = FC_COS_UNSPECIFIED; +update_role: + if (rn->role & CSIO_RNFR_INITIATOR) + ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (rn->role & CSIO_RNFR_TARGET) + ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + + if (ids.roles != FC_RPORT_ROLE_UNKNOWN) + fc_remote_port_rolechg(rport, ids.roles); + + rn->scsi_id = rport->scsi_target_id; + + csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n", + rn->nport_id, ids.roles); +} + +/* + * csio_unreg_rnode - Unregister a remote port with FC transport. + * @rn: Rnode representing remote port. + * + * Call fc_remote_port_delete() to unregister this remote port with FC + * transport. + * + */ +void +csio_unreg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct fc_rport *rport = rn->rport; + + rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET); + fc_remote_port_delete(rport); + ln->num_reg_rnodes--; + + csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id); +} + +/* + * csio_lnode_async_event - Async events from local port. + * @ln: lnode representing local port. + * + * Async events from local node that FC transport/SCSI ML + * should be made aware of (Eg: RSCN). + */ +void +csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt) +{ + switch (fc_evt) { + case CSIO_LN_FC_RSCN: + /* Get payload of rscn from ln */ + /* For each RSCN entry */ + /* + * fc_host_post_event(shost, + * fc_get_event_number(), + * FCH_EVT_RSCN, + * rscn_entry); + */ + break; + case CSIO_LN_FC_LINKUP: + /* send fc_host_post_event */ + /* set vport state */ + if (csio_is_npiv_ln(ln)) + csio_vport_set_state(ln); + + break; + case CSIO_LN_FC_LINKDOWN: + /* send fc_host_post_event */ + /* set vport state */ + if (csio_is_npiv_ln(ln)) + csio_vport_set_state(ln); + + break; + case CSIO_LN_FC_ATTRIB_UPDATE: + csio_fchost_attr_init(ln); + break; + default: + break; + } +} + +/* + * csio_fchost_attr_init - Initialize FC transport attributes + * @ln: Lnode. 
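+ * + * Exports the local port's WWNN/WWPN, supported class of service, the + * adapter's NPIV vport limit, the supported link speeds, the maximum + * frame size and the supported/active FC-4 bitmaps to the FC transport.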
+ * + */ +void +csio_fchost_attr_init(struct csio_lnode *ln) +{ + struct Scsi_Host *shost = csio_ln_to_shost(ln); + + fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln)); + fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln)); + + fc_host_supported_classes(shost) = FC_COS_CLASS3; + fc_host_max_npiv_vports(shost) = + (csio_lnode_to_hw(ln))->fres_info.max_vnps; + fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT | + FC_PORTSPEED_1GBIT; + + fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data); + memset(fc_host_supported_fc4s(shost), 0, + sizeof(fc_host_supported_fc4s(shost))); + fc_host_supported_fc4s(shost)[7] = 1; + + memset(fc_host_active_fc4s(shost), 0, + sizeof(fc_host_active_fc4s(shost))); + fc_host_active_fc4s(shost)[7] = 1; +} + +/* + * csio_get_host_port_id - The sysfs entry for nport_id is + * populated/cached from this function + */ +static void +csio_get_host_port_id(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + fc_host_port_id(shost) = ln->nport_id; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_port_type - Return FC local port type. + * @shost: scsi host. + * + */ +static void +csio_get_host_port_type(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + if (csio_is_npiv_ln(ln)) + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + else + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_port_state - Return FC local port state. + * @shost: scsi host. + * + */ +static void +csio_get_host_port_state(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + char state[16]; + + spin_lock_irq(&hw->lock); + + csio_lnode_state_to_str(ln, state); + if (!strcmp(state, "READY")) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else if (!strcmp(state, "OFFLINE")) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_speed - Return link speed to FC transport. + * @shost: scsi host. + * + */ +static void +csio_get_host_speed(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + switch (hw->pport[ln->portid].link_speed) { + case FW_PORT_CAP32_SPEED_1G: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case FW_PORT_CAP32_SPEED_10G: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case FW_PORT_CAP32_SPEED_25G: + fc_host_speed(shost) = FC_PORTSPEED_25GBIT; + break; + case FW_PORT_CAP32_SPEED_40G: + fc_host_speed(shost) = FC_PORTSPEED_40GBIT; + break; + case FW_PORT_CAP32_SPEED_50G: + fc_host_speed(shost) = FC_PORTSPEED_50GBIT; + break; + case FW_PORT_CAP32_SPEED_100G: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + default: + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_host_fabric_name - Return fabric name + * @shost: scsi host. 
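+ * + * The fabric name is the WWNN of the rnode created for the fabric + * login (the well-known address FC_FID_FLOGI, 0xFFFFFE); zero is + * reported when no such rnode exists.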
+ * + */ +static void +csio_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_rnode *rn = NULL; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI); + if (rn) + fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn)); + else + fc_host_fabric_name(shost) = 0; + spin_unlock_irq(&hw->lock); +} + +/* + * csio_get_stats - Return FC transport statistics. + * @shost: scsi host. + * + */ +static struct fc_host_statistics * +csio_get_stats(struct Scsi_Host *shost) +{ + struct csio_lnode *ln = shost_priv(shost); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct fc_host_statistics *fhs = &ln->fch_stats; + struct fw_fcoe_port_stats fcoe_port_stats; + uint64_t seconds; + + memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats)); + csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats); + + fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) + + be64_to_cpu(fcoe_port_stats.tx_mcast_frames) + + be64_to_cpu(fcoe_port_stats.tx_ucast_frames) + + be64_to_cpu(fcoe_port_stats.tx_offload_frames)); + fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) + + be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) / + CSIO_WORD_TO_BYTE; + fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) + + be64_to_cpu(fcoe_port_stats.rx_mcast_frames) + + be64_to_cpu(fcoe_port_stats.rx_ucast_frames)); + fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) + + be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) + + be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) / + CSIO_WORD_TO_BYTE; + fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames); + fhs->fcp_input_requests += ln->stats.n_input_requests; + fhs->fcp_output_requests += ln->stats.n_output_requests; + fhs->fcp_control_requests += ln->stats.n_control_requests; + fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20; + fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20; + fhs->link_failure_count = ln->stats.n_link_down; + /* Reset stats for the device */ + seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start; + do_div(seconds, 1000); + fhs->seconds_since_last_reset = seconds; + + return fhs; +} + +/* + * csio_set_rport_loss_tmo - Set the rport dev loss timeout + * @rport: fc rport. + * @timeout: new value for dev loss tmo. + * + * If timeout is non-zero set the dev_loss_tmo to timeout, else set + * dev_loss_tmo to one. 
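+ * + * This is the template's .set_rport_dev_loss_tmo hook, invoked when + * dev_loss_tmo is changed through sysfs; the value is in seconds and a + * zero request is clamped to the one-second minimum.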
+ */ +static void +csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +static void +csio_vport_set_state(struct csio_lnode *ln) +{ + struct fc_vport *fc_vport = ln->fc_vport; + struct csio_lnode *pln = ln->pln; + char state[16]; + + /* Set fc vport state based on physical lnode */ + csio_lnode_state_to_str(pln, state); + if (strcmp(state, "READY")) { + fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); + return; + } + + if (!(pln->flags & CSIO_LNF_NPIVSUPP)) { + fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP); + return; + } + + /* Set fc vport state based on virtual lnode */ + csio_lnode_state_to_str(ln, state); + if (strcmp(state, "READY")) { + fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); + return; + } + fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); +} + +static int +csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln) +{ + struct csio_lnode *pln; + struct csio_mb *mbp; + struct fw_fcoe_vnp_cmd *rsp; + int ret = 0; + int retry = 0; + + /* Issue VNP cmd to alloc vport */ + /* Allocate Mbox request */ + spin_lock_irq(&hw->lock); + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + ret = -ENOMEM; + goto out; + } + + pln = ln->pln; + ln->fcf_flowid = pln->fcf_flowid; + ln->portid = pln->portid; + + csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + pln->fcf_flowid, pln->vnp_flowid, 0, + csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL); + + for (retry = 0; retry < 3; retry++) { + /* FW is expected to complete vnp cmd in immediate mode + * without much delay. + * Otherwise, there will be an increase in IO latency since HW + * lock is held till completion of vnp mbox cmd. + */ + ret = csio_mb_issue(hw, mbp); + if (ret != -EBUSY) + break; + + /* Retry if mbox returns busy */ + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + if (ret) { + csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); + goto out_free; + } + + /* Process Mbox response of VNP command */ + rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n", + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); + ret = -EINVAL; + goto out_free; + } + + ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET( + ntohl(rsp->gen_wwn_to_vnpi)); + memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); + + csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid); + csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n", + ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1], + ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3], + ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5], + ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]); + csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n", + ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1], + ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3], + ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5], + ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]); + +out_free: + mempool_free(mbp, hw->mb_mempool); +out: + spin_unlock_irq(&hw->lock); + return ret; +} + +static int +csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) +{ + struct csio_mb *mbp; + struct fw_fcoe_vnp_cmd *rsp; + int ret = 0; + int retry = 0; + + /* Issue VNP cmd to free vport */ + /* Allocate Mbox request */ + + spin_lock_irq(&hw->lock); + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + ret = -ENOMEM; + goto out; + } + + 
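/* Build the VNP FREE request using the FCF/VNP flow ids saved at alloc time */ + 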
csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + ln->fcf_flowid, ln->vnp_flowid, + NULL); + + for (retry = 0; retry < 3; retry++) { + ret = csio_mb_issue(hw, mbp); + if (ret != -EBUSY) + break; + + /* Retry if mbox returns busy */ + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + if (ret) { + csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n"); + goto out_free; + } + + /* Process Mbox response of VNP command */ + rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) { + csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n", + FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16))); + ret = -EINVAL; + } + +out_free: + mempool_free(mbp, hw->mb_mempool); +out: + spin_unlock_irq(&hw->lock); + return ret; +} + +static int +csio_vport_create(struct fc_vport *fc_vport, bool disable) +{ + struct Scsi_Host *shost = fc_vport->shost; + struct csio_lnode *pln = shost_priv(shost); + struct csio_lnode *ln = NULL; + struct csio_hw *hw = csio_lnode_to_hw(pln); + uint8_t wwn[8]; + int ret = -1; + + ln = csio_shost_init(hw, &fc_vport->dev, false, pln); + if (!ln) + goto error; + + if (fc_vport->node_name != 0) { + u64_to_wwn(fc_vport->node_name, wwn); + + if (!CSIO_VALID_WWN(wwn)) { + csio_ln_err(ln, + "vport create failed. Invalid wwnn\n"); + goto error; + } + memcpy(csio_ln_wwnn(ln), wwn, 8); + } + + if (fc_vport->port_name != 0) { + u64_to_wwn(fc_vport->port_name, wwn); + + if (!CSIO_VALID_WWN(wwn)) { + csio_ln_err(ln, + "vport create failed. Invalid wwpn\n"); + goto error; + } + + if (csio_lnode_lookup_by_wwpn(hw, wwn)) { + csio_ln_err(ln, + "vport create failed. wwpn already exists\n"); + goto error; + } + memcpy(csio_ln_wwpn(ln), wwn, 8); + } + + fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + ln->fc_vport = fc_vport; + + if (csio_fcoe_alloc_vnp(hw, ln)) + goto error; + + *(struct csio_lnode **)fc_vport->dd_data = ln; + if (!fc_vport->node_name) + fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); + if (!fc_vport->port_name) + fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln)); + csio_fchost_attr_init(ln); + return 0; +error: + if (ln) + csio_shost_exit(ln); + + return ret; +} + +static int +csio_vport_delete(struct fc_vport *fc_vport) +{ + struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + int rmv; + + spin_lock_irq(&hw->lock); + rmv = csio_is_hw_removing(hw); + spin_unlock_irq(&hw->lock); + + if (rmv) { + csio_shost_exit(ln); + return 0; + } + + /* Quiesce ios and send remove event to lnode */ + scsi_block_requests(shost); + spin_lock_irq(&hw->lock); + csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); + csio_lnode_close(ln); + spin_unlock_irq(&hw->lock); + scsi_unblock_requests(shost); + + /* Free vnp */ + if (fc_vport->vport_state != FC_VPORT_DISABLED) + csio_fcoe_free_vnp(hw, ln); + + csio_shost_exit(ln); + return 0; +} + +static int +csio_vport_disable(struct fc_vport *fc_vport, bool disable) +{ + struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data; + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + /* disable vport */ + if (disable) { + /* Quiesce ios and send stop event to lnode */ + scsi_block_requests(shost); + spin_lock_irq(&hw->lock); + csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln); + csio_lnode_stop(ln); + spin_unlock_irq(&hw->lock); + scsi_unblock_requests(shost); + + /* Free vnp */ + csio_fcoe_free_vnp(hw, 
ln); + fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); + csio_ln_err(ln, "vport disabled\n"); + return 0; + } else { + /* enable vport */ + fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); + if (csio_fcoe_alloc_vnp(hw, ln)) { + csio_ln_err(ln, "vport enable failed.\n"); + return -1; + } + csio_ln_err(ln, "vport enabled\n"); + return 0; + } +} + +static void +csio_dev_loss_tmo_callbk(struct fc_rport *rport) +{ + struct csio_rnode *rn; + struct csio_hw *hw; + struct csio_lnode *ln; + + rn = *((struct csio_rnode **)rport->dd_data); + ln = csio_rnode_to_lnode(rn); + hw = csio_lnode_to_hw(ln); + + spin_lock_irq(&hw->lock); + + /* return if driver is being removed or same rnode comes back online */ + if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn)) + goto out; + + csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n", + rn, rn->nport_id, csio_rn_flowid(rn)); + + CSIO_INC_STATS(ln, n_dev_loss_tmo); + + /* + * enqueue devloss event to event worker thread to serialize all + * rnode events. + */ + if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) { + CSIO_INC_STATS(hw, n_evt_drop); + goto out; + } + + if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irq(&hw->lock); + schedule_work(&hw->evtq_work); + return; + } + +out: + spin_unlock_irq(&hw->lock); +} + +/* FC transport functions template - Physical port */ +struct fc_function_template csio_fc_transport_funcs = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_maxframe_size = 1, + + .get_host_port_id = csio_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = csio_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = csio_get_host_port_state, + .show_host_port_state = 1, + + .show_host_active_fc4s = 1, + .get_host_speed = csio_get_host_speed, + .show_host_speed = 1, + .get_host_fabric_name = csio_get_host_fabric_name, + .show_host_fabric_name = 1, + + .get_fc_host_stats = csio_get_stats, + + .dd_fcrport_size = sizeof(struct csio_rnode *), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .show_starget_port_id = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + + .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, + .dd_fcvport_size = sizeof(struct csio_lnode *), + + .vport_create = csio_vport_create, + .vport_disable = csio_vport_disable, + .vport_delete = csio_vport_delete, +}; + +/* FC transport functions template - Virtual port */ +struct fc_function_template csio_fc_transport_vport_funcs = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_maxframe_size = 1, + + .get_host_port_id = csio_get_host_port_id, + .show_host_port_id = 1, + + .get_host_port_type = csio_get_host_port_type, + .show_host_port_type = 1, + + .get_host_port_state = csio_get_host_port_state, + .show_host_port_state = 1, + .show_host_active_fc4s = 1, + + .get_host_speed = csio_get_host_speed, + .show_host_speed = 1, + + .get_host_fabric_name = csio_get_host_fabric_name, + .show_host_fabric_name = 1, + + .get_fc_host_stats = csio_get_stats, + + .dd_fcrport_size = sizeof(struct csio_rnode *), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + 
.show_starget_port_id = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + + .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk, + +}; diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h new file mode 100644 index 000000000..c38017b4a --- /dev/null +++ b/drivers/scsi/csiostor/csio_defs.h @@ -0,0 +1,121 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_DEFS_H__ +#define __CSIO_DEFS_H__ + +#include <linux/kernel.h> +#include <linux/stddef.h> +#include <linux/timer.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/pci.h> +#include <linux/jiffies.h> + +#define CSIO_INVALID_IDX 0xFFFFFFFF +#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++) +#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--) +#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? 
true : false) +#define CSIO_DID_MASK 0xFFFFFF +#define CSIO_WORD_TO_BYTE 4 + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline int +csio_list_deleted(struct list_head *list) +{ + return ((list->next == list) && (list->prev == list)); +} + +#define csio_list_next(elem) (((struct list_head *)(elem))->next) +#define csio_list_prev(elem) (((struct list_head *)(elem))->prev) + +/* State machine */ +typedef void (*csio_sm_state_t)(void *, uint32_t); + +struct csio_sm { + struct list_head sm_list; + csio_sm_state_t sm_state; +}; + +static inline void +csio_set_state(void *smp, void *state) +{ + ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; +} + +static inline void +csio_init_state(struct csio_sm *smp, void *state) +{ + csio_set_state(smp, state); +} + +static inline void +csio_post_event(void *smp, uint32_t evt) +{ + ((struct csio_sm *)smp)->sm_state(smp, evt); +} + +static inline csio_sm_state_t +csio_get_state(void *smp) +{ + return ((struct csio_sm *)smp)->sm_state; +} + +static inline bool +csio_match_state(void *smp, void *state) +{ + return (csio_get_state(smp) == (csio_sm_state_t)state); +} + +#define CSIO_ASSERT(cond) BUG_ON(!(cond)) + +#ifdef __CSIO_DEBUG__ +#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c)) +#else +#define CSIO_DB_ASSERT(__c) +#endif + +#endif /* ifndef __CSIO_DEFS_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c new file mode 100644 index 000000000..e43c5413c --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw.c @@ -0,0 +1,4434 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/firmware.h> +#include <linux/stddef.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/compiler.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/log2.h> + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +int csio_dbg_level = 0xFEFF; +unsigned int csio_port_mask = 0xf; + +/* Default FW event queue entries. */ +static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE; + +/* Default MSI param level */ +int csio_msi = 2; + +/* FCoE function instances */ +static int dev_num; + +/* FCoE Adapter types & its description */ +static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { + {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, + {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, + {"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"}, + {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, + {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, + {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, + {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, + {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, + {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, + {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, + {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, + {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, + {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, + {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, + {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, + {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, + {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, + {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}, + {"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"}, + {"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"} +}; + +static void csio_mgmtm_cleanup(struct csio_mgmtm *); +static void csio_hw_mbm_cleanup(struct csio_hw *); + +/* State machine forward declarations */ +static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev); +static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev); + +static void csio_hw_initialize(struct csio_hw *hw); +static void csio_evtq_stop(struct csio_hw *hw); +static void csio_evtq_start(struct csio_hw *hw); + +int csio_is_hw_ready(struct csio_hw *hw) +{ + return csio_match_state(hw, csio_hws_ready); +} + +int csio_is_hw_removing(struct csio_hw *hw) +{ + return csio_match_state(hw, csio_hws_removing); +} + + +/* + * csio_hw_wait_op_done_val - wait until an operation is completed + * @hw: the HW module + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. 
If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int +csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, + int polarity, int attempts, int delay, uint32_t *valp) +{ + uint32_t val; + while (1) { + val = csio_rd_reg32(hw, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/* + * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @hw: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void +csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, + unsigned int mask, unsigned int val) +{ + csio_wr_reg32(hw, addr, TP_PIO_ADDR_A); + val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask; + csio_wr_reg32(hw, val, TP_PIO_DATA_A); +} + +void +csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, + uint32_t value) +{ + uint32_t val = csio_rd_reg32(hw, reg) & ~mask; + + csio_wr_reg32(hw, val | value, reg); + /* Flush */ + csio_rd_reg32(hw, reg); + +} + +static int +csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) +{ + return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, + addr, len, buf, 0); +} + +/* + * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. + */ +#define EEPROM_MAX_RD_POLL 40 +#define EEPROM_MAX_WR_POLL 6 +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 +#define VPD_LEN 1024 +#define VPD_INFO_FLD_HDR_SIZE 3 + +/* + * csio_hw_seeprom_read - read a serial EEPROM location + * @hw: hw to read + * @addr: EEPROM virtual address + * @data: where to store the read data + * + * Read a 32-bit word from a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. + */ +static int +csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data) +{ + uint16_t val = 0; + int attempts = EEPROM_MAX_RD_POLL; + uint32_t base = hw->params.pci.vpd_cap_addr; + + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr); + + do { + udelay(10); + pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val); + } while (!(val & PCI_VPD_ADDR_F) && --attempts); + + if (!(val & PCI_VPD_ADDR_F)) { + csio_err(hw, "reading EEPROM address 0x%x failed\n", addr); + return -EINVAL; + } + + pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data); + *data = le32_to_cpu(*(__le32 *)data); + + return 0; +} + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t4_vpd_hdr { + u8 id_tag; + u8 id_len[2]; + u8 id_data[ID_LEN]; + u8 vpdr_tag; + u8 vpdr_len[2]; +}; + +/* + * csio_hw_get_vpd_keyword_val - Locates an information field keyword in + * the VPD + * @v: Pointer to buffered vpd data structure + * @kw: The keyword to search for + * + * Returns the value of the information field keyword or + * -EINVAL otherwise. 
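+ * + * Each VPD-R information field is a 3-byte header (a two-byte keyword + * followed by a one-byte data length) and then the data itself, which is + * why the scan advances by VPD_INFO_FLD_HDR_SIZE + buf[i + 2]; the value + * returned on success is the offset of the field's data in the buffer.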
+ */ +static int +csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw) +{ + int32_t i; + int32_t offset , len; + const uint8_t *buf = &v->id_tag; + const uint8_t *vpdr_len = &v->vpdr_tag; + offset = sizeof(struct t4_vpd_hdr); + len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8); + + if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) + return -EINVAL; + + for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) { + if (memcmp(buf + i , kw, 2) == 0) { + i += VPD_INFO_FLD_HDR_SIZE; + return i; + } + + i += VPD_INFO_FLD_HDR_SIZE + buf[i+2]; + } + + return -EINVAL; +} + +static int +csio_pci_capability(struct pci_dev *pdev, int cap, int *pos) +{ + *pos = pci_find_capability(pdev, cap); + if (*pos) + return 0; + + return -1; +} + +/* + * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM + * @hw: HW module + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. + */ +static int +csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p) +{ + int i, ret, ec, sn, addr; + uint8_t *vpd, csum; + const struct t4_vpd_hdr *v; + /* To get around compilation warning from strstrip */ + char __always_unused *s; + + if (csio_is_valid_vpd(hw)) + return 0; + + ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD, + &hw->params.pci.vpd_cap_addr); + if (ret) + return -EINVAL; + + vpd = kzalloc(VPD_LEN, GFP_ATOMIC); + if (vpd == NULL) + return -ENOMEM; + + /* + * Card information normally starts at VPD_BASE but early cards had + * it at 0. + */ + ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd)); + addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; + + for (i = 0; i < VPD_LEN; i += 4) { + ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i)); + if (ret) { + kfree(vpd); + return ret; + } + } + + /* Reset the VPD flag! */ + hw->flags &= (~CSIO_HWF_VPD_VALID); + + v = (const struct t4_vpd_hdr *)vpd; + +#define FIND_VPD_KW(var, name) do { \ + var = csio_hw_get_vpd_keyword_val(v, name); \ + if (var < 0) { \ + csio_err(hw, "missing VPD keyword " name "\n"); \ + kfree(vpd); \ + return -EINVAL; \ + } \ +} while (0) + + FIND_VPD_KW(i, "RV"); + for (csum = 0; i >= 0; i--) + csum += vpd[i]; + + if (csum) { + csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum); + kfree(vpd); + return -EINVAL; + } + FIND_VPD_KW(ec, "EC"); + FIND_VPD_KW(sn, "SN"); +#undef FIND_VPD_KW + + memcpy(p->id, v->id_data, ID_LEN); + s = strstrip(p->id); + memcpy(p->ec, vpd + ec, EC_LEN); + s = strstrip(p->ec); + i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; + memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); + s = strstrip(p->sn); + + csio_valid_vpd_copied(hw); + + kfree(vpd); + return 0; +} + +/* + * csio_hw_sf1_read - read data from the serial flash + * @hw: the HW module + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. 
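+ * + * The access is started by programming SF_LOCK/SF_CONT/BYTECNT into the + * SF_OP register and polling for SF_BUSY to clear; the result is then + * read from SF_DATA.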
+ */ +static int +csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont, + int32_t lock, uint32_t *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + + csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) | + BYTECNT_V(byte_cnt - 1), SF_OP_A); + ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, + 10, NULL); + if (!ret) + *valp = csio_rd_reg32(hw, SF_DATA_A); + return ret; +} + +/* + * csio_hw_sf1_write - write data to the serial flash + * @hw: the HW module + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int +csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont, + int32_t lock, uint32_t val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + + csio_wr_reg32(hw, val, SF_DATA_A); + csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | + OP_V(1) | SF_LOCK_V(lock), SF_OP_A); + + return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, + 10, NULL); +} + +/* + * csio_hw_flash_wait_op - wait for a flash operation to complete + * @hw: the HW module + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. + */ +static int +csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay) +{ + int ret; + uint32_t status; + + while (1) { + ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS); + if (ret != 0) + return ret; + + ret = csio_hw_sf1_read(hw, 1, 0, 1, &status); + if (ret != 0) + return ret; + + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/* + * csio_hw_read_flash - read words from serial flash + * @hw: the HW module + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. 
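+ * (The byte-oriented form is used when the caller wants the flash + * contents as a byte stream, e.g. when a freshly written page is + * verified against the caller's buffer.)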
+ */ +static int +csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords, + uint32_t *data, int32_t byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + ret = csio_hw_sf1_write(hw, 4, 1, 0, addr); + if (ret != 0) + return ret; + + ret = csio_hw_sf1_read(hw, 1, 1, 0, data); + if (ret != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = (__force __u32) htonl(*data); + } + return 0; +} + +/* + * csio_hw_write_flash - write up to a page of data to the serial flash + * @hw: the hw + * @addr: the start address to write + * @n: length of data to write in bytes + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. All the data must be written to the same page. + */ +static int +csio_hw_write_flash(struct csio_hw *hw, uint32_t addr, + uint32_t n, const uint8_t *data) +{ + int ret = -EINVAL; + uint32_t buf[64]; + uint32_t i, c, left, val, offset = addr & 0xff; + + if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); + if (ret != 0) + goto unlock; + + ret = csio_hw_sf1_write(hw, 4, 1, 1, val); + if (ret != 0) + goto unlock; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = csio_hw_sf1_write(hw, c, c != left, 1, val); + if (ret) + goto unlock; + } + ret = csio_hw_flash_wait_op(hw, 8, 1); + if (ret) + goto unlock; + + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + + /* Read the page to verify the write succeeded */ + ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (uint8_t *)buf + offset, n)) { + csio_err(hw, + "failed to correctly write the flash page at %#x\n", + addr); + return -EINVAL; + } + + return 0; + +unlock: + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + return ret; +} + +/* + * csio_hw_flash_erase_sectors - erase a range of flash sectors + * @hw: the HW module + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int +csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end) +{ + int ret = 0; + + while (start <= end) { + + ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE); + if (ret != 0) + goto out; + + ret = csio_hw_sf1_write(hw, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8)); + if (ret != 0) + goto out; + + ret = csio_hw_flash_wait_op(hw, 14, 500); + if (ret != 0) + goto out; + + start++; + } +out: + if (ret) + csio_err(hw, "erase of flash sector %d failed, error %d\n", + start, ret); + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + return 0; +} + +static void +csio_hw_print_fw_version(struct csio_hw *hw, char *str) +{ + csio_info(hw, "%s: %u.%u.%u.%u\n", str, + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); +} + +/* + * csio_hw_get_fw_version - read the firmware version + * @hw: HW module + * @vers: where to place the version + * + * Reads the FW version from flash. 
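+ * + * The version is the fw_ver word of struct fw_hdr, which sits at the + * start of the firmware image in flash (FLASH_FW_START).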
+ */ +static int +csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers) +{ + return csio_hw_read_flash(hw, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/* + * csio_hw_get_tp_version - read the TP microcode version + * @hw: HW module + * @vers: where to place the version + * + * Reads the TP microcode version from flash. + */ +static int +csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers) +{ + return csio_hw_read_flash(hw, FLASH_FW_START + + offsetof(struct fw_hdr, tp_microcode_ver), 1, + vers, 0); +} + +/* + * csio_hw_fw_dload - download firmware. + * @hw: HW module + * @fw_data: firmware image to write. + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. + */ +static int +csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size) +{ + uint32_t csum; + int32_t addr; + int ret; + uint32_t i; + uint8_t first_page[SF_PAGE_SIZE]; + const __be32 *p = (const __be32 *)fw_data; + struct fw_hdr *hdr = (struct fw_hdr *)fw_data; + uint32_t sf_sec_size; + + if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) { + csio_err(hw, "Serial Flash data invalid\n"); + return -EINVAL; + } + + if (!size) { + csio_err(hw, "FW image has no data\n"); + return -EINVAL; + } + + if (size & 511) { + csio_err(hw, "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + + if (ntohs(hdr->len512) * 512 != size) { + csio_err(hw, "FW image size differs from size in FW header\n"); + return -EINVAL; + } + + if (size > FLASH_FW_MAX_SIZE) { + csio_err(hw, "FW image too large, max is %u bytes\n", + FLASH_FW_MAX_SIZE); + return -EINVAL; + } + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + csio_err(hw, "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + sf_sec_size = hw->params.sf_size / hw->params.sf_nsec; + i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ + + csio_dbg(hw, "Erasing sectors... start:%d end:%d\n", + FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1); + + ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC, + FLASH_FW_START_SEC + i - 1); + if (ret) { + csio_err(hw, "Flash Erase failed\n"); + goto out; + } + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. + */ + memcpy(first_page, fw_data, SF_PAGE_SIZE); + ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); + ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page); + if (ret) + goto out; + + csio_dbg(hw, "Writing Flash .. start:%d end:%d\n", + FW_IMG_START, FW_IMG_START + size); + + addr = FLASH_FW_START; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + fw_data += SF_PAGE_SIZE; + ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data); + if (ret) + goto out; + } + + ret = csio_hw_write_flash(hw, + FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), + (const uint8_t *)&hdr->fw_ver); + +out: + if (ret) + csio_err(hw, "firmware download failed, error %d\n", ret); + return ret; +} + +static int +csio_hw_get_flash_params(struct csio_hw *hw) +{ + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. 
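+ * + * The flash ID below comes from the Read JEDEC ID (SF_RD_ID) command: + * the low byte is the manufacturer and byte 2 encodes the density.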
+ */ + static struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; + } supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + + u32 part, manufacturer; + u32 density, size = 0; + u32 flashid = 0; + int ret; + + ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid); + csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ + if (ret) + return ret; + + /* Check to see if it's one of our non-standard supported Flash parts. + */ + for (part = 0; part < ARRAY_SIZE(supported_flash); part++) + if (supported_flash[part].vendor_and_model_id == flashid) { + hw->params.sf_size = supported_flash[part].size_mb; + hw->params.sf_nsec = + hw->params.sf_size / SF_SEC_SIZE; + goto found; + } + + /* Decode Flash part size. The code below looks repetitive with + * common encodings, but that's not guaranteed in the JEDEC + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the + * Manufacturer ID is in the returned result. After that each + * Manufacturer ~could~ encode things completely differently. + * Note, all Flash parts must have 64KB sectors. + */ + manufacturer = flashid & 0xff; + switch (manufacturer) { + case 0x20: { /* Micron/Numonix */ + /* This Density -> Size decoding table is taken from Micron + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x14 ... 0x19: /* 1MB - 32MB */ + size = 1 << density; + break; + case 0x20: /* 64MB */ + size = 1 << 26; + break; + case 0x21: /* 128MB */ + size = 1 << 27; + break; + case 0x22: /* 256MB */ + size = 1 << 28; + } + break; + } + case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ + /* This Density -> Size decoding table is taken from ISSI + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x16: /* 32 MB */ + size = 1 << 25; + break; + case 0x17: /* 64MB */ + size = 1 << 26; + } + break; + } + case 0xc2: /* Macronix */ + case 0xef: /* Winbond */ { + /* This Density -> Size decoding table is taken from + * Macronix and Winbond Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: /* 8MB */ + case 0x18: /* 16MB */ + size = 1 << density; + } + } + } + + /* If we didn't recognize the FLASH part, that's no real issue: the + * Hardware/Software contract says that Hardware will _*ALWAYS*_ + * use a FLASH part which is at least 4MB in size and has 64KB + * sectors. The unrecognized FLASH part is likely to be much larger + * than 4MB, but that's all we really need. 
+ */ + if (size == 0) { + csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n", + flashid); + size = 1 << 22; + } + + /* Store decoded Flash size */ + hw->params.sf_size = size; + hw->params.sf_nsec = size / SF_SEC_SIZE; + +found: + if (hw->params.sf_size < FLASH_MIN_SIZE) + csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n", + flashid, hw->params.sf_size, FLASH_MIN_SIZE); + return 0; +} + +/*****************************************************************************/ +/* HW State machine assists */ +/*****************************************************************************/ + +static int +csio_hw_dev_ready(struct csio_hw *hw) +{ + uint32_t reg; + int cnt = 6; + int src_pf; + + while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) && + (--cnt != 0)) + mdelay(100); + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + src_pf = SOURCEPF_G(reg); + else + src_pf = T6_SOURCEPF_G(reg); + + if ((cnt == 0) && (((int32_t)(src_pf) < 0) || + (src_pf >= CSIO_MAX_PFN))) { + csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt); + return -EIO; + } + + hw->pfn = src_pf; + + return 0; +} + +/* + * csio_do_hello - Perform the HELLO FW Mailbox command and process response. + * @hw: HW module + * @state: Device state + * + * FW_HELLO_CMD has to be polled for completion. + */ +static int +csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state) +{ + struct csio_mb *mbp; + int rv = 0; + enum fw_retval retval; + uint8_t mpfn; + char state_str[16]; + int retries = FW_CMD_HELLO_RETRIES; + + memset(state_str, 0, sizeof(state_str)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + rv = -ENOMEM; + CSIO_INC_STATS(hw, n_err_nomem); + goto out; + } + +retry: + csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, + hw->pfn, CSIO_MASTER_MAY, NULL); + + rv = csio_mb_issue(hw, mbp); + if (rv) { + csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv); + goto out_free_mb; + } + + csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn); + if (retval != FW_SUCCESS) { + csio_err(hw, "HELLO cmd failed with ret: %d\n", retval); + rv = -EINVAL; + goto out_free_mb; + } + + /* Firmware has designated us to be master */ + if (hw->pfn == mpfn) { + hw->flags |= CSIO_HWF_MASTER; + } else if (*state == CSIO_DEV_STATE_UNINIT) { + /* + * If we're not the Master PF then we need to wait around for + * the Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable + * PF and there is no current Master PF; a Master PF may show up + * momentarily and we wouldn't want to fail pointlessly. (This + * can happen when an OS loads lots of different drivers rapidly + * at the same time). In this case, the Master PF returned by + * the firmware will be PCIE_FW_MASTER_MASK so the test below + * will work ... + */ + + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + uint32_t pcie_fw; + + spin_unlock_irq(&hw->lock); + msleep(50); + spin_lock_irq(&hw->lock); + waiting -= 50; + + /* + * If neither Error nor Initialized are indicated + * by the firmware keep waiting till we exhaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... 
+ */ + pcie_fw = csio_rd_reg32(hw, PCIE_FW_A); + if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + rv = -ETIMEDOUT; + break; + } + continue; + } + + /* + * We either have an Error or Initialized condition + * report errors preferentially. + */ + if (state) { + if (pcie_fw & PCIE_FW_ERR_F) { + *state = CSIO_DEV_STATE_ERR; + rv = -ETIMEDOUT; + } else if (pcie_fw & PCIE_FW_INIT_F) + *state = CSIO_DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * there's not a valid Master PF, grab its identity + * for our caller. + */ + if (mpfn == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD_F)) + mpfn = PCIE_FW_MASTER_G(pcie_fw); + break; + } + hw->flags &= ~CSIO_HWF_MASTER; + } + + switch (*state) { + case CSIO_DEV_STATE_UNINIT: + strcpy(state_str, "Initializing"); + break; + case CSIO_DEV_STATE_INIT: + strcpy(state_str, "Initialized"); + break; + case CSIO_DEV_STATE_ERR: + strcpy(state_str, "Error"); + break; + default: + strcpy(state_str, "Unknown"); + break; + } + + if (hw->pfn == mpfn) + csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n", + hw->pfn, state_str); + else + csio_info(hw, + "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n", + hw->pfn, mpfn, state_str); + +out_free_mb: + mempool_free(mbp, hw->mb_mempool); +out: + return rv; +} + +/* + * csio_do_bye - Perform the BYE FW Mailbox command and process response. + * @hw: HW module + * + */ +static int +csio_do_bye(struct csio_hw *hw) +{ + struct csio_mb *mbp; + enum fw_retval retval; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of BYE command failed\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_do_reset- Perform the device reset. + * @hw: HW module + * @fw_rst: FW reset + * + * If fw_rst is set, issues FW reset mbox cmd otherwise + * does PIO reset. + * Performs reset of the function. 
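+ * + * The PIO variant writes PIORSTMODE|PIORST to PL_RST directly and waits + * two seconds; the FW variant requests the same reset through a RESET + * mailbox command.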
+ */ +static int +csio_do_reset(struct csio_hw *hw, bool fw_rst) +{ + struct csio_mb *mbp; + enum fw_retval retval; + + if (!fw_rst) { + /* PIO reset */ + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + mdelay(2000); + return 0; + } + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, + PIORSTMODE_F | PIORST_F, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of RESET command failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +static int +csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb; + uint16_t caps; + + caps = ntohs(rsp->fcoecaps); + + if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) { + csio_err(hw, "No FCoE Initiator capability in the firmware.\n"); + return -EINVAL; + } + + if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) { + csio_err(hw, "No FCoE Control Offload capability\n"); + return -EINVAL; + } + + return 0; +} + +/* + * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET + * @hw: the HW module + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * PCIE_FW_MASTER_M). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +static int +csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force) +{ + enum fw_retval retval = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= PCIE_FW_MASTER_M) { + struct csio_mb *mbp; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO, + PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F, + NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of RESET command failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + mempool_free(mbp, hw->mb_mempool); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... 
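+ * + * csio_hw_fw_restart() below is the counterpart that takes the uP back + * out of RESET.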
+ */ + if (retval == 0 || force) { + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); + csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return retval ? -EINVAL : 0; +} + +/* + * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET + * @hw: the HW module + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by csio_hw_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. + */ +static int +csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= PCIE_FW_MASTER_M) { + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); + msleep(100); + if (csio_do_reset(hw, true) == 0) + return 0; + } + + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + msleep(2000); + } else { + int ms; + + csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F)) + return 0; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/* + * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW + * @hw: the HW module + * @mbox: mailbox to use for the FW RESET command (if desired) + * @fw_data: the firmware image to write + * @size: image size + * @force: force upgrade even if firmware doesn't cooperate + * + * Perform all of the steps necessary for upgrading an adapter's + * firmware image. Normally this requires the cooperation of the + * existing firmware in order to halt all existing activities + * but if an invalid mailbox token is passed in we skip that step + * (though we'll still put the adapter microprocessor into RESET in + * that case). + * + * On successful return the new firmware will have been loaded and + * the adapter will have been fully RESET losing all previous setup + * state. On unsuccessful return the adapter may be completely hosed ... + * positive errno indicates that the adapter is ~probably~ intact, a + * negative errno indicates that things are looking bad ... 
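+ *
+ * In outline the upgrade is three steps (a sketch; error handling
+ * elided):
+ *
+ *	csio_hw_fw_halt(hw, mbox, force);
+ *	csio_hw_fw_dload(hw, fw_data, size);
+ *	csio_hw_fw_restart(hw, mbox, reset);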
+ */ +static int +csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox, + const u8 *fw_data, uint32_t size, int32_t force) +{ + const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; + int reset, ret; + + ret = csio_hw_fw_halt(hw, mbox, force); + if (ret != 0 && !force) + return ret; + + ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size); + if (ret != 0) + return ret; + + /* + * Older versions of the firmware don't understand the new + * PCIE_FW.HALT flag and so won't know to perform a RESET when they + * restart. So for newly loaded older firmware we'll have to do the + * RESET for it so it starts up on a clean slate. We can tell if + * the newly loaded firmware will handle this right by checking + * its header flags to see if it advertises the capability. + */ + reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); + return csio_hw_fw_restart(hw, mbox, reset); +} + +/* + * csio_get_device_params - Get device parameters. + * @hw: HW module + * + */ +static int +csio_get_device_params(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_mb *mbp; + enum fw_retval retval; + u32 param[6]; + int i, j = 0; + + /* Initialize portids to -1 */ + for (i = 0; i < CSIO_MAX_PPORTS; i++) + hw->pport[i].portid = -1; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get port vec information. */ + param[0] = FW_PARAM_DEV(PORTVEC); + + /* Get Core clock. */ + param[1] = FW_PARAM_DEV(CCLK); + + /* Get EQ id start and end. */ + param[2] = FW_PARAM_PFVF(EQ_START); + param[3] = FW_PARAM_PFVF(EQ_END); + + /* Get IQ id start and end. */ + param[4] = FW_PARAM_PFVF(IQFLINT_START); + param[5] = FW_PARAM_PFVF(IQFLINT_END); + + csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, + ARRAY_SIZE(param), param, NULL, false, NULL); + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_mb_process_read_params_rsp(hw, mbp, &retval, + ARRAY_SIZE(param), param); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + /* cache the information. */ + hw->port_vec = param[0]; + hw->vpd.cclk = param[1]; + wrm->fw_eq_start = param[2]; + wrm->fw_iq_start = param[4]; + + /* Using FW configured max iqs & eqs */ + if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) || + !csio_is_hw_master(hw)) { + hw->cfg_niq = param[5] - param[4] + 1; + hw->cfg_neq = param[3] - param[2] + 1; + csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n", + hw->cfg_niq, hw->cfg_neq); + } + + hw->port_vec &= csio_port_mask; + + hw->num_pports = hweight32(hw->port_vec); + + csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n", + hw->port_vec, hw->num_pports); + + for (i = 0; i < hw->num_pports; i++) { + while ((hw->port_vec & (1 << j)) == 0) + j++; + hw->pport[i].portid = j++; + csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid); + } + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + + +/* + * csio_config_device_caps - Get and set device capabilities. 
+ * @hw: HW module + * + */ +static int +csio_config_device_caps(struct csio_hw *hw) +{ + struct csio_mb *mbp; + enum fw_retval retval; + int rv = -EINVAL; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get device capabilities */ + csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n"); + goto out; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval); + goto out; + } + + /* Validate device capabilities */ + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) + goto out; + + /* Don't config device capabilities if already configured */ + if (hw->fw_state == CSIO_DEV_STATE_INIT) { + rv = 0; + goto out; + } + + /* Write back desired device capabilities */ + csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true, + false, true, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n"); + goto out; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval); + goto out; + } + + rv = 0; +out: + mempool_free(mbp, hw->mb_mempool); + return rv; +} + +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} + +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. + */ +fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. 
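+ *
+ * For example, a 16-bit capabilities word with FW_PORT_CAP_SPEED_10G
+ * and FW_PORT_CAP_ANEG set maps to FW_PORT_CAP32_SPEED_10G |
+ * FW_PORT_CAP32_ANEG; 16-bit bits with no listed 32-bit counterpart
+ * are simply dropped.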
+ */ +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. + */ +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + + #define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(FORCE_PAUSE); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(MDISTRAIGHT); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + + #undef CAP32_TO_CAP16 + + return caps16; +} + +/** + * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities + * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value + * + * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new + * 32-bit Port Capabilities value. + */ +fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) +{ + fw_port_cap32_t linkattr = 0; + + /* The format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else. + */ + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + return linkattr; +} + +/** + * csio_init_link_config - initialize a link's SW state + * @lc: pointer to structure holding the link state + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. 
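+ *
+ * For example, a port whose pcaps include FW_PORT_CAP32_ANEG comes up
+ * with autoneg enabled and acaps = pcaps & ADVERT_MASK, while a
+ * fixed-speed port comes up with autoneg disabled and an empty acaps.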
+ */
+static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+				  fw_port_cap32_t acaps)
+{
+	lc->pcaps = pcaps;
+	lc->def_acaps = acaps;
+	lc->lpacaps = 0;
+	lc->speed_caps = 0;
+	lc->speed = 0;
+	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+	lc->fc = lc->requested_fc;
+
+	/*
+	 * For Forward Error Correction, we default to whatever the Firmware
+	 * tells us the Link is currently advertising.
+	 */
+	lc->requested_fec = FEC_AUTO;
+	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
+
+	/* If the Port is capable of Auto-Negotiation, initialize it as
+	 * "enabled" and copy over all of the Physical Port Capabilities
+	 * to the Advertised Port Capabilities. Otherwise mark it as
+	 * Auto-Negotiate disabled and select the highest supported speed
+	 * for the link. Note parallel structure in t4_link_l1cfg_core()
+	 * and t4_handle_get_port_info().
+	 */
+	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+		lc->acaps = lc->pcaps & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->acaps = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
+			    uint32_t *rcaps)
+{
+	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
+	fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;
+
+	lc->link_ok = 0;
+
+	/*
+	 * Convert driver coding of Pause Frame Flow Control settings into the
+	 * Firmware's API.
+	 */
+	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+	/*
+	 * Convert Common Code Forward Error Correction settings into the
+	 * Firmware's API. If the current Requested FEC has "Automatic"
+	 * (IEEE 802.3) specified, then we use whatever the Firmware
+	 * sent us as part of its IEEE 802.3-based interpretation of
+	 * the Transceiver Module EPROM FEC parameters. Otherwise we
+	 * use whatever is in the current Requested FEC settings.
+	 */
+	if (lc->requested_fec & FEC_AUTO)
+		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
+	else
+		cc_fec = lc->requested_fec;
+	fw_fec = cc_to_fwcap_fec(cc_fec);
+
+	/* Figure out what our Requested Port Capabilities are going to be.
+	 * Note parallel structure in t4_handle_get_port_info() and
+	 * init_link_config().
+	 */
+	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+		lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
+		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+		lc->fec = cc_fec;
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+		lc->fec = cc_fec;
+	} else {
+		lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+	}
+
+	*rcaps = lrcap;
+}
+
+/*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
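+ *
+ * For each port this reads the current port state, seeds the SW link
+ * configuration from the reported capabilities (csio_init_link_config),
+ * computes the capabilities to request (csio_link_l1cfg) and writes
+ * them back with a FW_PORT_CMD.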
+ *
+ */
+static int
+csio_enable_ports(struct csio_hw *hw)
+{
+	struct csio_mb *mbp;
+	u16 fw_caps = FW_CAPS_UNKNOWN;
+	enum fw_retval retval;
+	uint8_t portid;
+	fw_port_cap32_t pcaps, acaps, rcaps;
+	int i;
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp) {
+		CSIO_INC_STATS(hw, n_err_nomem);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hw->num_pports; i++) {
+		portid = hw->pport[i].portid;
+
+		if (fw_caps == FW_CAPS_UNKNOWN) {
+			u32 param, val;
+
+			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+				 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+			val = 1;
+
+			csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
+				       hw->pfn, 0, 1, &param, &val, true,
+				       NULL);
+
+			if (csio_mb_issue(hw, mbp)) {
+				csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
+					 portid);
+				mempool_free(mbp, hw->mb_mempool);
+				return -EINVAL;
+			}
+
+			csio_mb_process_read_params_rsp(hw, mbp, &retval,
+							0, NULL);
+			fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
+		}
+
+		/* Read PORT information */
+		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+			     false, 0, fw_caps, NULL);
+
+		if (csio_mb_issue(hw, mbp)) {
+			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+				 portid);
+			mempool_free(mbp, hw->mb_mempool);
+			return -EINVAL;
+		}
+
+		csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
+					      &pcaps, &acaps);
+		if (retval != FW_SUCCESS) {
+			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+				 portid, retval);
+			mempool_free(mbp, hw->mb_mempool);
+			return -EINVAL;
+		}
+
+		csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);
+
+		csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);
+
+		/* Write back PORT information */
+		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+			     true, rcaps, fw_caps, NULL);
+
+		if (csio_mb_issue(hw, mbp)) {
+			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+				 portid);
+			mempool_free(mbp, hw->mb_mempool);
+			return -EINVAL;
+		}
+
+		retval = csio_mb_fw_retval(mbp);
+		if (retval != FW_SUCCESS) {
+			csio_err(hw, "FW_PORT_CMD(w) port:%d failed: 0x%x\n",
+				 portid, retval);
+			mempool_free(mbp, hw->mb_mempool);
+			return -EINVAL;
+		}
+
+	} /* For all ports */
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return 0;
+}
+
+/*
+ * csio_get_fcoe_resinfo - Read fcoe fw resource info.
+ * @hw: HW module
+ * Issued with lock held.
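+ *
+ * The returned limits (max_xchgs, max_ssns, max_fcfs, max_vnps, the
+ * FC timeout values and the corresponding in-use counts) are cached
+ * in hw->fres_info for the rest of the driver.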
+ */ +static int +csio_get_fcoe_resinfo(struct csio_hw *hw) +{ + struct csio_fcoe_res_info *res_info = &hw->fres_info; + struct fw_fcoe_res_info_cmd *rsp; + struct csio_mb *mbp; + enum fw_retval retval; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get FCoE FW resource information */ + csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb); + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + if (retval != FW_SUCCESS) { + csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + res_info->e_d_tov = ntohs(rsp->e_d_tov); + res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq); + res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els); + res_info->r_r_tov = ntohs(rsp->r_r_tov); + res_info->max_xchgs = ntohl(rsp->max_xchgs); + res_info->max_ssns = ntohl(rsp->max_ssns); + res_info->used_xchgs = ntohl(rsp->used_xchgs); + res_info->used_ssns = ntohl(rsp->used_ssns); + res_info->max_fcfs = ntohl(rsp->max_fcfs); + res_info->max_vnps = ntohl(rsp->max_vnps); + res_info->used_fcfs = ntohl(rsp->used_fcfs); + res_info->used_vnps = ntohl(rsp->used_vnps); + + csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns, + res_info->max_xchgs); + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +static int +csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param) +{ + struct csio_mb *mbp; + enum fw_retval retval; + u32 _param[1]; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* + * Find out whether we're dealing with a version of + * the firmware which has configuration file support. 
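+	 * This is done by reading the FW_PARAMS_PARAM_DEV_CF device
+	 * parameter; on success the returned value encodes the memory
+	 * type and address at which a Configuration File can be placed.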
+	 */
+	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
+
+	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
+	if (csio_mb_issue(hw, mbp)) {
+		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	csio_mb_process_read_params_rsp(hw, mbp, &retval,
+					ARRAY_SIZE(_param), _param);
+	if (retval != FW_SUCCESS) {
+		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+			 retval);
+		mempool_free(mbp, hw->mb_mempool);
+		return -EINVAL;
+	}
+
+	mempool_free(mbp, hw->mb_mempool);
+	*param = _param[0];
+
+	return 0;
+}
+
+static int
+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
+{
+	int ret = 0;
+	const struct firmware *cf;
+	struct pci_dev *pci_dev = hw->pdev;
+	struct device *dev = &pci_dev->dev;
+	unsigned int mtype = 0, maddr = 0;
+	uint32_t *cfg_data;
+	int value_to_add = 0;
+	const char *fw_cfg_file;
+
+	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+		fw_cfg_file = FW_CFG_NAME_T5;
+	else
+		fw_cfg_file = FW_CFG_NAME_T6;
+
+	ret = request_firmware(&cf, fw_cfg_file, dev);
+	if (ret < 0) {
+		csio_err(hw, "could not find config file %s, err: %d\n",
+			 fw_cfg_file, ret);
+		return -ENOENT;
+	}
+
+	if (cf->size % 4 != 0)
+		value_to_add = 4 - (cf->size % 4);
+
+	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
+	if (cfg_data == NULL) {
+		ret = -ENOMEM;
+		goto leave;
+	}
+
+	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
+	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+		ret = -EINVAL;
+		goto leave;
+	}
+
+	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
+	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
+
+	ret = csio_memory_write(hw, mtype, maddr,
+				cf->size + value_to_add, cfg_data);
+
+	if ((ret == 0) && (value_to_add != 0)) {
+		union {
+			u32 word;
+			char buf[4];
+		} last;
+		size_t size = cf->size & ~0x3;
+		int i;
+
+		last.word = cfg_data[size >> 2];
+		for (i = value_to_add; i < 4; i++)
+			last.buf[i] = 0;
+		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
+	}
+	if (ret == 0) {
+		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
+		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
+	}
+
+leave:
+	kfree(cfg_data);
+	release_firmware(cf);
+	return ret;
+}
+
+/*
+ * HW initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration -- either using the configuration
+ * file stored in flash on the adapter or using a filesystem-local file
+ * if available.
+ *
+ * If we don't have configuration file support in the firmware, then we'll
+ * have to set things up the old fashioned way with hard-coded register
+ * writes and firmware commands ...
+ */
+
+/*
+ * Attempt to initialize the HW via a Firmware Configuration File.
+ */
+static int
+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+{
+	struct csio_mb *mbp = NULL;
+	struct fw_caps_config_cmd *caps_cmd;
+	unsigned int mtype, maddr;
+	int rv = -EINVAL;
+	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
+	char path[64];
+	char *config_name = NULL;
+
+	/*
+	 * Reset device if necessary
+	 */
+	if (reset) {
+		rv = csio_do_reset(hw, true);
+		if (rv != 0)
+			goto bye;
+	}
+
+	/*
+	 * If we have a configuration file in the host,
+	 * then use that. Otherwise, use the configuration file stored
+	 * in the HW flash ...
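+	 * (csio_hw_flash_config() must run without the HW lock held,
+	 * since request_firmware() can sleep; hence the unlock/lock
+	 * pair around the call below.)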
+ */ + spin_unlock_irq(&hw->lock); + rv = csio_hw_flash_config(hw, fw_cfg_param, path); + spin_lock_irq(&hw->lock); + if (rv != 0) { + /* + * config file was not found. Use default + * config file from flash. + */ + config_name = "On FLASH"; + mtype = FW_MEMTYPE_CF_FLASH; + maddr = hw->chip_ops->chip_flash_cfg_addr(hw); + } else { + config_name = path; + mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param); + maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16; + } + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + /* + * Tell the firmware to process the indicated Configuration File. + * If there are no errors and the caller has provided return value + * pointers for the [fini] section version, checksum and computed + * checksum, pass those back to the caller. + */ + caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb); + CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); + caps_cmd->op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd->cfvalid_to_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | + FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the + * firmware. A very few early versions of the firmware didn't + * have one embedded but we can ignore those. + */ + if (rv == ENOENT) { + CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1); + caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + config_name = "Firmware Default"; + } + if (rv != FW_SUCCESS) + goto bye; + + finiver = ntohl(caps_cmd->finiver); + finicsum = ntohl(caps_cmd->finicsum); + cfcsum = ntohl(caps_cmd->cfcsum); + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd->op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); + caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd)); + + if (csio_mb_issue(hw, mbp)) { + rv = -EINVAL; + goto bye; + } + + rv = csio_mb_fw_retval(mbp); + if (rv != FW_SUCCESS) { + csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv); + goto bye; + } + + if (finicsum != cfcsum) { + csio_warn(hw, + "Config File checksum mismatch: csum=%#x, computed=%#x\n", + finicsum, cfcsum); + } + + /* Validate device capabilities */ + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) + goto bye; + + mempool_free(mbp, hw->mb_mempool); + mbp = NULL; + + /* + * Note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants buried in the driver. + */ + hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; + + /* device parameters */ + rv = csio_get_device_params(hw); + if (rv != 0) + goto bye; + + /* Configure SGE */ + csio_wr_sge_init(hw); + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. 
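+	 * (The FW_INITIALIZE_CMD itself is issued later, from
+	 * csio_hw_initialize(), once the INIT event posted below is
+	 * processed by the HW state machine.)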
+	 */
+	/* Post event to notify completion of configuration */
+	csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+	csio_info(hw, "Successfully configured using Firmware "
+		  "Configuration File %s, version %#x, computed checksum %#x\n",
+		  config_name, finiver, cfcsum);
+	return 0;
+
+	/*
+	 * Something bad happened. Return the error ...
+	 */
+bye:
+	if (mbp)
+		mempool_free(mbp, hw->mb_mempool);
+	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
+	csio_warn(hw, "Configuration file error %d\n", rv);
+	return rv;
+}
+
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
+ */
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
+{
+
+	/* short circuit if it's the exact same firmware version */
+	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+		return 1;
+
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+		return 1;
+#undef SAME_INTF
+
+	return 0;
+}
+
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
+				     int k, int c)
+{
+	const char *reason;
+
+	if (!card_fw_usable) {
+		reason = "incompatible or unusable";
+		goto install;
+	}
+
+	if (k > c) {
+		reason = "older than the version supported with this driver";
+		goto install;
+	}
+
+	return 0;
+
+install:
+	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
+		 "installing firmware %u.%u.%u.%u on card.\n",
+		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
+		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+
+	return 1;
+}
+
+static struct fw_info fw_info_array[] = {
+	{
+		.chip = CHELSIO_T5,
+		.fs_name = FW_CFG_NAME_T5,
+		.fw_mod_name = FW_FNAME_T5,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T5,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+			.intfver_nic = FW_INTFVER(T5, NIC),
+			.intfver_vnic = FW_INTFVER(T5, VNIC),
+			.intfver_ri = FW_INTFVER(T5, RI),
+			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
+			.intfver_fcoe = FW_INTFVER(T5, FCOE),
+		},
+	}, {
+		.chip = CHELSIO_T6,
+		.fs_name = FW_CFG_NAME_T6,
+		.fw_mod_name = FW_FNAME_T6,
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T6,
+			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+			.intfver_nic = FW_INTFVER(T6, NIC),
+			.intfver_vnic = FW_INTFVER(T6, VNIC),
+			.intfver_ri = FW_INTFVER(T6, RI),
+			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
+			.intfver_fcoe = FW_INTFVER(T6, FCOE),
+		},
+	}
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+		if (fw_info_array[i].chip == chip)
+			return &fw_info_array[i];
+	}
+	return NULL;
+}
+
+static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+			   const u8 *fw_data, unsigned int fw_size,
+			   struct fw_hdr *card_fw, enum csio_dev_state state,
+			   int *reset)
+{
+	int ret, card_fw_usable, fs_fw_usable;
+	const struct fw_hdr *fs_fw;
+	const struct fw_hdr *drv_fw;
+
+	drv_fw = &fw_info->fw_hdr;
+
+	/* Read the header of the firmware on the card */
+	ret = csio_hw_read_flash(hw, FLASH_FW_START,
+				 sizeof(*card_fw) / sizeof(uint32_t),
+				 (uint32_t *)card_fw, 1);
+	if (ret == 0) {
+		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+	} else {
+		csio_err(hw,
+			 "Unable to read card's firmware header: %d\n", ret);
+		card_fw_usable = 0;
+	}
+
+	if (fw_data != NULL) {
+		fs_fw = (const void *)fw_data;
+		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+	} else {
+		fs_fw = NULL;
+		fs_fw_usable = 0;
+	}
+
+	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+		/* Common case: the firmware on the card is an exact match and
+		 * the filesystem one is an exact match too, or the filesystem
+		 * one is absent/incompatible.
+		 */
+	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
+		   csio_should_install_fs_fw(hw, card_fw_usable,
+					     be32_to_cpu(fs_fw->fw_ver),
+					     be32_to_cpu(card_fw->fw_ver))) {
+		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
+					 fw_size, 0);
+		if (ret != 0) {
+			csio_err(hw,
+				 "failed to install firmware: %d\n", ret);
+			goto bye;
+		}
+
+		/* Installed successfully, update the cached header too. */
+		memcpy(card_fw, fs_fw, sizeof(*card_fw));
+		card_fw_usable = 1;
+		*reset = 0;	/* already reset as part of load_fw */
+	}
+
+	if (!card_fw_usable) {
+		uint32_t d, c, k;
+
+		d = be32_to_cpu(drv_fw->fw_ver);
+		c = be32_to_cpu(card_fw->fw_ver);
+		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+		csio_err(hw, "Cannot find a usable firmware: "
+			 "chip state %d, "
+			 "driver compiled with %d.%d.%d.%d, "
+			 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+			 state,
+			 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
+			 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
+			 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
+			 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+			 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+			 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+		ret = -EINVAL;
+		goto bye;
+	}
+
+	/* We're using whatever's on the card and it's known to be good. */
+	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
+	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+	return ret;
+}
+
+/*
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed, and 0 on success. If
+ * flashing was not attempted because the card already had the
+ * latest firmware, -ECANCELED is returned.
+ */
+static int
+csio_hw_flash_fw(struct csio_hw *hw, int *reset)
+{
+	int ret = -ECANCELED;
+	const struct firmware *fw;
+	struct fw_info *fw_info;
+	struct fw_hdr *card_fw;
+	struct pci_dev *pci_dev = hw->pdev;
+	struct device *dev = &pci_dev->dev;
+	const u8 *fw_data = NULL;
+	unsigned int fw_size = 0;
+	const char *fw_bin_file;
+
+	/* This is the firmware whose headers the driver was compiled
+	 * against
+	 */
+	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
+	if (fw_info == NULL) {
+		csio_err(hw,
+			 "unable to get firmware info for chip %d.\n",
+			 CHELSIO_CHIP_VERSION(hw->chip_id));
+		return -EINVAL;
+	}
+
+	/* allocate memory to read the header of the firmware on the
+	 * card
+	 */
+	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+	if (!card_fw)
+		return -ENOMEM;
+
+	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+		fw_bin_file = FW_FNAME_T5;
+	else
+		fw_bin_file = FW_FNAME_T6;
+
+	ret = request_firmware(&fw, fw_bin_file, dev);
+	if (ret < 0) {
+		csio_err(hw, "could not find firmware image %s, err: %d\n",
+			 fw_bin_file, ret);
+	} else {
+		fw_data = fw->data;
+		fw_size = fw->size;
+	}
+
+	/* upgrade FW logic */
+	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
+			      hw->fw_state, reset);
+
+	/* Cleaning up */
+	if (fw != NULL)
+		release_firmware(fw);
+	kfree(card_fw);
+	return ret;
+}
+
+static int csio_hw_check_fwver(struct csio_hw *hw)
+{
+	if (csio_is_t6(hw->pdev->device &
CSIO_HW_CHIP_MASK) && + (hw->fwrev < CSIO_MIN_T6_FW)) { + csio_hw_print_fw_version(hw, "T6 unsupported fw"); + return -1; + } + + return 0; +} + +/* + * csio_hw_configure - Configure HW + * @hw - HW module + * + */ +static void +csio_hw_configure(struct csio_hw *hw) +{ + int reset = 1; + int rv; + u32 param[1]; + + rv = csio_hw_dev_ready(hw); + if (rv != 0) { + CSIO_INC_STATS(hw, n_err_fatal); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* HW version */ + hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A); + + /* Needed for FW download */ + rv = csio_hw_get_flash_params(hw); + if (rv != 0) { + csio_err(hw, "Failed to get serial flash params rv:%d\n", rv); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* Set PCIe completion timeout to 4 seconds */ + if (pci_is_pcie(hw->pdev)) + pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); + + hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); + + rv = csio_hw_get_fw_version(hw, &hw->fwrev); + if (rv != 0) + goto out; + + csio_hw_print_fw_version(hw, "Firmware revision"); + + rv = csio_do_hello(hw, &hw->fw_state); + if (rv != 0) { + CSIO_INC_STATS(hw, n_err_fatal); + csio_post_event(&hw->sm, CSIO_HWE_FATAL); + goto out; + } + + /* Read vpd */ + rv = csio_hw_get_vpd_params(hw, &hw->vpd); + if (rv != 0) + goto out; + + csio_hw_get_fw_version(hw, &hw->fwrev); + csio_hw_get_tp_version(hw, &hw->tp_vers); + if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) { + + /* Do firmware update */ + spin_unlock_irq(&hw->lock); + rv = csio_hw_flash_fw(hw, &reset); + spin_lock_irq(&hw->lock); + + if (rv != 0) + goto out; + + rv = csio_hw_check_fwver(hw); + if (rv < 0) + goto out; + + /* If the firmware doesn't support Configuration Files, + * return an error. + */ + rv = csio_hw_check_fwconfig(hw, param); + if (rv != 0) { + csio_info(hw, "Firmware doesn't support " + "Firmware Configuration files\n"); + goto out; + } + + /* The firmware provides us with a memory buffer where we can + * load a Configuration File from the host if we want to + * override the Configuration File in flash. 
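+		 * csio_hw_use_fwconfig() below prefers such a host-supplied
+		 * file (loaded via request_firmware()) and falls back to the
+		 * copy in FLASH when none is found.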
+		 */
+		rv = csio_hw_use_fwconfig(hw, reset, param);
+		if (rv != 0) {
+			csio_info(hw, "Could not initialize "
+				  "adapter, error %d\n", rv);
+			goto out;
+		}
+
+	} else {
+		rv = csio_hw_check_fwver(hw);
+		if (rv < 0)
+			goto out;
+
+		if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+
+			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+			/* device parameters */
+			rv = csio_get_device_params(hw);
+			if (rv != 0)
+				goto out;
+
+			/* Get device capabilities */
+			rv = csio_config_device_caps(hw);
+			if (rv != 0)
+				goto out;
+
+			/* Configure SGE */
+			csio_wr_sge_init(hw);
+
+			/* Post event to notify completion of configuration */
+			csio_post_event(&hw->sm, CSIO_HWE_INIT);
+			goto out;
+		}
+	} /* if not master */
+
+out:
+	return;
+}
+
+/*
+ * csio_hw_initialize - Initialize HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_initialize(struct csio_hw *hw)
+{
+	struct csio_mb *mbp;
+	enum fw_retval retval;
+	int rv;
+	int i;
+
+	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+		if (!mbp)
+			goto out;
+
+		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+		if (csio_mb_issue(hw, mbp)) {
+			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
+			goto free_and_out;
+		}
+
+		retval = csio_mb_fw_retval(mbp);
+		if (retval != FW_SUCCESS) {
+			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
+				 retval);
+			goto free_and_out;
+		}
+
+		mempool_free(mbp, hw->mb_mempool);
+	}
+
+	rv = csio_get_fcoe_resinfo(hw);
+	if (rv != 0) {
+		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
+		goto out;
+	}
+
+	spin_unlock_irq(&hw->lock);
+	rv = csio_config_queues(hw);
+	spin_lock_irq(&hw->lock);
+
+	if (rv != 0) {
+		csio_err(hw, "Config of queues failed!: %d\n", rv);
+		goto out;
+	}
+
+	for (i = 0; i < hw->num_pports; i++)
+		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
+
+	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+		rv = csio_enable_ports(hw);
+		if (rv != 0) {
+			csio_err(hw, "Failed to enable ports: %d\n", rv);
+			goto out;
+		}
+	}
+
+	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
+	return;
+
+free_and_out:
+	mempool_free(mbp, hw->mb_mempool);
+out:
+	return;
+}
+
+#define PF_INTR_MASK (PFSW_F | PFCIM_F)
+
+/*
+ * csio_hw_intr_enable - Enable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Enable interrupts in HW registers.
+ */
+static void
+csio_hw_intr_enable(struct csio_hw *hw)
+{
+	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
+	u32 pf = 0;
+	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
+
+	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+		pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+	else
+		pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+
+	/*
+	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
+	 * by FW, so do nothing for INTX.
+	 */
+	if (hw->intr_mode == CSIO_IM_MSIX)
+		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+				   AIVEC_V(AIVEC_M), vec);
+	else if (hw->intr_mode == CSIO_IM_MSI)
+		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
+				   AIVEC_V(AIVEC_M), 0);
+
+	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
+
+	/* Turn on MB interrupts - this will internally flush PIO as well */
+	csio_mb_intr_enable(hw);
+
+	/* These are common registers - only a master can modify them */
+	if (csio_is_hw_master(hw)) {
+		/*
+		 * Disable the Serial FLASH interrupt, if enabled!
+ */ + pl &= (~SF_F); + csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); + + csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F | + EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F | + ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F | + ERR_DATA_CPL_ON_HIGH_QID1_F | + ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | + ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | + ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | + ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F, + SGE_INT_ENABLE3_A); + csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf); + } + + hw->flags |= CSIO_HWF_HW_INTR_ENABLED; + +} + +/* + * csio_hw_intr_disable - Disable HW interrupts + * @hw: Pointer to HW module. + * + * Turn off Mailbox and PCI_PF_CFG interrupts. + */ +void +csio_hw_intr_disable(struct csio_hw *hw) +{ + u32 pf = 0; + + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + else + pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A)); + + if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED)) + return; + + hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED; + + csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A)); + if (csio_is_hw_master(hw)) + csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0); + + /* Turn off MB interrupts */ + csio_mb_intr_disable(hw); + +} + +void +csio_hw_fatal_err(struct csio_hw *hw) +{ + csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0); + csio_hw_intr_disable(hw); + + /* Do not reset HW, we may need FW state for debugging */ + csio_fatal(hw, "HW Fatal error encountered!\n"); +} + +/*****************************************************************************/ +/* START: HW SM */ +/*****************************************************************************/ +/* + * csio_hws_uninit - Uninit state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_CFG: + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_configuring - Configuring state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_INIT: + csio_set_state(&hw->sm, csio_hws_initializing); + csio_hw_initialize(hw); + break; + + case CSIO_HWE_INIT_DONE: + csio_set_state(&hw->sm, csio_hws_ready); + /* Fan out event to all lnode SMs */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_do_bye(hw); + break; + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_initializing - Initializing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_INIT_DONE: + csio_set_state(&hw->sm, csio_hws_ready); + + /* Fan out event to all lnode SMs */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY); + + /* Enable interrupts */ + csio_hw_intr_enable(hw); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_do_bye(hw); + break; + + default: + CSIO_INC_STATS(hw, 
n_evt_unexp); + break; + } +} + +/* + * csio_hws_ready - Ready state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt) +{ + /* Remember the event */ + hw->evtflag = evt; + + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET: + case CSIO_HWE_FW_DLOAD: + case CSIO_HWE_SUSPEND: + case CSIO_HWE_PCI_REMOVE: + case CSIO_HWE_PCIERR_DETECTED: + csio_set_state(&hw->sm, csio_hws_quiescing); + /* cleanup all outstanding cmds */ + if (evt == CSIO_HWE_HBA_RESET || + evt == CSIO_HWE_PCIERR_DETECTED) + csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false); + else + csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true); + + csio_hw_intr_disable(hw); + csio_hw_mbm_cleanup(hw); + csio_evtq_stop(hw); + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP); + csio_evtq_flush(hw); + csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw)); + csio_post_event(&hw->sm, CSIO_HWE_QUIESCED); + break; + + case CSIO_HWE_FATAL: + csio_set_state(&hw->sm, csio_hws_uninit); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_quiescing - Quiescing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_QUIESCED: + switch (hw->evtflag) { + case CSIO_HWE_FW_DLOAD: + csio_set_state(&hw->sm, csio_hws_resetting); + /* Download firmware */ + fallthrough; + + case CSIO_HWE_HBA_RESET: + csio_set_state(&hw->sm, csio_hws_resetting); + /* Start reset of the HBA */ + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET); + csio_wr_destroy_queues(hw, false); + csio_do_reset(hw, false); + csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE); + break; + + case CSIO_HWE_PCI_REMOVE: + csio_set_state(&hw->sm, csio_hws_removing); + csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE); + csio_wr_destroy_queues(hw, true); + /* Now send the bye command */ + csio_do_bye(hw); + break; + + case CSIO_HWE_SUSPEND: + csio_set_state(&hw->sm, csio_hws_quiesced); + break; + + case CSIO_HWE_PCIERR_DETECTED: + csio_set_state(&hw->sm, csio_hws_pcierr); + csio_wr_destroy_queues(hw, false); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + + } + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_quiesced - Quiesced state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_RESUME: + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_resetting - HW Resetting state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET_DONE: + csio_evtq_start(hw); + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/* + * csio_hws_removing - PCI Hotplug removing state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) +{ + 
hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_HBA_RESET: + if (!csio_is_hw_master(hw)) + break; + /* + * The BYE should have already been issued, so we can't + * use the mailbox interface. Hence we use the PL_RST + * register directly. + */ + csio_err(hw, "Resetting HW and waiting 2 seconds...\n"); + csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A); + mdelay(2000); + break; + + /* Should never receive any new events */ + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + + } +} + +/* + * csio_hws_pcierr - PCI Error state + * @hw - HW module + * @evt - Event + * + */ +static void +csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) +{ + hw->prev_evt = hw->cur_evt; + hw->cur_evt = evt; + CSIO_INC_STATS(hw, n_evt_sm[evt]); + + switch (evt) { + case CSIO_HWE_PCIERR_SLOT_RESET: + csio_evtq_start(hw); + csio_set_state(&hw->sm, csio_hws_configuring); + csio_hw_configure(hw); + break; + + default: + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +} + +/*****************************************************************************/ +/* END: HW SM */ +/*****************************************************************************/ + +/* + * csio_handle_intr_status - table driven interrupt handler + * @hw: HW instance + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. + */ +int +csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = csio_rd_reg32(hw, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + csio_fatal(hw, "Fatal %s (0x%x)\n", + acts->msg, status & acts->mask); + } else if (acts->msg) + csio_info(hw, "%s (0x%x)\n", + acts->msg, status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + csio_wr_reg32(hw, status, reg); + return fatal; +} + +/* + * TP interrupt handler. + */ +static void csio_tp_intr_handler(struct csio_hw *hw) +{ + static struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * SGE interrupt handler. 
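+ *
+ * Parity errors surface through SGE_INT_CAUSE1/2 and are always
+ * fatal; the remaining conditions are read from SGE_INT_CAUSE3 and
+ * decoded via the intr_info table below.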
+ */
+static void csio_sge_intr_handler(struct csio_hw *hw)
+{
+	uint64_t v;
+
+	static struct intr_info sge_intr_info[] = {
+		{ ERR_CPL_EXCEED_IQE_SIZE_F,
+		  "SGE received CPL exceeding IQE size", -1, 1 },
+		{ ERR_INVALID_CIDX_INC_F,
+		  "SGE GTS CIDX increment too large", -1, 0 },
+		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
+		{ ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
+		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
+		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
+		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
+		  0 },
+		{ ERR_ING_CTXT_PRIO_F,
+		  "SGE too many priority ingress contexts", -1, 0 },
+		{ ERR_EGR_CTXT_PRIO_F,
+		  "SGE too many priority egress contexts", -1, 0 },
+		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
+		{ 0, NULL, 0, 0 }
+	};
+
+	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
+	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
+	if (v) {
+		csio_fatal(hw, "SGE parity error (%#llx)\n",
+			   (unsigned long long)v);
+		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
+			      SGE_INT_CAUSE1_A);
+		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
+	}
+
+	/*
+	 * Handle SGE_INT_CAUSE3 once; a second pass would only re-read a
+	 * cause register whose processed bits were just cleared.
+	 */
+	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
+	if (v != 0)
+		csio_hw_fatal_err(hw);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
+		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
+#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
+		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
+
+/*
+ * CIM interrupt handler.
+ */ +static void csio_cim_intr_handler(struct csio_hw *hw) +{ + static struct intr_info cim_intr_info[] = { + { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, + { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, + { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, + { ILLWRINT_F, "CIM illegal write", -1, 1 }, + { ILLRDINT_F, "CIM illegal read", -1, 1 }, + { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, + { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + int fat; + + fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A, + cim_intr_info) + + csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A, + cim_upintr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * ULP RX interrupt handler. + */ +static void csio_ulprx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ulprx_intr_info[] = { + { 0x1800000, "ULPRX context error", -1, 1 }, + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * ULP TX interrupt handler. + */ +static void csio_ulptx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * PM TX interrupt handler. 
+ */ +static void csio_pmtx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, + { 0xffffff0, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1, + 1 }, + { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, + { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * PM RX interrupt handler. + */ +static void csio_pmrx_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, + { 0x3ffff0, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1, + 1 }, + { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, + { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * CPL switch interrupt handler. + */ +static void csio_cplsw_intr_handler(struct csio_hw *hw) +{ + static struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * LE interrupt handler. + */ +static void csio_le_intr_handler(struct csio_hw *hw) +{ + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); + + static struct intr_info le_intr_info[] = { + { LIPMISS_F, "LE LIP miss", -1, 0 }, + { LIP0_F, "LE 0 LIP error", -1, 0 }, + { PARITYERR_F, "LE parity error", -1, 1 }, + { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { REQQPARERR_F, "LE request queue parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + static struct intr_info t6_le_intr_info[] = { + { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, + { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { TCAMINTPERR_F, "LE parity error", -1, 1 }, + { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, + (chip == CHELSIO_T5) ? + le_intr_info : t6_le_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * MPS interrupt handler. 
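+ *
+ * Aggregates the per-block MPS cause registers (Rx/Tx, TRC, the
+ * statistics FIFOs and the classifier); any reported condition is
+ * treated as fatal once the top-level MPS_INT_CAUSE has been cleared
+ * and flushed.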
+ */ +static void csio_mps_intr_handler(struct csio_hw *hw) +{ + static struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_tx_intr_info[] = { + { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", + -1, 1 }, + { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", + -1, 1 }, + { BUBBLE_F, "MPS Tx underflow", -1, 1 }, + { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR_F, "MPS Tx framing error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_trc_intr_info[] = { + { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", + -1, 1 }, + { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + int fat; + + fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A, + mps_rx_intr_info) + + csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A, + mps_tx_intr_info) + + csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A, + mps_trc_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A, + mps_stat_sram_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, + mps_stat_tx_intr_info) + + csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, + mps_stat_rx_intr_info) + + csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A, + mps_cls_intr_info); + + csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A); + csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */ + if (fat) + csio_hw_fatal_err(hw); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ + ECC_UE_INT_CAUSE_F) + +/* + * EDC/MC interrupt handler. + */ +static void csio_mem_intr_handler(struct csio_hw *hw, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE_A, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); + } else { + addr = MC_INT_CAUSE_A; + cnt_addr = MC_ECC_STATUS_A; + } + + v = csio_rd_reg32(hw, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE_F) + csio_fatal(hw, "%s FIFO parity error\n", name[idx]); + if (v & ECC_CE_INT_CAUSE_F) { + uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr)); + + csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr); + csio_warn(hw, "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE_F) + csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]); + + csio_wr_reg32(hw, v, addr); + if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) + csio_hw_fatal_err(hw); +} + +/* + * MA interrupt handler. 
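 */

csio_mem_intr_handler() above shows the register-field idiom used throughout: for a field FOO, t4_regs.h provides FOO_V(x) to place a value into the field and FOO_G(x) to extract it, built from a shift and an unshifted mask. Writing FOO_V(FOO_M) sets every bit of the field, which is how the handler clears the correctable-ECC counter after reporting it. A self-contained sketch (the shift and width here are invented for the demo; the real ECC_CECNT layout lives in t4_regs.h):

#include <assert.h>
#include <stdint.h>

#define DEMO_CECNT_S	16
#define DEMO_CECNT_M	0xffffU
#define DEMO_CECNT_V(x)	((x) << DEMO_CECNT_S)
#define DEMO_CECNT_G(x)	(((x) >> DEMO_CECNT_S) & DEMO_CECNT_M)

int main(void)
{
	uint32_t status = DEMO_CECNT_V(7u);	/* 7 correctable errors */

	assert(DEMO_CECNT_G(status) == 7);

	/* All-ones in the field, as written back to the ECC status
	 * register to reset the count. */
	assert(DEMO_CECNT_G(DEMO_CECNT_V(DEMO_CECNT_M)) == DEMO_CECNT_M);
	return 0;
}

/*
 * (csio_ma_intr_handler follows)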
+ */ +static void csio_ma_intr_handler(struct csio_hw *hw) +{ + uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A); + + if (status & MEM_PERR_INT_CAUSE_F) + csio_fatal(hw, "MA parity error, parity status %#x\n", + csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A)); + if (status & MEM_WRAP_INT_CAUSE_F) { + v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A); + csio_fatal(hw, + "MA address wrap-around error by client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4); + } + csio_wr_reg32(hw, status, MA_INT_CAUSE_A); + csio_hw_fatal_err(hw); +} + +/* + * SMB interrupt handler. + */ +static void csio_smb_intr_handler(struct csio_hw *hw) +{ + static struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * NC-SI interrupt handler. + */ +static void csio_ncsi_intr_handler(struct csio_hw *hw) +{ + static struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * XGMAC interrupt handler. + */ +static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) +{ + uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); + + v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR_F) + csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); + if (v & RXFIFO_PRTY_ERR_F) + csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); + csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A)); + csio_hw_fatal_err(hw); +} + +/* + * PL interrupt handler. + */ +static void csio_pl_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pl_intr_info[] = { + { FATALPERR_F, "T4 fatal parity error", -1, 1 }, + { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info)) + csio_hw_fatal_err(hw); +} + +/* + * csio_hw_slow_intr_handler - control path interrupt handler + * @hw: HW module + * + * Interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. + */ +int +csio_hw_slow_intr_handler(struct csio_hw *hw) +{ + uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A); + + if (!(cause & CSIO_GLBL_INTR_MASK)) { + CSIO_INC_STATS(hw, n_plint_unexp); + return 0; + } + + csio_dbg(hw, "Slow interrupt! 
cause: 0x%x\n", cause); + + CSIO_INC_STATS(hw, n_plint_cnt); + + if (cause & CIM_F) + csio_cim_intr_handler(hw); + + if (cause & MPS_F) + csio_mps_intr_handler(hw); + + if (cause & NCSI_F) + csio_ncsi_intr_handler(hw); + + if (cause & PL_F) + csio_pl_intr_handler(hw); + + if (cause & SMB_F) + csio_smb_intr_handler(hw); + + if (cause & XGMAC0_F) + csio_xgmac_intr_handler(hw, 0); + + if (cause & XGMAC1_F) + csio_xgmac_intr_handler(hw, 1); + + if (cause & XGMAC_KR0_F) + csio_xgmac_intr_handler(hw, 2); + + if (cause & XGMAC_KR1_F) + csio_xgmac_intr_handler(hw, 3); + + if (cause & PCIE_F) + hw->chip_ops->chip_pcie_intr_handler(hw); + + if (cause & MC_F) + csio_mem_intr_handler(hw, MEM_MC); + + if (cause & EDC0_F) + csio_mem_intr_handler(hw, MEM_EDC0); + + if (cause & EDC1_F) + csio_mem_intr_handler(hw, MEM_EDC1); + + if (cause & LE_F) + csio_le_intr_handler(hw); + + if (cause & TP_F) + csio_tp_intr_handler(hw); + + if (cause & MA_F) + csio_ma_intr_handler(hw); + + if (cause & PM_TX_F) + csio_pmtx_intr_handler(hw); + + if (cause & PM_RX_F) + csio_pmrx_intr_handler(hw); + + if (cause & ULP_RX_F) + csio_ulprx_intr_handler(hw); + + if (cause & CPL_SWITCH_F) + csio_cplsw_intr_handler(hw); + + if (cause & SGE_F) + csio_sge_intr_handler(hw); + + if (cause & ULP_TX_F) + csio_ulptx_intr_handler(hw); + + /* Clear the interrupts just processed for which we are the master. */ + csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A); + csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */ + + return 1; +} + +/***************************************************************************** + * HW <--> mailbox interfacing routines. + ****************************************************************************/ +/* + * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions + * + * @data: Private data pointer. + * + * Called from worker thread context. + */ +static void +csio_mberr_worker(void *data) +{ + struct csio_hw *hw = (struct csio_hw *)data; + struct csio_mbm *mbm = &hw->mbm; + LIST_HEAD(cbfn_q); + struct csio_mb *mbp_next; + int rv; + + del_timer_sync(&mbm->timer); + + spin_lock_irq(&hw->lock); + if (list_empty(&mbm->cbfn_q)) { + spin_unlock_irq(&hw->lock); + return; + } + + list_splice_tail_init(&mbm->cbfn_q, &cbfn_q); + mbm->stats.n_cbfnq = 0; + + /* Try to start waiting mailboxes */ + if (!list_empty(&mbm->req_q)) { + mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list); + list_del_init(&mbp_next->list); + + rv = csio_mb_issue(hw, mbp_next); + if (rv != 0) + list_add_tail(&mbp_next->list, &mbm->req_q); + else + CSIO_DEC_STATS(mbm, n_activeq); + } + spin_unlock_irq(&hw->lock); + + /* Now callback completions */ + csio_mb_completions(hw, &cbfn_q); +} + +/* + * csio_hw_mb_timer - Top-level Mailbox timeout handler. + * + * @data: private data pointer + * + **/ +static void +csio_hw_mb_timer(struct timer_list *t) +{ + struct csio_mbm *mbm = from_timer(mbm, t, timer); + struct csio_hw *hw = mbm->hw; + struct csio_mb *mbp = NULL; + + spin_lock_irq(&hw->lock); + mbp = csio_mb_tmo_handler(hw); + spin_unlock_irq(&hw->lock); + + /* Call back the function for the timed-out Mailbox */ + if (mbp) + mbp->mb_cbfn(hw, mbp); + +} + +/* + * csio_hw_mbm_cleanup - Cleanup Mailbox module. + * @hw: HW module + * + * Called with lock held, should exit with lock held. + * Cancels outstanding mailboxes (waiting, in-flight) and gathers them + * into a local queue. Drops lock and calls the completions. Holds + * lock and returns. 
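 */

csio_mberr_worker() above follows a standard deferral pattern: take hw->lock, splice the whole completion queue onto a stack-local list, drop the lock, and only then run the callbacks, so slow or re-entrant completion handlers never execute under the spinlock. A userspace model of just that hand-off (build with cc -pthread; the list and all demo_* names are invented for the demo):

#include <pthread.h>
#include <stdio.h>

struct demo_req {			/* stand-in for csio_mb + list_head */
	struct demo_req *next;
	int id;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_req *demo_pending;	/* guarded by demo_lock */

static void demo_complete(struct demo_req *r)
{
	printf("completing req %d with no lock held\n", r->id);
}

static void demo_worker(void)
{
	struct demo_req *local;

	pthread_mutex_lock(&demo_lock);
	local = demo_pending;		/* the list_splice_tail_init() step */
	demo_pending = NULL;
	pthread_mutex_unlock(&demo_lock);

	while (local) {
		struct demo_req *r = local;

		local = r->next;
		demo_complete(r);	/* the csio_mb_completions() step */
	}
}

int main(void)
{
	static struct demo_req a = { .id = 1 }, b = { .id = 2 };

	a.next = &b;
	demo_pending = &a;
	demo_worker();
	return 0;
}

/*
 * csio_hw_mbm_cleanup (documented above):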
+ */ +static void +csio_hw_mbm_cleanup(struct csio_hw *hw) +{ + LIST_HEAD(cbfn_q); + + csio_mb_cancel_all(hw, &cbfn_q); + + spin_unlock_irq(&hw->lock); + csio_mb_completions(hw, &cbfn_q); + spin_lock_irq(&hw->lock); +} + +/***************************************************************************** + * Event handling + ****************************************************************************/ +int +csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg, + uint16_t len) +{ + struct csio_evt_msg *evt_entry = NULL; + + if (type >= CSIO_EVT_MAX) + return -EINVAL; + + if (len > CSIO_EVT_MSG_SIZE) + return -EINVAL; + + if (hw->flags & CSIO_HWF_FWEVT_STOP) + return -EINVAL; + + if (list_empty(&hw->evt_free_q)) { + csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", + type, len); + return -ENOMEM; + } + + evt_entry = list_first_entry(&hw->evt_free_q, + struct csio_evt_msg, list); + list_del_init(&evt_entry->list); + + /* copy event msg and queue the event */ + evt_entry->type = type; + memcpy((void *)evt_entry->data, evt_msg, len); + list_add_tail(&evt_entry->list, &hw->evt_active_q); + + CSIO_DEC_STATS(hw, n_evt_freeq); + CSIO_INC_STATS(hw, n_evt_activeq); + + return 0; +} + +static int +csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg, + uint16_t len, bool msg_sg) +{ + struct csio_evt_msg *evt_entry = NULL; + struct csio_fl_dma_buf *fl_sg; + uint32_t off = 0; + unsigned long flags; + int n, ret = 0; + + if (type >= CSIO_EVT_MAX) + return -EINVAL; + + if (len > CSIO_EVT_MSG_SIZE) + return -EINVAL; + + spin_lock_irqsave(&hw->lock, flags); + if (hw->flags & CSIO_HWF_FWEVT_STOP) { + ret = -EINVAL; + goto out; + } + + if (list_empty(&hw->evt_free_q)) { + csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n", + type, len); + ret = -ENOMEM; + goto out; + } + + evt_entry = list_first_entry(&hw->evt_free_q, + struct csio_evt_msg, list); + list_del_init(&evt_entry->list); + + /* copy event msg and queue the event */ + evt_entry->type = type; + + /* If Payload in SG list*/ + if (msg_sg) { + fl_sg = (struct csio_fl_dma_buf *) evt_msg; + for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { + memcpy((void *)((uintptr_t)evt_entry->data + off), + fl_sg->flbufs[n].vaddr, + fl_sg->flbufs[n].len); + off += fl_sg->flbufs[n].len; + } + } else + memcpy((void *)evt_entry->data, evt_msg, len); + + list_add_tail(&evt_entry->list, &hw->evt_active_q); + CSIO_DEC_STATS(hw, n_evt_freeq); + CSIO_INC_STATS(hw, n_evt_activeq); +out: + spin_unlock_irqrestore(&hw->lock, flags); + return ret; +} + +static void +csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry) +{ + if (evt_entry) { + spin_lock_irq(&hw->lock); + list_del_init(&evt_entry->list); + list_add_tail(&evt_entry->list, &hw->evt_free_q); + CSIO_DEC_STATS(hw, n_evt_activeq); + CSIO_INC_STATS(hw, n_evt_freeq); + spin_unlock_irq(&hw->lock); + } +} + +void +csio_evtq_flush(struct csio_hw *hw) +{ + uint32_t count; + count = 30; + while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) { + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING)); +} + +static void +csio_evtq_stop(struct csio_hw *hw) +{ + hw->flags |= CSIO_HWF_FWEVT_STOP; +} + +static void +csio_evtq_start(struct csio_hw *hw) +{ + hw->flags &= ~CSIO_HWF_FWEVT_STOP; +} + +static void +csio_evtq_cleanup(struct csio_hw *hw) +{ + struct list_head *evt_entry, *next_entry; + + /* Release outstanding events from activeq to freeq*/ + if 
(!list_empty(&hw->evt_active_q)) + list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q); + + hw->stats.n_evt_activeq = 0; + hw->flags &= ~CSIO_HWF_FWEVT_PENDING; + + /* Freeup event entry */ + list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) { + kfree(evt_entry); + CSIO_DEC_STATS(hw, n_evt_freeq); + } + + hw->stats.n_evt_freeq = 0; +} + + +static void +csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv) +{ + __u8 op; + void *msg = NULL; + uint32_t msg_len = 0; + bool msg_sg = 0; + + op = ((struct rss_header *) wr)->opcode; + if (op == CPL_FW6_PLD) { + CSIO_INC_STATS(hw, n_cpl_fw6_pld); + if (!flb || !flb->totlen) { + CSIO_INC_STATS(hw, n_cpl_unexp); + return; + } + + msg = (void *) flb; + msg_len = flb->totlen; + msg_sg = 1; + } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { + + CSIO_INC_STATS(hw, n_cpl_fw6_msg); + /* skip RSS header */ + msg = (void *)((uintptr_t)wr + sizeof(__be64)); + msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : + sizeof(struct cpl_fw4_msg); + } else { + csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); + CSIO_INC_STATS(hw, n_cpl_unexp); + return; + } + + /* + * Enqueue event to EventQ. Events processing happens + * in Event worker thread context + */ + if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg, + (uint16_t)msg_len, msg_sg)) + CSIO_INC_STATS(hw, n_evt_drop); +} + +void +csio_evtq_worker(struct work_struct *work) +{ + struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work); + struct list_head *evt_entry, *next_entry; + LIST_HEAD(evt_q); + struct csio_evt_msg *evt_msg; + struct cpl_fw6_msg *msg; + struct csio_rnode *rn; + int rv = 0; + uint8_t evtq_stop = 0; + + csio_dbg(hw, "event worker thread active evts#%d\n", + hw->stats.n_evt_activeq); + + spin_lock_irq(&hw->lock); + while (!list_empty(&hw->evt_active_q)) { + list_splice_tail_init(&hw->evt_active_q, &evt_q); + spin_unlock_irq(&hw->lock); + + list_for_each_safe(evt_entry, next_entry, &evt_q) { + evt_msg = (struct csio_evt_msg *) evt_entry; + + /* Drop events if queue is STOPPED */ + spin_lock_irq(&hw->lock); + if (hw->flags & CSIO_HWF_FWEVT_STOP) + evtq_stop = 1; + spin_unlock_irq(&hw->lock); + if (evtq_stop) { + CSIO_INC_STATS(hw, n_evt_drop); + goto free_evt; + } + + switch (evt_msg->type) { + case CSIO_EVT_FW: + msg = (struct cpl_fw6_msg *)(evt_msg->data); + + if ((msg->opcode == CPL_FW6_MSG || + msg->opcode == CPL_FW4_MSG) && + !msg->type) { + rv = csio_mb_fwevt_handler(hw, + msg->data); + if (!rv) + break; + /* Handle any remaining fw events */ + csio_fcoe_fwevt_handler(hw, + msg->opcode, msg->data); + } else if (msg->opcode == CPL_FW6_PLD) { + + csio_fcoe_fwevt_handler(hw, + msg->opcode, msg->data); + } else { + csio_warn(hw, + "Unhandled FW msg op %x type %x\n", + msg->opcode, msg->type); + CSIO_INC_STATS(hw, n_evt_drop); + } + break; + + case CSIO_EVT_MBX: + csio_mberr_worker(hw); + break; + + case CSIO_EVT_DEV_LOSS: + memcpy(&rn, evt_msg->data, sizeof(rn)); + csio_rnode_devloss_handler(rn); + break; + + default: + csio_warn(hw, "Unhandled event %x on evtq\n", + evt_msg->type); + CSIO_INC_STATS(hw, n_evt_unexp); + break; + } +free_evt: + csio_free_evt(hw, evt_msg); + } + + spin_lock_irq(&hw->lock); + } + hw->flags &= ~CSIO_HWF_FWEVT_PENDING; + spin_unlock_irq(&hw->lock); +} + +int +csio_fwevtq_handler(struct csio_hw *hw) +{ + int rv; + + if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) { + CSIO_INC_STATS(hw, n_int_stray); + return -EINVAL; + } + + rv = csio_wr_process_iq_idx(hw, 
hw->fwevt_iq_idx, + csio_process_fwevtq_entry, NULL); + return rv; +} + +/**************************************************************************** + * Entry points + ****************************************************************************/ + +/* Management module */ +/* + * csio_mgmt_req_lookup - Lookup the given IO req exist in Active Q. + * mgmt - mgmt module + * @io_req - io request + * + * Return - 0:if given IO Req exists in active Q. + * -EINVAL :if lookup fails. + */ +int +csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) +{ + struct list_head *tmp; + + /* Lookup ioreq in the ACTIVEQ */ + list_for_each(tmp, &mgmtm->active_q) { + if (io_req == (struct csio_ioreq *)tmp) + return 0; + } + return -EINVAL; +} + +#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */ + +/* + * csio_mgmts_tmo_handler - MGMT IO Timeout handler. + * @data - Event data. + * + * Return - none. + */ +static void +csio_mgmt_tmo_handler(struct timer_list *t) +{ + struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); + struct list_head *tmp; + struct csio_ioreq *io_req; + + csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n"); + + spin_lock_irq(&mgmtm->hw->lock); + + list_for_each(tmp, &mgmtm->active_q) { + io_req = (struct csio_ioreq *) tmp; + io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); + + if (!io_req->tmo) { + /* Dequeue the request from retry Q. */ + tmp = csio_list_prev(tmp); + list_del_init(&io_req->sm.sm_list); + if (io_req->io_cbfn) { + /* io_req will be freed by completion handler */ + io_req->wr_status = -ETIMEDOUT; + io_req->io_cbfn(mgmtm->hw, io_req); + } else { + CSIO_DB_ASSERT(0); + } + } + } + + /* If retry queue is not empty, re-arm timer */ + if (!list_empty(&mgmtm->active_q)) + mod_timer(&mgmtm->mgmt_timer, + jiffies + msecs_to_jiffies(ECM_MIN_TMO)); + spin_unlock_irq(&mgmtm->hw->lock); +} + +static void +csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) +{ + struct csio_hw *hw = mgmtm->hw; + struct csio_ioreq *io_req; + struct list_head *tmp; + uint32_t count; + + count = 30; + /* Wait for all outstanding req to complete gracefully */ + while ((!list_empty(&mgmtm->active_q)) && count--) { + spin_unlock_irq(&hw->lock); + msleep(2000); + spin_lock_irq(&hw->lock); + } + + /* release outstanding req from ACTIVEQ */ + list_for_each(tmp, &mgmtm->active_q) { + io_req = (struct csio_ioreq *) tmp; + tmp = csio_list_prev(tmp); + list_del_init(&io_req->sm.sm_list); + mgmtm->stats.n_active--; + if (io_req->io_cbfn) { + /* io_req will be freed by completion handler */ + io_req->wr_status = -ETIMEDOUT; + io_req->io_cbfn(mgmtm->hw, io_req); + } + } +} + +/* + * csio_mgmt_init - Mgmt module init entry point + * @mgmtsm - mgmt module + * @hw - HW module + * + * Initialize mgmt timer, resource wait queue, active queue, + * completion q. Allocate Egress and Ingress + * WR queues and save off the queue index returned by the WR + * module for future use. Allocate and save off mgmt reqs in the + * mgmt_req_freelist for future use. Make sure their SM is initialized + * to uninit state. + * Returns: 0 - on success + * -ENOMEM - on error. + */ +static int +csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) +{ + timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); + + INIT_LIST_HEAD(&mgmtm->active_q); + INIT_LIST_HEAD(&mgmtm->cbfn_q); + + mgmtm->hw = hw; + /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/ + + return 0; +} + +/* + * csio_mgmtm_exit - MGMT module exit entry point + * @mgmtsm - mgmt module + * + * This function called during MGMT module uninit. 
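 */

In csio_mgmt_tmo_handler() above, each timer tick ages every outstanding request with io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO). The min_t() clamp matters because tmo is unsigned: subtracting a full ECM_MIN_TMO from a smaller residue would wrap around to a huge value instead of expiring the request. A tiny standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define DEMO_MIN_TMO 1000u	/* mirrors ECM_MIN_TMO, in ms */

static uint32_t min_u32(uint32_t a, uint32_t b)	/* min_t(uint32_t, ...) */
{
	return a < b ? a : b;
}

int main(void)
{
	uint32_t tmo = 2500;

	tmo -= min_u32(tmo, DEMO_MIN_TMO);	/* 2500 -> 1500 */
	assert(tmo == 1500);
	tmo -= min_u32(tmo, DEMO_MIN_TMO);	/* 1500 -> 500 */
	assert(tmo == 500);
	tmo -= min_u32(tmo, DEMO_MIN_TMO);	/* 500 -> 0, no wrap-around */
	assert(tmo == 0);			/* expired: callback fires */
	return 0;
}

/*
 * csio_mgmtm_exit (continued):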
+ * Stop timers, free ioreqs allocated. + * Returns: None + * + */ +static void +csio_mgmtm_exit(struct csio_mgmtm *mgmtm) +{ + del_timer_sync(&mgmtm->mgmt_timer); +} + + +/** + * csio_hw_start - Kicks off the HW State machine + * @hw: Pointer to HW module. + * + * It is assumed that the initialization is a synchronous operation. + * So when we return after posting the event, the HW SM should be in + * the ready state, if there were no errors during init. + */ +int +csio_hw_start(struct csio_hw *hw) +{ + spin_lock_irq(&hw->lock); + csio_post_event(&hw->sm, CSIO_HWE_CFG); + spin_unlock_irq(&hw->lock); + + if (csio_is_hw_ready(hw)) + return 0; + else if (csio_match_state(hw, csio_hws_uninit)) + return -EINVAL; + else + return -ENODEV; +} + +int +csio_hw_stop(struct csio_hw *hw) +{ + csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE); + + if (csio_is_hw_removing(hw)) + return 0; + else + return -EINVAL; +} + +/* Max reset retries */ +#define CSIO_MAX_RESET_RETRIES 3 + +/** + * csio_hw_reset - Reset the hardware + * @hw: HW module. + * + * Caller should hold lock across this function. + */ +int +csio_hw_reset(struct csio_hw *hw) +{ + if (!csio_is_hw_master(hw)) + return -EPERM; + + if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) { + csio_dbg(hw, "Max hw reset attempts reached.."); + return -EINVAL; + } + + hw->rst_retries++; + csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET); + + if (csio_is_hw_ready(hw)) { + hw->rst_retries = 0; + hw->stats.n_reset_start = jiffies_to_msecs(jiffies); + return 0; + } else + return -EINVAL; +} + +/* + * csio_hw_get_device_id - Caches the Adapter's vendor & device id. + * @hw: HW module. + */ +static void +csio_hw_get_device_id(struct csio_hw *hw) +{ + /* Is the adapter device id cached already ?*/ + if (csio_is_dev_id_cached(hw)) + return; + + /* Get the PCI vendor & device id */ + pci_read_config_word(hw->pdev, PCI_VENDOR_ID, + &hw->params.pci.vendor_id); + pci_read_config_word(hw->pdev, PCI_DEVICE_ID, + &hw->params.pci.device_id); + + csio_dev_id_cached(hw); + hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); + +} /* csio_hw_get_device_id */ + +/* + * csio_hw_set_description - Set the model, description of the hw. + * @hw: HW module. + * @ven_id: PCI Vendor ID + * @dev_id: PCI Device ID + */ +static void +csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) +{ + uint32_t adap_type, prot_type; + + if (ven_id == CSIO_VENDOR_ID) { + prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); + adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); + + if (prot_type == CSIO_T5_FCOE_ASIC) { + memcpy(hw->hw_ver, + csio_t5_fcoe_adapters[adap_type].model_no, 16); + memcpy(hw->model_desc, + csio_t5_fcoe_adapters[adap_type].description, + 32); + } else { + char tempName[32] = "Chelsio FCoE Controller"; + memcpy(hw->model_desc, tempName, 32); + } + } +} /* csio_hw_set_description */ + +/** + * csio_hw_init - Initialize HW module. + * @hw: Pointer to HW module. + * + * Initialize the members of the HW module. 
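 */

csio_hw_init() below is a textbook kernel goto-unwind ladder: each submodule that initializes successfully adds one more label to fall through on a later failure, so an error at step N tears down exactly steps N-1..1 in reverse order and nothing else. The shape, reduced to a standalone demo (the names and the forced failure are invented):

#include <stdio.h>
#include <stdlib.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; }	/* forced failure */
static void exit_a(void) { puts("exit a"); }
static void exit_b(void) { puts("exit b"); }

static int demo_init(void)
{
	int rv;

	rv = init_a();
	if (rv)
		goto err;
	rv = init_b();
	if (rv)
		goto err_exit_a;
	rv = init_c();
	if (rv)
		goto err_exit_b;	/* unwinds b, then a */
	return 0;

err_exit_b:
	exit_b();
err_exit_a:
	exit_a();
err:
	return rv;
}

int main(void)
{
	return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

/*
 * csio_hw_init (see kernel-doc above):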
+ */ +int +csio_hw_init(struct csio_hw *hw) +{ + int rv = -EINVAL; + uint32_t i; + uint16_t ven_id, dev_id; + struct csio_evt_msg *evt_entry; + + INIT_LIST_HEAD(&hw->sm.sm_list); + csio_init_state(&hw->sm, csio_hws_uninit); + spin_lock_init(&hw->lock); + INIT_LIST_HEAD(&hw->sln_head); + + /* Get the PCI vendor & device id */ + csio_hw_get_device_id(hw); + + strcpy(hw->name, CSIO_HW_NAME); + + /* Initialize the HW chip ops T5 specific ops */ + hw->chip_ops = &t5_ops; + + /* Set the model & its description */ + + ven_id = hw->params.pci.vendor_id; + dev_id = hw->params.pci.device_id; + + csio_hw_set_description(hw, ven_id, dev_id); + + /* Initialize default log level */ + hw->params.log_level = (uint32_t) csio_dbg_level; + + csio_set_fwevt_intr_idx(hw, -1); + csio_set_nondata_intr_idx(hw, -1); + + /* Init all the modules: Mailbox, WorkRequest and Transport */ + if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer)) + goto err; + + rv = csio_wrm_init(csio_hw_to_wrm(hw), hw); + if (rv) + goto err_mbm_exit; + + rv = csio_scsim_init(csio_hw_to_scsim(hw), hw); + if (rv) + goto err_wrm_exit; + + rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw); + if (rv) + goto err_scsim_exit; + /* Pre-allocate evtq and initialize them */ + INIT_LIST_HEAD(&hw->evt_active_q); + INIT_LIST_HEAD(&hw->evt_free_q); + for (i = 0; i < csio_evtq_sz; i++) { + + evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); + if (!evt_entry) { + rv = -ENOMEM; + csio_err(hw, "Failed to initialize eventq"); + goto err_evtq_cleanup; + } + + list_add_tail(&evt_entry->list, &hw->evt_free_q); + CSIO_INC_STATS(hw, n_evt_freeq); + } + + hw->dev_num = dev_num; + dev_num++; + + return 0; + +err_evtq_cleanup: + csio_evtq_cleanup(hw); + csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); +err_scsim_exit: + csio_scsim_exit(csio_hw_to_scsim(hw)); +err_wrm_exit: + csio_wrm_exit(csio_hw_to_wrm(hw), hw); +err_mbm_exit: + csio_mbm_exit(csio_hw_to_mbm(hw)); +err: + return rv; +} + +/** + * csio_hw_exit - Un-initialize HW module. + * @hw: Pointer to HW module. + * + */ +void +csio_hw_exit(struct csio_hw *hw) +{ + csio_evtq_cleanup(hw); + csio_mgmtm_exit(csio_hw_to_mgmtm(hw)); + csio_scsim_exit(csio_hw_to_scsim(hw)); + csio_wrm_exit(csio_hw_to_wrm(hw), hw); + csio_mbm_exit(csio_hw_to_mbm(hw)); +} diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h new file mode 100644 index 000000000..e351af6e7 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw.h @@ -0,0 +1,666 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_HW_H__ +#define __CSIO_HW_H__ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/workqueue.h> +#include <linux/compiler.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/io.h> +#include <linux/spinlock_types.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport_fc.h> + +#include "t4_hw.h" +#include "csio_hw_chip.h" +#include "csio_wr.h" +#include "csio_mb.h" +#include "csio_scsi.h" +#include "csio_defs.h" +#include "t4_regs.h" +#include "t4_msg.h" + +/* + * An error value used by host. Should not clash with FW defined return values. + */ +#define FW_HOSTERROR 255 + +#define CSIO_HW_NAME "Chelsio FCoE Adapter" +#define CSIO_MAX_PFN 8 +#define CSIO_MAX_PPORTS 4 + +#define CSIO_MAX_LUN 0xFFFF +#define CSIO_MAX_QUEUE 2048 +#define CSIO_MAX_CMD_PER_LUN 32 +#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024) +#define CSIO_MAX_SECTOR_SIZE 128 +#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */ + +/* Interrupts */ +#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode + * (Forward intr iq + fw iq) */ +#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */ +#define CSIO_MAX_SCSI_CPU 128 +#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS) +#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS) + +/* Queues */ +enum { + CSIO_INTR_WRSIZE = 128, + CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE), + CSIO_FWEVT_WRSIZE = 128, + CSIO_FWEVT_IQLEN = 128, + CSIO_FWEVT_FLBUFS = 64, + CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN), + CSIO_HW_NIQ = 1, + CSIO_HW_NFLQ = 1, + CSIO_HW_NEQ = 1, + CSIO_HW_NINTXQ = 1, +}; + +struct csio_msix_entries { + void *dev_id; /* Priv object associated w/ this msix*/ + char desc[24]; /* Description of this vector */ +}; + +struct csio_scsi_qset { + int iq_idx; /* Ingress index */ + int eq_idx; /* Egress index */ + uint32_t intr_idx; /* MSIX Vector index */ +}; + +struct csio_scsi_cpu_info { + int16_t max_cpus; +}; + +extern int csio_dbg_level; +extern unsigned int csio_port_mask; +extern int csio_msi; + +#define CSIO_VENDOR_ID 0x1425 +#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 +#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF + +#define CSIO_GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | \ + EDC0_F | EDC1_F | LE_F | TP_F | MA_F | \ + PM_TX_F | PM_RX_F | ULP_RX_F | \ + CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F) + +/* + * Hard parameters used to initialize the card in the absence of a + * configuration file. 
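 */

The queue and vector budgets defined earlier in this header compose as CSIO_MAX_MSIX_VECS = CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS + CSIO_EXTRA_VECS, and the forward-interrupt queue is sized for one entry per possible vector plus one. That arithmetic can be checked at compile time; here is a standalone recomputation with the constants copied from above (demo_* names only):

#include <stdio.h>

#define DEMO_MAX_SCSI_CPU	128
#define DEMO_MAX_PPORTS		4
#define DEMO_EXTRA_VECS		2	/* non-data + FW event */
#define DEMO_INTR_WRSIZE	128

#define DEMO_MAX_SCSI_QSETS	(DEMO_MAX_SCSI_CPU * DEMO_MAX_PPORTS)
#define DEMO_MAX_MSIX_VECS	(DEMO_MAX_SCSI_QSETS + DEMO_EXTRA_VECS)
#define DEMO_INTR_IQSIZE	((DEMO_MAX_MSIX_VECS + 1) * DEMO_INTR_WRSIZE)

_Static_assert(DEMO_MAX_SCSI_QSETS == 512, "qset budget");
_Static_assert(DEMO_MAX_MSIX_VECS == 514, "vector budget");
_Static_assert(DEMO_INTR_IQSIZE == 515 * 128, "fwd intr queue bytes");

int main(void)
{
	printf("max qsets %d, max vectors %d, intr iq %d bytes\n",
	       DEMO_MAX_SCSI_QSETS, DEMO_MAX_MSIX_VECS, DEMO_INTR_IQSIZE);
	return 0;
}

/*
 * Hard parameters (continued):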
+ */
+enum {
+	/* General */
+	CSIO_SGE_DBFIFO_INT_THRESH	= 10,
+
+	CSIO_SGE_RX_DMA_OFFSET		= 2,
+
+	CSIO_SGE_FLBUF_SIZE1		= 65536,
+	CSIO_SGE_FLBUF_SIZE2		= 1536,
+	CSIO_SGE_FLBUF_SIZE3		= 9024,
+	CSIO_SGE_FLBUF_SIZE4		= 9216,
+	CSIO_SGE_FLBUF_SIZE5		= 2048,
+	CSIO_SGE_FLBUF_SIZE6		= 128,
+	CSIO_SGE_FLBUF_SIZE7		= 8192,
+	CSIO_SGE_FLBUF_SIZE8		= 16384,
+
+	CSIO_SGE_TIMER_VAL_0		= 5,
+	CSIO_SGE_TIMER_VAL_1		= 10,
+	CSIO_SGE_TIMER_VAL_2		= 20,
+	CSIO_SGE_TIMER_VAL_3		= 50,
+	CSIO_SGE_TIMER_VAL_4		= 100,
+	CSIO_SGE_TIMER_VAL_5		= 200,
+
+	CSIO_SGE_INT_CNT_VAL_0		= 1,
+	CSIO_SGE_INT_CNT_VAL_1		= 4,
+	CSIO_SGE_INT_CNT_VAL_2		= 8,
+	CSIO_SGE_INT_CNT_VAL_3		= 16,
+};
+
+/* Slowpath events */
+enum csio_evt {
+	CSIO_EVT_FW  = 0,	/* FW event */
+	CSIO_EVT_MBX,		/* MBX event */
+	CSIO_EVT_SCN,		/* State change notification */
+	CSIO_EVT_DEV_LOSS,	/* Device loss event */
+	CSIO_EVT_MAX,		/* Max supported event */
+};
+
+#define CSIO_EVT_MSG_SIZE	512
+#define CSIO_EVTQ_SIZE		512
+
+/* Event msg */
+struct csio_evt_msg {
+	struct list_head	list;	/* evt queue */
+	enum csio_evt		type;
+	uint8_t			data[CSIO_EVT_MSG_SIZE];
+};
+
+enum {
+	SERNUM_LEN	= 16,	/* Serial # length */
+	EC_LEN		= 16,	/* E/C length */
+	ID_LEN		= 16,	/* ID length */
+};
+
+enum {
+	SF_SIZE = SF_SEC_SIZE * 16,	/* serial flash size */
+};
+
+/* serial flash and firmware constants */
+enum {
+	SF_ATTEMPTS = 10,	/* max retries for SF operations */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE	= 2,	/* program page */
+	SF_WR_DISABLE	= 4,	/* disable writes */
+	SF_RD_STATUS	= 5,	/* read status register */
+	SF_WR_ENABLE	= 6,	/* enable writes */
+	SF_RD_DATA_FAST	= 0xb,	/* read flash */
+	SF_RD_ID	= 0x9f,	/* read ID */
+	SF_ERASE_SECTOR	= 0xd8,	/* erase sector */
+};
+
+/* Management module */
+enum {
+	CSIO_MGMT_EQ_WRSIZE = 512,
+	CSIO_MGMT_IQ_WRSIZE = 128,
+	CSIO_MGMT_EQLEN	= 64,
+	CSIO_MGMT_IQLEN	= 64,
+};
+
+#define CSIO_MGMT_EQSIZE	(CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
+#define CSIO_MGMT_IQSIZE	(CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
+
+/* mgmt module stats */
+struct csio_mgmtm_stats {
+	uint32_t	n_abort_req;	/* Total abort requests */
+	uint32_t	n_abort_rsp;	/* Total abort responses */
+	uint32_t	n_close_req;	/* Total close requests */
+	uint32_t	n_close_rsp;	/* Total close responses */
+	uint32_t	n_err;		/* Total errors */
+	uint32_t	n_drop;		/* Total requests dropped */
+	uint32_t	n_active;	/* Count of active_q */
+	uint32_t	n_cbfn;		/* Count of cbfn_q */
+};
+
+/* MGMT module */
+struct csio_mgmtm {
+	struct csio_hw		*hw;		/* Pointer to HW module */
+	int			eq_idx;		/* Egress queue index */
+	int			iq_idx;		/* Ingress queue index */
+	int			msi_vec;	/* MSI vector */
+	struct list_head	active_q;	/* Outstanding ELS/CT */
+	struct list_head	abort_q;	/* Outstanding abort req */
+	struct list_head	cbfn_q;		/* Completion queue */
+	struct list_head	mgmt_req_freelist; /* Free pool of reqs */
+						/* ELSCT request freelist */
+	struct timer_list	mgmt_timer;	/* MGMT timer */
+	struct csio_mgmtm_stats	stats;		/* ELS/CT stats */
+};
+
+struct csio_adap_desc {
+	char model_no[16];
+	char description[32];
+};
+
+struct pci_params {
+	uint16_t	vendor_id;
+	uint16_t	device_id;
+	int		vpd_cap_addr;
+	uint16_t	speed;
+	uint8_t		width;
+};
+
+/* User configurable hw parameters */
+struct csio_hw_params {
+	uint32_t		sf_size;		/* serial flash
+							 * size in bytes
+							 */
+	uint32_t		sf_nsec;		/* # of flash sectors */
+	struct pci_params	pci;
+	uint32_t		log_level;		/* Module-level for
+							 * debug log.
+							 */
+};
+
+struct csio_vpd {
+	uint32_t cclk;
+	uint8_t ec[EC_LEN + 1];
+	uint8_t sn[SERNUM_LEN + 1];
+	uint8_t id[ID_LEN + 1];
+};
+
+/* Firmware Port Capabilities types. */
+
+typedef u16 fw_port_cap16_t;	/* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t;	/* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+	FW_CAPS_UNKNOWN	= 0,	/* 0'ed out initial state */
+	FW_CAPS16	= 1,	/* old Firmware: 16-bit Port Capabilities */
+	FW_CAPS32	= 2,	/* new Firmware: 32-bit Port Capabilities */
+};
+
+enum cc_pause {
+	PAUSE_RX	= 1 << 0,
+	PAUSE_TX	= 1 << 1,
+	PAUSE_AUTONEG	= 1 << 2
+};
+
+enum cc_fec {
+	FEC_AUTO	= 1 << 0,	/* IEEE 802.3 "automatic" */
+	FEC_RS		= 1 << 1,	/* Reed-Solomon */
+	FEC_BASER_RS	= 1 << 2	/* BaseR/Reed-Solomon */
+};
+
+struct link_config {
+	fw_port_cap32_t	pcaps;		/* link capabilities */
+	fw_port_cap32_t	def_acaps;	/* default advertised capabilities */
+	fw_port_cap32_t	acaps;		/* advertised capabilities */
+	fw_port_cap32_t	lpacaps;	/* peer advertised capabilities */
+
+	fw_port_cap32_t	speed_caps;	/* speed(s) user has requested */
+	unsigned int	speed;		/* actual link speed (Mb/s) */
+
+	enum cc_pause	requested_fc;	/* flow control user has requested */
+	enum cc_pause	fc;		/* actual link flow control */
+
+	enum cc_fec	requested_fec;	/* Forward Error Correction: */
+	enum cc_fec	fec;		/* requested and actual in use */
+
+	unsigned char	autoneg;	/* autonegotiating? */
+
+	unsigned char	link_ok;	/* link up? */
+	unsigned char	link_down_rc;	/* link down reason */
+};
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
+
+#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+		     FW_PORT_CAP32_ANEG)
+
+/* Enable or disable autonegotiation. */
+#define AUTONEG_DISABLE		0x00
+#define AUTONEG_ENABLE		0x01
+
+struct csio_pport {
+	uint16_t	pcap;
+	uint16_t	acap;
+	uint8_t		portid;
+	uint8_t		link_status;
+	uint16_t	link_speed;
+	uint8_t		mac[6];
+	uint8_t		mod_type;
+	uint8_t		rsvd1;
+	uint8_t		rsvd2;
+	uint8_t		rsvd3;
+	struct link_config link_cfg;
+};
+
+/* fcoe resource information */
+struct csio_fcoe_res_info {
+	uint16_t	e_d_tov;
+	uint16_t	r_a_tov_seq;
+	uint16_t	r_a_tov_els;
+	uint16_t	r_r_tov;
+	uint32_t	max_xchgs;
+	uint32_t	max_ssns;
+	uint32_t	used_xchgs;
+	uint32_t	used_ssns;
+	uint32_t	max_fcfs;
+	uint32_t	max_vnps;
+	uint32_t	used_fcfs;
+	uint32_t	used_vnps;
+};
+
+/* HW State machine Events */
+enum csio_hw_ev {
+	CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
+	CSIO_HWE_INIT,		 /* Config done, start Init */
+	CSIO_HWE_INIT_DONE,	 /* Init Mailboxes sent, HW ready */
+	CSIO_HWE_FATAL,		 /* Fatal error during initialization */
+	CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detected */
+	CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
+	CSIO_HWE_PCIERR_RESUME,	 /* Resume after PCI error recovery */
+	CSIO_HWE_QUIESCED,	 /* HBA quiesced */
+	CSIO_HWE_HBA_RESET,	 /* HBA reset requested */
+	CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
+	CSIO_HWE_FW_DLOAD,	 /* FW download requested */
+	CSIO_HWE_PCI_REMOVE,	 /* PCI de-instantiation */
+	CSIO_HWE_SUSPEND,	 /* HW suspend for Online(hot) replacement */
+	CSIO_HWE_RESUME,	 /* HW resume for Online(hot) replacement */
+	CSIO_HWE_MAX,		 /* Max HW event */
+};
+
+/* hw stats */
+struct csio_hw_stats {
+	uint32_t	n_evt_activeq;	/* Number of events in active Q */
+	uint32_t	n_evt_freeq;	/* Number of events in free Q */
+	uint32_t	n_evt_drop;	/* Number of events dropped */
+	uint32_t	n_evt_unexp;	/* Number of unexpected events */
+	uint32_t
n_pcich_offline;/* Number of pci channel offline */ + uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */ + uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/ + uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/ + uint32_t n_cpl_unexp; /* Number of unexpected cpl */ + uint32_t n_mbint_unexp; /* Number of unexpected mbox */ + /* interrupt */ + uint32_t n_plint_unexp; /* Number of unexpected PL */ + /* interrupt */ + uint32_t n_plint_cnt; /* Number of PL interrupt */ + uint32_t n_int_stray; /* Number of stray interrupt */ + uint32_t n_err; /* Number of hw errors */ + uint32_t n_err_fatal; /* Number of fatal errors */ + uint32_t n_err_nomem; /* Number of memory alloc failure */ + uint32_t n_err_io; /* Number of IO failure */ + enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */ + uint64_t n_reset_start; /* Start time after the reset */ + uint32_t rsvd1; +}; + +/* Defines for hw->flags */ +#define CSIO_HWF_MASTER 0x00000001 /* This is the Master + * function for the + * card. + */ +#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt + * enable bit set? + */ +#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */ +#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been + * allocated memory. + */ +#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been + * allocated in FW. + */ +#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */ +#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device + * id cached */ +#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing + * FW events + */ +#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config + * params + */ +#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts + * enabled? + */ +#define CSIO_HWF_ROOT_NO_RELAXED_ORDERING 0x00000400 /* Is PCIe relaxed + * ordering enabled + */ + +#define csio_is_hw_intr_enabled(__hw) \ + ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED) +#define csio_is_host_intr_enabled(__hw) \ + ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED) +#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER) +#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID) +#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED) +#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID) +#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED) + +/* Defines for intr_mode */ +enum csio_intr_mode { + CSIO_IM_NONE = 0, + CSIO_IM_INTX = 1, + CSIO_IM_MSI = 2, + CSIO_IM_MSIX = 3, +}; + +/* Master HW structure: One per function */ +struct csio_hw { + struct csio_sm sm; /* State machine: should + * be the 1st member. + */ + spinlock_t lock; /* Lock for hw */ + + struct csio_scsim scsim; /* SCSI module*/ + struct csio_wrm wrm; /* Work request module*/ + struct pci_dev *pdev; /* PCI device */ + + void __iomem *regstart; /* Virtual address of + * register map + */ + /* SCSI queue sets */ + uint32_t num_sqsets; /* Number of SCSI + * queue sets */ + uint32_t num_scsi_msix_cpus; /* Number of CPUs that + * will be used + * for ingress + * processing. 
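 */

	/*
	 * Illustrative note on the table below (not original driver text):
	 * with CSIO_MAX_PPORTS = 4 and CSIO_MAX_SCSI_CPU = 128 there is
	 * one SCSI queue set per (port, cpu) pair.  The set serving CPU 3
	 * on port 1 is reached as
	 *
	 *	struct csio_scsi_qset *q = &hw->sqset[1][3];
	 *
	 * where q->eq_idx names the egress queue used for submissions,
	 * q->iq_idx the ingress queue its completions land on, and
	 * q->intr_idx the MSI-X vector that ingress queue is bound to.
	 */
	/*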
+ */ + + struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU]; + struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS]; + + uint32_t evtflag; /* Event flag */ + uint32_t flags; /* HW flags */ + + struct csio_mgmtm mgmtm; /* management module */ + struct csio_mbm mbm; /* Mailbox module */ + + /* Lnodes */ + uint32_t num_lns; /* Number of lnodes */ + struct csio_lnode *rln; /* Root lnode */ + struct list_head sln_head; /* Sibling node list + * list + */ + int intr_iq_idx; /* Forward interrupt + * queue. + */ + int fwevt_iq_idx; /* FW evt queue */ + struct work_struct evtq_work; /* Worker thread for + * HW events. + */ + struct list_head evt_free_q; /* freelist of evt + * elements + */ + struct list_head evt_active_q; /* active evt queue*/ + + /* board related info */ + char name[32]; + char hw_ver[16]; + char model_desc[32]; + char drv_version[32]; + char fwrev_str[32]; + uint32_t optrom_ver; + uint32_t fwrev; + uint32_t tp_vers; + char chip_ver; + uint16_t chip_id; /* Tells T4/T5 chip */ + enum csio_dev_state fw_state; + struct csio_vpd vpd; + + uint8_t pfn; /* Physical Function + * number + */ + uint32_t port_vec; /* Port vector */ + uint8_t num_pports; /* Number of physical + * ports. + */ + uint8_t rst_retries; /* Reset retries */ + uint8_t cur_evt; /* current s/m evt */ + uint8_t prev_evt; /* Previous s/m evt */ + uint32_t dev_num; /* device number */ + struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */ + struct csio_hw_params params; /* Hw parameters */ + + struct dma_pool *scsi_dma_pool; /* DMA pool for SCSI */ + mempool_t *mb_mempool; /* Mailbox memory pool*/ + mempool_t *rnode_mempool; /* rnode memory pool */ + + /* Interrupt */ + enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */ + uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt + * index + */ + uint32_t nondata_intr_idx; /* nondata MSIX/intr + * idx + */ + + uint8_t cfg_neq; /* FW configured no of + * egress queues + */ + uint8_t cfg_niq; /* FW configured no of + * iq queues. 
+ */ + + struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ + struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific + * Operations + */ + + /* MSIX vectors */ + struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; + + struct dentry *debugfs_root; /* Debug FS */ + struct csio_hw_stats stats; /* Hw statistics */ +}; + +/* Register access macros */ +#define csio_reg(_b, _r) ((_b) + (_r)) + +#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r))) +#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r))) + +#define csio_wr_reg8(_h, _v, _r) writeb((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg16(_h, _v, _r) writew((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg32(_h, _v, _r) writel((_v), \ + csio_reg((_h)->regstart, (_r))) +#define csio_wr_reg64(_h, _v, _r) writeq((_v), \ + csio_reg((_h)->regstart, (_r))) + +void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t); + +/* Core clocks <==> uSecs */ +static inline uint32_t +csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk; +} + +static inline uint32_t +csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us) +{ + return (us * hw->vpd.cclk) / 1000; +} + +/* Easy access macros */ +#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm)) +#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm)) +#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim)) +#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm)) + +#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number) +#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn)) +#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn)) + +#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i)) +#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx) +#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i)) +#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx) + +/* Printing/logging */ +#define CSIO_DEVID(__dev) ((__dev)->dev_num) +#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF) +#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF) + +#define csio_info(__hw, __fmt, ...) \ + dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_fatal(__hw, __fmt, ...) \ + dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_err(__hw, __fmt, ...) \ + dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#define csio_warn(__hw, __fmt, ...) \ + dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__) + +#ifdef __CSIO_DEBUG__ +#define csio_dbg(__hw, __fmt, ...) \ + csio_info((__hw), __fmt, ##__VA_ARGS__); +#else +#define csio_dbg(__hw, __fmt, ...) 
+#endif + +int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int, + int, int, uint32_t *); +void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int, + unsigned int, unsigned int); +int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); +void csio_hw_intr_disable(struct csio_hw *); +int csio_hw_slow_intr_handler(struct csio_hw *); +int csio_handle_intr_status(struct csio_hw *, unsigned int, + const struct intr_info *); + +fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); +fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); + +int csio_hw_start(struct csio_hw *); +int csio_hw_stop(struct csio_hw *); +int csio_hw_reset(struct csio_hw *); +int csio_is_hw_ready(struct csio_hw *); +int csio_is_hw_removing(struct csio_hw *); + +int csio_fwevtq_handler(struct csio_hw *); +void csio_evtq_worker(struct work_struct *); +int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t); +void csio_evtq_flush(struct csio_hw *hw); + +int csio_request_irqs(struct csio_hw *); +void csio_intr_enable(struct csio_hw *); +void csio_intr_disable(struct csio_hw *, bool); +void csio_hw_fatal_err(struct csio_hw *); + +struct csio_lnode *csio_lnode_alloc(struct csio_hw *); +int csio_config_queues(struct csio_hw *); + +int csio_hw_init(struct csio_hw *); +void csio_hw_exit(struct csio_hw *); +#endif /* ifndef __CSIO_HW_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h new file mode 100644 index 000000000..aaabdbe11 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -0,0 +1,135 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
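 */

The chip abstraction declared later in this header, struct csio_hw_chip_ops, is a classic C vtable: generation-specific behavior sits behind function pointers, and common code dispatches through hw->chip_ops without caring which part is underneath -- csio_hw.c's slow-path handler calls chip_pcie_intr_handler() exactly this way. A stripped-down model of the mechanism (all demo_* names and the flash offset are invented):

#include <stdio.h>

struct demo_hw;

struct demo_chip_ops {
	unsigned int (*flash_cfg_addr)(struct demo_hw *hw);
};

struct demo_hw {
	const struct demo_chip_ops *chip_ops;
};

static unsigned int demo_t5_flash_cfg_addr(struct demo_hw *hw)
{
	(void)hw;
	return 0x200000;	/* made-up offset for the demo */
}

static const struct demo_chip_ops demo_t5_ops = {
	.flash_cfg_addr = demo_t5_flash_cfg_addr,
};

int main(void)
{
	struct demo_hw hw = { .chip_ops = &demo_t5_ops };

	/* Indirect call, as csio_hw.c does through t5_ops. */
	printf("config file at flash offset %#x\n",
	       hw.chip_ops->flash_cfg_addr(&hw));
	return 0;
}

/*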
+ */ + +#ifndef __CSIO_HW_CHIP_H__ +#define __CSIO_HW_CHIP_H__ + +#include "csio_defs.h" + +/* Define MACRO values */ +#define CSIO_HW_T5 0x5000 +#define CSIO_T5_FCOE_ASIC 0x5600 +#define CSIO_HW_T6 0x6000 +#define CSIO_T6_FCOE_ASIC 0x6600 +#define CSIO_HW_CHIP_MASK 0xF000 + +#define T5_REGMAP_SIZE (332 * 1024) +#define FW_FNAME_T5 "cxgb4/t5fw.bin" +#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" +#define FW_FNAME_T6 "cxgb4/t6fw.bin" +#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt" + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 + +enum chip_type { + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, +}; + +static inline int csio_is_t5(uint16_t chip) +{ + return (chip == CSIO_HW_T5); +} + +static inline int csio_is_t6(uint16_t chip) +{ + return (chip == CSIO_HW_T6); +} + +/* Define MACRO DEFINITIONS */ +#define CSIO_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } + +#include "t4fw_api.h" +#include "t4fw_version.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + +/* Declare ENUMS */ +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; + +enum { + MEMWIN_APERTURE = 2048, + MEMWIN_BASE = 0x1b800, +}; + +/* Slow path handlers */ +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/* T4/T5 Chip specific ops */ +struct csio_hw; +struct csio_hw_chip_ops { + int (*chip_set_mem_win)(struct csio_hw *, uint32_t); + void (*chip_pcie_intr_handler)(struct csio_hw *); + uint32_t (*chip_flash_cfg_addr)(struct csio_hw *); + int (*chip_mc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_edc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_memory_rw)(struct csio_hw *, u32, int, u32, + u32, uint32_t *, int); + void (*chip_dfs_create_ext_mem)(struct csio_hw *); +}; + +extern struct csio_hw_chip_ops t5_ops; + +#endif /* #ifndef __CSIO_HW_CHIP_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c new file mode 100644 index 000000000..86fded97d --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_t5.c @@ -0,0 +1,369 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "csio_hw.h" +#include "csio_init.h" + +static int +csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win) +{ + u32 mem_win_base; + /* + * Truncation intentional: we only read the bottom 32-bits of the + * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to + * read BAR0 instead of using pci_resource_start() because we could be + * operating from within a Virtual Machine which is trapping our + * accesses to our Configuration Space and we need to set up the PCI-E + * Memory Window decoders with the actual addresses which will be + * coming across the PCI-E link. + */ + + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win_base = MEMWIN_BASE; + + /* + * Set up memory window for accessing adapter memory ranges. (Read + * back MA register to ensure that changes propagate before we attempt + * to use the new values.) + */ + csio_wr_reg32(hw, mem_win_base | BIR_V(0) | + WINDOW_V(ilog2(MEMWIN_APERTURE) - 10), + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + + return 0; +} + +/* + * Interrupt handler for the PCIE module. 
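 */

csio_t5_set_mem_win() above encodes the window size as WINDOW_V(ilog2(MEMWIN_APERTURE) - 10): the field stores log2 of the aperture minus 10, and csio_t5_memory_rw() later inverts it with mem_aperture = 1 << (WINDOW + 10). A standalone round-trip of that encoding (demo_ilog2() stands in for the kernel's ilog2() and only handles powers of two):

#include <assert.h>
#include <stdio.h>

#define DEMO_MEMWIN_APERTURE	2048	/* from csio_hw_chip.h */

static unsigned int demo_ilog2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int window = demo_ilog2(DEMO_MEMWIN_APERTURE) - 10;
	unsigned int aperture = 1u << (window + 10);

	assert(window == 1);			  /* the encoded field */
	assert(aperture == DEMO_MEMWIN_APERTURE); /* the decode */
	printf("WINDOW field %u <-> aperture %u bytes\n", window, aperture);
	return 0;
}

/*
 * (csio_t5_pcie_intr_handler follows)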
+ */ +static void +csio_t5_pcie_intr_handler(struct csio_hw *hw) +{ + static struct intr_info pcie_intr_info[] = { + { MSTGRPPERR_F, "Master Response Read Queue parity error", + -1, 1 }, + { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 }, + { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 }, + { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 }, + { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error", + -1, 1 }, + { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error", + -1, 1 }, + { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 }, + { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 }, + { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 }, + { DREQWRPERR_F, "PCI DMA channel write request parity error", + -1, 1 }, + { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 }, + { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR_F, "PCI FID parity error", -1, 1 }, + { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 }, + { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 }, + { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 }, + { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error", + -1, 1 }, + { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error", + -1, 1 }, + { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 }, + { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 }, + { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 }, + { READRSPERR_F, "Outbound read error", -1, 0 }, + { 0, NULL, 0, 0 } + }; + + int fat; + fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * csio_t5_flash_cfg_addr - return the address of the flash configuration file + * @hw: the HW module + * + * Return the address within the flash where the Firmware Configuration + * File is stored. + */ +static unsigned int +csio_t5_flash_cfg_addr(struct csio_hw *hw) +{ + return FLASH_CFG_START; +} + +/* + * csio_t5_mc_read - read from MC through backdoor accesses + * @hw: the hw module + * @idx: index to the register + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from MC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
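 */

Two details of csio_t5_mc_read() below are easy to miss: the BIST engine only reads whole 64-byte lines, so the requested address is rounded down with addr & ~0x3fU, and the data words are stored through htonl(), i.e. the output buffer is big-endian (__be32) regardless of host order. A quick standalone check of both (the values are arbitrary):

#include <arpa/inet.h>	/* htonl()/ntohl() */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x1234;
	uint32_t line = addr & ~0x3fU;	/* 64-byte alignment */

	assert(line == 0x1200);

	uint32_t raw = 0x11223344;
	uint32_t be = htonl(raw);	/* as stored into *data++ */

	assert(ntohl(be) == raw);	/* callers convert back */
	printf("line %#x, stored word %#x\n", line, (unsigned int)be);
	return 0;
}

/*
 * csio_t5_mc_read (documented above):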
+ */ +static int +csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, + uint64_t *ecc) +{ + int i; + uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; + uint32_t mc_bist_data_pattern_reg; + + mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD_A, idx); + mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR_A, idx); + mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN_A, idx); + mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx); + + if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST_F) + return -EBUSY; + csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg); + csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg); + csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg); + csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1), + mc_bist_cmd_reg); + i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST_F, + 0, 10, 1, NULL); + if (i) + return i; + +#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA_A, i) + + for (i = 15; i >= 0; i--) + *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); + if (ecc) + *ecc = csio_rd_reg64(hw, MC_DATA(16)); +#undef MC_DATA + return 0; +} + +/* + * csio_t5_edc_read - read from EDC through backdoor accesses + * @hw: the hw module + * @idx: which EDC to access + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from EDC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. + */ +static int +csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, + uint64_t *ecc) +{ + int i; + uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; + uint32_t edc_bist_cmd_data_pattern; + +/* + * These macro are missing in t4_regs.h file. 
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST_F)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
+ csio_wr_reg32(hw, BIST_OPCODE_V(1) | START_BIST_F | BIST_CMD_GAP_V(1),
+ edc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST_F,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA_A, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it is the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset;
+ u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
+ */
+ edc_size = EDRAM0_SIZE_G(csio_rd_reg32(hw, MA_EDRAM0_BAR_A));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_G(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR_A));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
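+ *
+ * Worked example (illustrative values only): if WINDOW_V(mem_reg) is
+ * 2, the aperture computed below is 1 << (2 + 10) = 4KB; a transfer
+ * at addr 0x12345 then starts at pos 0x12000 with an initial offset
+ * of 0x345.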
+ */ + mem_reg = csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, win)); + mem_aperture = 1 << (WINDOW_V(mem_reg) + 10); + mem_base = PCIEOFST_G(mem_reg) << 10; + + start = addr & ~(mem_aperture-1); + offset = addr - start; + win_pf = PFNUM_V(hw->pfn); + + csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", + mem_reg, mem_aperture); + csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n", + mem_base, memoffset); + csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n", + start, offset, win_pf); + csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n", + mtype, addr, len); + + for (pos = start; len > 0; pos += mem_aperture, offset = 0) { + /* + * Move PCI-E Memory Window to our current transfer + * position. Read it back to ensure that changes propagate + * before we attempt to use the new value. + */ + csio_wr_reg32(hw, pos | win_pf, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + + while (offset < mem_aperture && len > 0) { + if (dir) + *buf++ = csio_rd_reg32(hw, mem_base + offset); + else + csio_wr_reg32(hw, *buf++, mem_base + offset); + + offset += sizeof(__be32); + len -= sizeof(__be32); + } + } + return 0; +} + +/* + * csio_t5_dfs_create_ext_mem - setup debugfs for MC0 or MC1 to read the values + * @hw: the csio_hw + * + * This function creates files in the debugfs with external memory region + * MC0 & MC1. + */ +static void +csio_t5_dfs_create_ext_mem(struct csio_hw *hw) +{ + u32 size; + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + + if (i & EXT_MEM_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR_A); + csio_add_debugfs_mem(hw, "mc0", MEM_MC0, + EXT_MEM_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR_A); + csio_add_debugfs_mem(hw, "mc1", MEM_MC1, + EXT_MEM_SIZE_G(size)); + } +} + +/* T5 adapter specific function */ +struct csio_hw_chip_ops t5_ops = { + .chip_set_mem_win = csio_t5_set_mem_win, + .chip_pcie_intr_handler = csio_t5_pcie_intr_handler, + .chip_flash_cfg_addr = csio_t5_flash_cfg_addr, + .chip_mc_read = csio_t5_mc_read, + .chip_edc_read = csio_t5_edc_read, + .chip_memory_rw = csio_t5_memory_rw, + .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem, +}; diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c new file mode 100644 index 000000000..ccbded335 --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.c @@ -0,0 +1,1257 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/mm.h> +#include <linux/notifier.h> +#include <linux/kdebug.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/string.h> +#include <linux/export.h> + +#include "csio_init.h" +#include "csio_defs.h" + +#define CSIO_MIN_MEMPOOL_SZ 64 + +static struct dentry *csio_debugfs_root; + +static struct scsi_transport_template *csio_fcoe_transport; +static struct scsi_transport_template *csio_fcoe_transport_vport; + +/* + * debugfs support + */ +static ssize_t +csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct csio_hw *hw = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = hw->chip_ops->chip_mc_read(hw, 0, pos, + data, NULL); + else + ret = hw->chip_ops->chip_edc_read(hw, mem, pos, + data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations csio_mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = csio_mem_read, + .llseek = default_llseek, +}; + +void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, + unsigned int idx, unsigned int size_mb) +{ + debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root, + (void *)hw + idx, &csio_mem_debugfs_fops, + size_mb << 20); +} + +static int csio_setup_debugfs(struct csio_hw *hw) +{ + int i; + + if (IS_ERR_OR_NULL(hw->debugfs_root)) + return -1; + + i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) + csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE_F) + csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); + + hw->chip_ops->chip_dfs_create_ext_mem(hw); + return 0; +} + +/* + * csio_dfs_create - Creates and sets up per-hw debugfs. + * + */ +static int +csio_dfs_create(struct csio_hw *hw) +{ + if (csio_debugfs_root) { + hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev), + csio_debugfs_root); + csio_setup_debugfs(hw); + } + + return 0; +} + +/* + * csio_dfs_destroy - Destroys per-hw debugfs. + */ +static void +csio_dfs_destroy(struct csio_hw *hw) +{ + debugfs_remove_recursive(hw->debugfs_root); +} + +/* + * csio_dfs_init - Debug filesystem initialization for the module. + * + */ +static void +csio_dfs_init(void) +{ + csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); +} + +/* + * csio_dfs_exit - debugfs cleanup for the module. 
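+ *
+ * Removes the module-level directory created by csio_dfs_init(); the
+ * per-hw directories are removed earlier via csio_dfs_destroy().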
+ */
+static void
+csio_dfs_exit(void)
+{
+ debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+ int rv = -ENODEV;
+
+ *bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_enable_device_mem(pdev))
+ goto err;
+
+ if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+ goto err_disable_device;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rv) {
+ rv = -ENODEV;
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI uninitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+ pci_release_selected_regions(pdev, *bars);
+ pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+ INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+ cancel_work_sync(&hw->evtq_work);
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+ int i, j;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+ return 0;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+ 0, hw->pport[0].portid, false, NULL);
+ if (rv != 0) {
+ csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+ return rv;
+ }
+ }
+
+ /* FW event queue */
+ rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+ csio_get_fwevt_intr_idx(hw),
+ hw->pport[0].portid, true, NULL);
+ if (rv != 0) {
+ csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+ return rv;
+ }
+
+ /* Create mgmt queue */
+ rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+ mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+ if (rv != 0) {
+ csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+ goto err;
+ }
+
+ /* Create SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < info->max_cpus; j++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+ rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+ sqset->intr_idx, i, false, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module IQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+ sqset->iq_idx, i, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module EQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+ return 0;
+err:
+ csio_wr_destroy_queues(hw, true);
+ return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for queues and registers them with FW.
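+ *
+ * For example (illustrative numbers, assuming FW imposes no queue
+ * limit): with 2 ports and 8 online CPUs, num_sqsets is 2 * 8 = 16,
+ * so 16 ingress/egress queue pairs are allocated for SCSI in
+ * addition to the FW event and mgmt queues (plus the forward
+ * interrupt queue in non-MSIX mode).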
+ */ +int +csio_config_queues(struct csio_hw *hw) +{ + int i, j, idx, k = 0; + int rv; + struct csio_scsi_qset *sqset; + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + struct csio_scsi_qset *orig; + struct csio_scsi_cpu_info *info; + + if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED) + return csio_create_queues(hw); + + /* Calculate number of SCSI queues for MSIX we would like */ + hw->num_scsi_msix_cpus = num_online_cpus(); + hw->num_sqsets = num_online_cpus() * hw->num_pports; + + if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) { + hw->num_sqsets = CSIO_MAX_SCSI_QSETS; + hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU; + } + + /* Initialize max_cpus, may get reduced during msix allocations */ + for (i = 0; i < hw->num_pports; i++) + hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus; + + csio_dbg(hw, "nsqsets:%d scpus:%d\n", + hw->num_sqsets, hw->num_scsi_msix_cpus); + + csio_intr_enable(hw); + + if (hw->intr_mode != CSIO_IM_MSIX) { + + /* Allocate Forward interrupt iq. */ + hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE, + CSIO_INTR_WRSIZE, CSIO_INGRESS, + (void *)hw, 0, 0, NULL); + if (hw->intr_iq_idx == -1) { + csio_err(hw, + "Forward interrupt queue creation failed\n"); + goto intr_disable; + } + } + + /* Allocate the FW evt queue */ + hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE, + CSIO_FWEVT_WRSIZE, + CSIO_INGRESS, (void *)hw, + CSIO_FWEVT_FLBUFS, 0, + csio_fwevt_intx_handler); + if (hw->fwevt_iq_idx == -1) { + csio_err(hw, "FW evt queue creation failed\n"); + goto intr_disable; + } + + /* Allocate the mgmt queue */ + mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE, + CSIO_MGMT_EQ_WRSIZE, + CSIO_EGRESS, (void *)hw, 0, 0, NULL); + if (mgmtm->eq_idx == -1) { + csio_err(hw, "Failed to alloc egress queue for mgmt module\n"); + goto intr_disable; + } + + /* Use FW IQ for MGMT req completion */ + mgmtm->iq_idx = hw->fwevt_iq_idx; + + /* Allocate SCSI queues */ + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + + for (j = 0; j < hw->num_scsi_msix_cpus; j++) { + sqset = &hw->sqset[i][j]; + + if (j >= info->max_cpus) { + k = j % info->max_cpus; + orig = &hw->sqset[i][k]; + sqset->eq_idx = orig->eq_idx; + sqset->iq_idx = orig->iq_idx; + continue; + } + + idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0, + CSIO_EGRESS, (void *)hw, 0, 0, + NULL); + if (idx == -1) { + csio_err(hw, "EQ creation failed for idx:%d\n", + idx); + goto intr_disable; + } + + sqset->eq_idx = idx; + + idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE, + CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS, + (void *)hw, 0, 0, + csio_scsi_intx_handler); + if (idx == -1) { + csio_err(hw, "IQ creation failed for idx:%d\n", + idx); + goto intr_disable; + } + sqset->iq_idx = idx; + } /* for all CPUs */ + } /* For all ports */ + + hw->flags |= CSIO_HWF_Q_MEM_ALLOCED; + + rv = csio_create_queues(hw); + if (rv != 0) + goto intr_disable; + + /* + * Now request IRQs for the vectors. In the event of a failure, + * cleanup is handled internally by this function. 
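+ * With MSIX, vector 0 is the non-data (mailbox) vector, vector 1 the
+ * FW event vector, and the remaining vectors map onto the SCSI qsets
+ * (see csio_enable_msix() and csio_request_irqs() in csio_isr.c).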
+ */ + rv = csio_request_irqs(hw); + if (rv != 0) + return -EINVAL; + + return 0; + +intr_disable: + csio_intr_disable(hw, false); + + return -EINVAL; +} + +static int +csio_resource_alloc(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + int rv = -ENOMEM; + + wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ + + CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ); + + hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, + sizeof(struct csio_mb)); + if (!hw->mb_mempool) + goto err; + + hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ, + sizeof(struct csio_rnode)); + if (!hw->rnode_mempool) + goto err_free_mb_mempool; + + hw->scsi_dma_pool = dma_pool_create("csio_scsi_dma_pool", + &hw->pdev->dev, CSIO_SCSI_RSP_LEN, + 8, 0); + if (!hw->scsi_dma_pool) + goto err_free_rn_pool; + + return 0; + +err_free_rn_pool: + mempool_destroy(hw->rnode_mempool); + hw->rnode_mempool = NULL; +err_free_mb_mempool: + mempool_destroy(hw->mb_mempool); + hw->mb_mempool = NULL; +err: + return rv; +} + +static void +csio_resource_free(struct csio_hw *hw) +{ + dma_pool_destroy(hw->scsi_dma_pool); + hw->scsi_dma_pool = NULL; + mempool_destroy(hw->rnode_mempool); + hw->rnode_mempool = NULL; + mempool_destroy(hw->mb_mempool); + hw->mb_mempool = NULL; +} + +/* + * csio_hw_alloc - Allocate and initialize the HW module. + * @pdev: PCI device. + * + * Allocates HW structure, DMA, memory resources, maps BARS to + * host memory and initializes HW module. + */ +static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev) +{ + struct csio_hw *hw; + + hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL); + if (!hw) + goto err; + + hw->pdev = pdev; + strncpy(hw->drv_version, CSIO_DRV_VERSION, 32); + + /* memory pool/DMA pool allocation */ + if (csio_resource_alloc(hw)) + goto err_free_hw; + + /* Get the start address of registers from BAR 0 */ + hw->regstart = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->regstart) { + csio_err(hw, "Could not map BAR 0, regstart = %p\n", + hw->regstart); + goto err_resource_free; + } + + csio_hw_init_workers(hw); + + if (csio_hw_init(hw)) + goto err_unmap_bar; + + csio_dfs_create(hw); + + csio_dbg(hw, "hw:%p\n", hw); + + return hw; + +err_unmap_bar: + csio_hw_exit_workers(hw); + iounmap(hw->regstart); +err_resource_free: + csio_resource_free(hw); +err_free_hw: + kfree(hw); +err: + return NULL; +} + +/* + * csio_hw_free - Uninitialize and free the HW module. + * @hw: The HW module + * + * Disable interrupts, uninit the HW module, free resources, free hw. + */ +static void +csio_hw_free(struct csio_hw *hw) +{ + csio_intr_disable(hw, true); + csio_hw_exit_workers(hw); + csio_hw_exit(hw); + iounmap(hw->regstart); + csio_dfs_destroy(hw); + csio_resource_free(hw); + kfree(hw); +} + +/** + * csio_shost_init - Create and initialize the lnode module. + * @hw: The HW module. + * @dev: The device associated with this invocation. + * @probe: Called from probe context or not? + * @pln: Parent lnode if any. + * + * Allocates lnode structure via scsi_host_alloc, initializes + * shost, initializes lnode module and registers with SCSI ML + * via scsi_host_add. This function is shared between physical and + * virtual node ports. 
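+ *
+ * Example (hypothetical NPIV caller; "fc_vport" is assumed to be a
+ * vport already created by the FC transport):
+ *
+ *     struct csio_lnode *vln;
+ *
+ *     vln = csio_shost_init(hw, &fc_vport->dev, false, pln);
+ *     if (!vln)
+ *             return -ENOMEM;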
+ */ +struct csio_lnode * +csio_shost_init(struct csio_hw *hw, struct device *dev, + bool probe, struct csio_lnode *pln) +{ + struct Scsi_Host *shost = NULL; + struct csio_lnode *ln; + + csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth; + csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth; + + /* + * hw->pdev is the physical port's PCI dev structure, + * which will be different from the NPIV dev structure. + */ + if (dev == &hw->pdev->dev) + shost = scsi_host_alloc( + &csio_fcoe_shost_template, + sizeof(struct csio_lnode)); + else + shost = scsi_host_alloc( + &csio_fcoe_shost_vport_template, + sizeof(struct csio_lnode)); + + if (!shost) + goto err; + + ln = shost_priv(shost); + memset(ln, 0, sizeof(struct csio_lnode)); + + /* Link common lnode to this lnode */ + ln->dev_num = (shost->host_no << 16); + + shost->can_queue = CSIO_MAX_QUEUE; + shost->this_id = -1; + shost->unique_id = shost->host_no; + shost->max_cmd_len = 16; /* Max CDB length supported */ + shost->max_id = min_t(uint32_t, csio_fcoe_rnodes, + hw->fres_info.max_ssns); + shost->max_lun = CSIO_MAX_LUN; + if (dev == &hw->pdev->dev) + shost->transportt = csio_fcoe_transport; + else + shost->transportt = csio_fcoe_transport_vport; + + /* root lnode */ + if (!hw->rln) + hw->rln = ln; + + /* Other initialization here: Common, Transport specific */ + if (csio_lnode_init(ln, hw, pln)) + goto err_shost_put; + + if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev)) + goto err_lnode_exit; + + return ln; + +err_lnode_exit: + csio_lnode_exit(ln); +err_shost_put: + scsi_host_put(shost); +err: + return NULL; +} + +/** + * csio_shost_exit - De-instantiate the shost. + * @ln: The lnode module corresponding to the shost. + * + */ +void +csio_shost_exit(struct csio_lnode *ln) +{ + struct Scsi_Host *shost = csio_ln_to_shost(ln); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + /* Inform transport */ + fc_remove_host(shost); + + /* Inform SCSI ML */ + scsi_remove_host(shost); + + /* Flush all the events, so that any rnode removal events + * already queued are all handled, before we remove the lnode. 
+ */ + spin_lock_irq(&hw->lock); + csio_evtq_flush(hw); + spin_unlock_irq(&hw->lock); + + csio_lnode_exit(ln); + scsi_host_put(shost); +} + +struct csio_lnode * +csio_lnode_alloc(struct csio_hw *hw) +{ + return csio_shost_init(hw, &hw->pdev->dev, false, NULL); +} + +void +csio_lnodes_block_request(struct csio_hw *hw) +{ + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct csio_lnode *ln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_block_requests(shost); + + } + kfree(lnode_list); +} + +void +csio_lnodes_unblock_request(struct csio_hw *hw) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_unblock_requests(shost); + } + kfree(lnode_list); +} + +void +csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + csio_err(hw, "Failed to allocate lnodes_list"); + return; + } + + spin_lock_irq(&hw->lock); + /* Traverse sibling lnodes */ + list_for_each(cur_ln, &hw->sln_head) { + sln = (struct csio_lnode *) cur_ln; + if (sln->portid != portid) + continue; + + lnode_list[cur_cnt++] = sln; + + /* Traverse children lnodes */ + list_for_each(cur_cln, &sln->cln_head) + lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln; + } + spin_unlock_irq(&hw->lock); + + for (ii = 0; ii < cur_cnt; ii++) { + csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]); + ln = lnode_list[ii]; + shost = csio_ln_to_shost(ln); + scsi_block_requests(shost); + } + kfree(lnode_list); +} + +void +csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct Scsi_Host *shost; + struct csio_lnode *sln; + struct list_head *cur_ln, *cur_cln; + struct csio_lnode **lnode_list; + int cur_cnt = 0, ii; + + lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns), + GFP_KERNEL); + if (!lnode_list) { + 
csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+ return;
+ }
+
+ /* Get all child lnodes (NPIV ports) */
+ spin_lock_irq(&hw->lock);
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete NPIV lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ fc_vport_terminate(ln->fc_vport);
+ }
+
+ /* Delete only npiv lnodes */
+ if (npiv)
+ goto free_lnodes;
+
+ cur_cnt = 0;
+ /* Get all physical lnodes */
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete physical lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+ csio_shost_exit(lnode_list[ii]);
+ }
+
+free_lnodes:
+ kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ csio_fchost_attr_init(ln);
+
+ scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes lnode module and registers with SCSI ML
+ * via scsi_host_add.
+ * - Enables interrupts, and starts the chip by kicking off the
+ * HW state machine.
+ * - Once hardware is ready, initiates scan of the host via
+ * scsi_scan_host.
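+ *
+ * Returns 0 on success (including the case where the FW could not be
+ * started and the driver continues in debug mode), negative errno
+ * otherwise.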
+ */ +static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int rv; + int bars; + int i; + struct csio_hw *hw; + struct csio_lnode *ln; + + /* probe only T5 and T6 cards */ + if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) && + !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK))) + return -ENODEV; + + rv = csio_pci_init(pdev, &bars); + if (rv) + goto err; + + hw = csio_hw_alloc(pdev); + if (!hw) { + rv = -ENODEV; + goto err_pci_exit; + } + + if (!pcie_relaxed_ordering_enabled(pdev)) + hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING; + + pci_set_drvdata(pdev, hw); + + rv = csio_hw_start(hw); + if (rv) { + if (rv == -EINVAL) { + dev_err(&pdev->dev, + "Failed to start FW, continuing in debug mode.\n"); + return 0; + } + goto err_lnode_exit; + } + + sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(hw->fwrev), + FW_HDR_FW_VER_MINOR_G(hw->fwrev), + FW_HDR_FW_VER_MICRO_G(hw->fwrev), + FW_HDR_FW_VER_BUILD_G(hw->fwrev)); + + for (i = 0; i < hw->num_pports; i++) { + ln = csio_shost_init(hw, &pdev->dev, true, NULL); + if (!ln) { + rv = -ENODEV; + break; + } + /* Initialize portid */ + ln->portid = hw->pport[i].portid; + + spin_lock_irq(&hw->lock); + if (csio_lnode_start(ln) != 0) + rv = -ENODEV; + spin_unlock_irq(&hw->lock); + + if (rv) + break; + + csio_lnode_init_post(ln); + } + + if (rv) + goto err_lnode_exit; + + return 0; + +err_lnode_exit: + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); +err_pci_exit: + csio_pci_exit(pdev, &bars); +err: + dev_err(&pdev->dev, "probe of device failed: %d\n", rv); + return rv; +} + +/* + * csio_remove_one - Remove one instance of the driver at this PCI function. + * @pdev: PCI device + * + * Used during hotplug operation. + */ +static void csio_remove_one(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + int bars = pci_select_bars(pdev, IORESOURCE_MEM); + + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + + /* Stops lnode, Rnode s/m + * Quiesce IOs. + * All sessions with remote ports are unregistered. + */ + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); + csio_pci_exit(pdev, &bars); +} + +/* + * csio_pci_error_detected - PCI error was detected + * @pdev: PCI device + * + */ +static pci_ers_result_t +csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + + /* Post PCI error detected evt to HW s/m + * HW s/m handles this evt by quiescing IOs, unregisters rports + * and finally takes the device to offline. + */ + csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_intr_disable(hw, true); + pci_disable_device(pdev); + return state == pci_channel_io_perm_failure ? + PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +/* + * csio_pci_slot_reset - PCI slot has been reset. 
+ * @pdev: PCI device + * + */ +static pci_ers_result_t +csio_pci_slot_reset(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + int ready; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "cannot re-enable device in slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + /* Bring HW s/m to ready state. + * but don't resume IOs. + */ + spin_lock_irq(&hw->lock); + csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET); + ready = csio_is_hw_ready(hw); + spin_unlock_irq(&hw->lock); + + if (ready) { + return PCI_ERS_RESULT_RECOVERED; + } else { + dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +/* + * csio_pci_resume - Resume normal operations + * @pdev: PCI device + * + */ +static void +csio_pci_resume(struct pci_dev *pdev) +{ + struct csio_hw *hw = pci_get_drvdata(pdev); + struct csio_lnode *ln; + int rv = 0; + int i; + + /* Bring the LINK UP and Resume IO */ + + for (i = 0; i < hw->num_pports; i++) { + ln = csio_shost_init(hw, &pdev->dev, true, NULL); + if (!ln) { + rv = -ENODEV; + break; + } + /* Initialize portid */ + ln->portid = hw->pport[i].portid; + + spin_lock_irq(&hw->lock); + if (csio_lnode_start(ln) != 0) + rv = -ENODEV; + spin_unlock_irq(&hw->lock); + + if (rv) + break; + + csio_lnode_init_post(ln); + } + + if (rv) + goto err_resume_exit; + + return; + +err_resume_exit: + csio_lnodes_block_request(hw); + spin_lock_irq(&hw->lock); + csio_hw_stop(hw); + spin_unlock_irq(&hw->lock); + csio_lnodes_unblock_request(hw); + csio_lnodes_exit(hw, 0); + csio_hw_free(hw); + dev_err(&pdev->dev, "resume of device failed: %d\n", rv); +} + +static struct pci_error_handlers csio_err_handler = { + .error_detected = csio_pci_error_detected, + .slot_reset = csio_pci_slot_reset, + .resume = csio_pci_resume, +}; + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct pci_device_id csio_pci_tbl[] = { +/* Define for FCoE uses PF6 */ +#define CH_PCI_DEVICE_ID_FUNCTION 0x6 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "t4_pci_id_tbl.h" + +static struct pci_driver csio_pci_driver = { + .name = KBUILD_MODNAME, + .driver = { + .owner = THIS_MODULE, + }, + .id_table = csio_pci_tbl, + .probe = csio_probe_one, + .remove = csio_remove_one, + .err_handler = &csio_err_handler, +}; + +/* + * csio_init - Chelsio storage driver initialization function. + * + */ +static int __init +csio_init(void) +{ + int rv = -ENOMEM; + + pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION); + + csio_dfs_init(); + + csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs); + if (!csio_fcoe_transport) + goto err; + + csio_fcoe_transport_vport = + fc_attach_transport(&csio_fc_transport_vport_funcs); + if (!csio_fcoe_transport_vport) + goto err_vport; + + rv = pci_register_driver(&csio_pci_driver); + if (rv) + goto err_pci; + + return 0; + +err_pci: + fc_release_transport(csio_fcoe_transport_vport); +err_vport: + fc_release_transport(csio_fcoe_transport); +err: + csio_dfs_exit(); + return rv; +} + +/* + * csio_exit - Chelsio storage driver uninitialization . + * + * Function that gets called in the unload path. 
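+ *
+ * Teardown is the mirror image of csio_init(): the PCI driver is
+ * unregistered first, then debugfs and the two FC transport
+ * templates are released.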
+ */ +static void __exit +csio_exit(void) +{ + pci_unregister_driver(&csio_pci_driver); + csio_dfs_exit(); + fc_release_transport(csio_fcoe_transport_vport); + fc_release_transport(csio_fcoe_transport); +} + +module_init(csio_init); +module_exit(csio_exit); +MODULE_AUTHOR(CSIO_DRV_AUTHOR); +MODULE_DESCRIPTION(CSIO_DRV_DESC); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DEVICE_TABLE(pci, csio_pci_tbl); +MODULE_VERSION(CSIO_DRV_VERSION); +MODULE_FIRMWARE(FW_FNAME_T5); +MODULE_FIRMWARE(FW_FNAME_T6); +MODULE_SOFTDEP("pre: cxgb4"); diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h new file mode 100644 index 000000000..202442543 --- /dev/null +++ b/drivers/scsi/csiostor/csio_init.h @@ -0,0 +1,136 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_INIT_H__ +#define __CSIO_INIT_H__ + +#include <linux/pci.h> +#include <linux/if_ether.h> +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_fc.h> + +#include "csio_scsi.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_hw.h" + +#define CSIO_DRV_AUTHOR "Chelsio Communications" +#define CSIO_DRV_DESC "Chelsio FCoE driver" +#define CSIO_DRV_VERSION "1.0.0-ko" + +extern struct fc_function_template csio_fc_transport_funcs; +extern struct fc_function_template csio_fc_transport_vport_funcs; + +void csio_fchost_attr_init(struct csio_lnode *); + +/* INTx handlers */ +void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +/* Common os lnode APIs */ +void csio_lnodes_block_request(struct csio_hw *); +void csio_lnodes_unblock_request(struct csio_hw *); +void csio_lnodes_block_by_port(struct csio_hw *, uint8_t); +void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t); + +struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool, + struct csio_lnode *); +void csio_shost_exit(struct csio_lnode *); +void csio_lnodes_exit(struct csio_hw *, bool); + +/* DebugFS helper routines */ +void csio_add_debugfs_mem(struct csio_hw *, const char *, + unsigned int, unsigned int); + +static inline struct Scsi_Host * +csio_ln_to_shost(struct csio_lnode *ln) +{ + return container_of((void *)ln, struct Scsi_Host, hostdata[0]); +} + +/* SCSI -- locking version of get/put ioreqs */ +static inline struct csio_ioreq * +csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim) +{ + struct csio_ioreq *ioreq; + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + ioreq = csio_get_scsi_ioreq(scsim); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); + + return ioreq; +} + +static inline void +csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct csio_ioreq *ioreq) +{ + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + csio_put_scsi_ioreq(scsim, ioreq); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); +} + +/* Called in interrupt context */ +static inline void +csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct list_head *reqlist, int n) +{ + unsigned long flags; + + spin_lock_irqsave(&scsim->freelist_lock, flags); + csio_put_scsi_ioreq_list(scsim, reqlist, n); + spin_unlock_irqrestore(&scsim->freelist_lock, flags); +} + +/* Called in interrupt context */ +static inline void +csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim, + struct list_head *reqlist, int n) +{ + unsigned long flags; + + spin_lock_irqsave(&hw->lock, flags); + csio_put_scsi_ddp_list(scsim, reqlist, n); + spin_unlock_irqrestore(&hw->lock, flags); +} + +#endif /* ifndef __CSIO_INIT_H__ */ diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c new file mode 100644 index 000000000..b2540402f --- /dev/null +++ b/drivers/scsi/csiostor/csio_isr.c @@ -0,0 +1,610 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+
+#include "csio_init.h"
+#include "csio_hw.h"
+
+static irqreturn_t
+csio_nondata_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ int rv;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_hw_slow_intr_handler(hw);
+ rv = csio_mb_isr_handler(hw);
+
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_handler - Common FW event handler routine.
+ * @hw: HW module.
+ *
+ * This is the ISR for FW events. It is shared b/w MSIX
+ * and INTx handlers.
+ */
+static void
+csio_fwevt_handler(struct csio_hw *hw)
+{
+ int rv;
+ unsigned long flags;
+
+ rv = csio_fwevtq_handler(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+} /* csio_fwevt_handler */
+
+/*
+ * csio_fwevt_isr() - FW events MSIX ISR
+ * @irq: Vector number.
+ * @dev_id: HW module.
+ *
+ * Process WRs on the FW event queue.
+ *
+ */
+static irqreturn_t
+csio_fwevt_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_fwevt_handler(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
+ * @hw: HW module.
+ * @wr: The WR.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @priv: Private data.
+ */
+void
+csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ csio_fwevt_handler(hw);
+} /* csio_fwevt_intx_handler */
+
+/*
+ * csio_process_scsi_cmpl - Process a SCSI WR completion.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ *
+ */
+static void
+csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *cbfn_q)
+{
+ struct csio_ioreq *ioreq;
+ uint8_t *scsiwr;
+ uint8_t subop;
+ void *cmnd;
+ unsigned long flags;
+
+ ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
+ if (likely(ioreq)) {
+ if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
+ subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
+ ((struct fw_scsi_abrt_cls_wr *)
+ scsiwr)->sub_opcode_to_chk_all_io);
+
+ csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
+ subop ? "Close" : "Abort",
+ ioreq, ioreq->wr_status);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (subop)
+ csio_scsi_closed(ioreq,
+ (struct list_head *)cbfn_q);
+ else
+ csio_scsi_aborted(ioreq,
+ (struct list_head *)cbfn_q);
+ /*
+ * We call scsi_done for I/Os whose aborts the driver
+ * thinks have timed out. If there is a race caused by
+ * FW completing abort at the exact same time that the
+ * driver has detected the abort timeout, the following
+ * check prevents calling of scsi_done twice for the
+ * same command: once from the eh_abort_handler, another
+ * from csio_scsi_isr_handler(). This also avoids the
+ * need to check if csio_scsi_cmnd(req) is NULL in the
+ * fast path.
+ */
+ cmnd = csio_scsi_cmnd(ioreq);
+ if (unlikely(cmnd == NULL))
+ list_del_init(&ioreq->sm.sm_list);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (unlikely(cmnd == NULL))
+ csio_put_scsi_ioreq_lock(hw,
+ csio_hw_to_scsim(hw), ioreq);
+ } else {
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ }
+}
+
+/*
+ * csio_scsi_isr_handler() - Common SCSI ISR handler.
+ * @iq: Ingress queue pointer.
+ *
+ * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
+ * by calling csio_wr_process_iq_idx. If there are completions on the
+ * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
+ * Once done, add these completions onto the freelist.
+ * This routine is shared b/w MSIX and INTx.
+ */
+static inline irqreturn_t
+csio_scsi_isr_handler(struct csio_q *iq)
+{
+ struct csio_hw *hw = (struct csio_hw *)iq->owner;
+ LIST_HEAD(cbfn_q);
+ struct list_head *tmp;
+ struct csio_scsim *scm;
+ struct csio_ioreq *ioreq;
+ int isr_completions = 0;
+
+ scm = csio_hw_to_scsim(hw);
+
+ if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
+ &cbfn_q) != 0))
+ return IRQ_NONE;
+
+ /* Call back the completion routines */
+ list_for_each(tmp, &cbfn_q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ isr_completions++;
+ ioreq->io_cbfn(hw, ioreq);
+ /* Release ddp buffer if used for this req */
+ if (unlikely(ioreq->dcopy))
+ csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
+ ioreq->nsge);
+ }
+
+ if (isr_completions) {
+ /* Return the ioreqs back to ioreq->freelist */
+ csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
+ isr_completions);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_isr() - SCSI MSIX handler
+ * @irq: Vector number.
+ * @dev_id: Ingress queue pointer.
+ *
+ * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
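+ *
+ * The @dev_id cookie registered for this handler is the ingress
+ * queue itself (see csio_request_irqs()), so each MSIX vector
+ * services exactly one SCSI IQ.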
+ */ +static irqreturn_t +csio_scsi_isr(int irq, void *dev_id) +{ + struct csio_q *iq = (struct csio_q *) dev_id; + struct csio_hw *hw; + + if (unlikely(!iq)) + return IRQ_NONE; + + hw = (struct csio_hw *)iq->owner; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + csio_scsi_isr_handler(iq); + + return IRQ_HANDLED; +} + +/* + * csio_scsi_intx_handler() - SCSI INTx handler + * @irq: + * @dev_id: + * + * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler() + * for handling SCSI completions. + */ +void +csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv) +{ + struct csio_q *iq = priv; + + csio_scsi_isr_handler(iq); + +} /* csio_scsi_intx_handler */ + +/* + * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE. + * @irq: + * @dev_id: + * + * + */ +static irqreturn_t +csio_fcoe_isr(int irq, void *dev_id) +{ + struct csio_hw *hw = (struct csio_hw *) dev_id; + struct csio_q *intx_q = NULL; + int rv; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + if (unlikely(!hw)) + return IRQ_NONE; + + if (unlikely(pci_channel_offline(hw->pdev))) { + CSIO_INC_STATS(hw, n_pcich_offline); + return IRQ_NONE; + } + + /* Disable the interrupt for this PCI function. */ + if (hw->intr_mode == CSIO_IM_INTX) + csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A)); + + /* + * The read in the following function will flush the + * above write. + */ + if (csio_hw_slow_intr_handler(hw)) + ret = IRQ_HANDLED; + + /* Get the INTx Forward interrupt IQ. */ + intx_q = csio_get_q(hw, hw->intr_iq_idx); + + CSIO_DB_ASSERT(intx_q); + + /* IQ handler is not possible for intx_q, hence pass in NULL */ + if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0)) + ret = IRQ_HANDLED; + + spin_lock_irqsave(&hw->lock, flags); + rv = csio_mb_isr_handler(hw); + if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { + hw->flags |= CSIO_HWF_FWEVT_PENDING; + spin_unlock_irqrestore(&hw->lock, flags); + schedule_work(&hw->evtq_work); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&hw->lock, flags); + + return ret; +} + +static void +csio_add_msix_desc(struct csio_hw *hw) +{ + int i; + struct csio_msix_entries *entryp = &hw->msix_entries[0]; + int k = CSIO_EXTRA_VECS; + int len = sizeof(entryp->desc) - 1; + int cnt = hw->num_sqsets + k; + + /* Non-data vector */ + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); + + entryp++; + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); + entryp++; + + /* Name SCSI vecs */ + for (i = k; i < cnt; i++, entryp++) { + memset(entryp->desc, 0, len + 1); + snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d", + CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), + CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS); + } +} + +int +csio_request_irqs(struct csio_hw *hw) +{ + int rv, i, j, k = 0; + struct csio_msix_entries *entryp = &hw->msix_entries[0]; + struct csio_scsi_cpu_info *info; + struct pci_dev *pdev = hw->pdev; + + if (hw->intr_mode != CSIO_IM_MSIX) { + rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr, + hw->intr_mode == CSIO_IM_MSI ? 
0 : IRQF_SHARED, + KBUILD_MODNAME, hw); + if (rv) { + csio_err(hw, "Failed to allocate interrupt line.\n"); + goto out_free_irqs; + } + + goto out; + } + + /* Add the MSIX vector descriptions */ + csio_add_msix_desc(hw); + + rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0, + entryp[k].desc, hw); + if (rv) { + csio_err(hw, "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k++].dev_id = hw; + + rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0, + entryp[k].desc, hw); + if (rv) { + csio_err(hw, "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k++].dev_id = (void *)hw; + + /* Allocate IRQs for SCSI */ + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + for (j = 0; j < info->max_cpus; j++, k++) { + struct csio_scsi_qset *sqset = &hw->sqset[i][j]; + struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; + + rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0, + entryp[k].desc, q); + if (rv) { + csio_err(hw, + "IRQ request failed for vec %d err:%d\n", + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; + } + + entryp[k].dev_id = q; + + } /* for all scsi cpus */ + } /* for all ports */ + +out: + hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; + return 0; + +out_free_irqs: + for (i = 0; i < k; i++) + free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id); + pci_free_irq_vectors(hw->pdev); + return -EINVAL; +} + +/* Reduce per-port max possible CPUs */ +static void +csio_reduce_sqsets(struct csio_hw *hw, int cnt) +{ + int i; + struct csio_scsi_cpu_info *info; + + while (cnt < hw->num_sqsets) { + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + if (info->max_cpus > 1) { + info->max_cpus--; + hw->num_sqsets--; + if (hw->num_sqsets <= cnt) + break; + } + } + } + + csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets); +} + +static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs) +{ + struct csio_hw *hw = affd->priv; + u8 i; + + if (!nvecs) + return; + + if (nvecs < hw->num_pports) { + affd->nr_sets = 1; + affd->set_size[0] = nvecs; + return; + } + + affd->nr_sets = hw->num_pports; + for (i = 0; i < hw->num_pports; i++) + affd->set_size[i] = nvecs / hw->num_pports; +} + +static int +csio_enable_msix(struct csio_hw *hw) +{ + int i, j, k, n, min, cnt; + int extra = CSIO_EXTRA_VECS; + struct csio_scsi_cpu_info *info; + struct irq_affinity desc = { + .pre_vectors = CSIO_EXTRA_VECS, + .calc_sets = csio_calc_sets, + .priv = hw, + }; + + if (hw->num_pports > IRQ_AFFINITY_MAX_SETS) + return -ENOSPC; + + min = hw->num_pports + extra; + cnt = hw->num_sqsets + extra; + + /* Max vectors required based on #niqs configured in fw */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) + cnt = min_t(uint8_t, hw->cfg_niq, cnt); + + csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); + + cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); + if (cnt < 0) + return cnt; + + if (cnt < (hw->num_sqsets + extra)) { + csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); + csio_reduce_sqsets(hw, cnt - extra); + } + + /* Distribute vectors */ + k = 0; + csio_set_nondata_intr_idx(hw, k); + csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++); + csio_set_fwevt_intr_idx(hw, k++); + + for (i = 0; i < hw->num_pports; i++) { + info = &hw->scsi_cpu_info[i]; + + for (j = 0; j < hw->num_scsi_msix_cpus; j++) { + n = (j % info->max_cpus) + k; + 
hw->sqset[i][j].intr_idx = n; + } + + k += info->max_cpus; + } + + return 0; +} + +void +csio_intr_enable(struct csio_hw *hw) +{ + hw->intr_mode = CSIO_IM_NONE; + hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; + + /* Try MSIX, then MSI or fall back to INTx */ + if ((csio_msi == 2) && !csio_enable_msix(hw)) + hw->intr_mode = CSIO_IM_MSIX; + else { + /* Max iqs required based on #niqs configured in fw */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || + !csio_is_hw_master(hw)) { + int extra = CSIO_EXTRA_MSI_IQS; + + if (hw->cfg_niq < (hw->num_sqsets + extra)) { + csio_dbg(hw, "Reducing sqsets to %d\n", + hw->cfg_niq - extra); + csio_reduce_sqsets(hw, hw->cfg_niq - extra); + } + } + + if ((csio_msi == 1) && !pci_enable_msi(hw->pdev)) + hw->intr_mode = CSIO_IM_MSI; + else + hw->intr_mode = CSIO_IM_INTX; + } + + csio_dbg(hw, "Using %s interrupt mode.\n", + (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" : + ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx")); +} + +void +csio_intr_disable(struct csio_hw *hw, bool free) +{ + csio_hw_intr_disable(hw); + + if (free) { + int i; + + switch (hw->intr_mode) { + case CSIO_IM_MSIX: + for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) { + free_irq(pci_irq_vector(hw->pdev, i), + hw->msix_entries[i].dev_id); + } + break; + case CSIO_IM_MSI: + case CSIO_IM_INTX: + free_irq(pci_irq_vector(hw->pdev, 0), hw); + break; + default: + break; + } + } + + pci_free_irq_vectors(hw->pdev); + hw->intr_mode = CSIO_IM_NONE; + hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; +} diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c new file mode 100644 index 000000000..d5ac93897 --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -0,0 +1,2152 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/utsname.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport_fc.h> +#include <asm/unaligned.h> +#include <scsi/fc/fc_els.h> +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_gs.h> +#include <scsi/fc/fc_ms.h> + +#include "csio_hw.h" +#include "csio_mb.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +int csio_fcoe_rnodes = 1024; +int csio_fdmi_enable = 1; + +#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) + +/* Lnode SM declarations */ +static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); +static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); + +static int csio_ln_mgmt_submit_req(struct csio_ioreq *, + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), + enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); + +/* LN event mapping */ +static enum csio_ln_ev fwevt_to_lnevt[] = { + CSIO_LNE_NONE, /* None */ + CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ + CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ + CSIO_LNE_NONE, /* PLOGI_RCVD */ + CSIO_LNE_NONE, /* PLOGO_RCVD */ + CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ + CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ + CSIO_LNE_NONE, /* PRLI_RCVD */ + CSIO_LNE_NONE, /* PRLO_RCVD */ + CSIO_LNE_NONE, /* NPORT_ID_CHGD */ + CSIO_LNE_LOGO, /* FLOGO_RCVD */ + CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ + CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ + CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ + CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ + CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ + CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ + CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ + CSIO_LNE_NONE, /* PRLI_TMO */ + CSIO_LNE_NONE, /* ADISC_TMO */ + CSIO_LNE_NONE, /* RSCN_DEV_LOST */ + CSIO_LNE_NONE, /* SCR_ACC_RCVD */ + CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ + CSIO_LNE_NONE, /* LOGO_SNT */ + CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ +}; + +#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ + CSIO_LNE_NONE : \ + fwevt_to_lnevt[_evt]) + +#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) +#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) +#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) +#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) + +/* + * csio_ln_match_by_portid - lookup lnode using given portid. + * @hw: HW module + * @portid: port-id. + * + * If found, returns lnode matching given portid otherwise returns NULL. + */ +static struct csio_lnode * +csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) +{ + struct csio_lnode *ln; + struct list_head *tmp; + + /* Match siblings lnode with portid */ + list_for_each(tmp, &hw->sln_head) { + ln = (struct csio_lnode *) tmp; + if (ln->portid == portid) + return ln; + } + + return NULL; +} + +/* + * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. + * @hw - HW module + * @vnpi - vnp index. + * Returns - If found, returns lnode matching given vnp id + * otherwise returns NULL. 
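+ *
+ * Both the sibling (physical) lnodes on hw->sln_head and their children
+ * (NPIV) lnodes are searched, since a vnp id may belong to either.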
+ */ +static struct csio_lnode * +csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) +{ + struct list_head *tmp1, *tmp2; + struct csio_lnode *sln = NULL, *cln = NULL; + + if (list_empty(&hw->sln_head)) { + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; + } + /* Traverse sibling lnodes */ + list_for_each(tmp1, &hw->sln_head) { + sln = (struct csio_lnode *) tmp1; + + /* Match sibling lnode */ + if (sln->vnp_flowid == vnp_id) + return sln; + + if (list_empty(&sln->cln_head)) + continue; + + /* Traverse children lnodes */ + list_for_each(tmp2, &sln->cln_head) { + cln = (struct csio_lnode *) tmp2; + + if (cln->vnp_flowid == vnp_id) + return cln; + } + } + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; +} + +/** + * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. + * @hw: HW module. + * @wwpn: WWPN. + * + * If found, returns lnode matching given wwpn, returns NULL otherwise. + */ +struct csio_lnode * +csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) +{ + struct list_head *tmp1, *tmp2; + struct csio_lnode *sln = NULL, *cln = NULL; + + if (list_empty(&hw->sln_head)) { + CSIO_INC_STATS(hw, n_lnlkup_miss); + return NULL; + } + /* Traverse sibling lnodes */ + list_for_each(tmp1, &hw->sln_head) { + sln = (struct csio_lnode *) tmp1; + + /* Match sibling lnode */ + if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) + return sln; + + if (list_empty(&sln->cln_head)) + continue; + + /* Traverse children lnodes */ + list_for_each(tmp2, &sln->cln_head) { + cln = (struct csio_lnode *) tmp2; + + if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) + return cln; + } + } + return NULL; +} + +/* FDMI */ +static void +csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) +{ + struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; + cmd->ct_rev = FC_CT_REV; + cmd->ct_fs_type = type; + cmd->ct_fs_subtype = sub_type; + cmd->ct_cmd = htons(op); +} + +static int +csio_hostname(uint8_t *buf, size_t buf_len) +{ + if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) + return 0; + return -1; +} + +static int +csio_osname(uint8_t *buf, size_t buf_len) +{ + if (snprintf(buf, buf_len, "%s %s %s", + init_utsname()->sysname, + init_utsname()->release, + init_utsname()->version) > 0) + return 0; + + return -1; +} + +static inline void +csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len) +{ + uint16_t len; + struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; + + if (WARN_ON(val_len > U16_MAX)) + return; + + len = val_len; + + ae->type = htons(type); + len += 4; /* includes attribute type and length */ + len = (len + 3) & ~3; /* should be multiple of 4 bytes */ + ae->len = htons(len); + memcpy(ae->value, val, val_len); + if (len > val_len) + memset(ae->value + val_len, 0, len - val_len); + *ptr += len; +} + +/* + * csio_ln_fdmi_done - FDMI registeration completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void +csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) +{ + void *cmd; + struct csio_lnode *ln = fdmi_req->lnode; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } +} + +/* + * csio_ln_fdmi_rhba_cbfn - RHBA completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void 
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) +{ + void *cmd; + uint8_t *pld; + uint32_t len = 0; + __be32 val; + __be16 mfs; + uint32_t numattrs = 0; + struct csio_lnode *ln = fdmi_req->lnode; + struct fs_fdmi_attrs *attrib_blk; + struct fc_fdmi_port_name *port_name; + uint8_t buf[64]; + uint8_t *fc4_type; + unsigned long flags; + + if (fdmi_req->wr_status != FW_SUCCESS) { + csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", + fdmi_req->wr_status); + CSIO_INC_STATS(ln, n_fdmi_err); + } + + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } + + if (!csio_is_rnode_ready(fdmi_req->rnode)) { + CSIO_INC_STATS(ln, n_fdmi_err); + return; + } + + /* Prepare CT hdr for RPA cmd */ + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA); + + /* Prepare RPA payload */ + pld = (uint8_t *)csio_ct_get_pld(cmd); + port_name = (struct fc_fdmi_port_name *)pld; + memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); + pld += sizeof(*port_name); + + /* Start appending Port attributes */ + attrib_blk = (struct fs_fdmi_attrs *)pld; + attrib_blk->numattrs = 0; + len += sizeof(attrib_blk->numattrs); + pld += sizeof(attrib_blk->numattrs); + + fc4_type = &buf[0]; + memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + fc4_type[2] = 1; + fc4_type[7] = 1; + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, + fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); + numattrs++; + val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, + &val, + FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); + numattrs++; + + if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) + val = htonl(FC_PORTSPEED_1GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) + val = htonl(FC_PORTSPEED_10GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G) + val = htonl(FC_PORTSPEED_25GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G) + val = htonl(FC_PORTSPEED_40GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G) + val = htonl(FC_PORTSPEED_50GBIT); + else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G) + val = htonl(FC_PORTSPEED_100GBIT); + else + val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, + &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); + numattrs++; + + mfs = ln->ln_sparm.csp.sp_bb_data; + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, + &mfs, sizeof(mfs)); + numattrs++; + + strcpy(buf, "csiostor"); + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, + strlen(buf)); + numattrs++; + + if (!csio_hostname(buf, sizeof(buf))) { + csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, + buf, strlen(buf)); + numattrs++; + } + attrib_blk->numattrs = htonl(numattrs); + len = (uint32_t)(pld - (uint8_t *)cmd); + + /* Submit FDMI RPA request */ + spin_lock_irqsave(&hw->lock, flags); + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/* + * csio_ln_fdmi_dprt_cbfn - DPRT completion + * @hw: HW context + * @fdmi_req: fdmi request + */ +static void +csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) 
+{
+	void *cmd;
+	uint8_t *pld;
+	uint32_t len = 0;
+	uint32_t numattrs = 0;
+	__be32 maxpayload = htonl(65536);
+	struct fc_fdmi_hba_identifier *hbaid;
+	struct csio_lnode *ln = fdmi_req->lnode;
+	struct fc_fdmi_rpl *reg_pl;
+	struct fs_fdmi_attrs *attrib_blk;
+	uint8_t buf[64];
+	unsigned long flags;
+
+	if (fdmi_req->wr_status != FW_SUCCESS) {
+		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
+			    fdmi_req->wr_status);
+		CSIO_INC_STATS(ln, n_fdmi_err);
+	}
+
+	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+		CSIO_INC_STATS(ln, n_fdmi_err);
+		return;
+	}
+	cmd = fdmi_req->dma_buf.vaddr;
+	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
+			    csio_ct_reason(cmd), csio_ct_expl(cmd));
+	}
+
+	/* Prepare CT hdr for RHBA cmd */
+	memset(cmd, 0, FC_CT_HDR_LEN);
+	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
+	len = FC_CT_HDR_LEN;
+
+	/* Prepare RHBA payload */
+	pld = (uint8_t *)csio_ct_get_pld(cmd);
+	hbaid = (struct fc_fdmi_hba_identifier *)pld;
+	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
+	pld += sizeof(*hbaid);
+
+	/* Register one port per hba */
+	reg_pl = (struct fc_fdmi_rpl *)pld;
+	reg_pl->numport = htonl(1);
+	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
+	pld += sizeof(*reg_pl);
+
+	/* Start appending HBA attributes */
+	attrib_blk = (struct fs_fdmi_attrs *)pld;
+	attrib_blk->numattrs = 0;
+	len += sizeof(attrib_blk->numattrs);
+	pld += sizeof(attrib_blk->numattrs);
+
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
+			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
+	numattrs++;
+
+	memset(buf, 0, sizeof(buf));
+
+	strcpy(buf, "Chelsio Communications");
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
+			   strlen(buf));
+	numattrs++;
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
+			   hw->vpd.sn, sizeof(hw->vpd.sn));
+	numattrs++;
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
+			   sizeof(hw->vpd.id));
+	numattrs++;
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+			   hw->model_desc, strlen(hw->model_desc));
+	numattrs++;
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+			   hw->hw_ver, sizeof(hw->hw_ver));
+	numattrs++;
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+			   hw->fwrev_str, strlen(hw->fwrev_str));
+	numattrs++;
+
+	if (!csio_osname(buf, sizeof(buf))) {
+		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+				   buf, strlen(buf));
+		numattrs++;
+	}
+
+	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+			   &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+	len = (uint32_t)(pld - (uint8_t *)cmd);
+	numattrs++;
+	attrib_blk->numattrs = htonl(numattrs);
+
+	/* Submit FDMI RHBA request */
+	spin_lock_irqsave(&hw->lock, flags);
+	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
+				    FCOE_CT, &fdmi_req->dma_buf, len)) {
+		CSIO_INC_STATS(ln, n_fdmi_err);
+		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
+	}
+	spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+/*
+ * csio_ln_fdmi_dhba_cbfn - DHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+	struct csio_lnode *ln = fdmi_req->lnode;
+	void *cmd;
+	struct fc_fdmi_port_name *port_name;
+	uint32_t len;
+	unsigned long flags;
+
+	if (fdmi_req->wr_status != FW_SUCCESS) {
+		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
+			    fdmi_req->wr_status);
+		CSIO_INC_STATS(ln, n_fdmi_err);
+	}
+
+	if
(!csio_is_rnode_ready(fdmi_req->rnode)) { + CSIO_INC_STATS(ln, n_fdmi_err); + return; + } + cmd = fdmi_req->dma_buf.vaddr; + if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { + csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n", + csio_ct_reason(cmd), csio_ct_expl(cmd)); + } + + /* Send FDMI cmd to de-register any Port attributes if registered + * before + */ + + /* Prepare FDMI DPRT cmd */ + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT); + len = FC_CT_HDR_LEN; + port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); + memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); + len += sizeof(*port_name); + + /* Submit FDMI request */ + spin_lock_irqsave(&hw->lock, flags); + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); + } + spin_unlock_irqrestore(&hw->lock, flags); +} + +/** + * csio_ln_fdmi_start - Start an FDMI request. + * @ln: lnode + * @context: session context + * + * Issued with lock held. + */ +int +csio_ln_fdmi_start(struct csio_lnode *ln, void *context) +{ + struct csio_ioreq *fdmi_req; + struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; + void *cmd; + struct fc_fdmi_hba_identifier *hbaid; + uint32_t len; + + if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) + return -EPROTONOSUPPORT; + + if (!csio_is_rnode_ready(fdmi_rn)) + CSIO_INC_STATS(ln, n_fdmi_err); + + /* Send FDMI cmd to de-register any HBA attributes if registered + * before + */ + + fdmi_req = ln->mgmt_req; + fdmi_req->lnode = ln; + fdmi_req->rnode = fdmi_rn; + + /* Prepare FDMI DHBA cmd */ + cmd = fdmi_req->dma_buf.vaddr; + memset(cmd, 0, FC_CT_HDR_LEN); + csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA); + len = FC_CT_HDR_LEN; + + hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); + memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); + len += sizeof(*hbaid); + + /* Submit FDMI request */ + if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, + FCOE_CT, &fdmi_req->dma_buf, len)) { + CSIO_INC_STATS(ln, n_fdmi_err); + csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); + } + + return 0; +} + +/* + * csio_ln_vnp_read_cbfn - vnp read completion handler. + * @hw: HW lnode + * @cbfn: Completion handler. + * + * Reads vnp response and updates ln parameters. + */ +static void +csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); + struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); + struct fc_els_csp *csp; + struct fc_els_cssp *clsp; + enum fw_retval retval; + __be32 nport_id = 0; + + retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); + if (retval != FW_SUCCESS) { + csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); + mempool_free(mbp, hw->mb_mempool); + return; + } + + spin_lock_irq(&hw->lock); + + memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); + memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3); + ln->nport_id = ntohl(nport_id); + ln->nport_id = ln->nport_id >> 8; + + /* Update WWNs */ + /* + * This may look like a duplication of what csio_fcoe_enable_link() + * does, but is absolutely necessary if the vnpi changes between + * a FCOE LINK UP and FCOE LINK DOWN. 
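+ * The VNP read response carries the current WWNN/WWPN and service
+ * parameters, so they are refreshed unconditionally here.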
+ */ + memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); + + /* Copy common sparam */ + csp = (struct fc_els_csp *)rsp->cmn_srv_parms; + ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; + ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; + ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred; + ln->ln_sparm.csp.sp_features = csp->sp_features; + ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data; + ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov; + ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov; + + /* Copy word 0 & word 1 of class sparam */ + clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; + ln->ln_sparm.clsp[2].cp_class = clsp->cp_class; + ln->ln_sparm.clsp[2].cp_init = clsp->cp_init; + ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip; + ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs; + + spin_unlock_irq(&hw->lock); + + mempool_free(mbp, hw->mb_mempool); + + /* Send an event to update local attribs */ + csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); +} + +/* + * csio_ln_vnp_read - Read vnp params. + * @ln: lnode + * @cbfn: Completion handler. + * + * Issued with lock held. + */ +static int +csio_ln_vnp_read(struct csio_lnode *ln, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + + /* Allocate Mbox request */ + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Prepare VNP Command */ + csio_fcoe_vnp_read_init_mb(ln, mbp, + CSIO_MB_DEFAULT_TMO, + ln->fcf_flowid, + ln->vnp_flowid, + cbfn); + + /* Issue MBOX cmd */ + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + return 0; +} + +/* + * csio_fcoe_enable_link - Enable fcoe link. + * @ln: lnode + * @enable: enable/disable + * Issued with lock held. + * Issues mbox cmd to bring up FCOE link on port associated with given ln. + */ +static int +csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + enum fw_retval retval; + uint8_t portid; + uint8_t sub_op; + struct fw_fcoe_link_cmd *lcmd; + int i; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + portid = ln->portid; + sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; + + csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", + sub_op ? "UP" : "DOWN", portid); + + csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + portid, sub_op, 0, 0, 0, NULL); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", + portid); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + retval = csio_mb_fw_retval(mbp); + if (retval != FW_SUCCESS) { + csio_err(hw, + "FCOE LINK %s cmd on port[%d] failed with " + "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (!enable) + goto out; + + lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; + + memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); + memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); + + for (i = 0; i < CSIO_MAX_PPORTS; i++) + if (hw->pport[i].portid == portid) + memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); + +out: + mempool_free(mbp, hw->mb_mempool); + return 0; +} + +/* + * csio_ln_read_fcf_cbfn - Read fcf parameters + * @ln: lnode + * + * read fcf response and Update ln fcf information. 
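+ *
+ * All multi-byte fields in the firmware response are big-endian and are
+ * converted to host order (ntohs/ntohl/be32_to_cpu) before being cached
+ * in the FCF info.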
+ */ +static void +csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) +{ + struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; + struct csio_fcf_info *fcf_info; + struct fw_fcoe_fcf_cmd *rsp = + (struct fw_fcoe_fcf_cmd *)(mbp->mb); + enum fw_retval retval; + + retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + if (retval != FW_SUCCESS) { + csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", + retval); + mempool_free(mbp, hw->mb_mempool); + return; + } + + spin_lock_irq(&hw->lock); + fcf_info = ln->fcfinfo; + fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( + ntohs(rsp->priority_pkd)); + fcf_info->vf_id = ntohs(rsp->vf_id); + fcf_info->vlan_id = rsp->vlan_id; + fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); + fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); + fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); + fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); + fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); + fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); + fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); + memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); + memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); + memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); + memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); + memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); + + spin_unlock_irq(&hw->lock); + + mempool_free(mbp, hw->mb_mempool); +} + +/* + * csio_ln_read_fcf_entry - Read fcf entry. + * @ln: lnode + * @cbfn: Completion handler. + * + * Issued with lock held. + */ +static int +csio_ln_read_fcf_entry(struct csio_lnode *ln, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_hw *hw = ln->hwp; + struct csio_mb *mbp; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Get FCoE FCF information */ + csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, + ln->portid, ln->fcf_flowid, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "failed to issue FCOE FCF cmd\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + return 0; +} + +/* + * csio_handle_link_up - Logical Linkup event. + * @hw - HW module. + * @portid - Physical port number + * @fcfi - FCF index. + * @vnpi - VNP index. + * Returns - none. + * + * This event is received from FW, when virtual link is established between + * Physical port[ENode] and FCF. If its new vnpi, then local node object is + * created on this FCF and set to [ONLINE] state. + * Lnode waits for FW_RDEV_CMD event to be received indicating that + * Fabric login is completed and lnode moves to [READY] state. 
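+ * If the vnpi is already known, the existing lnode is simply reused and
+ * brought back online.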
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+		    uint32_t vnpi)
+{
+	struct csio_lnode *ln = NULL;
+
+	/* Lookup lnode based on vnpi */
+	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+	if (!ln) {
+		/* Pick lnode based on portid */
+		ln = csio_ln_lookup_by_portid(hw, portid);
+		if (!ln) {
+			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
+				 portid);
+			CSIO_DB_ASSERT(0);
+			return;
+		}
+
+		/* Check if lnode has valid vnp flowid */
+		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
+			/* New VN-Port */
+			spin_unlock_irq(&hw->lock);
+			ln = csio_lnode_alloc(hw);
+			spin_lock_irq(&hw->lock);
+			if (!ln) {
+				csio_err(hw,
+					 "failed to allocate fcoe lnode "
+					 "for port:%d vnpi:x%x\n",
+					 portid, vnpi);
+				CSIO_DB_ASSERT(0);
+				return;
+			}
+			ln->portid = portid;
+		}
+		ln->vnp_flowid = vnpi;
+		ln->dev_num &= ~0xFFFF;
+		ln->dev_num |= vnpi;
+	}
+
+	/* Initialize fcfi */
+	ln->fcf_flowid = fcfi;
+
+	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
+
+	CSIO_INC_STATS(ln, n_link_up);
+
+	/* Send LINKUP event to SM */
+	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
+}
+
+/*
+ * csio_post_event_rns
+ * @ln - FCOE lnode
+ * @evt - Given rnode event
+ * Returns - none
+ *
+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
+{
+	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+	struct list_head *tmp, *next;
+	struct csio_rnode *rn;
+
+	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
+		rn = (struct csio_rnode *) tmp;
+		csio_post_event(&rn->sm, evt);
+	}
+}
+
+/*
+ * csio_cleanup_rns
+ * @ln - FCOE lnode
+ * Returns - none
+ *
+ * Frees all FCOE rnodes connected with given Lnode.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_cleanup_rns(struct csio_lnode *ln)
+{
+	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+	struct list_head *tmp, *next_rn;
+	struct csio_rnode *rn;
+
+	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
+		rn = (struct csio_rnode *) tmp;
+		csio_put_rnode(ln, rn);
+	}
+}
+
+/*
+ * csio_post_event_lns
+ * @ln - FCOE lnode
+ * @evt - Given lnode event
+ * Returns - none
+ *
+ * Posts given lnode event to all FCOE lnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+	struct list_head *tmp;
+	struct csio_lnode *cln, *sln;
+
+	/* If NPIV lnode, send evt only to that and return */
+	if (csio_is_npiv_ln(ln)) {
+		csio_post_event(&ln->sm, evt);
+		return;
+	}
+
+	sln = ln;
+	/* Traverse children lnodes list and send evt */
+	list_for_each(tmp, &sln->cln_head) {
+		cln = (struct csio_lnode *) tmp;
+		csio_post_event(&cln->sm, evt);
+	}
+
+	/* Send evt to parent lnode */
+	csio_post_event(&ln->sm, evt);
+}
+
+/*
+ * csio_ln_down - Local nport is down
+ * @ln - FCOE Lnode
+ * Returns - none
+ *
+ * Sends LINK_DOWN events to the Lnode and its associated NPIV lnodes.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_ln_down(struct csio_lnode *ln)
+{
+	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
+}
+
+/*
+ * csio_handle_link_down - Logical Linkdown event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none
+ *
+ * This event is received from FW when the virtual link goes down between
+ * the Physical port[ENode] and the FCF. The lnode and its associated NPIV
+ * lnodes hosted on this vnpi[VN-Port] will be de-instantiated.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+		      uint32_t vnpi)
+{
+	struct csio_fcf_info *fp;
+	struct csio_lnode *ln;
+
+	/* Lookup lnode based on vnpi */
+	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+	if (ln) {
+		fp = ln->fcfinfo;
+		CSIO_INC_STATS(ln, n_link_down);
+
+		/* Warn if linkdown is received while lnode is not in ready state */
+		if (!csio_is_lnode_ready(ln)) {
+			csio_ln_warn(ln,
+				     "warn: FCOE link is already offline. "
+				     "Ignoring FCoE linkdown event on portid %d\n",
+				     portid);
+			CSIO_INC_STATS(ln, n_evt_drop);
+			return;
+		}
+
+		/* Verify portid */
+		if (fp->portid != portid) {
+			csio_ln_warn(ln,
+				     "warn: FCOE linkdown recv with "
+				     "invalid port %d\n", portid);
+			CSIO_INC_STATS(ln, n_evt_drop);
+			return;
+		}
+
+		/* Verify fcfi */
+		if (ln->fcf_flowid != fcfi) {
+			csio_ln_warn(ln,
+				     "warn: FCOE linkdown recv with "
+				     "invalid fcfi x%x\n", fcfi);
+			CSIO_INC_STATS(ln, n_evt_drop);
+			return;
+		}
+
+		csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
+
+		/* Send LINK_DOWN event to lnode s/m */
+		csio_ln_down(ln);
+
+		return;
+	} else {
+		csio_warn(hw,
+			  "warn: FCOE linkdown recv with invalid vnpi x%x\n",
+			  vnpi);
+		CSIO_INC_STATS(hw, n_evt_drop);
+	}
+}
+
+/*
+ * csio_is_lnode_ready - Checks whether the FCOE lnode is in ready state.
+ * @ln: Lnode module
+ *
+ * Returns True if FCOE lnode is in ready state.
+ */
+int
+csio_is_lnode_ready(struct csio_lnode *ln)
+{
+	return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+}
+
+/*****************************************************************************/
+/* START: Lnode SM */
+/*****************************************************************************/
+/*
+ * csio_lns_uninit - The request in uninit state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "uninit" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+	struct csio_lnode *rln = hw->rln;
+	int rv;
+
+	CSIO_INC_STATS(ln, n_evt_sm[evt]);
+	switch (evt) {
+	case CSIO_LNE_LINKUP:
+		csio_set_state(&ln->sm, csio_lns_online);
+		/* Read FCF only for physical lnode */
+		if (csio_is_phys_ln(ln)) {
+			rv = csio_ln_read_fcf_entry(ln,
+					csio_ln_read_fcf_cbfn);
+			if (rv != 0) {
+				/* TODO: Send HW RESET event */
+				CSIO_INC_STATS(ln, n_err);
+				break;
+			}
+
+			/* Add FCF record */
+			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+		}
+
+		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+		if (rv != 0) {
+			/* TODO: Send HW RESET event */
+			CSIO_INC_STATS(ln, n_err);
+		}
+		break;
+
+	case CSIO_LNE_DOWN_LINK:
+		break;
+
+	default:
+		csio_ln_dbg(ln,
+			    "unexp ln event %d recv from did:x%x in "
+			    "ln state[uninit].\n", evt, ln->nport_id);
+		CSIO_INC_STATS(ln, n_evt_unexp);
+		break;
+	} /* switch event */
+}
+
+/*
+ * csio_lns_online - The request in online state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "online" state.
+ * Invoked with HW lock held.
+ * Return - none.
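+ *
+ * CSIO_LNE_FAB_INIT_DONE (mapped from the FLOGI/FDISC accept firmware
+ * events, see fwevt_to_lnevt[]) is what moves the lnode on to the ready
+ * state.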
+ */
+static void
+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+	CSIO_INC_STATS(ln, n_evt_sm[evt]);
+	switch (evt) {
+	case CSIO_LNE_LINKUP:
+		csio_ln_warn(ln,
+			     "warn: FCOE link is already up. "
+			     "Ignoring linkup on port:%d\n", ln->portid);
+		CSIO_INC_STATS(ln, n_evt_drop);
+		break;
+
+	case CSIO_LNE_FAB_INIT_DONE:
+		csio_set_state(&ln->sm, csio_lns_ready);
+
+		spin_unlock_irq(&hw->lock);
+		csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
+		spin_lock_irq(&hw->lock);
+
+		break;
+
+	case CSIO_LNE_LINK_DOWN:
+	case CSIO_LNE_DOWN_LINK:
+		csio_set_state(&ln->sm, csio_lns_uninit);
+		if (csio_is_phys_ln(ln)) {
+			/* Remove FCF entry */
+			list_del_init(&ln->fcfinfo->list);
+		}
+		break;
+
+	default:
+		csio_ln_dbg(ln,
+			    "unexp ln event %d recv from did:x%x in "
+			    "ln state[online].\n", evt, ln->nport_id);
+		CSIO_INC_STATS(ln, n_evt_unexp);
+
+		break;
+	} /* switch event */
+}
+
+/*
+ * csio_lns_ready - The request in ready state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "ready" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+	CSIO_INC_STATS(ln, n_evt_sm[evt]);
+	switch (evt) {
+	case CSIO_LNE_FAB_INIT_DONE:
+		csio_ln_dbg(ln,
+			    "ignoring event %d recv from did x%x "
+			    "in ln state[ready].\n", evt, ln->nport_id);
+		CSIO_INC_STATS(ln, n_evt_drop);
+		break;
+
+	case CSIO_LNE_LINK_DOWN:
+		csio_set_state(&ln->sm, csio_lns_offline);
+		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+		spin_unlock_irq(&hw->lock);
+		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+		spin_lock_irq(&hw->lock);
+
+		if (csio_is_phys_ln(ln)) {
+			/* Remove FCF entry */
+			list_del_init(&ln->fcfinfo->list);
+		}
+		break;
+
+	case CSIO_LNE_DOWN_LINK:
+		csio_set_state(&ln->sm, csio_lns_offline);
+		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+		/* The host needs to issue aborts in case the FW has not
+		 * returned WRs with status "ABORTED"
+		 */
+		spin_unlock_irq(&hw->lock);
+		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+		spin_lock_irq(&hw->lock);
+
+		if (csio_is_phys_ln(ln)) {
+			/* Remove FCF entry */
+			list_del_init(&ln->fcfinfo->list);
+		}
+		break;
+
+	case CSIO_LNE_CLOSE:
+		csio_set_state(&ln->sm, csio_lns_uninit);
+		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+		break;
+
+	case CSIO_LNE_LOGO:
+		csio_set_state(&ln->sm, csio_lns_offline);
+		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+		break;
+
+	default:
+		csio_ln_dbg(ln,
+			    "unexp ln event %d recv from did:x%x in "
+			    "ln state[ready].\n", evt, ln->nport_id);
+		CSIO_INC_STATS(ln, n_evt_unexp);
+		CSIO_DB_ASSERT(0);
+		break;
+	} /* switch event */
+}
+
+/*
+ * csio_lns_offline - The request in offline state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given lnode event which is currently in "offline" state.
+ * Invoked with HW lock held.
+ * Return - none.
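+ *
+ * A LINKUP received here re-runs the same FCF/VNP discovery sequence as
+ * the uninit state.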
+ */ +static void +csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_lnode *rln = hw->rln; + int rv; + + CSIO_INC_STATS(ln, n_evt_sm[evt]); + switch (evt) { + case CSIO_LNE_LINKUP: + csio_set_state(&ln->sm, csio_lns_online); + /* Read FCF only for physical lnode */ + if (csio_is_phys_ln(ln)) { + rv = csio_ln_read_fcf_entry(ln, + csio_ln_read_fcf_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + break; + } + + /* Add FCF record */ + list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); + } + + rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); + if (rv != 0) { + /* TODO: Send HW RESET event */ + CSIO_INC_STATS(ln, n_err); + } + break; + + case CSIO_LNE_LINK_DOWN: + case CSIO_LNE_DOWN_LINK: + case CSIO_LNE_LOGO: + csio_ln_dbg(ln, + "ignoring event %d recv from did x%x" + "in ln state[offline].\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_drop); + break; + + case CSIO_LNE_CLOSE: + csio_set_state(&ln->sm, csio_lns_uninit); + csio_post_event_rns(ln, CSIO_RNFE_CLOSE); + break; + + default: + csio_ln_dbg(ln, + "unexp ln event %d recv from did:x%x in " + "ln state[offline]\n", evt, ln->nport_id); + CSIO_INC_STATS(ln, n_evt_unexp); + CSIO_DB_ASSERT(0); + break; + } /* switch event */ +} + +/*****************************************************************************/ +/* END: Lnode SM */ +/*****************************************************************************/ + +static void +csio_free_fcfinfo(struct kref *kref) +{ + struct csio_fcf_info *fcfinfo = container_of(kref, + struct csio_fcf_info, kref); + kfree(fcfinfo); +} + +/* Helper routines for attributes */ +/* + * csio_lnode_state_to_str - Get current state of FCOE lnode. + * @ln - lnode + * @str - state of lnode. + * + */ +void +csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) +{ + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { + strcpy(str, "UNINIT"); + return; + } + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { + strcpy(str, "READY"); + return; + } + if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { + strcpy(str, "OFFLINE"); + return; + } + strcpy(str, "UNKNOWN"); +} /* csio_lnode_state_to_str */ + + +int +csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, + struct fw_fcoe_port_stats *port_stats) +{ + struct csio_mb *mbp; + struct fw_fcoe_port_cmd_params portparams; + enum fw_retval retval; + int idx; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "FCoE FCF PARAMS command out of memory!\n"); + return -EINVAL; + } + portparams.portid = portid; + + for (idx = 1; idx <= 3; idx++) { + portparams.idx = (idx-1)*6 + 1; + portparams.nstats = 6; + if (idx == 3) + portparams.nstats = 4; + csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, + &portparams, NULL); + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of FCoE port params failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + csio_mb_process_portparams_rsp(hw, mbp, &retval, + &portparams, port_stats); + } + + mempool_free(mbp, hw->mb_mempool); + return 0; +} + +/* + * csio_ln_mgmt_wr_handler -Mgmt Work Request handler. + * @wr - WR. + * @len - WR len. + * This handler is invoked when an outstanding mgmt WR is completed. + * Its invoked in the context of FW event worker thread for every + * mgmt event received. + * Return - none. 
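+ *
+ * The WR cookie set at submission time carries the io_req pointer, which
+ * is how a completion is matched back to its originating request before
+ * the callback is run.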
+ */ + +static void +csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) +{ + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + struct csio_ioreq *io_req = NULL; + struct fw_fcoe_els_ct_wr *wr_cmd; + + + wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; + + if (len < sizeof(struct fw_fcoe_els_ct_wr)) { + csio_err(mgmtm->hw, + "Invalid ELS CT WR length recvd, len:%x\n", len); + mgmtm->stats.n_err++; + return; + } + + io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); + io_req->wr_status = csio_wr_status(wr_cmd); + + /* lookup ioreq exists in our active Q */ + spin_lock_irq(&hw->lock); + if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { + csio_err(mgmtm->hw, + "Error- Invalid IO handle recv in WR. handle: %p\n", + io_req); + mgmtm->stats.n_err++; + spin_unlock_irq(&hw->lock); + return; + } + + mgmtm = csio_hw_to_mgmtm(hw); + + /* Dequeue from active queue */ + list_del_init(&io_req->sm.sm_list); + mgmtm->stats.n_active--; + spin_unlock_irq(&hw->lock); + + /* io_req will be freed by completion handler */ + if (io_req->io_cbfn) + io_req->io_cbfn(hw, io_req); +} + +/** + * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. + * @hw: HW module + * @cpl_op: CPL opcode + * @cmd: FW cmd/WR. + * + * Process received FCoE cmd/WR event from FW. + */ +void +csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) +{ + struct csio_lnode *ln; + struct csio_rnode *rn; + uint8_t portid, opcode = *(uint8_t *)cmd; + struct fw_fcoe_link_cmd *lcmd; + struct fw_wr_hdr *wr; + struct fw_rdev_wr *rdev_wr; + enum fw_fcoe_link_status lstatus; + uint32_t fcfi, rdev_flowid, vnpi; + enum csio_ln_ev evt; + + if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { + + lcmd = (struct fw_fcoe_link_cmd *)cmd; + lstatus = lcmd->lstatus; + portid = FW_FCOE_LINK_CMD_PORTID_GET( + ntohl(lcmd->op_to_portid)); + fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); + vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); + + if (lstatus == FCOE_LINKUP) { + + /* HW lock here */ + spin_lock_irq(&hw->lock); + csio_handle_link_up(hw, portid, fcfi, vnpi); + spin_unlock_irq(&hw->lock); + /* HW un lock here */ + + } else if (lstatus == FCOE_LINKDOWN) { + + /* HW lock here */ + spin_lock_irq(&hw->lock); + csio_handle_link_down(hw, portid, fcfi, vnpi); + spin_unlock_irq(&hw->lock); + /* HW un lock here */ + } else { + csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", + lcmd->lstatus); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else if (cpl_op == CPL_FW6_PLD) { + wr = (struct fw_wr_hdr *) (cmd + 4); + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) + == FW_RDEV_WR) { + + rdev_wr = (struct fw_rdev_wr *) (cmd + 4); + + rdev_flowid = FW_RDEV_WR_FLOWID_GET( + ntohl(rdev_wr->alloc_to_len16)); + vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( + ntohl(rdev_wr->flags_to_assoc_flowid)); + + csio_dbg(hw, + "FW_RDEV_WR: flowid:x%x ev_cause:x%x " + "vnpi:0x%x\n", rdev_flowid, + rdev_wr->event_cause, vnpi); + + if (rdev_wr->protocol != PROT_FCOE) { + csio_err(hw, + "FW_RDEV_WR: invalid proto:x%x " + "received with flowid:x%x\n", + rdev_wr->protocol, + rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + return; + } + + /* HW lock here */ + spin_lock_irq(&hw->lock); + ln = csio_ln_lookup_by_vnpi(hw, vnpi); + if (!ln) { + csio_err(hw, + "FW_DEV_WR: invalid vnpi:x%x received " + "with flowid:x%x\n", vnpi, rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + goto out_pld; + } + + rn = csio_confirm_rnode(ln, rdev_flowid, + &rdev_wr->u.fcoe_rdev); + if (!rn) { + csio_ln_dbg(ln, + "Failed to confirm rnode " + "for flowid:x%x\n", 
rdev_flowid); + CSIO_INC_STATS(hw, n_evt_drop); + goto out_pld; + } + + /* save previous event for debugging */ + ln->prev_evt = ln->cur_evt; + ln->cur_evt = rdev_wr->event_cause; + CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); + + /* Translate all the fabric events to lnode SM events */ + evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); + if (evt) { + csio_ln_dbg(ln, + "Posting event to lnode event:%d " + "cause:%d flowid:x%x\n", evt, + rdev_wr->event_cause, rdev_flowid); + csio_post_event(&ln->sm, evt); + } + + /* Handover event to rn SM here. */ + csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); +out_pld: + spin_unlock_irq(&hw->lock); + return; + } else { + csio_warn(hw, "unexpected WR op(0x%x) recv\n", + FW_WR_OP_G(be32_to_cpu((wr->hi)))); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else if (cpl_op == CPL_FW6_MSG) { + wr = (struct fw_wr_hdr *) (cmd); + if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { + csio_ln_mgmt_wr_handler(hw, wr, + sizeof(struct fw_fcoe_els_ct_wr)); + } else { + csio_warn(hw, "unexpected WR op(0x%x) recv\n", + FW_WR_OP_G(be32_to_cpu((wr->hi)))); + CSIO_INC_STATS(hw, n_cpl_unexp); + } + } else { + csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); + CSIO_INC_STATS(hw, n_cpl_unexp); + } +} + +/** + * csio_lnode_start - Kickstart lnode discovery. + * @ln: lnode + * + * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. + */ +int +csio_lnode_start(struct csio_lnode *ln) +{ + int rv = 0; + if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { + rv = csio_fcoe_enable_link(ln, 1); + ln->flags |= CSIO_LNF_LINK_ENABLE; + } + + return rv; +} + +/** + * csio_lnode_stop - Stop the lnode. + * @ln: lnode + * + * This routine is invoked by HW module to stop lnode and its associated NPIV + * lnodes. + */ +void +csio_lnode_stop(struct csio_lnode *ln) +{ + csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); + if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { + csio_fcoe_enable_link(ln, 0); + ln->flags &= ~CSIO_LNF_LINK_ENABLE; + } + csio_ln_dbg(ln, "stopping ln :%p\n", ln); +} + +/** + * csio_lnode_close - Close an lnode. + * @ln: lnode + * + * This routine is invoked by HW module to close an lnode and its + * associated NPIV lnodes. Lnode and its associated NPIV lnodes are + * set to uninitialized state. + */ +void +csio_lnode_close(struct csio_lnode *ln) +{ + csio_post_event_lns(ln, CSIO_LNE_CLOSE); + if (csio_is_phys_ln(ln)) + ln->vnp_flowid = CSIO_INVALID_IDX; + + csio_ln_dbg(ln, "closed ln :%p\n", ln); +} + +/* + * csio_ln_prep_ecwr - Prepare ELS/CT WR. + * @io_req - IO request. + * @wr_len - WR len + * @immd_len - WR immediate data + * @sub_op - Sub opcode + * @sid - source portid. + * @did - destination portid + * @flow_id - flowid + * @fw_wr - ELS/CT WR to be prepared. 
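+ * Note: FC port ids are 24-bit; PORT_ID_PTR() skips the top byte of the
+ * big-endian 32-bit value so that only the three significant bytes are
+ * copied into the l_id/r_id fields.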
+ * Returns: 0 - on success + */ +static int +csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, + uint32_t immd_len, uint8_t sub_op, uint32_t sid, + uint32_t did, uint32_t flow_id, uint8_t *fw_wr) +{ + struct fw_fcoe_els_ct_wr *wr; + __be32 port_id; + + wr = (struct fw_fcoe_els_ct_wr *)fw_wr; + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) | + FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); + + wr_len = DIV_ROUND_UP(wr_len, 16); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) | + FW_WR_LEN16_V(wr_len)); + wr->els_ct_type = sub_op; + wr->ctl_pri = 0; + wr->cp_en_class = 0; + wr->cookie = io_req->fw_handle; + wr->iqid = cpu_to_be16(csio_q_physiqid( + io_req->lnode->hwp, io_req->iq_idx)); + wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); + wr->tmo_val = (uint8_t) io_req->tmo; + port_id = htonl(sid); + memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); + port_id = htonl(did); + memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); + wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); + return 0; +} + +/* + * csio_ln_mgmt_submit_wr - Post elsct work request. + * @mgmtm - mgmtm + * @io_req - io request. + * @sub_op - ELS or CT request type + * @pld - Dma Payload buffer + * @pld_len - Payload len + * Prepares ELSCT Work request and sents it to FW. + * Returns: 0 - on success + */ +static int +csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, + uint8_t sub_op, struct csio_dma_buf *pld, + uint32_t pld_len) +{ + struct csio_wr_pair wrp; + struct csio_lnode *ln = io_req->lnode; + struct csio_rnode *rn = io_req->rnode; + struct csio_hw *hw = mgmtm->hw; + uint8_t fw_wr[64]; + struct ulptx_sgl dsgl; + uint32_t wr_size = 0; + uint8_t im_len = 0; + uint32_t wr_off = 0; + + int ret = 0; + + /* Calculate WR Size for this ELS REQ */ + wr_size = sizeof(struct fw_fcoe_els_ct_wr); + + /* Send as immediate data if pld < 256 */ + if (pld_len < 256) { + wr_size += ALIGN(pld_len, 8); + im_len = (uint8_t)pld_len; + } else + wr_size += sizeof(struct ulptx_sgl); + + /* Roundup WR size in units of 16 bytes */ + wr_size = ALIGN(wr_size, 16); + + /* Get WR to send ELS REQ */ + ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); + if (ret != 0) { + csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", + io_req, ret); + return ret; + } + + /* Prepare Generic WR used by all ELS/CT cmd */ + csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, + ln->nport_id, rn->nport_id, + csio_rn_flowid(rn), + &fw_wr[0]); + + /* Copy ELS/CT WR CMD */ + csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, + sizeof(struct fw_fcoe_els_ct_wr)); + wr_off += sizeof(struct fw_fcoe_els_ct_wr); + + /* Copy payload to Immediate section of WR */ + if (im_len) + csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); + else { + /* Program DSGL to dma payload */ + dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | + ULPTX_MORE_F | ULPTX_NSGE_V(1)); + dsgl.len0 = cpu_to_be32(pld_len); + dsgl.addr0 = cpu_to_be64(pld->paddr); + csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), + sizeof(struct ulptx_sgl)); + } + + /* Issue work request to xmit ELS/CT req to FW */ + csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); + return ret; +} + +/* + * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. + * @io_req - IO Request + * @io_cbfn - Completion handler. + * @req_type - ELS or CT request type + * @pld - Dma Payload buffer + * @pld_len - Payload len + * + * + * This API used submit managment ELS/CT request. 
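+ * On success the request is queued on the mgmtm active_q and is
+ * completed later by csio_ln_mgmt_wr_handler().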
+ * This called with hw lock held + * Returns: 0 - on success + * -ENOMEM - on error. + */ +static int +csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), + enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, + uint32_t pld_len) +{ + struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); + struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); + int rv; + + BUG_ON(pld_len > pld->len); + + io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ + io_req->fw_handle = (uintptr_t) (io_req); + io_req->eq_idx = mgmtm->eq_idx; + io_req->iq_idx = mgmtm->iq_idx; + + rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); + if (rv == 0) { + list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); + mgmtm->stats.n_active++; + } + return rv; +} + +/* + * csio_ln_fdmi_init - FDMI Init entry point. + * @ln: lnode + */ +static int +csio_ln_fdmi_init(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_dma_buf *dma_buf; + + /* Allocate MGMT request required for FDMI */ + ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); + if (!ln->mgmt_req) { + csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); + CSIO_INC_STATS(hw, n_err_nomem); + return -ENOMEM; + } + + /* Allocate Dma buffers for FDMI response Payload */ + dma_buf = &ln->mgmt_req->dma_buf; + dma_buf->len = 2048; + dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len, + &dma_buf->paddr, GFP_KERNEL); + if (!dma_buf->vaddr) { + csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); + kfree(ln->mgmt_req); + ln->mgmt_req = NULL; + return -ENOMEM; + } + + ln->flags |= CSIO_LNF_FDMI_ENABLE; + return 0; +} + +/* + * csio_ln_fdmi_exit - FDMI exit entry point. + * @ln: lnode + */ +static int +csio_ln_fdmi_exit(struct csio_lnode *ln) +{ + struct csio_dma_buf *dma_buf; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (!ln->mgmt_req) + return 0; + + dma_buf = &ln->mgmt_req->dma_buf; + if (dma_buf->vaddr) + dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ln->mgmt_req); + return 0; +} + +int +csio_scan_done(struct csio_lnode *ln, unsigned long ticks, + unsigned long time, unsigned long max_scan_ticks, + unsigned long delta_scan_ticks) +{ + int rv = 0; + + if (time >= max_scan_ticks) + return 1; + + if (!ln->tgt_scan_tick) + ln->tgt_scan_tick = ticks; + + if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { + if (!ln->last_scan_ntgts) + ln->last_scan_ntgts = ln->n_scsi_tgts; + else { + if (ln->last_scan_ntgts == ln->n_scsi_tgts) + return 1; + + ln->last_scan_ntgts = ln->n_scsi_tgts; + } + ln->tgt_scan_tick = ticks; + } + return rv; +} + +/* + * csio_notify_lnodes: + * @hw: HW module + * @note: Notification + * + * Called from the HW SM to fan out notifications to the + * Lnode SM. Since the HW SM is entered with lock held, + * there is no need to hold locks here. 
+ *
+ */
+void
+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
+{
+	struct list_head *tmp;
+	struct csio_lnode *ln;
+
+	csio_dbg(hw, "Notifying all nodes of event %d\n", note);
+
+	/* Traverse sibling lnodes list and send evt */
+	list_for_each(tmp, &hw->sln_head) {
+		ln = (struct csio_lnode *) tmp;
+
+		switch (note) {
+		case CSIO_LN_NOTIFY_HWREADY:
+			csio_lnode_start(ln);
+			break;
+
+		case CSIO_LN_NOTIFY_HWRESET:
+		case CSIO_LN_NOTIFY_HWREMOVE:
+			csio_lnode_close(ln);
+			break;
+
+		case CSIO_LN_NOTIFY_HWSTOP:
+			csio_lnode_stop(ln);
+			break;
+
+		default:
+			break;
+
+		}
+	}
+}
+
+/*
+ * csio_disable_lnodes:
+ * @hw: HW module
+ * @portid: port id
+ * @disable: disable/enable flag.
+ * If disable=1, disables all lnodes hosted on the given physical port,
+ * otherwise enables all the lnodes on the given physical port.
+ * This routine needs to be called with the hw lock held.
+ */
+void
+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
+{
+	struct list_head *tmp;
+	struct csio_lnode *ln;
+
+	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
+
+	/* Traverse sibling lnodes list and send evt */
+	list_for_each(tmp, &hw->sln_head) {
+		ln = (struct csio_lnode *) tmp;
+		if (ln->portid != portid)
+			continue;
+
+		if (disable)
+			csio_lnode_stop(ln);
+		else
+			csio_lnode_start(ln);
+	}
+}
+
+/*
+ * csio_ln_init - Initialize an lnode.
+ * @ln: lnode
+ *
+ */
+static int
+csio_ln_init(struct csio_lnode *ln)
+{
+	int rv = -EINVAL;
+	struct csio_lnode *pln;
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+	csio_init_state(&ln->sm, csio_lns_uninit);
+	ln->vnp_flowid = CSIO_INVALID_IDX;
+	ln->fcf_flowid = CSIO_INVALID_IDX;
+
+	if (csio_is_root_ln(ln)) {
+
+		/* This is the lnode used during initialization */
+
+		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
+		if (!ln->fcfinfo) {
+			csio_ln_err(ln, "Failed to alloc FCF record\n");
+			CSIO_INC_STATS(hw, n_err_nomem);
+			goto err;
+		}
+
+		INIT_LIST_HEAD(&ln->fcf_lsthead);
+		kref_init(&ln->fcfinfo->kref);
+
+		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+			goto err;
+
+	} else { /* Either a non-root physical or a virtual lnode */
+
+		/*
+		 * The rest is common for non-root physical and NPIV lnodes.
+		 * Just get references to all other modules
+		 */
+
+		if (csio_is_npiv_ln(ln)) {
+			/* NPIV */
+			pln = csio_parent_lnode(ln);
+			kref_get(&pln->fcfinfo->kref);
+			ln->fcfinfo = pln->fcfinfo;
+		} else {
+			/* Another non-root physical lnode (FCF) */
+			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
+					      GFP_KERNEL);
+			if (!ln->fcfinfo) {
+				csio_ln_err(ln, "Failed to alloc FCF info\n");
+				CSIO_INC_STATS(hw, n_err_nomem);
+				goto err;
+			}
+
+			kref_init(&ln->fcfinfo->kref);
+
+			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+				goto err;
+		}
+
+	} /* if (!csio_is_root_ln(ln)) */
+
+	return 0;
+err:
+	return rv;
+}
+
+static void
+csio_ln_exit(struct csio_lnode *ln)
+{
+	struct csio_lnode *pln;
+
+	csio_cleanup_rns(ln);
+	if (csio_is_npiv_ln(ln)) {
+		pln = csio_parent_lnode(ln);
+		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
+	} else {
+		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
+		if (csio_fdmi_enable)
+			csio_ln_fdmi_exit(ln);
+	}
+	ln->fcfinfo = NULL;
+}
+
+/*
+ * csio_lnode_init - Initialize the members of an lnode.
+ * @ln: lnode + */ +int +csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, + struct csio_lnode *pln) +{ + int rv = -EINVAL; + + /* Link this lnode to hw */ + csio_lnode_to_hw(ln) = hw; + + /* Link child to parent if child lnode */ + if (pln) + ln->pln = pln; + else + ln->pln = NULL; + + /* Initialize scsi_tgt and timers to zero */ + ln->n_scsi_tgts = 0; + ln->last_scan_ntgts = 0; + ln->tgt_scan_tick = 0; + + /* Initialize rnode list */ + INIT_LIST_HEAD(&ln->rnhead); + INIT_LIST_HEAD(&ln->cln_head); + + /* Initialize log level for debug */ + ln->params.log_level = hw->params.log_level; + + if (csio_ln_init(ln)) + goto err; + + /* Add lnode to list of sibling or children lnodes */ + spin_lock_irq(&hw->lock); + list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); + if (pln) + pln->num_vports++; + spin_unlock_irq(&hw->lock); + + hw->num_lns++; + + return 0; +err: + csio_lnode_to_hw(ln) = NULL; + return rv; +} + +/** + * csio_lnode_exit - De-instantiate an lnode. + * @ln: lnode + * + */ +void +csio_lnode_exit(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + csio_ln_exit(ln); + + /* Remove this lnode from hw->sln_head */ + spin_lock_irq(&hw->lock); + + list_del_init(&ln->sm.sm_list); + + /* If it is children lnode, decrement the + * counter in its parent lnode + */ + if (ln->pln) + ln->pln->num_vports--; + + /* Update root lnode pointer */ + if (list_empty(&hw->sln_head)) + hw->rln = NULL; + else + hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); + + spin_unlock_irq(&hw->lock); + + csio_lnode_to_hw(ln) = NULL; + hw->num_lns--; +} diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h new file mode 100644 index 000000000..372a67d12 --- /dev/null +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -0,0 +1,255 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef __CSIO_LNODE_H__
+#define __CSIO_LNODE_H__
+
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+
+
+#include "csio_defs.h"
+#include "csio_hw.h"
+
+#define CSIO_FCOE_MAX_NPIV	128
+#define CSIO_FCOE_MAX_RNODES	2048
+
+/* FDMI port attribute unknown speed */
+#define CSIO_HBA_PORTSPEED_UNKNOWN	0x8000
+
+extern int csio_fcoe_rnodes;
+extern int csio_fdmi_enable;
+
+/* State machine events */
+enum csio_ln_ev {
+	CSIO_LNE_NONE = (uint32_t)0,
+	CSIO_LNE_LINKUP,
+	CSIO_LNE_FAB_INIT_DONE,
+	CSIO_LNE_LINK_DOWN,
+	CSIO_LNE_DOWN_LINK,
+	CSIO_LNE_LOGO,
+	CSIO_LNE_CLOSE,
+	CSIO_LNE_MAX_EVENT,
+};
+
+
+struct csio_fcf_info {
+	struct list_head	list;
+	uint8_t			priority;
+	uint8_t			mac[6];
+	uint8_t			name_id[8];
+	uint8_t			fabric[8];
+	uint16_t		vf_id;
+	uint8_t			vlan_id;
+	uint16_t		max_fcoe_size;
+	uint8_t			fc_map[3];
+	uint32_t		fka_adv;
+	uint32_t		fcfi;
+	uint8_t			get_next:1;
+	uint8_t			link_aff:1;
+	uint8_t			fpma:1;
+	uint8_t			spma:1;
+	uint8_t			login:1;
+	uint8_t			portid;
+	uint8_t			spma_mac[6];
+	struct kref		kref;
+};
+
+/* Defines for flags */
+#define	CSIO_LNF_FIPSUPP	0x00000001	/* Fip Supported */
+#define	CSIO_LNF_NPIVSUPP	0x00000002	/* NPIV supported */
+#define CSIO_LNF_LINK_ENABLE	0x00000004	/* Link enabled */
+#define	CSIO_LNF_FDMI_ENABLE	0x00000008	/* FDMI support */
+
+/* Transport events */
+enum csio_ln_fc_evt {
+	CSIO_LN_FC_LINKUP = 1,
+	CSIO_LN_FC_LINKDOWN,
+	CSIO_LN_FC_RSCN,
+	CSIO_LN_FC_ATTRIB_UPDATE,
+};
+
+/* Lnode stats */
+struct csio_lnode_stats {
+	uint32_t	n_link_up;	/* Link up */
+	uint32_t	n_link_down;	/* Link down */
+	uint32_t	n_err;		/* error */
+	uint32_t	n_err_nomem;	/* memory not available */
+	uint32_t	n_inval_parm;	/* Invalid parameters */
+	uint32_t	n_evt_unexp;	/* unexpected event */
+	uint32_t	n_evt_drop;	/* dropped event */
+	uint32_t	n_rnode_match;	/* matched rnode */
+	uint32_t	n_dev_loss_tmo;	/* Device loss timeout */
+	uint32_t	n_fdmi_err;	/* fdmi err */
+	uint32_t	n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];	/* fw events */
+	enum csio_ln_ev	n_evt_sm[CSIO_LNE_MAX_EVENT];	/* State m/c events */
+	uint32_t	n_rnode_alloc;	/* rnode allocated */
+	uint32_t	n_rnode_free;	/* rnode freed */
+	uint32_t	n_rnode_nomem;	/* rnode alloc failure */
+	uint32_t	n_input_requests; /* Input Requests */
+	uint32_t	n_output_requests; /* Output Requests */
+	uint32_t	n_control_requests; /* Control Requests */
+	uint32_t	n_input_bytes;	/* Input Bytes */
+	uint32_t	n_output_bytes;	/* Output Bytes */
+	uint32_t	rsvd1;
+};
+
+/* Common Lnode params */
+struct csio_lnode_params {
+	uint32_t	ra_tov;
+	uint32_t	fcfi;
+	uint32_t	log_level;	/* Module level for debugging */
+};
+
+struct csio_service_parms {
+	struct fc_els_csp	csp;		/* Common service parms */
+	uint8_t			wwpn[8];	/* WWPN */
+	uint8_t			wwnn[8];	/* WWNN */
+	struct fc_els_cssp	clsp[4];	/* Class service params */
+	uint8_t			vvl[16];	/* Vendor version level */
+};
+
+/* Lnode */
+struct csio_lnode {
+	struct csio_sm		sm;		/* State machine + sibling
+						 * lnode list.
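+						 * Note: sm must remain the
+						 * first member - lnode
+						 * lookups cast sm.sm_list
+						 * entries straight back to
+						 * struct csio_lnode.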
+ */ + struct csio_hw *hwp; /* Pointer to the HW module */ + uint8_t portid; /* Port ID */ + uint8_t rsvd1; + uint16_t rsvd2; + uint32_t dev_num; /* Device number */ + uint32_t flags; /* Flags */ + struct list_head fcf_lsthead; /* FCF entries */ + struct csio_fcf_info *fcfinfo; /* FCF in use */ + struct csio_ioreq *mgmt_req; /* MGMT request */ + + /* FCoE identifiers */ + uint8_t mac[6]; + uint32_t nport_id; + struct csio_service_parms ln_sparm; /* Service parms */ + + /* Firmware identifiers */ + uint32_t fcf_flowid; /*fcf flowid */ + uint32_t vnp_flowid; + uint16_t ssn_cnt; /* Registered Session */ + uint8_t cur_evt; /* Current event */ + uint8_t prev_evt; /* Previous event */ + + /* Children */ + struct list_head cln_head; /* Head of the children lnode + * list. + */ + uint32_t num_vports; /* Total NPIV/children LNodes*/ + struct csio_lnode *pln; /* Parent lnode of child + * lnodes. + */ + struct list_head cmpl_q; /* Pending I/Os on this lnode */ + + /* Remote node information */ + struct list_head rnhead; /* Head of rnode list */ + uint32_t num_reg_rnodes; /* Number of rnodes registered + * with the host. + */ + uint32_t n_scsi_tgts; /* Number of scsi targets + * found + */ + uint32_t last_scan_ntgts;/* Number of scsi targets + * found per last scan. + */ + uint32_t tgt_scan_tick; /* timer started after + * new tgt found + */ + /* FC transport data */ + struct fc_vport *fc_vport; + struct fc_host_statistics fch_stats; + + struct csio_lnode_stats stats; /* Common lnode stats */ + struct csio_lnode_params params; /* Common lnode params */ +}; + +#define csio_lnode_to_hw(ln) ((ln)->hwp) +#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln) +#define csio_parent_lnode(ln) ((ln)->pln) +#define csio_ln_flowid(ln) ((ln)->vnp_flowid) +#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn) +#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn) + +#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0) +#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0) +#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0) + + +#define csio_ln_dbg(_ln, _fmt, ...) \ + csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +#define csio_ln_err(_ln, _fmt, ...) \ + csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +#define csio_ln_warn(_ln, _fmt, ...) 
\ + csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \ + CSIO_DEVID_LO(_ln), ##__VA_ARGS__); + +/* HW->Lnode notifications */ +enum csio_ln_notify { + CSIO_LN_NOTIFY_HWREADY = 1, + CSIO_LN_NOTIFY_HWSTOP, + CSIO_LN_NOTIFY_HWREMOVE, + CSIO_LN_NOTIFY_HWRESET, +}; + +void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *); +int csio_is_lnode_ready(struct csio_lnode *); +void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str); +struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *); +int csio_get_phy_port_stats(struct csio_hw *, uint8_t , + struct fw_fcoe_port_stats *); +int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long, + unsigned long, unsigned long); +void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify); +void csio_disable_lnodes(struct csio_hw *, uint8_t, bool); +void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt); +int csio_ln_fdmi_start(struct csio_lnode *, void *); +int csio_lnode_start(struct csio_lnode *); +void csio_lnode_stop(struct csio_lnode *); +void csio_lnode_close(struct csio_lnode *); +int csio_lnode_init(struct csio_lnode *, struct csio_hw *, + struct csio_lnode *); +void csio_lnode_exit(struct csio_lnode *); + +#endif /* ifndef __CSIO_LNODE_H__ */ diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c new file mode 100644 index 000000000..94810b19e --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.c @@ -0,0 +1,1690 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/string.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport_fc.h> + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_mb.h" +#include "csio_wr.h" + +#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL) + +/* MB Command/Response Helpers */ +/* + * csio_mb_fw_retval - FW return value from a mailbox response. 
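+ *
+ * A minimal polled-mode usage sketch (mailbox setup elided; a rough
+ * illustration, not a verbatim call site from this driver):
+ *
+ *     if (csio_mb_issue(hw, mbp) == 0 &&
+ *         csio_mb_fw_retval(mbp) == FW_SUCCESS)
+ *             ... the firmware accepted the command ...
+ *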
+ * @mbp: Mailbox structure
+ *
+ */
+enum fw_retval
+csio_mb_fw_retval(struct csio_mb *mbp)
+{
+ struct fw_cmd_hdr *hdr;
+
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ return FW_CMD_RETVAL_G(ntohl(hdr->lo));
+}
+
+/*
+ * csio_mb_hello - FW HELLO command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout (in ms).
+ * @m_mbox: Master mailbox number, if any.
+ * @a_mbox: Mailbox number for async notifications.
+ * @master: Device mastership.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->err_to_clearinit = htonl(
+ FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
+ FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT_F);
+
+}
+
+/*
+ * csio_mb_process_hello_rsp - FW HELLO response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @state: Device state deduced from the response.
+ * @mpfn: Master pfn
+ *
+ */
+void
+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, enum csio_dev_state *state,
+ uint8_t *mpfn)
+{
+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
+ uint32_t value;
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS) {
+ hw->fwrev = ntohl(rsp->fwrev);
+
+ value = ntohl(rsp->err_to_clearinit);
+ *mpfn = FW_HELLO_CMD_MBMASTER_G(value);
+
+ if (value & FW_HELLO_CMD_INIT_F)
+ *state = CSIO_DEV_STATE_INIT;
+ else if (value & FW_HELLO_CMD_ERR_F)
+ *state = CSIO_DEV_STATE_ERR;
+ else
+ *state = CSIO_DEV_STATE_UNINIT;
+ }
+}
+
+/*
+ * csio_mb_bye - FW BYE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout (in ms).
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_reset - FW RESET command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout (in ms).
+ * @reset: Type of reset.
+ * @halt: Halt flag.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reset, int halt,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->val = htonl(reset);
+ cmdp->halt_pkd = htonl(halt);
+
+}
+
+/*
+ * csio_mb_params - FW PARAMS command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number. + * @nparams: Number of parameters + * @params: Parameter mnemonic array. + * @val: Parameter value array. + * @wr: Write/Read PARAMS. + * @cbfn: Callback, if any. + * + */ +void +csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + unsigned int pf, unsigned int vf, unsigned int nparams, + const u32 *params, u32 *val, bool wr, + void (*cbfn)(struct csio_hw *, struct csio_mb *)) +{ + uint32_t i; + uint32_t temp_params = 0, temp_val = 0; + struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb); + __be32 *p = &cmdp->param[0].mnem; + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); + + cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F) | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + + /* Write Params */ + if (wr) { + while (nparams--) { + temp_params = *params++; + temp_val = *val++; + + *p++ = htonl(temp_params); + *p++ = htonl(temp_val); + } + } else { + for (i = 0; i < nparams; i++, p += 2) { + temp_params = *params++; + *p = htonl(temp_params); + } + } + +} + +/* + * csio_mb_process_read_params_rsp - FW PARAMS response processing helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @retval: Mailbox return value from Firmware + * @nparams: Number of parameters + * @val: Parameter value array. + * + */ +void +csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, unsigned int nparams, + u32 *val) +{ + struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb); + uint32_t i; + __be32 *p = &rsp->param[0].val; + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16)); + + if (*retval == FW_SUCCESS) + for (i = 0; i < nparams; i++, p += 2) + *val++ = ntohl(*p); +} + +/* + * csio_mb_ldst - FW LDST command + * @hw: The HW structure + * @mbp: Mailbox structure + * @tmo: timeout + * @reg: register + * + */ +void +csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg) +{ + struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); + CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1); + + /* + * Construct and send the Firmware LDST Command to retrieve the + * specified PCI-E Configuration Space register. + */ + ldst_cmd->op_to_addrspace = + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); + ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd)); + ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); + ldst_cmd->u.pcie.ctrl_to_fn = + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn)); + ldst_cmd->u.pcie.r = (uint8_t)reg; +} + +/* + * + * csio_mb_caps_config - FW Read/Write Capabilities command helper + * @hw: The HW structure + * @mbp: Mailbox structure + * @wr: Write if 1, Read if 0 + * @init: Turn on initiator mode. + * @tgt: Turn on target mode. + * @cofld: If 1, Control Offload for FCoE + * @cbfn: Callback, if any. + * + * This helper assumes that cmdp has MB payload from a previous CAPS + * read command. + */ +void +csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + bool wr, bool init, bool tgt, bool cofld, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_caps_config_cmd *cmdp = + (struct fw_caps_config_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1); + + cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + (wr ? 
FW_CMD_WRITE_F : FW_CMD_READ_F));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ /* Read config */
+ if (!wr)
+ return;
+
+ /* Write config */
+ cmdp->fcoecaps = 0;
+
+ if (cofld)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
+ if (init)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
+ if (tgt)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+}
+
+/*
+ * csio_mb_port - FW PORT command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @portid: Port ID to get/set info
+ * @wr: Write/Read PORT information.
+ * @fc: Requested link capabilities (32-bit port capability mask).
+ * @fw_caps: Port capability format: FW_CAPS16 or FW_CAPS32.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ u8 portid, bool wr, uint32_t fc, uint16_t fw_caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F |
+ (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F) |
+ FW_PORT_CMD_PORTID_V(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+ ? FW_PORT_ACTION_GET_PORT_INFO
+ : FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+ ? FW_PORT_ACTION_L1_CFG
+ : FW_PORT_ACTION_L1_CFG32) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ if (fw_caps == FW_CAPS16)
+ cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
+ else
+ cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
+}
+
+/*
+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @fw_caps: Port capability format: FW_CAPS16 or FW_CAPS32.
+ * @pcaps: Port capabilities (returned).
+ * @acaps: Advertised capabilities (returned).
+ *
+ */
+void
+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, uint16_t fw_caps,
+ u32 *pcaps, u32 *acaps)
+{
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
+
+ if (*retval == FW_SUCCESS) {
+ if (fw_caps == FW_CAPS16) {
+ *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
+ *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
+ } else {
+ *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+ *acaps = be32_to_cpu(rsp->u.info32.acaps32);
+ }
+ }
+}
+
+/*
+ * csio_mb_initialize - FW INITIALIZE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an
+ * Ingress DMA queue in the firmware.
+ *
+ * @hw: The hw structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->type_to_iqandstindex = htonl(
+ FW_IQ_CMD_VIID_V(iq_params->viid) |
+ FW_IQ_CMD_TYPE_V(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));
+
+ cmdp->fl0size = htons(iq_params->fl0size);
+ cmdp->fl1size = htons(iq_params->fl1size);
+
+} /* csio_mb_iq_alloc */
+
+/*
+ * csio_mb_iq_write - Initializes the mailbox for writing into an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an iq-alloc request.
+ * @iq_params: Ingress queue params needed for writing.
+ * @cbfn: The call-back function
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this IQ write request can be cascaded with a previous
+ * IQ alloc request, and we don't want to over-write the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ uint32_t iq_start_stop = (iq_params->iq_start) ?
+ FW_IQ_CMD_IQSTART_F :
+ FW_IQ_CMD_IQSTOP_F;
+ int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
+
+ /*
+ * If this IQ write is cascaded with IQ alloc request, do not
+ * re-initialize with 0's.
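+ * The alloc half has already primed cmdp via
+ * CSIO_INIT_MBP() and set FW_IQ_CMD_ALLOC_F above;
+ * zeroing it again here would wipe that setup.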
+ * + */ + if (!cascaded_req) + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1); + + cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_IQ_CMD_PFN_V(iq_params->pfn) | + FW_IQ_CMD_VFN_V(iq_params->vfn)); + cmdp->alloc_to_len16 |= htonl(iq_start_stop | + FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + cmdp->iqid |= htons(iq_params->iqid); + cmdp->fl0id |= htons(iq_params->fl0id); + cmdp->fl1id |= htons(iq_params->fl1id); + cmdp->type_to_iqandstindex |= htonl( + FW_IQ_CMD_IQANDST_V(iq_params->iqandst) | + FW_IQ_CMD_IQANUS_V(iq_params->iqanus) | + FW_IQ_CMD_IQANUD_V(iq_params->iqanud) | + FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex)); + cmdp->iqdroprss_to_iqesize |= htons( + FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech) | + FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen) | + FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu) | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) | + FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio) | + FW_IQ_CMD_IQESIZE_V(iq_params->iqesize)); + + cmdp->iqsize |= htons(iq_params->iqsize); + cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr); + + if (iq_params->type == 0) { + cmdp->iqns_to_fl0congen |= htonl( + FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)| + FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen)); + } + + if (iq_params->fl0size && iq_params->fl0addr && + (iq_params->fl0id != 0xFFFF)) { + + cmdp->iqns_to_fl0congen |= htonl( + FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)| + FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) | + FW_IQ_CMD_FL0FETCHRO_V(relaxed) | + FW_IQ_CMD_FL0DATARO_V(relaxed) | + FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) | + FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen)); + cmdp->fl0dcaen_to_fl0cidxfthresh |= htons( + FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen) | + FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu) | + FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin) | + FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax) | + FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh)); + cmdp->fl0size |= htons(iq_params->fl0size); + cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr); + } +} /* csio_mb_iq_write */ + +/* + * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an + * Ingress DMA Queue. + * + * @hw: The HW structure + * @mbp: Mailbox structure to initialize + * @priv: Private data. + * @mb_tmo: Mailbox time-out period (in ms). + * @iq_params: Ingress queue params needed for allocation & writing. + * @cbfn: The call-back function + * + * + */ +void +csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, + uint32_t mb_tmo, struct csio_iq_params *iq_params, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn); + csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn); +} /* csio_mb_iq_alloc_write */ + +/* + * csio_mb_iq_alloc_write_rsp - Process the allocation & writing + * of ingress DMA queue mailbox's response. + * + * @hw: The HW structure. + * @mbp: Mailbox structure to initialize. + * @retval: Firmware return value. + * @iq_params: Ingress queue parameters, after allocation and write. 
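+ *
+ * Sketch of response handling from a completion callback (names
+ * hypothetical, shown only to illustrate the calling pattern):
+ *
+ *     enum fw_retval ret;
+ *
+ *     csio_mb_iq_alloc_write_rsp(hw, mbp, &ret, &iq_params);
+ *     if (ret != FW_SUCCESS)
+ *             return;         all returned IDs are zeroed on failure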
+ *
+ */
+void
+csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *ret_val,
+ struct csio_iq_params *iq_params)
+{
+ struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
+ if (*ret_val == FW_SUCCESS) {
+ iq_params->physiqid = ntohs(rsp->physiqid);
+ iq_params->iqid = ntohs(rsp->iqid);
+ iq_params->fl0id = ntohs(rsp->fl0id);
+ iq_params->fl1id = ntohs(rsp->fl1id);
+ } else {
+ iq_params->physiqid = iq_params->iqid =
+ iq_params->fl0id = iq_params->fl1id = 0;
+ }
+} /* csio_mb_iq_alloc_write_rsp */
+
+/*
+ * csio_mb_iq_free - Initializes the mailbox for freeing a
+ * specified Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Parameters of the ingress queue to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_IQ_CMD_PFN_V(iq_params->pfn) |
+ FW_IQ_CMD_VFN_V(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));
+
+ cmdp->iqid = htons(iq_params->iqid);
+ cmdp->fl0id = htons(iq_params->fl0id);
+ cmdp->fl1id = htons(iq_params->fl1id);
+
+} /* csio_mb_iq_free */
+
+/*
+ * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
+ * an offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_mb_eq_ofld_alloc */
+
+/*
+ * csio_mb_eq_ofld_write - Initializes the mailbox for writing
+ * an allocated offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this EQ write request can be cascaded with a previous
+ * EQ alloc request, and we don't want to over-write the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
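+ *
+ * The same cascading applies here as for the IQ commands:
+ * csio_mb_eq_ofld_alloc_write() below ORs this write request into a
+ * mailbox already carrying the alloc request, so both travel to the
+ * firmware as a single FW_EQ_OFLD_CMD.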
+ */
+static void
+csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
+ FW_EQ_OFLD_CMD_EQSTART_F :
+ FW_EQ_OFLD_CMD_EQSTOP_F;
+
+ /*
+ * If this EQ write is cascaded with EQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(eq_start_stop |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
+
+ cmdp->fetchszm_to_iqid |= htonl(
+ FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));
+
+ cmdp->dcaen_to_eqsize |= htonl(
+ FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));
+
+ cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
+
+} /* csio_mb_eq_ofld_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
+ * writing into an Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
+ void *priv, uint32_t mb_tmo,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
+ csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
+ eq_ofld_params, cbfn);
+} /* csio_mb_eq_ofld_alloc_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
+ * & write egress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp, enum fw_retval *ret_val,
+ struct csio_eq_params *eq_ofld_params)
+{
+ struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
+
+ if (*ret_val == FW_SUCCESS) {
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
+ ntohl(rsp->eqid_pkd));
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
+ ntohl(rsp->physeqid_pkd));
+ } else
+ eq_ofld_params->eqid = 0;
+
+} /* csio_mb_eq_ofld_alloc_write_rsp */
+
+/*
+ * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
+ * specified Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data area.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters of the queue to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));
+
+} /* csio_mb_eq_ofld_free */
+
+/*
+ * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
+ * condition.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @port_id: Port ID.
+ * @sub_opcode: Link command sub-opcode.
+ * @cos: Class of service.
+ * @link_status: Link status (up/down).
+ * @fcfi: FCF flow id.
+ * @cbfn: The call back function.
+ *
+ *
+ */
+void
+csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
+ uint8_t cos, bool link_status, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_link_cmd *cmdp =
+ (struct fw_fcoe_link_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_portid = htonl((
+ FW_CMD_OP_V(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_FCOE_LINK_CMD_PORTID(port_id)));
+ cmdp->sub_opcode_fcfi = htonl(
+ FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
+ FW_FCOE_LINK_CMD_FCFI(fcfi));
+ cmdp->lstatus = link_status;
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_write_fcoe_link_cond_init_mb */
+
+/*
+ * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
+ * resource information (FW_FCOE_RES_INFO_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_res_info_cmd *cmdp =
+ (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+
+ cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F));
+
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_res_info_init_mb */
+
+/*
+ * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
+ * in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @iqid: iqid
+ * @vnport_wwnn: vnport WWNN
+ * @vnport_wwpn: vnport WWPN
+ * @cbfn: The call-back function.
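+ *
+ * Note: if both @vnport_wwnn and @vnport_wwpn are zero, the helper
+ * sets FW_FCOE_VNP_CMD_GEN_WWN below, asking the firmware to generate
+ * the WWNs for this VN_Port itself.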
+ *
+ *
+ */
+void
+csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
+ uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi)));
+
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+
+ cmdp->iqid = htons(iqid);
+
+ if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
+ cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
+
+ if (vnport_wwnn)
+ memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
+ if (vnport_wwpn)
+ memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
+
+} /* csio_fcoe_vnp_alloc_init_mb */
+
+/*
+ * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @cbfn: The call-back handler.
+ */
+void
+csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
+ * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF flow id
+ * @vnpi: VNP flow id
+ * @cbfn: The call-back function.
+ * Return: None
+ */
+void
+csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
+ * FCF records.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @portid: Port ID.
+ * @fcfi: FCF index to read.
+ * @cbfn: The call-back function + * + * + */ +void +csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp, + uint32_t mb_tmo, uint32_t portid, uint32_t fcfi, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct fw_fcoe_fcf_cmd *cmdp = + (struct fw_fcoe_fcf_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1); + + cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_FCOE_FCF_CMD_FCFI(fcfi)); + cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); + +} /* csio_fcoe_read_fcf_init_mb */ + +void +csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp, + uint32_t mb_tmo, + struct fw_fcoe_port_cmd_params *portparams, + void (*cbfn)(struct csio_hw *, + struct csio_mb *)) +{ + struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb); + + CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1); + mbp->mb_size = 64; + + cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16)); + + cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) | + FW_FCOE_STATS_CMD_PORT(portparams->portid); + + cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) | + FW_FCOE_STATS_CMD_PORT_VALID; + +} /* csio_fcoe_read_portparams_init_mb */ + +void +csio_mb_process_portparams_rsp(struct csio_hw *hw, + struct csio_mb *mbp, + enum fw_retval *retval, + struct fw_fcoe_port_cmd_params *portparams, + struct fw_fcoe_port_stats *portstats) +{ + struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb); + struct fw_fcoe_port_stats stats; + uint8_t *src; + uint8_t *dst; + + *retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16)); + + memset(&stats, 0, sizeof(struct fw_fcoe_port_stats)); + + if (*retval == FW_SUCCESS) { + dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8); + src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8); + memcpy(dst, src, (portparams->nstats * 8)); + if (portparams->idx == 1) { + /* Get the first 6 flits from the Mailbox */ + portstats->tx_bcast_bytes = stats.tx_bcast_bytes; + portstats->tx_bcast_frames = stats.tx_bcast_frames; + portstats->tx_mcast_bytes = stats.tx_mcast_bytes; + portstats->tx_mcast_frames = stats.tx_mcast_frames; + portstats->tx_ucast_bytes = stats.tx_ucast_bytes; + portstats->tx_ucast_frames = stats.tx_ucast_frames; + } + if (portparams->idx == 7) { + /* Get the second 6 flits from the Mailbox */ + portstats->tx_drop_frames = stats.tx_drop_frames; + portstats->tx_offload_bytes = stats.tx_offload_bytes; + portstats->tx_offload_frames = stats.tx_offload_frames; +#if 0 + portstats->rx_pf_bytes = stats.rx_pf_bytes; + portstats->rx_pf_frames = stats.rx_pf_frames; +#endif + portstats->rx_bcast_bytes = stats.rx_bcast_bytes; + portstats->rx_bcast_frames = stats.rx_bcast_frames; + portstats->rx_mcast_bytes = stats.rx_mcast_bytes; + } + if (portparams->idx == 13) { + /* Get the last 4 flits from the Mailbox */ + portstats->rx_mcast_frames = stats.rx_mcast_frames; + portstats->rx_ucast_bytes = stats.rx_ucast_bytes; + portstats->rx_ucast_frames = stats.rx_ucast_frames; + portstats->rx_err_frames = stats.rx_err_frames; + } + } +} + +/* Entry points/APIs for MB module */ +/* + * csio_mb_intr_enable - Enable Interrupts from mailboxes. + * @hw: The HW structure + * + * Enables CIM interrupt bit in appropriate INT_ENABLE registers. 
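+ *
+ * The register is read back after the write so the posted write is
+ * flushed to the adapter before interrupts are relied upon.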
+ */ +void +csio_mb_intr_enable(struct csio_hw *hw) +{ + csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); + csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); +} + +/* + * csio_mb_intr_disable - Disable Interrupts from mailboxes. + * @hw: The HW structure + * + * Disable bit in HostInterruptEnable CIM register. + */ +void +csio_mb_intr_disable(struct csio_hw *hw) +{ + csio_wr_reg32(hw, MBMSGRDYINTEN_V(0), + MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); + csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A)); +} + +static void +csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd) +{ + struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd; + + if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) { + csio_info(hw, "FW print message:\n"); + csio_info(hw, "\tdebug->dprtstridx = %d\n", + ntohs(dbg->u.prt.dprtstridx)); + csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam0)); + csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam1)); + csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam2)); + csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n", + ntohl(dbg->u.prt.dprtstrparam3)); + } else { + /* This is a FW assertion */ + csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + dbg->u.assert.filename_0_7, + ntohl(dbg->u.assert.line), + ntohl(dbg->u.assert.x), + ntohl(dbg->u.assert.y)); + } +} + +static void +csio_mb_debug_cmd_handler(struct csio_hw *hw) +{ + int i; + __be64 cmd[CSIO_MB_MAX_REGS]; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size = sizeof(struct fw_debug_cmd); + + /* Copy mailbox data */ + for (i = 0; i < size; i += 8) + cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i)); + + csio_mb_dump_fw_dbg(hw, cmd); + + /* Notify FW of mailbox by setting owner as UP */ + csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | + MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); + + csio_rd_reg32(hw, ctl_reg); + wmb(); +} + +/* + * csio_mb_issue - generic routine for issuing Mailbox commands. + * @hw: The HW structure + * @mbp: Mailbox command to issue + * + * Caller should hold hw lock across this call. 
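+ *
+ * Two modes are supported: with a NULL mb_cbfn the command is issued
+ * and polled for completion in-line; with a callback, the command is
+ * posted (or queued behind the active mailbox) and completed
+ * asynchronously. Polled-mode sketch (command setup elided; a rough
+ * illustration only):
+ *
+ *     csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+ *     rv = csio_mb_issue(hw, mbp);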
+ */ +int +csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) +{ + uint32_t owner, ctl; + int i; + uint32_t ii; + __be64 *cmd = mbp->mb; + __be64 hdr; + struct csio_mbm *mbm = &hw->mbm; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size = mbp->mb_size; + int rv = -EINVAL; + struct fw_cmd_hdr *fw_hdr; + + /* Determine mode */ + if (mbp->mb_cbfn == NULL) { + /* Need to issue/get results in the same context */ + if (mbp->tmo < CSIO_MB_POLL_FREQ) { + csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo); + goto error_out; + } + } else if (!csio_is_host_intr_enabled(hw) || + !csio_is_hw_intr_enabled(hw)) { + csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n", + *((uint8_t *)mbp->mb)); + goto error_out; + } + + if (mbm->mcurrent != NULL) { + /* Queue mbox cmd, if another mbox cmd is active */ + if (mbp->mb_cbfn == NULL) { + rv = -EBUSY; + csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n", + hw->pfn, *((uint8_t *)mbp->mb)); + + goto error_out; + } else { + list_add_tail(&mbp->list, &mbm->req_q); + CSIO_INC_STATS(mbm, n_activeq); + + return 0; + } + } + + /* Now get ownership of mailbox */ + owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); + + if (!csio_mb_is_host_owner(owner)) { + + for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++) + owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg)); + /* + * Mailbox unavailable. In immediate mode, fail the command. + * In other modes, enqueue the request. + */ + if (!csio_mb_is_host_owner(owner)) { + if (mbp->mb_cbfn == NULL) { + rv = owner ? -EBUSY : -ETIMEDOUT; + + csio_dbg(hw, + "Couldn't own Mailbox %x op:0x%x " + "owner:%x\n", + hw->pfn, *((uint8_t *)mbp->mb), owner); + goto error_out; + } else { + if (mbm->mcurrent == NULL) { + csio_err(hw, + "Couldn't own Mailbox %x " + "op:0x%x owner:%x\n", + hw->pfn, *((uint8_t *)mbp->mb), + owner); + csio_err(hw, + "No outstanding driver" + " mailbox as well\n"); + goto error_out; + } + } + } + } + + /* Mailbox is available, copy mailbox data into it */ + for (i = 0; i < size; i += 8) { + csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i); + cmd++; + } + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + /* Start completion timers in non-immediate modes and notify FW */ + if (mbp->mb_cbfn != NULL) { + mbm->mcurrent = mbp; + mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo)); + csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F | + MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg); + } else + csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW), + ctl_reg); + + /* Flush posted writes */ + csio_rd_reg32(hw, ctl_reg); + wmb(); + + CSIO_INC_STATS(mbm, n_req); + + if (mbp->mb_cbfn) + return 0; + + /* Poll for completion in immediate mode */ + cmd = mbp->mb; + + for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) { + mdelay(CSIO_MB_POLL_FREQ); + + /* Check for response */ + ctl = csio_rd_reg32(hw, ctl_reg); + if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { + + if (!(ctl & MBMSGVALID_F)) { + csio_wr_reg32(hw, 0, ctl_reg); + continue; + } + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); + fw_hdr = (struct fw_cmd_hdr *)&hdr; + + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { + case FW_DEBUG_CMD: + csio_mb_debug_cmd_handler(hw); + continue; + } + + /* Copy response */ + for (i = 0; i < size; i += 8) + *cmd++ = cpu_to_be64(csio_rd_reg64 + (hw, data_reg + i)); + csio_wr_reg32(hw, 0, ctl_reg); + + if (csio_mb_fw_retval(mbp) != FW_SUCCESS) + CSIO_INC_STATS(mbm, n_err); + + CSIO_INC_STATS(mbm, n_rsp); + return 0; + } + 
} + + CSIO_INC_STATS(mbm, n_tmo); + + csio_err(hw, "Mailbox %x op:0x%x timed out!\n", + hw->pfn, *((uint8_t *)cmd)); + + return -ETIMEDOUT; + +error_out: + CSIO_INC_STATS(mbm, n_err); + return rv; +} + +/* + * csio_mb_completions - Completion handler for Mailbox commands + * @hw: The HW structure + * @cbfn_q: Completion queue. + * + */ +void +csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q) +{ + struct csio_mb *mbp; + struct csio_mbm *mbm = &hw->mbm; + enum fw_retval rv; + + while (!list_empty(cbfn_q)) { + mbp = list_first_entry(cbfn_q, struct csio_mb, list); + list_del_init(&mbp->list); + + rv = csio_mb_fw_retval(mbp); + if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR)) + CSIO_INC_STATS(mbm, n_err); + else if (rv != FW_HOSTERROR) + CSIO_INC_STATS(mbm, n_rsp); + + if (mbp->mb_cbfn) + mbp->mb_cbfn(hw, mbp); + } +} + +static void +csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id) +{ + static char *mod_str[] = { + NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" + }; + + struct csio_pport *port = &hw->pport[port_id]; + + if (port->mod_type == FW_PORT_MOD_TYPE_NONE) + csio_info(hw, "Port:%d - port module unplugged\n", port_id); + else if (port->mod_type < ARRAY_SIZE(mod_str)) + csio_info(hw, "Port:%d - %s port module inserted\n", port_id, + mod_str[port->mod_type]); + else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) + csio_info(hw, + "Port:%d - unsupported optical port module " + "inserted\n", port_id); + else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + csio_info(hw, + "Port:%d - unknown port module inserted, forcing " + "TWINAX\n", port_id); + else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR) + csio_info(hw, "Port:%d - transceiver module error\n", port_id); + else + csio_info(hw, "Port:%d - unknown module type %d inserted\n", + port_id, port->mod_type); +} + +int +csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd) +{ + uint8_t opcode = *(uint8_t *)cmd; + struct fw_port_cmd *pcmd; + uint8_t port_id; + uint32_t link_status; + uint16_t action; + uint8_t mod_type; + fw_port_cap32_t linkattr; + + if (opcode == FW_PORT_CMD) { + pcmd = (struct fw_port_cmd *)cmd; + port_id = FW_PORT_CMD_PORTID_G( + ntohl(pcmd->op_to_portid)); + action = FW_PORT_CMD_ACTION_G( + ntohl(pcmd->action_to_len16)); + if (action != FW_PORT_ACTION_GET_PORT_INFO && + action != FW_PORT_ACTION_GET_PORT_INFO32) { + csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n", + action); + return -EINVAL; + } + + if (action == FW_PORT_ACTION_GET_PORT_INFO) { + link_status = ntohl(pcmd->u.info.lstatus_to_modtype); + mod_type = FW_PORT_CMD_MODTYPE_G(link_status); + linkattr = lstatus_to_fwcap(link_status); + + hw->pport[port_id].link_status = + FW_PORT_CMD_LSTATUS_G(link_status); + } else { + link_status = + ntohl(pcmd->u.info32.lstatus32_to_cbllen32); + mod_type = FW_PORT_CMD_MODTYPE32_G(link_status); + linkattr = ntohl(pcmd->u.info32.linkattr32); + + hw->pport[port_id].link_status = + FW_PORT_CMD_LSTATUS32_G(link_status); + } + + hw->pport[port_id].link_speed = fwcap_to_fwspeed(linkattr); + + csio_info(hw, "Port:%x - LINK %s\n", port_id, + hw->pport[port_id].link_status ? "UP" : "DOWN"); + + if (mod_type != hw->pport[port_id].mod_type) { + hw->pport[port_id].mod_type = mod_type; + csio_mb_portmod_changed(hw, port_id); + } + } else if (opcode == FW_DEBUG_CMD) { + csio_mb_dump_fw_dbg(hw, cmd); + } else { + csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode); + return -EINVAL; + } + + return 0; +} + +/* + * csio_mb_isr_handler - Handle mailboxes related interrupts. 
+ * @hw: The HW structure + * + * Called from the ISR to handle Mailbox related interrupts. + * HW Lock should be held across this call. + */ +int +csio_mb_isr_handler(struct csio_hw *hw) +{ + struct csio_mbm *mbm = &hw->mbm; + struct csio_mb *mbp = mbm->mcurrent; + __be64 *cmd; + uint32_t ctl, cim_cause, pl_cause; + int i; + uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A); + uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A); + int size; + __be64 hdr; + struct fw_cmd_hdr *fw_hdr; + + pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A)); + cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); + + if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) { + CSIO_INC_STATS(hw, n_mbint_unexp); + return -EINVAL; + } + + /* + * The cause registers below HAVE to be cleared in the SAME + * order as below: The low level cause register followed by + * the upper level cause register. In other words, CIM-cause + * first followed by PL-Cause next. + */ + csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A)); + csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A)); + + ctl = csio_rd_reg32(hw, ctl_reg); + + if (csio_mb_is_host_owner(MBOWNER_G(ctl))) { + + CSIO_DUMP_MB(hw, hw->pfn, data_reg); + + if (!(ctl & MBMSGVALID_F)) { + csio_warn(hw, + "Stray mailbox interrupt recvd," + " mailbox data not valid\n"); + csio_wr_reg32(hw, 0, ctl_reg); + /* Flush */ + csio_rd_reg32(hw, ctl_reg); + return -EINVAL; + } + + hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg)); + fw_hdr = (struct fw_cmd_hdr *)&hdr; + + switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) { + case FW_DEBUG_CMD: + csio_mb_debug_cmd_handler(hw); + return -EINVAL; +#if 0 + case FW_ERROR_CMD: + case FW_INITIALIZE_CMD: /* When we are not master */ +#endif + } + + CSIO_ASSERT(mbp != NULL); + + cmd = mbp->mb; + size = mbp->mb_size; + /* Get response */ + for (i = 0; i < size; i += 8) + *cmd++ = cpu_to_be64(csio_rd_reg64 + (hw, data_reg + i)); + + csio_wr_reg32(hw, 0, ctl_reg); + /* Flush */ + csio_rd_reg32(hw, ctl_reg); + + mbm->mcurrent = NULL; + + /* Add completion to tail of cbfn queue */ + list_add_tail(&mbp->list, &mbm->cbfn_q); + CSIO_INC_STATS(mbm, n_cbfnq); + + /* + * Enqueue event to EventQ. Events processing happens + * in Event worker thread context + */ + if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp))) + CSIO_INC_STATS(hw, n_evt_drop); + + return 0; + + } else { + /* + * We can get here if mailbox MSIX vector is shared, + * or in INTx case. Or a stray interrupt. + */ + csio_dbg(hw, "Host not owner, no mailbox interrupt\n"); + CSIO_INC_STATS(hw, n_int_stray); + return -EINVAL; + } +} + +/* + * csio_mb_tmo_handler - Timeout handler + * @hw: The HW structure + * + */ +struct csio_mb * +csio_mb_tmo_handler(struct csio_hw *hw) +{ + struct csio_mbm *mbm = &hw->mbm; + struct csio_mb *mbp = mbm->mcurrent; + struct fw_cmd_hdr *fw_hdr; + + /* + * Could be a race b/w the completion handler and the timer + * and the completion handler won that race. + */ + if (mbp == NULL) { + CSIO_DB_ASSERT(0); + return NULL; + } + + fw_hdr = (struct fw_cmd_hdr *)(mbp->mb); + + csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn, + FW_CMD_OP_G(ntohl(fw_hdr->hi))); + + mbm->mcurrent = NULL; + CSIO_INC_STATS(mbm, n_tmo); + fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT)); + + return mbp; +} + +/* + * csio_mb_cancel_all - Cancel all waiting commands. + * @hw: The HW structure + * @cbfn_q: The callback queue. + * + * Caller should hold hw lock across this call. 
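+ *
+ * The active command, the request queue and the completion queue are
+ * all spliced onto @cbfn_q with their return values forced to
+ * FW_HOSTERROR, so the caller can invoke the callbacks for cleanup
+ * via csio_mb_completions().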
+ */ +void +csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) +{ + struct csio_mb *mbp; + struct csio_mbm *mbm = &hw->mbm; + struct fw_cmd_hdr *hdr; + struct list_head *tmp; + + if (mbm->mcurrent) { + mbp = mbm->mcurrent; + + /* Stop mailbox completion timer */ + del_timer_sync(&mbm->timer); + + /* Add completion to tail of cbfn queue */ + list_add_tail(&mbp->list, cbfn_q); + mbm->mcurrent = NULL; + } + + if (!list_empty(&mbm->req_q)) { + list_splice_tail_init(&mbm->req_q, cbfn_q); + mbm->stats.n_activeq = 0; + } + + if (!list_empty(&mbm->cbfn_q)) { + list_splice_tail_init(&mbm->cbfn_q, cbfn_q); + mbm->stats.n_cbfnq = 0; + } + + if (list_empty(cbfn_q)) + return; + + list_for_each(tmp, cbfn_q) { + mbp = (struct csio_mb *)tmp; + hdr = (struct fw_cmd_hdr *)(mbp->mb); + + csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n", + hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi))); + + CSIO_INC_STATS(mbm, n_cancel); + hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR)); + } +} + +/* + * csio_mbm_init - Initialize Mailbox module + * @mbm: Mailbox module + * @hw: The HW structure + * @timer: Timing function for interrupting mailboxes + * + * Initialize timer and the request/response queues. + */ +int +csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, + void (*timer_fn)(struct timer_list *)) +{ + mbm->hw = hw; + timer_setup(&mbm->timer, timer_fn, 0); + + INIT_LIST_HEAD(&mbm->req_q); + INIT_LIST_HEAD(&mbm->cbfn_q); + csio_set_mb_intr_idx(mbm, -1); + + return 0; +} + +/* + * csio_mbm_exit - Uninitialize mailbox module + * @mbm: Mailbox module + * + * Stop timer. + */ +void +csio_mbm_exit(struct csio_mbm *mbm) +{ + del_timer_sync(&mbm->timer); + + CSIO_DB_ASSERT(mbm->mcurrent == NULL); + CSIO_DB_ASSERT(list_empty(&mbm->req_q)); + CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q)); +} diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h new file mode 100644 index 000000000..b07e891c5 --- /dev/null +++ b/drivers/scsi/csiostor/csio_mb.h @@ -0,0 +1,263 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_MB_H__ +#define __CSIO_MB_H__ + +#include <linux/timer.h> +#include <linux/completion.h> + +#include "t4fw_api.h" +#include "t4fw_api_stor.h" +#include "csio_defs.h" + +#define CSIO_STATS_OFFSET (2) +#define CSIO_NUM_STATS_PER_MB (6) + +struct fw_fcoe_port_cmd_params { + uint8_t portid; + uint8_t idx; + uint8_t nstats; +}; + +#define CSIO_DUMP_MB(__hw, __num, __mb) \ + csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \ + (unsigned long long)csio_rd_reg64(__hw, __mb), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \ + (unsigned long long)csio_rd_reg64(__hw, __mb + 56)) + +#define CSIO_MB_MAX_REGS 8 +#define CSIO_MAX_MB_SIZE 64 +#define CSIO_MB_POLL_FREQ 5 /* 5 ms */ +#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT + +/* Device master in HELLO command */ +enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST }; + +enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL }; + +enum csio_dev_state { + CSIO_DEV_STATE_UNINIT, + CSIO_DEV_STATE_INIT, + CSIO_DEV_STATE_ERR +}; + +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0)) + +#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \ +do { \ + if (__clear) \ + memset((__cp), 0, \ + CSIO_MB_MAX_REGS * sizeof(__be64)); \ + INIT_LIST_HEAD(&(__mbp)->list); \ + (__mbp)->tmo = (__tmo); \ + (__mbp)->priv = (void *)(__priv); \ + (__mbp)->mb_cbfn = (__fn); \ + (__mbp)->mb_size = sizeof(*(__cp)); \ +} while (0) + +struct csio_mbm_stats { + uint32_t n_req; /* number of mbox req */ + uint32_t n_rsp; /* number of mbox rsp */ + uint32_t n_activeq; /* number of mbox req active Q */ + uint32_t n_cbfnq; /* number of mbox req cbfn Q */ + uint32_t n_tmo; /* number of mbox timeout */ + uint32_t n_cancel; /* number of mbox cancel */ + uint32_t n_err; /* number of mbox error */ +}; + +/* Driver version of Mailbox */ +struct csio_mb { + struct list_head list; /* for req/resp */ + /* queue in driver */ + __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */ + int mb_size; /* Size of this + * mailbox. 
+ */ + uint32_t tmo; /* Timeout */ + struct completion cmplobj; /* MB Completion + * object + */ + void (*mb_cbfn) (struct csio_hw *, struct csio_mb *); + /* Callback fn */ + void *priv; /* Owner private ptr */ +}; + +struct csio_mbm { + uint32_t a_mbox; /* Async mbox num */ + uint32_t intr_idx; /* Interrupt index */ + struct timer_list timer; /* Mbox timer */ + struct csio_hw *hw; /* Hardware pointer */ + struct list_head req_q; /* Mbox request queue */ + struct list_head cbfn_q; /* Mbox completion q */ + struct csio_mb *mcurrent; /* Current mailbox */ + uint32_t req_q_cnt; /* Outstanding mbox + * cmds + */ + struct csio_mbm_stats stats; /* Statistics */ +}; + +#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i)) +#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx) + +struct csio_iq_params; +struct csio_eq_params; + +enum fw_retval csio_mb_fw_retval(struct csio_mb *); + +/* MB helpers */ +void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t, + uint32_t, uint32_t, enum csio_dev_master, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, enum csio_dev_state *, + uint8_t *); + +void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int, + unsigned int, unsigned int, const u32 *, u32 *, bool, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, unsigned int , u32 *); + +void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, + int reg); + +void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t, + bool, bool, bool, bool, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t, + uint8_t, bool, uint32_t, uint16_t, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, uint16_t, + uint32_t *, uint32_t *); + +void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t, + void (*)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_iq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, struct csio_iq_params *); + +void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_iq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *, + uint32_t, struct csio_eq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *, + enum fw_retval *, struct csio_eq_params *); + +void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *, + uint32_t , struct csio_eq_params *, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *, + uint32_t, + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t, + void 
(*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t , uint32_t , uint16_t, + uint8_t [8], uint8_t [8], + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t , uint32_t , + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t , uint32_t, uint32_t , + void (*) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *, + uint32_t, uint32_t, uint32_t, + void (*cbfn) (struct csio_hw *, struct csio_mb *)); + +void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, + struct csio_mb *mbp, uint32_t mb_tmo, + struct fw_fcoe_port_cmd_params *portparams, + void (*cbfn)(struct csio_hw *, struct csio_mb *)); + +void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp, + enum fw_retval *retval, + struct fw_fcoe_port_cmd_params *portparams, + struct fw_fcoe_port_stats *portstats); + +/* MB module functions */ +int csio_mbm_init(struct csio_mbm *, struct csio_hw *, + void (*)(struct timer_list *)); +void csio_mbm_exit(struct csio_mbm *); +void csio_mb_intr_enable(struct csio_hw *); +void csio_mb_intr_disable(struct csio_hw *); + +int csio_mb_issue(struct csio_hw *, struct csio_mb *); +void csio_mb_completions(struct csio_hw *, struct list_head *); +int csio_mb_fwevt_handler(struct csio_hw *, __be64 *); +int csio_mb_isr_handler(struct csio_hw *); +struct csio_mb *csio_mb_tmo_handler(struct csio_hw *); +void csio_mb_cancel_all(struct csio_hw *, struct list_head *); + +#endif /* ifndef __CSIO_MB_H__ */ diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c new file mode 100644 index 000000000..713e13adf --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.c @@ -0,0 +1,921 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/string.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_els.h> +#include <scsi/fc/fc_fs.h> + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" + +static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *); +static void csio_rnode_exit(struct csio_rnode *); + +/* Static machine forward declarations */ +static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev); +static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev); + +/* RNF event mapping */ +static enum csio_rn_ev fwevt_to_rnevt[] = { + CSIO_RNFE_NONE, /* None */ + CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */ + CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */ + CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */ + CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */ + CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */ + CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */ + CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */ + CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */ + CSIO_RNFE_NONE, /* NPORT_ID_CHGD */ + CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */ + CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */ + CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */ + CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */ + CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */ + CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */ + CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */ + CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ + CSIO_RNFE_NONE, /* PRLI_TMO */ + CSIO_RNFE_NONE, /* ADISC_TMO */ + CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */ + CSIO_RNFE_NONE, /* SCR_ACC_RCVD */ + CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */ + CSIO_RNFE_NONE, /* LOGO_SNT */ + CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */ +}; + +#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ + CSIO_RNFE_NONE : \ + fwevt_to_rnevt[_evt]) +int +csio_is_rnode_ready(struct csio_rnode *rn) +{ + return csio_match_state(rn, csio_rns_ready); +} + +static int +csio_is_rnode_uninit(struct csio_rnode *rn) +{ + return csio_match_state(rn, csio_rns_uninit); +} + +static int +csio_is_rnode_wka(uint8_t rport_type) +{ + if ((rport_type == FLOGI_VFPORT) || + (rport_type == FDISC_VFPORT) || + (rport_type == NS_VNPORT) || + (rport_type == FDMI_VNPORT)) + return 1; + + return 0; +} + +/* + * csio_rn_lookup - Finds the rnode with the given flowid + * @ln - lnode + * @flowid - flowid. + * + * Does the rnode lookup on the given lnode and flowid.If no matching entry + * found, NULL is returned. + */ +static struct csio_rnode * +csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (rn->flowid == flowid) + return rn; + } + + return NULL; +} + +/* + * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn + * @ln: lnode + * @wwpn: wwpn + * + * Does the rnode lookup on the given lnode and wwpn. If no matching entry + * found, NULL is returned. 
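+ *
+ * A minimal, hypothetical caller sketch (not taken from this driver's
+ * call sites), resolving a remote port by its 8-byte WWPN and falling
+ * back to allocation:
+ *
+ *	struct csio_rnode *rn;
+ *
+ *	rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ *	if (!rn)
+ *		rn = csio_alloc_rnode(ln);	/* may return NULL */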
+ */ +static struct csio_rnode * +csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (!memcmp(csio_rn_wwpn(rn), wwpn, 8)) + return rn; + } + + return NULL; +} + +/** + * csio_rnode_lookup_portid - Finds the rnode with the given portid + * @ln: lnode + * @portid: port id + * + * Lookup the rnode list for a given portid. If no matching entry + * found, NULL is returned. + */ +struct csio_rnode * +csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid) +{ + struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; + struct list_head *tmp; + struct csio_rnode *rn; + + list_for_each(tmp, &rnhead->sm.sm_list) { + rn = (struct csio_rnode *) tmp; + if (rn->nport_id == portid) + return rn; + } + + return NULL; +} + +static int +csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid, + uint32_t *vnp_flowid) +{ + struct csio_rnode *rnhead; + struct list_head *tmp, *tmp1; + struct csio_rnode *rn; + struct csio_lnode *ln_tmp; + struct csio_hw *hw = csio_lnode_to_hw(ln); + + list_for_each(tmp1, &hw->sln_head) { + ln_tmp = (struct csio_lnode *) tmp1; + if (ln_tmp == ln) + continue; + + rnhead = (struct csio_rnode *)&ln_tmp->rnhead; + list_for_each(tmp, &rnhead->sm.sm_list) { + + rn = (struct csio_rnode *) tmp; + if (csio_is_rnode_ready(rn)) { + if (rn->flowid == rdev_flowid) { + *vnp_flowid = csio_ln_flowid(ln_tmp); + return 1; + } + } + } + } + + return 0; +} + +static struct csio_rnode * +csio_alloc_rnode(struct csio_lnode *ln) +{ + struct csio_hw *hw = csio_lnode_to_hw(ln); + + struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC); + if (!rn) + goto err; + + memset(rn, 0, sizeof(struct csio_rnode)); + if (csio_rnode_init(rn, ln)) + goto err_free; + + CSIO_INC_STATS(ln, n_rnode_alloc); + + return rn; + +err_free: + mempool_free(rn, hw->rnode_mempool); +err: + CSIO_INC_STATS(ln, n_rnode_nomem); + return NULL; +} + +static void +csio_free_rnode(struct csio_rnode *rn) +{ + struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn)); + + csio_rnode_exit(rn); + CSIO_INC_STATS(rn->lnp, n_rnode_free); + mempool_free(rn, hw->rnode_mempool); +} + +/* + * csio_get_rnode - Gets rnode with the given flowid + * @ln - lnode + * @flowid - flow id. + * + * Does the rnode lookup on the given lnode and flowid. If no matching + * rnode found, then new rnode with given npid is allocated and returned. + */ +static struct csio_rnode * +csio_get_rnode(struct csio_lnode *ln, uint32_t flowid) +{ + struct csio_rnode *rn; + + rn = csio_rn_lookup(ln, flowid); + if (!rn) { + rn = csio_alloc_rnode(ln); + if (!rn) + return NULL; + + rn->flowid = flowid; + } + + return rn; +} + +/* + * csio_put_rnode - Frees the given rnode + * @ln - lnode + * @flowid - flow id. + * + * Does the rnode lookup on the given lnode and flowid. If no matching + * rnode found, then new rnode with given npid is allocated and returned. + */ +void +csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn) +{ + CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0); + csio_free_rnode(rn); +} + +/* + * csio_confirm_rnode - confirms rnode based on wwpn. + * @ln: lnode + * @rdev_flowid: remote device flowid + * @rdevp: remote device params + * This routines searches other rnode in list having same wwpn of new rnode. + * If there is a match, then matched rnode is returned and otherwise new rnode + * is returned. 
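+ *
+ * A non-normative outline of the confirmation logic below:
+ *
+ *	rn = lookup by flowid (ssni)
+ *	if no rn:  reject flowids already active on another lnode,
+ *	           else match by wwpn, else allocate a new rnode
+ *	if rn:     well-known ports are re-matched by nport id and wwpn;
+ *	           N-ports are re-matched by wwpn, re-keyed when the
+ *	           flowid changed, or a new rnode is allocated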
+ * returns rnode. + */ +struct csio_rnode * +csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid, + struct fcoe_rdev_entry *rdevp) +{ + uint8_t rport_type; + struct csio_rnode *rn, *match_rn; + uint32_t vnp_flowid = 0; + __be32 *port_id; + + port_id = (__be32 *)&rdevp->r_id[0]; + rport_type = + FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); + + /* Drop rdev event for cntrl port */ + if (rport_type == FAB_CTLR_VNPORT) { + csio_ln_dbg(ln, + "Unhandled rport_type:%d recv in rdev evt " + "ssni:x%x\n", rport_type, rdev_flowid); + return NULL; + } + + /* Lookup on flowid */ + rn = csio_rn_lookup(ln, rdev_flowid); + if (!rn) { + + /* Drop events with duplicate flowid */ + if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) { + csio_ln_warn(ln, + "ssni:%x already active on vnpi:%x", + rdev_flowid, vnp_flowid); + return NULL; + } + + /* Lookup on wwpn for NPORTs */ + rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); + if (!rn) + goto alloc_rnode; + + } else { + /* Lookup well-known ports with nport id */ + if (csio_is_rnode_wka(rport_type)) { + match_rn = csio_rnode_lookup_portid(ln, + ((ntohl(*port_id) >> 8) & CSIO_DID_MASK)); + if (match_rn == NULL) { + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + + /* + * Now compare the wwpn to confirm that + * same port relogged in. If so update the matched rn. + * Else, go ahead and alloc a new rnode. + */ + if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { + if (rn == match_rn) + goto found_rnode; + csio_ln_dbg(ln, + "nport_id:x%x and wwpn:%llx" + " match for ssni:x%x\n", + rn->nport_id, + wwn_to_u64(rdevp->wwpn), + rdev_flowid); + if (csio_is_rnode_ready(rn)) { + csio_ln_warn(ln, + "rnode is already" + "active ssni:x%x\n", + rdev_flowid); + CSIO_ASSERT(0); + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + rn = match_rn; + + /* Update rn */ + goto found_rnode; + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + + /* wwpn match */ + if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8)) + goto found_rnode; + + /* Search for rnode that have same wwpn */ + match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn); + if (match_rn != NULL) { + csio_ln_dbg(ln, + "ssni:x%x changed for rport name(wwpn):%llx " + "did:x%x\n", rdev_flowid, + wwn_to_u64(rdevp->wwpn), + match_rn->nport_id); + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + rn = match_rn; + } else { + csio_ln_dbg(ln, + "rnode wwpn mismatch found ssni:x%x " + "name(wwpn):%llx\n", + rdev_flowid, + wwn_to_u64(csio_rn_wwpn(rn))); + if (csio_is_rnode_ready(rn)) { + csio_ln_warn(ln, + "rnode is already active " + "wwpn:%llx ssni:x%x\n", + wwn_to_u64(csio_rn_wwpn(rn)), + rdev_flowid); + CSIO_ASSERT(0); + } + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + goto alloc_rnode; + } + } + +found_rnode: + csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n", + rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); + + /* Update flowid */ + csio_rn_flowid(rn) = rdev_flowid; + + /* update rdev entry */ + rn->rdev_entry = rdevp; + CSIO_INC_STATS(ln, n_rnode_match); + return rn; + +alloc_rnode: + rn = csio_get_rnode(ln, rdev_flowid); + if (!rn) + return NULL; + + csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n", + rn, rdev_flowid, wwn_to_u64(rdevp->wwpn)); + + /* update rdev entry */ + rn->rdev_entry = rdevp; + return rn; +} + +/* + * csio_rn_verify_rparams - verify rparams. + * @ln: lnode + * @rn: rnode + * @rdevp: remote device params + * returns success if rparams are verified. 
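+ *
+ * For example, per the checks below, a FLOGI_VFPORT entry whose DID is
+ * not FC_FID_FLOGI, or an NS_VNPORT/REG_VNPORT entry carrying an
+ * all-zero wwpn or wwnn, is rejected with -EINVAL before the rnode can
+ * be registered.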
+ */ +static int +csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn, + struct fcoe_rdev_entry *rdevp) +{ + uint8_t null[8]; + uint8_t rport_type; + uint8_t fc_class; + __be32 *did; + + did = (__be32 *) &rdevp->r_id[0]; + rport_type = + FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type); + switch (rport_type) { + case FLOGI_VFPORT: + rn->role = CSIO_RNFR_FABRIC; + if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) { + csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", + csio_rn_flowid(rn)); + return -EINVAL; + } + /* NPIV support */ + if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos)) + ln->flags |= CSIO_LNF_NPIVSUPP; + + break; + + case NS_VNPORT: + rn->role = CSIO_RNFR_NS; + if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) { + csio_ln_err(ln, "ssni:x%x invalid fabric portid\n", + csio_rn_flowid(rn)); + return -EINVAL; + } + break; + + case REG_FC4_VNPORT: + case REG_VNPORT: + rn->role = CSIO_RNFR_NPORT; + if (rdevp->event_cause == PRLI_ACC_RCVD || + rdevp->event_cause == PRLI_RCVD) { + if (FW_RDEV_WR_TASK_RETRY_ID_GET( + rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW; + + if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_RETRY; + + if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt)) + rn->fcp_flags |= FCP_SPPF_CONF_COMPL; + + if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt)) + rn->role |= CSIO_RNFR_TARGET; + + if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt)) + rn->role |= CSIO_RNFR_INITIATOR; + } + + break; + + case FDMI_VNPORT: + case FAB_CTLR_VNPORT: + rn->role = 0; + break; + + default: + csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n", + csio_rn_flowid(rn), rport_type); + return -EINVAL; + } + + /* validate wwpn/wwnn for Name server/remote port */ + if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) { + memset(null, 0, 8); + if (!memcmp(rdevp->wwnn, null, 8)) { + csio_ln_err(ln, + "ssni:x%x invalid wwnn received from" + " rport did:x%x\n", + csio_rn_flowid(rn), + (ntohl(*did) & CSIO_DID_MASK)); + return -EINVAL; + } + + if (!memcmp(rdevp->wwpn, null, 8)) { + csio_ln_err(ln, + "ssni:x%x invalid wwpn received from" + " rport did:x%x\n", + csio_rn_flowid(rn), + (ntohl(*did) & CSIO_DID_MASK)); + return -EINVAL; + } + + } + + /* Copy wwnn, wwpn and nport id */ + rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK; + memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8); + memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8); + rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz; + fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos); + rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID); + + return 0; +} + +static void +__csio_reg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + spin_unlock_irq(&hw->lock); + csio_reg_rnode(rn); + spin_lock_irq(&hw->lock); + + if (rn->role & CSIO_RNFR_TARGET) + ln->n_scsi_tgts++; + + if (rn->nport_id == FC_FID_MGMT_SERV) + csio_ln_fdmi_start(ln, (void *) rn); +} + +static void +__csio_unreg_rnode(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + struct csio_hw *hw = csio_lnode_to_hw(ln); + LIST_HEAD(tmp_q); + int cmpl = 0; + + if (!list_empty(&rn->host_cmpl_q)) { + csio_dbg(hw, "Returning completion queue I/Os\n"); + list_splice_tail_init(&rn->host_cmpl_q, &tmp_q); + cmpl = 1; + } + + if (rn->role & CSIO_RNFR_TARGET) { + ln->n_scsi_tgts--; + ln->last_scan_ntgts--; + } + + spin_unlock_irq(&hw->lock); + csio_unreg_rnode(rn); + spin_lock_irq(&hw->lock); + + /* Cleanup I/Os 
that were waiting for rnode to unregister */ + if (cmpl) + csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q); + +} + +/*****************************************************************************/ +/* START: Rnode SM */ +/*****************************************************************************/ + +/* + * csio_rns_uninit - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + } + break; + case CSIO_RNFE_LOGO_RECV: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt); + CSIO_INC_STATS(rn, n_evt_drop); + break; + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_ready - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did:x%x " + "in rn state[ready]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_drop); + break; + + case CSIO_RNFE_PRLI_DONE: + case CSIO_RNFE_PRLI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) + __csio_reg_rnode(rn); + else + CSIO_INC_STATS(rn, n_err_inval); + + break; + case CSIO_RNFE_DOWN: + csio_set_state(&rn->sm, csio_rns_offline); + __csio_unreg_rnode(rn); + + /* FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + break; + + case CSIO_RNFE_LOGO_RECV: + csio_set_state(&rn->sm, csio_rns_offline); + + __csio_unreg_rnode(rn); + + /* FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + break; + + case CSIO_RNFE_CLOSE: + /* + * Each rnode receives CLOSE event when driver is removed or + * device is reset + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + __csio_unreg_rnode(rn); + break; + + case CSIO_RNFE_NAME_MISSING: + csio_set_state(&rn->sm, csio_rns_disappeared); + __csio_unreg_rnode(rn); + + /* + * FW expected to internally aborted outstanding SCSI WRs + * and return all SCSI WRs to host with status "ABORTED". + */ + + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did:x%x " + "in rn state[uninit]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_offline - + * @rn - rnode + * @evt - SM event. 
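+ *
+ * Offline means the rnode has been unregistered from the FC transport
+ * but not yet freed: LOGGED_IN/PLOGI_RECV re-validates the rparams and
+ * moves it back to ready, CLOSE returns it to uninit, and NAME_MISSING
+ * parks it in the disappeared state.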
+ * + */ +static void +csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + } + break; + + case CSIO_RNFE_DOWN: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did:x%x " + "in rn state[offline]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_drop); + break; + + case CSIO_RNFE_CLOSE: + /* Each rnode receives CLOSE event when driver is removed or + * device is reset + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + break; + + case CSIO_RNFE_NAME_MISSING: + csio_set_state(&rn->sm, csio_rns_disappeared); + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did:x%x " + "in rn state[offline]\n", csio_rn_flowid(rn), evt, + rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/* + * csio_rns_disappeared - + * @rn - rnode + * @evt - SM event. + * + */ +static void +csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + int ret = 0; + + CSIO_INC_STATS(rn, n_evt_sm[evt]); + + switch (evt) { + case CSIO_RNFE_LOGGED_IN: + case CSIO_RNFE_PLOGI_RECV: + ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry); + if (!ret) { + csio_set_state(&rn->sm, csio_rns_ready); + __csio_reg_rnode(rn); + } else { + CSIO_INC_STATS(rn, n_err_inval); + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + } + break; + + case CSIO_RNFE_CLOSE: + /* Each rnode receives CLOSE event when driver is removed or + * device is reset. + * Note: All outstanding IOs on remote port need to returned + * to uppper layer with appropriate error before sending + * CLOSE event + */ + csio_set_state(&rn->sm, csio_rns_uninit); + break; + + case CSIO_RNFE_DOWN: + case CSIO_RNFE_NAME_MISSING: + csio_ln_dbg(ln, + "ssni:x%x Ignoring event %d recv from did x%x" + "in rn state[disappeared]\n", csio_rn_flowid(rn), + evt, rn->nport_id); + break; + + default: + csio_ln_dbg(ln, + "ssni:x%x unexp event %d recv from did x%x" + "in rn state[disappeared]\n", csio_rn_flowid(rn), + evt, rn->nport_id); + CSIO_INC_STATS(rn, n_evt_unexp); + break; + } +} + +/*****************************************************************************/ +/* END: Rnode SM */ +/*****************************************************************************/ + +/* + * csio_rnode_devloss_handler - Device loss event handler + * @rn: rnode + * + * Post event to close rnode SM and free rnode. + */ +void +csio_rnode_devloss_handler(struct csio_rnode *rn) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + + /* ignore if same rnode came back as online */ + if (csio_is_rnode_ready(rn)) + return; + + csio_post_event(&rn->sm, CSIO_RNFE_CLOSE); + + /* Free rn if in uninit state */ + if (csio_is_rnode_uninit(rn)) + csio_put_rnode(ln, rn); +} + +/** + * csio_rnode_fwevt_handler - Event handler for firmware rnode events. 
+ * @rn: rnode + * @fwevt: firmware event to handle + */ +void +csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt) +{ + struct csio_lnode *ln = csio_rnode_to_lnode(rn); + enum csio_rn_ev evt; + + evt = CSIO_FWE_TO_RNFE(fwevt); + if (!evt) { + csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n", + csio_rn_flowid(rn), fwevt); + CSIO_INC_STATS(rn, n_evt_unexp); + return; + } + CSIO_INC_STATS(rn, n_evt_fw[fwevt]); + + /* Track previous & current events for debugging */ + rn->prev_evt = rn->cur_evt; + rn->cur_evt = fwevt; + + /* Post event to rnode SM */ + csio_post_event(&rn->sm, evt); + + /* Free rn if in uninit state */ + if (csio_is_rnode_uninit(rn)) + csio_put_rnode(ln, rn); +} + +/* + * csio_rnode_init - Initialize rnode. + * @rn: RNode + * @ln: Associated lnode + * + * Caller is responsible for holding the lock. The lock is required + * to be held for inserting the rnode in ln->rnhead list. + */ +static int +csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln) +{ + csio_rnode_to_lnode(rn) = ln; + csio_init_state(&rn->sm, csio_rns_uninit); + INIT_LIST_HEAD(&rn->host_cmpl_q); + csio_rn_flowid(rn) = CSIO_INVALID_IDX; + + /* Add rnode to list of lnodes->rnhead */ + list_add_tail(&rn->sm.sm_list, &ln->rnhead); + + return 0; +} + +static void +csio_rnode_exit(struct csio_rnode *rn) +{ + list_del_init(&rn->sm.sm_list); + CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q)); +} diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h new file mode 100644 index 000000000..433434221 --- /dev/null +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -0,0 +1,141 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_RNODE_H__ +#define __CSIO_RNODE_H__ + +#include "csio_defs.h" + +/* State machine evets */ +enum csio_rn_ev { + CSIO_RNFE_NONE = (uint32_t)0, /* None */ + CSIO_RNFE_LOGGED_IN, /* [N/F]Port login + * complete. 
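+					 * Mapped from the PLOGI,
+					 * FLOGI and FDISC accept
+					 * entries in
+					 * fwevt_to_rnevt[].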
+ */ + CSIO_RNFE_PRLI_DONE, /* PRLI completed */ + CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */ + CSIO_RNFE_PRLI_RECV, /* Received PLOGI */ + CSIO_RNFE_LOGO_RECV, /* Received LOGO */ + CSIO_RNFE_PRLO_RECV, /* Received PRLO */ + CSIO_RNFE_DOWN, /* Rnode is down */ + CSIO_RNFE_CLOSE, /* Close rnode */ + CSIO_RNFE_NAME_MISSING, /* Rnode name missing + * in name server. + */ + CSIO_RNFE_MAX_EVENT, +}; + +/* rnode stats */ +struct csio_rnode_stats { + uint32_t n_err; /* error */ + uint32_t n_err_inval; /* invalid parameter */ + uint32_t n_err_nomem; /* error nomem */ + uint32_t n_evt_unexp; /* unexpected event */ + uint32_t n_evt_drop; /* unexpected event */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ + enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ + uint32_t n_lun_rst; /* Number of resets of + * of LUNs under this + * target + */ + uint32_t n_lun_rst_fail; /* Number of LUN reset + * failures. + */ + uint32_t n_tgt_rst; /* Number of target resets */ + uint32_t n_tgt_rst_fail; /* Number of target reset + * failures. + */ +}; + +/* Defines for rnode role */ +#define CSIO_RNFR_INITIATOR 0x1 +#define CSIO_RNFR_TARGET 0x2 +#define CSIO_RNFR_FABRIC 0x4 +#define CSIO_RNFR_NS 0x8 +#define CSIO_RNFR_NPORT 0x10 + +struct csio_rnode { + struct csio_sm sm; /* State machine - + * should be the + * 1st member + */ + struct csio_lnode *lnp; /* Pointer to owning + * Lnode */ + uint32_t flowid; /* Firmware ID */ + struct list_head host_cmpl_q; /* SCSI IOs + * pending to completed + * to Mid-layer. + */ + /* FC identifiers for remote node */ + uint32_t nport_id; + uint16_t fcp_flags; /* FCP Flags */ + uint8_t cur_evt; /* Current event */ + uint8_t prev_evt; /* Previous event */ + uint32_t role; /* Fabric/Target/ + * Initiator/NS + */ + struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */ + struct csio_service_parms rn_sparm; + + /* FC transport attributes */ + struct fc_rport *rport; /* FC transport rport */ + uint32_t supp_classes; /* Supported FC classes */ + uint32_t maxframe_size; /* Max Frame size */ + uint32_t scsi_id; /* Transport given SCSI id */ + + struct csio_rnode_stats stats; /* Common rnode stats */ +}; + +#define csio_rn_flowid(rn) ((rn)->flowid) +#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn) +#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn) +#define csio_rnode_to_lnode(rn) ((rn)->lnp) + +int csio_is_rnode_ready(struct csio_rnode *rn); +void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str); + +struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t); +struct csio_rnode *csio_confirm_rnode(struct csio_lnode *, + uint32_t, struct fcoe_rdev_entry *); + +void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt); + +void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn); + +void csio_reg_rnode(struct csio_rnode *); +void csio_unreg_rnode(struct csio_rnode *); + +void csio_rnode_devloss_handler(struct csio_rnode *); + +#endif /* ifndef __CSIO_RNODE_H__ */ diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c new file mode 100644 index 000000000..05e1a63e0 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -0,0 +1,2529 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/ctype.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/compiler.h> +#include <linux/export.h> +#include <linux/module.h> +#include <asm/unaligned.h> +#include <asm/page.h> +#include <scsi/scsi.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport_fc.h> + +#include "csio_hw.h" +#include "csio_lnode.h" +#include "csio_rnode.h" +#include "csio_scsi.h" +#include "csio_init.h" + +int csio_scsi_eqsize = 65536; +int csio_scsi_iqlen = 128; +int csio_scsi_ioreqs = 2048; +uint32_t csio_max_scan_tmo; +uint32_t csio_delta_scan_tmo = 5; +int csio_lun_qdepth = 32; + +static int csio_ddp_descs = 128; + +static int csio_do_abrt_cls(struct csio_hw *, + struct csio_ioreq *, bool); + +static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev); +static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev); + +/* + * csio_scsi_match_io - Match an ioreq with the given SCSI level data. + * @ioreq: The I/O request + * @sld: Level information + * + * Should be called with lock held. + * + */ +static bool +csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld) +{ + struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq); + + switch (sld->level) { + case CSIO_LEV_LUN: + if (scmnd == NULL) + return false; + + return ((ioreq->lnode == sld->lnode) && + (ioreq->rnode == sld->rnode) && + ((uint64_t)scmnd->device->lun == sld->oslun)); + + case CSIO_LEV_RNODE: + return ((ioreq->lnode == sld->lnode) && + (ioreq->rnode == sld->rnode)); + case CSIO_LEV_LNODE: + return (ioreq->lnode == sld->lnode); + case CSIO_LEV_ALL: + return true; + default: + return false; + } +} + +/* + * csio_scsi_gather_active_ios - Gather active I/Os based on level + * @scm: SCSI module + * @sld: Level information + * @dest: The queue where these I/Os have to be gathered. + * + * Should be called with lock held. 
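+ *
+ * A hedged usage sketch (names and locking context assumed), gathering
+ * everything active on a single LUN, e.g. from a LUN-reset path:
+ *
+ *	struct csio_scsi_level_data sld = {
+ *		.level	= CSIO_LEV_LUN,
+ *		.lnode	= ln,
+ *		.rnode	= rn,
+ *		.oslun	= (uint64_t)scmnd->device->lun,
+ *	};
+ *	LIST_HEAD(gather_q);
+ *
+ *	csio_scsi_gather_active_ios(scm, &sld, &gather_q);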
+ */ +static void +csio_scsi_gather_active_ios(struct csio_scsim *scm, + struct csio_scsi_level_data *sld, + struct list_head *dest) +{ + struct list_head *tmp, *next; + + if (list_empty(&scm->active_q)) + return; + + /* Just splice the entire active_q into dest */ + if (sld->level == CSIO_LEV_ALL) { + list_splice_tail_init(&scm->active_q, dest); + return; + } + + list_for_each_safe(tmp, next, &scm->active_q) { + if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) { + list_del_init(tmp); + list_add_tail(tmp, dest); + } + } +} + +static inline bool +csio_scsi_itnexus_loss_error(uint16_t error) +{ + switch (error) { + case FW_ERR_LINK_DOWN: + case FW_RDEV_NOT_READY: + case FW_ERR_RDEV_LOST: + case FW_ERR_RDEV_LOGO: + case FW_ERR_RDEV_IMPL_LOGO: + return true; + } + return false; +} + +/* + * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * + * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests. + */ +static inline void +csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) +{ + struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + /* Check for Task Management */ + if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) { + int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); + fcp_cmnd->fc_tm_flags = 0; + fcp_cmnd->fc_cmdref = 0; + + memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); + + if (req->nsge) + if (req->datadir == DMA_TO_DEVICE) + fcp_cmnd->fc_flags = FCP_CFL_WRDATA; + else + fcp_cmnd->fc_flags = FCP_CFL_RDDATA; + else + fcp_cmnd->fc_flags = 0; + } else { + memset(fcp_cmnd, 0, sizeof(*fcp_cmnd)); + int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); + fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags; + } +} + +/* + * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + * + * Wrapper for populating fw_scsi_cmd_wr. + */ +static inline void +csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | + FW_SCSI_CMD_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( + DIV_ROUND_UP(size, 16))); + + wr->cookie = (uintptr_t) req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t) req->tmo; + wr->r3 = 0; + memset(&wr->r5, 0, 8); + + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r6 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r4_lo[0] = 0; + wr->u.fcoe.r4_lo[1] = 0; + + /* Frame a FCP command */ + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr + + sizeof(struct fw_scsi_cmd_wr))); +} + +#define CSIO_SCSI_CMD_WR_SZ(_imm) \ + (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \ + ALIGN((_imm), 16)) /* Immed data */ + +#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \ + (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16)) + +/* + * csio_scsi_cmd - Create a SCSI CMD WR. + * @req: IO req structure. 
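+ *
+ * (Sizing note, assuming proto_cmd_len == sizeof(struct fcp_cmnd), i.e.
+ * 32 bytes: the slot requested below is CSIO_SCSI_CMD_WR_SZ_16(32),
+ * that is sizeof(struct fw_scsi_cmd_wr) plus the immediate FCP_CMND,
+ * rounded up to a 16-byte multiple.)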
+ * + * Gets a WR slot in the ingress queue and initializes it with SCSI CMD WR. + * + */ +static inline void +csio_scsi_cmd(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (unlikely(req->drv_status != 0)) + return; + + if (wrp.size1 >= size) { + /* Initialize WR in one shot */ + csio_scsi_init_cmd_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_cmd_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } +} + +/* + * csio_scsi_init_ulptx_dsgl - Fill in a ULP_TX_SC_DSGL + * @hw: HW module + * @req: IO request + * @sgl: ULP TX SGL pointer. + * + */ +static inline void +csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, + struct ulptx_sgl *sgl) +{ + struct ulptx_sge_pair *sge_pair = NULL; + struct scatterlist *sgel; + uint32_t i = 0; + uint32_t xfer_len; + struct list_head *tmp; + struct csio_dma_buf *dma_buf; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F | + ULPTX_NSGE_V(req->nsge)); + /* Now add the data SGLs */ + if (likely(!req->dcopy)) { + scsi_for_each_sg(scmnd, sgel, req->nsge, i) { + if (i == 0) { + sgl->addr0 = cpu_to_be64(sg_dma_address(sgel)); + sgl->len0 = cpu_to_be32(sg_dma_len(sgel)); + sge_pair = (struct ulptx_sge_pair *)(sgl + 1); + continue; + } + if ((i - 1) & 0x1) { + sge_pair->addr[1] = cpu_to_be64( + sg_dma_address(sgel)); + sge_pair->len[1] = cpu_to_be32( + sg_dma_len(sgel)); + sge_pair++; + } else { + sge_pair->addr[0] = cpu_to_be64( + sg_dma_address(sgel)); + sge_pair->len[0] = cpu_to_be32( + sg_dma_len(sgel)); + } + } + } else { + /* Program sg elements with driver's DDP buffer */ + xfer_len = scsi_bufflen(scmnd); + list_for_each(tmp, &req->gen_list) { + dma_buf = (struct csio_dma_buf *)tmp; + if (i == 0) { + sgl->addr0 = cpu_to_be64(dma_buf->paddr); + sgl->len0 = cpu_to_be32( + min(xfer_len, dma_buf->len)); + sge_pair = (struct ulptx_sge_pair *)(sgl + 1); + } else if ((i - 1) & 0x1) { + sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr); + sge_pair->len[1] = cpu_to_be32( + min(xfer_len, dma_buf->len)); + sge_pair++; + } else { + sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr); + sge_pair->len[0] = cpu_to_be32( + min(xfer_len, dma_buf->len)); + } + xfer_len -= min(xfer_len, dma_buf->len); + i++; + } + } +} + +/* + * csio_scsi_init_read_wr - Initialize the READ SCSI WR. + * @req: IO req structure. + * @wrp: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL + * + * Wrapper for populating fw_scsi_read_wr. 
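+ *
+ * Resulting layout within the egress queue slot (offsets illustrative):
+ *
+ *	+--------------------------+ <- start of WR
+ *	| struct fw_scsi_read_wr   |
+ *	+--------------------------+
+ *	| FCP_CMND (immediate)     | padded to ALIGN(imm, 16)
+ *	+--------------------------+
+ *	| struct ulptx_sgl (DSGL)  | filled by csio_scsi_init_ultptx_dsgl()
+ *	+--------------------------+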
+ */ +static inline void +csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp; + struct ulptx_sgl *sgl; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) | + FW_SCSI_READ_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); + wr->cookie = (uintptr_t)req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t)(req->tmo); + wr->use_xfer_cnt = 1; + wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r4 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r3_lo[0] = 0; + wr->u.fcoe.r3_lo[1] = 0; + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + + sizeof(struct fw_scsi_read_wr))); + + /* Move WR pointer past command and immediate data */ + sgl = (struct ulptx_sgl *)((uintptr_t)wrp + + sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16)); + + /* Fill in the DSGL */ + csio_scsi_init_ultptx_dsgl(hw, req, sgl); +} + +/* + * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR. + * @req: IO req structure. + * @wrp: DMA location to place the payload. + * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL + * + * Wrapper for populating fw_scsi_write_wr. + */ +static inline void +csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp; + struct ulptx_sgl *sgl; + struct csio_dma_buf *dma_buf; + uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) | + FW_SCSI_WRITE_WR_IMMDLEN(imm)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V(DIV_ROUND_UP(size, 16))); + wr->cookie = (uintptr_t)req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t)(req->tmo); + wr->use_xfer_cnt = 1; + wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd)); + /* Get RSP DMA buffer */ + dma_buf = &req->dma_buf; + + /* Prepare RSP SGL */ + wr->rsp_dmalen = cpu_to_be32(dma_buf->len); + wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); + + wr->r4 = 0; + + wr->u.fcoe.ctl_pri = 0; + wr->u.fcoe.cp_en_class = 0; + wr->u.fcoe.r3_lo[0] = 0; + wr->u.fcoe.r3_lo[1] = 0; + csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + + sizeof(struct fw_scsi_write_wr))); + + /* Move WR pointer past command and immediate data */ + sgl = (struct ulptx_sgl *)((uintptr_t)wrp + + sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16)); + + /* Fill in the DSGL */ + csio_scsi_init_ultptx_dsgl(hw, req, sgl); +} + +/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */ +#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \ +do { \ + (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \ + ALIGN((imm), 16) + /* Immed data */ \ + sizeof(struct ulptx_sgl); /* ulptx_sgl */ \ + \ + if (unlikely((req)->nsge > 1)) \ + 
(sz) += (sizeof(struct ulptx_sge_pair) * \ + (ALIGN(((req)->nsge - 1), 2) / 2)); \ + /* Data SGE */ \ +} while (0) + +/* + * csio_scsi_read - Create a SCSI READ WR. + * @req: IO req structure. + * + * Gets a WR slot in the ingress queue and initializes it with + * SCSI READ WR. + * + */ +static inline void +csio_scsi_read(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + uint32_t size; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + + CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len); + size = ALIGN(size, 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (likely(req->drv_status == 0)) { + if (likely(wrp.size1 >= size)) { + /* Initialize WR in one shot */ + csio_scsi_init_read_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_read_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } + } +} + +/* + * csio_scsi_write - Create a SCSI WRITE WR. + * @req: IO req structure. + * + * Gets a WR slot in the ingress queue and initializes it with + * SCSI WRITE WR. + * + */ +static inline void +csio_scsi_write(struct csio_ioreq *req) +{ + struct csio_wr_pair wrp; + uint32_t size; + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + + CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len); + size = ALIGN(size, 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (likely(req->drv_status == 0)) { + if (likely(wrp.size1 >= size)) { + /* Initialize WR in one shot */ + csio_scsi_init_write_wr(req, wrp.addr1, size); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. + */ + csio_scsi_init_write_wr(req, (void *)tmpwr, size); + memcpy(wrp.addr1, tmpwr, wrp.size1); + memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1); + } + } +} + +/* + * csio_setup_ddp - Setup DDP buffers for Read request. + * @req: IO req structure. + * + * Checks SGLs/Data buffers are virtually contiguous required for DDP. + * If contiguous,driver posts SGLs in the WR otherwise post internal + * buffers for such request for DDP. + */ +static inline void +csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req) +{ +#ifdef __CSIO_DEBUG__ + struct csio_hw *hw = req->lnode->hwp; +#endif + struct scatterlist *sgel = NULL; + struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); + uint64_t sg_addr = 0; + uint32_t ddp_pagesz = 4096; + uint32_t buf_off; + struct csio_dma_buf *dma_buf = NULL; + uint32_t alloc_len = 0; + uint32_t xfer_len = 0; + uint32_t sg_len = 0; + uint32_t i; + + scsi_for_each_sg(scmnd, sgel, req->nsge, i) { + sg_addr = sg_dma_address(sgel); + sg_len = sg_dma_len(sgel); + + buf_off = sg_addr & (ddp_pagesz - 1); + + /* Except 1st buffer,all buffer addr have to be Page aligned */ + if (i != 0 && buf_off) { + csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n", + sg_addr, sg_len); + goto unaligned; + } + + /* Except last buffer,all buffer must end on page boundary */ + if ((i != (req->nsge - 1)) && + ((buf_off + sg_len) & (ddp_pagesz - 1))) { + csio_dbg(hw, + "SGL addr not ending on page boundary" + "(%llx:%d)\n", sg_addr, sg_len); + goto unaligned; + } + } + + /* SGL's are virtually contiguous. 
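+	 * With the assumed 4 KiB DDP page size (ddp_pagesz above): the
+	 * first element may begin mid-page, every element but the last
+	 * must end on a page boundary, and every element but the first
+	 * must begin on one.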
HW will DDP to SGLs */ + req->dcopy = 0; + csio_scsi_read(req); + + return; + +unaligned: + CSIO_INC_STATS(scsim, n_unaligned); + /* + * For unaligned SGLs, driver will allocate internal DDP buffer. + * Once command is completed data from DDP buffer copied to SGLs + */ + req->dcopy = 1; + + /* Use gen_list to store the DDP buffers */ + INIT_LIST_HEAD(&req->gen_list); + xfer_len = scsi_bufflen(scmnd); + + i = 0; + /* Allocate ddp buffers for this request */ + while (alloc_len < xfer_len) { + dma_buf = csio_get_scsi_ddp(scsim); + if (dma_buf == NULL || i > scsim->max_sge) { + req->drv_status = -EBUSY; + break; + } + alloc_len += dma_buf->len; + /* Added to IO req */ + list_add_tail(&dma_buf->list, &req->gen_list); + i++; + } + + if (!req->drv_status) { + /* set number of ddp bufs used */ + req->nsge = i; + csio_scsi_read(req); + return; + } + + /* release dma descs */ + if (i > 0) + csio_put_scsi_ddp_list(scsim, &req->gen_list, i); +} + +/* + * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR. + * @req: IO req structure. + * @addr: DMA location to place the payload. + * @size: Size of WR + * @abort: abort OR close + * + * Wrapper for populating fw_scsi_cmd_wr. + */ +static inline void +csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, + bool abort) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_rnode *rn = req->rnode; + struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr; + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR)); + wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | + FW_WR_LEN16_V( + DIV_ROUND_UP(size, 16))); + + wr->cookie = (uintptr_t) req; + wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); + wr->tmo_val = (uint8_t) req->tmo; + /* 0 for CHK_ALL_IO tells FW to look up t_cookie */ + wr->sub_opcode_to_chk_all_io = + (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) | + FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0)); + wr->r3[0] = 0; + wr->r3[1] = 0; + wr->r3[2] = 0; + wr->r3[3] = 0; + /* Since we re-use the same ioreq for abort as well */ + wr->t_cookie = (uintptr_t) req; +} + +static inline void +csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort) +{ + struct csio_wr_pair wrp; + struct csio_hw *hw = req->lnode->hwp; + uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16); + + req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); + if (req->drv_status != 0) + return; + + if (wrp.size1 >= size) { + /* Initialize WR in one shot */ + csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort); + } else { + uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); + /* + * Make a temporary copy of the WR and write back + * the copy into the WR pair. 
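+		 * (The egress queue is a ring: when a WR slot wraps past
+		 * the queue end, csio_wr_get() hands it back as two
+		 * fragments, so the WR is staged in a scratch buffer and
+		 * copied out in a wrp.size1-byte piece plus the remainder.)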
+		 */
+		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
+		memcpy(wrp.addr1, tmpwr, wrp.size1);
+		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+	}
+}
+
+/*****************************************************************************/
+/* START: SCSI SM                                                            */
+/*****************************************************************************/
+static void
+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+	struct csio_hw *hw = req->lnode->hwp;
+	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+	switch (evt) {
+	case CSIO_SCSIE_START_IO:
+
+		if (req->nsge) {
+			if (req->datadir == DMA_TO_DEVICE) {
+				req->dcopy = 0;
+				csio_scsi_write(req);
+			} else
+				csio_setup_ddp(scsim, req);
+		} else {
+			csio_scsi_cmd(req);
+		}
+
+		if (likely(req->drv_status == 0)) {
+			/* change state and enqueue on active_q */
+			csio_set_state(&req->sm, csio_scsis_io_active);
+			list_add_tail(&req->sm.sm_list, &scsim->active_q);
+			csio_wr_issue(hw, req->eq_idx, false);
+			CSIO_INC_STATS(scsim, n_active);
+
+			return;
+		}
+		break;
+
+	case CSIO_SCSIE_START_TM:
+		csio_scsi_cmd(req);
+		if (req->drv_status == 0) {
+			/*
+			 * NOTE: We collect the affected I/Os prior to issuing
+			 * LUN reset, and not after it. This is to prevent
+			 * aborting I/Os that get issued after the LUN reset,
+			 * but prior to LUN reset completion (in the event that
+			 * the host stack has not blocked I/Os to a LUN that is
+			 * being reset).
+			 */
+			csio_set_state(&req->sm, csio_scsis_tm_active);
+			list_add_tail(&req->sm.sm_list, &scsim->active_q);
+			csio_wr_issue(hw, req->eq_idx, false);
+			CSIO_INC_STATS(scsim, n_tm_active);
+		}
+		return;
+
+	case CSIO_SCSIE_ABORT:
+	case CSIO_SCSIE_CLOSE:
+		/*
+		 * NOTE:
+		 * We could get here due to:
+		 * - a window in the cleanup path of the SCSI module
+		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
+		 * - a window in the time we tried to issue an abort/close
+		 *   of a request to FW, and the FW completed the request
+		 *   itself.
+		 * Print a message for now, and return INVAL either way.
+		 */
+		req->drv_status = -EINVAL;
+		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
+		break;
+
+	default:
+		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+		CSIO_DB_ASSERT(0);
+	}
+}
+
+static void
+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+	struct csio_hw *hw = req->lnode->hwp;
+	struct csio_scsim *scm = csio_hw_to_scsim(hw);
+	struct csio_rnode *rn;
+
+	switch (evt) {
+	case CSIO_SCSIE_COMPLETED:
+		CSIO_DEC_STATS(scm, n_active);
+		list_del_init(&req->sm.sm_list);
+		csio_set_state(&req->sm, csio_scsis_uninit);
+		/*
+		 * In MSIX mode, with multiple queues, the SCSI completions
+		 * could reach us sooner than the FW events sent to indicate
+		 * I-T nexus loss (link down, remote device logo etc). We
+		 * don't want to be returning such I/Os to the upper layer
+		 * immediately, since we wouldn't have reported the I-T nexus
+		 * loss itself. This forces us to serialize such completions
+		 * with the reporting of the I-T nexus loss. Therefore, we
+		 * internally queue up such completions in the rnode.
+		 * The reporting of I-T nexus loss to the upper layer is then
+		 * followed by the returning of I/Os in this internal queue.
+		 * Having another state along with another queue helps us take
+		 * actions for events such as ABORT received while we are
+		 * in this rnode queue.
+		 */
+		if (unlikely(req->wr_status != FW_SUCCESS)) {
+			rn = req->rnode;
+			/*
+			 * FW says the remote device is lost, but the rnode
+			 * doesn't reflect it.
+ */ + if (csio_scsi_itnexus_loss_error(req->wr_status) && + csio_is_rnode_ready(rn)) { + csio_set_state(&req->sm, + csio_scsis_shost_cmpl_await); + list_add_tail(&req->sm.sm_list, + &rn->host_cmpl_q); + } + } + + break; + + case CSIO_SCSIE_ABORT: + csio_scsi_abrt_cls(req, SCSI_ABORT); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_aborting); + } + break; + + case CSIO_SCSIE_CLOSE: + csio_scsi_abrt_cls(req, SCSI_CLOSE); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_closing); + } + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + CSIO_DEC_STATS(scm, n_tm_active); + list_del_init(&req->sm.sm_list); + csio_set_state(&req->sm, csio_scsis_uninit); + + break; + + case CSIO_SCSIE_ABORT: + csio_scsi_abrt_cls(req, SCSI_ABORT); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_aborting); + } + break; + + + case CSIO_SCSIE_CLOSE: + csio_scsi_abrt_cls(req, SCSI_CLOSE); + if (req->drv_status == 0) { + csio_wr_issue(hw, req->eq_idx, false); + csio_set_state(&req->sm, csio_scsis_closing); + } + break; + + case CSIO_SCSIE_DRVCLEANUP: + req->wr_status = FW_HOSTERROR; + CSIO_DEC_STATS(scm, n_tm_active); + csio_set_state(&req->sm, csio_scsis_uninit); + break; + + default: + csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); + CSIO_DB_ASSERT(0); + } +} + +static void +csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) +{ + struct csio_hw *hw = req->lnode->hwp; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + switch (evt) { + case CSIO_SCSIE_COMPLETED: + csio_dbg(hw, + "ioreq %p recvd cmpltd (wr_status:%d) " + "in aborting st\n", req, req->wr_status); + /* + * Use -ECANCELED to explicitly tell the ABORTED event that + * the original I/O was returned to driver by FW. + * We dont really care if the I/O was returned with success by + * FW (because the ABORT and completion of the I/O crossed each + * other), or any other return value. Once we are in aborting + * state, the success or failure of the I/O is unimportant to + * us. + */ + req->drv_status = -ECANCELED; + break; + + case CSIO_SCSIE_ABORT: + CSIO_INC_STATS(scm, n_abrt_dups); + break; + + case CSIO_SCSIE_ABORTED: + + csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n", + req, req->wr_status, req->drv_status); + /* + * Check if original I/O WR completed before the Abort + * completion. + */ + if (req->drv_status != -ECANCELED) { + csio_warn(hw, + "Abort completed before original I/O," + " req:%p\n", req); + CSIO_DB_ASSERT(0); + } + + /* + * There are the following possible scenarios: + * 1. The abort completed successfully, FW returned FW_SUCCESS. + * 2. The completion of an I/O and the receipt of + * abort for that I/O by the FW crossed each other. + * The FW returned FW_EINVAL. The original I/O would have + * returned with FW_SUCCESS or any other SCSI error. + * 3. The FW couldn't sent the abort out on the wire, as there + * was an I-T nexus loss (link down, remote device logged + * out etc). 
FW sent back an appropriate I-T nexus loss status
+		 *    for the abort.
+		 * 4. FW sent out the abort, but it timed out (the remote
+		 *    device didn't respond). FW replied with
+		 *    FW_SCSI_ABORT_TIMEDOUT.
+		 * 5. FW couldn't genuinely abort the request for some reason,
+		 *    and sent us an error.
+		 *
+		 * The first 3 scenarios are treated as successful abort
+		 * operations by the host, while the last 2 are failed attempts
+		 * to abort. Manipulate the return value of the request
+		 * appropriately, so that the host can convey these results
+		 * back to the upper layer.
+		 */
+		if ((req->wr_status == FW_SUCCESS) ||
+		    (req->wr_status == FW_EINVAL) ||
+		    csio_scsi_itnexus_loss_error(req->wr_status))
+			req->wr_status = FW_SCSI_ABORT_REQUESTED;
+
+		CSIO_DEC_STATS(scm, n_active);
+		list_del_init(&req->sm.sm_list);
+		csio_set_state(&req->sm, csio_scsis_uninit);
+		break;
+
+	case CSIO_SCSIE_DRVCLEANUP:
+		req->wr_status = FW_HOSTERROR;
+		CSIO_DEC_STATS(scm, n_active);
+		csio_set_state(&req->sm, csio_scsis_uninit);
+		break;
+
+	case CSIO_SCSIE_CLOSE:
+		/*
+		 * We can receive this event from the module cleanup paths,
+		 * if the FW forgot to reply to the ABORT WR and left this
+		 * ioreq in this state. For now, just ignore the event.
+		 * The CLOSE event is sent to this state, as the LINK may
+		 * have already gone down.
+		 */
+		break;
+
+	default:
+		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+		CSIO_DB_ASSERT(0);
+	}
+}
+
+static void
+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+	struct csio_hw *hw = req->lnode->hwp;
+	struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+	switch (evt) {
+	case CSIO_SCSIE_COMPLETED:
+		csio_dbg(hw,
+			 "ioreq %p recvd cmpltd (wr_status:%d) "
+			 "in closing st\n", req, req->wr_status);
+		/*
+		 * Use -ECANCELED to explicitly tell the CLOSED event that
+		 * the original I/O was returned to the driver by FW.
+		 * We don't really care if the I/O was returned with success
+		 * by FW (because the CLOSE and the completion of the I/O
+		 * crossed each other), or with any other return value. Once
+		 * we are in the closing state, the success or failure of the
+		 * I/O is unimportant to us.
+		 */
+		req->drv_status = -ECANCELED;
+		break;
+
+	case CSIO_SCSIE_CLOSED:
+		/*
+		 * Check if the original I/O WR completed before the Close
+		 * completion.
+		 */
+		if (req->drv_status != -ECANCELED) {
+			csio_fatal(hw,
+				   "Close completed before original I/O,"
+				   " req:%p\n", req);
+			CSIO_DB_ASSERT(0);
+		}
+
+		/*
+		 * Either the close succeeded, or we issued the close to FW
+		 * at the same time FW completed it to us. Either way, the
+		 * I/O is closed.
+		 */
+		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
+			       (req->wr_status == FW_EINVAL));
+		req->wr_status = FW_SCSI_CLOSE_REQUESTED;
+
+		CSIO_DEC_STATS(scm, n_active);
+		list_del_init(&req->sm.sm_list);
+		csio_set_state(&req->sm, csio_scsis_uninit);
+		break;
+
+	case CSIO_SCSIE_CLOSE:
+		break;
+
+	case CSIO_SCSIE_DRVCLEANUP:
+		req->wr_status = FW_HOSTERROR;
+		CSIO_DEC_STATS(scm, n_active);
+		csio_set_state(&req->sm, csio_scsis_uninit);
+		break;
+
+	default:
+		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+		CSIO_DB_ASSERT(0);
+	}
+}
+
+static void
+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+	switch (evt) {
+	case CSIO_SCSIE_ABORT:
+	case CSIO_SCSIE_CLOSE:
+		/*
+		 * Just succeed the abort request, and hope that
+		 * the remote device unregister path will clean up
+		 * this I/O to the upper layer within a sane
+		 * amount of time.
+		 */
+		/*
+		 * A close can come in during a LINK DOWN.
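(Stepping back to the five completion scenarios enumerated in csio_scsis_aborting() above: the decision they feed reduces to a single predicate. A compile-checkable restatement in plain C, with the ST_* values as illustrative stand-ins for the real FW_* status codes:)

#include <stdbool.h>

/* Illustrative stand-ins for the FW_* status codes. */
enum fw_status { ST_SUCCESS, ST_EINVAL, ST_NEXUS_LOST,
		 ST_ABORT_TIMEDOUT, ST_ABORT_FAIL };

static bool itnexus_loss(enum fw_status st) { return st == ST_NEXUS_LOST; }

/*
 * Scenarios 1-3 (clean abort, abort/completion cross-over, I-T nexus
 * loss) count as a successful abort; scenarios 4-5 (abort timeout,
 * genuine abort failure) do not.
 */
static bool abort_succeeded(enum fw_status wr_status)
{
	return wr_status == ST_SUCCESS || wr_status == ST_EINVAL ||
	       itnexus_loss(wr_status);
}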
The FW would have + * returned us the I/O back, but not the remote device lost + * FW event. In this interval, if the I/O times out at the upper + * layer, a close can come in. Take the same action as abort: + * return success, and hope that the remote device unregister + * path will cleanup this I/O. If the FW still doesnt send + * the msg, the close times out, and the upper layer resorts + * to the next level of error recovery. + */ + req->drv_status = 0; + break; + case CSIO_SCSIE_DRVCLEANUP: + csio_set_state(&req->sm, csio_scsis_uninit); + break; + default: + csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n", + evt, req); + CSIO_DB_ASSERT(0); + } +} + +/* + * csio_scsi_cmpl_handler - WR completion handler for SCSI. + * @hw: HW module. + * @wr: The completed WR from the ingress queue. + * @len: Length of the WR. + * @flb: Freelist buffer array. + * @priv: Private object + * @scsiwr: Pointer to SCSI WR. + * + * This is the WR completion handler called per completion from the + * ISR. It is called with lock held. It walks past the RSS and CPL message + * header where the actual WR is present. + * It then gets the status, WR handle (ioreq pointer) and the len of + * the WR, based on WR opcode. Only on a non-good status is the entire + * WR copied into the WR cache (ioreq->fw_wr). + * The ioreq corresponding to the WR is returned to the caller. + * NOTE: The SCSI queue doesnt allocate a freelist today, hence + * no freelist buffer is expected. + */ +struct csio_ioreq * +csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len, + struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr) +{ + struct csio_ioreq *ioreq = NULL; + struct cpl_fw6_msg *cpl; + uint8_t *tempwr; + uint8_t status; + struct csio_scsim *scm = csio_hw_to_scsim(hw); + + /* skip RSS header */ + cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64)); + + if (unlikely(cpl->opcode != CPL_FW6_MSG)) { + csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n", + cpl->opcode); + CSIO_INC_STATS(scm, n_inval_cplop); + return NULL; + } + + tempwr = (uint8_t *)(cpl->data); + status = csio_wr_status(tempwr); + *scsiwr = tempwr; + + if (likely((*tempwr == FW_SCSI_READ_WR) || + (*tempwr == FW_SCSI_WRITE_WR) || + (*tempwr == FW_SCSI_CMD_WR))) { + ioreq = (struct csio_ioreq *)((uintptr_t) + (((struct fw_scsi_read_wr *)tempwr)->cookie)); + CSIO_DB_ASSERT(virt_addr_valid(ioreq)); + + ioreq->wr_status = status; + + return ioreq; + } + + if (*tempwr == FW_SCSI_ABRT_CLS_WR) { + ioreq = (struct csio_ioreq *)((uintptr_t) + (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie)); + CSIO_DB_ASSERT(virt_addr_valid(ioreq)); + + ioreq->wr_status = status; + return ioreq; + } + + csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr); + CSIO_INC_STATS(scm, n_inval_scsiop); + return NULL; +} + +/* + * csio_scsi_cleanup_io_q - Cleanup the given queue. + * @scm: SCSI module. + * @q: Queue to be cleaned up. + * + * Called with lock held. Has to exit with lock held. 
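(Before the cleanup helpers below, a sketch of what csio_scsi_cmpl_handler() above does with a raw ingress entry: skip the RSS header, validate the CPL opcode, then recover the cookie stored at posting time. Userspace C; the layout offsets and the CPL_FW6_MSG value here are illustrative, the real ones come from the cxgb4/t4 headers.)

#include <stdint.h>
#include <string.h>

#define CPL_FW6_MSG	0xe0		/* opcode value is illustrative */

struct cpl_hdr {
	uint8_t opcode;
	uint8_t rsvd[7];
	uint8_t data[];			/* the embedded WR starts here */
};

/* Returns the request cookie stored when the WR was posted, or NULL. */
static void *parse_cmpl(void *entry, uint8_t *wr_op)
{
	/* Skip the 8-byte RSS header in front of the CPL message. */
	struct cpl_hdr *cpl = (struct cpl_hdr *)((uint8_t *)entry + 8);
	uint64_t cookie;

	if (cpl->opcode != CPL_FW6_MSG)
		return NULL;		/* not ours: caller counts and drops */

	*wr_op = cpl->data[0];		/* first byte of a WR is its opcode */
	memcpy(&cookie, cpl->data + 8, sizeof(cookie)); /* offset illustrative */
	return (void *)(uintptr_t)cookie;
}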
+ */ +void +csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q) +{ + struct csio_hw *hw = scm->hw; + struct csio_ioreq *ioreq; + struct list_head *tmp, *next; + struct scsi_cmnd *scmnd; + + /* Call back the completion routines of the active_q */ + list_for_each_safe(tmp, next, q) { + ioreq = (struct csio_ioreq *)tmp; + csio_scsi_drvcleanup(ioreq); + list_del_init(&ioreq->sm.sm_list); + scmnd = csio_scsi_cmnd(ioreq); + spin_unlock_irq(&hw->lock); + + /* + * Upper layers may have cleared this command, hence this + * check to avoid accessing stale references. + */ + if (scmnd != NULL) + ioreq->io_cbfn(hw, ioreq); + + spin_lock_irq(&scm->freelist_lock); + csio_put_scsi_ioreq(scm, ioreq); + spin_unlock_irq(&scm->freelist_lock); + + spin_lock_irq(&hw->lock); + } +} + +#define CSIO_SCSI_ABORT_Q_POLL_MS 2000 + +static void +csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd) +{ + struct csio_lnode *ln = ioreq->lnode; + struct csio_hw *hw = ln->hwp; + int ready = 0; + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + int rv; + + if (csio_scsi_cmnd(ioreq) != scmnd) { + CSIO_INC_STATS(scsim, n_abrt_race_comp); + return; + } + + ready = csio_is_lnode_ready(ln); + + rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); + if (rv != 0) { + if (ready) + CSIO_INC_STATS(scsim, n_abrt_busy_error); + else + CSIO_INC_STATS(scsim, n_cls_busy_error); + } +} + +/* + * csio_scsi_abort_io_q - Abort all I/Os on given queue + * @scm: SCSI module. + * @q: Queue to abort. + * @tmo: Timeout in ms + * + * Attempt to abort all I/Os on given queue, and wait for a max + * of tmo milliseconds for them to complete. Returns success + * if all I/Os are aborted. Else returns -ETIMEDOUT. + * Should be entered with lock held. Exits with lock held. + * NOTE: + * Lock has to be held across the loop that aborts I/Os, since dropping the lock + * in between can cause the list to be corrupted. As a result, the caller + * of this function has to ensure that the number of I/os to be aborted + * is finite enough to not cause lock-held-for-too-long issues. + */ +static int +csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo) +{ + struct csio_hw *hw = scm->hw; + struct list_head *tmp, *next; + int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS); + struct scsi_cmnd *scmnd; + + if (list_empty(q)) + return 0; + + csio_dbg(hw, "Aborting SCSI I/Os\n"); + + /* Now abort/close I/Os in the queue passed */ + list_for_each_safe(tmp, next, q) { + scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp); + csio_abrt_cls((struct csio_ioreq *)tmp, scmnd); + } + + /* Wait till all active I/Os are completed/aborted/closed */ + while (!list_empty(q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all aborts completed */ + if (list_empty(q)) + return 0; + + return -ETIMEDOUT; +} + +/* + * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module. + * @scm: SCSI module. + * @abort: abort required. + * Called with lock held, should exit with lock held. + * Can sleep when waiting for I/Os to complete. 
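(The cleanup and abort helpers in this file share one waiting discipline: hold the lock while walking lists, and drop it around every sleep so completions can make progress. A standalone pthread model of that drop-lock/poll/re-acquire loop, entered and exited with the lock held; all names are illustrative:)

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

/* Entered with *lock held; exits with *lock held. Returns whether the
 * condition drained before the poll budget ran out. */
static bool wait_drained(pthread_mutex_t *lock, bool (*drained)(void),
			 int polls, int poll_ms)
{
	while (!drained() && polls-- > 0) {
		pthread_mutex_unlock(lock);	/* let completions run */
		usleep(poll_ms * 1000);
		pthread_mutex_lock(lock);
	}
	return drained();			/* false => caller escalates */
}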
+ */ +int +csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort) +{ + struct csio_hw *hw = scm->hw; + int rv = 0; + int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); + + /* No I/Os pending */ + if (list_empty(&scm->active_q)) + return 0; + + /* Wait until all active I/Os are completed */ + while (!list_empty(&scm->active_q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all I/Os completed */ + if (list_empty(&scm->active_q)) + return 0; + + /* Else abort */ + if (abort) { + rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000); + if (rv == 0) + return rv; + csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); + } + + csio_scsi_cleanup_io_q(scm, &scm->active_q); + + CSIO_DB_ASSERT(list_empty(&scm->active_q)); + + return rv; +} + +/* + * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode. + * @scm: SCSI module. + * @lnode: lnode + * + * Called with lock held, should exit with lock held. + * Can sleep (with dropped lock) when waiting for I/Os to complete. + */ +int +csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln) +{ + struct csio_hw *hw = scm->hw; + struct csio_scsi_level_data sld; + int rv; + int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); + + csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln); + + sld.level = CSIO_LEV_LNODE; + sld.lnode = ln; + INIT_LIST_HEAD(&ln->cmpl_q); + csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q); + + /* No I/Os pending on this lnode */ + if (list_empty(&ln->cmpl_q)) + return 0; + + /* Wait until all active I/Os on this lnode are completed */ + while (!list_empty(&ln->cmpl_q) && count--) { + spin_unlock_irq(&hw->lock); + msleep(CSIO_SCSI_ABORT_Q_POLL_MS); + spin_lock_irq(&hw->lock); + } + + /* all I/Os completed */ + if (list_empty(&ln->cmpl_q)) + return 0; + + csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln); + + /* I/Os are pending, abort them */ + rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); + if (rv != 0) { + csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); + csio_scsi_cleanup_io_q(scm, &ln->cmpl_q); + } + + CSIO_DB_ASSERT(list_empty(&ln->cmpl_q)); + + return rv; +} + +static ssize_t +csio_show_hw_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (csio_is_hw_ready(hw)) + return sysfs_emit(buf, "ready\n"); + + return sysfs_emit(buf, "not ready\n"); +} + +/* Device reset */ +static ssize_t +csio_device_reset(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + + if (*buf != '1') + return -EINVAL; + + /* Delete NPIV lnodes */ + csio_lnodes_exit(hw, 1); + + /* Block upper IOs */ + csio_lnodes_block_request(hw); + + spin_lock_irq(&hw->lock); + csio_hw_reset(hw); + spin_unlock_irq(&hw->lock); + + /* Unblock upper IOs */ + csio_lnodes_unblock_request(hw); + return count; +} + +/* disable port */ +static ssize_t +csio_disable_port(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct csio_lnode *ln = shost_priv(class_to_shost(dev)); + struct csio_hw *hw = csio_lnode_to_hw(ln); + bool disable; + + if (*buf == '1' || *buf == '0') + disable = (*buf == '1') ? 
true : false;
+	else
+		return -EINVAL;
+
+	/* Block upper IOs */
+	csio_lnodes_block_by_port(hw, ln->portid);
+
+	spin_lock_irq(&hw->lock);
+	csio_disable_lnodes(hw, ln->portid, disable);
+	spin_unlock_irq(&hw->lock);
+
+	/* Unblock upper IOs */
+	csio_lnodes_unblock_by_port(hw, ln->portid);
+	return count;
+}
+
+/* Show debug level */
+static ssize_t
+csio_show_dbg_level(struct device *dev,
+		    struct device_attribute *attr, char *buf)
+{
+	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+	return sysfs_emit(buf, "%x\n", ln->params.log_level);
+}
+
+/* Store debug level */
+static ssize_t
+csio_store_dbg_level(struct device *dev,
+		     struct device_attribute *attr, const char *buf,
+		     size_t count)
+{
+	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+	struct csio_hw *hw = csio_lnode_to_hw(ln);
+	uint32_t dbg_level = 0;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	/* sscanf() returns the number of successful conversions */
+	if (sscanf(buf, "%i", &dbg_level) != 1)
+		return -EINVAL;
+
+	ln->params.log_level = dbg_level;
+	hw->params.log_level = dbg_level;
+
+	return count;
+}
+
+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
+static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
+		   csio_store_dbg_level);
+
+static struct attribute *csio_fcoe_lport_attrs[] = {
+	&dev_attr_hw_state.attr,
+	&dev_attr_device_reset.attr,
+	&dev_attr_disable_port.attr,
+	&dev_attr_dbg_level.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(csio_fcoe_lport);
+
+static ssize_t
+csio_show_num_reg_rnodes(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+	return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
+}
+
+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
+
+static struct attribute *csio_fcoe_vport_attrs[] = {
+	&dev_attr_num_reg_rnodes.attr,
+	&dev_attr_dbg_level.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(csio_fcoe_vport);
+
+static inline uint32_t
+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
+{
+	struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+	struct scatterlist *sg;
+	uint32_t bytes_left;
+	uint32_t bytes_copy;
+	uint32_t buf_off = 0;
+	uint32_t start_off = 0;
+	uint32_t sg_off = 0;
+	void *sg_addr;
+	void *buf_addr;
+	struct csio_dma_buf *dma_buf;
+
+	bytes_left = scsi_bufflen(scmnd);
+	sg = scsi_sglist(scmnd);
+	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
+
+	/* Copy data from the driver's DDP buffers to the SGs of the command */
+	while (bytes_left > 0 && sg && dma_buf) {
+		if (buf_off >= dma_buf->len) {
+			buf_off = 0;
+			dma_buf = (struct csio_dma_buf *)
+					csio_list_next(dma_buf);
+			continue;
+		}
+
+		if (start_off >= sg->length) {
+			start_off -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+
+		buf_addr = dma_buf->vaddr + buf_off;
+		sg_off = sg->offset + start_off;
+		bytes_copy = min((dma_buf->len - buf_off),
+				 sg->length - start_off);
+		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
+				 bytes_copy);
+
+		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
+		if (!sg_addr) {
+			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
+				 sg, req);
+			break;
+		}
+
+		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
+			 sg_addr, sg_off, buf_addr, bytes_copy);
+		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
+		kunmap_atomic(sg_addr);
+
+		start_off += bytes_copy;
+		buf_off += bytes_copy;
+		bytes_left -= bytes_copy;
+	}
+
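(The loop above is a two-cursor walk: one cursor through the driver's DDP buffer list, one through the command's SGL, copying min(remaining source, remaining destination) each step; the function's final return follows below. A standalone model of the walk in userspace C; the real loop additionally clamps each copy to one page because highmem SG pages are mapped one at a time with kmap_atomic():)

#include <stdint.h>
#include <string.h>

struct buf { uint8_t *data; size_t len; };

static size_t copy_lists(struct buf *dst, size_t ndst,
			 const struct buf *src, size_t nsrc)
{
	size_t di = 0, si = 0, doff = 0, soff = 0, total = 0;

	while (di < ndst && si < nsrc) {
		size_t d = dst[di].len - doff;	/* room left in dst buf */
		size_t s = src[si].len - soff;	/* data left in src buf */
		size_t c = d < s ? d : s;

		memcpy(dst[di].data + doff, src[si].data + soff, c);
		doff += c; soff += c; total += c;
		if (doff == dst[di].len) { di++; doff = 0; }
		if (soff == src[si].len) { si++; soff = 0; }
	}
	return total;			/* short total => caller errors out */
}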
if (bytes_left > 0) + return DID_ERROR; + else + return DID_OK; +} + +/* + * csio_scsi_err_handler - SCSI error handler. + * @hw: HW module. + * @req: IO request. + * + */ +static inline void +csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + struct csio_scsim *scm = csio_hw_to_scsim(hw); + struct fcp_resp_with_ext *fcp_resp; + struct fcp_resp_rsp_info *rsp_info; + struct csio_dma_buf *dma_buf; + uint8_t flags, scsi_status = 0; + uint32_t host_status = DID_OK; + uint32_t rsp_len = 0, sns_len = 0; + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + + + switch (req->wr_status) { + case FW_HOSTERROR: + if (unlikely(!csio_is_hw_ready(hw))) + return; + + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_hosterror); + + break; + case FW_SCSI_RSP_ERR: + dma_buf = &req->dma_buf; + fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; + rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); + flags = fcp_resp->resp.fr_flags; + scsi_status = fcp_resp->resp.fr_status; + + if (flags & FCP_RSP_LEN_VAL) { + rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len); + if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) || + (rsp_info->rsp_code != FCP_TMF_CMPL)) { + host_status = DID_ERROR; + goto out; + } + } + + if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) { + sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len); + if (sns_len > SCSI_SENSE_BUFFERSIZE) + sns_len = SCSI_SENSE_BUFFERSIZE; + + memcpy(cmnd->sense_buffer, + &rsp_info->_fr_resvd[0] + rsp_len, sns_len); + CSIO_INC_STATS(scm, n_autosense); + } + + scsi_set_resid(cmnd, 0); + + /* Under run */ + if (flags & FCP_RESID_UNDER) { + scsi_set_resid(cmnd, + be32_to_cpu(fcp_resp->ext.fr_resid)); + + if (!(flags & FCP_SNS_LEN_VAL) && + (scsi_status == SAM_STAT_GOOD) && + ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd)) + < cmnd->underflow)) + host_status = DID_ERROR; + } else if (flags & FCP_RESID_OVER) + host_status = DID_ERROR; + + CSIO_INC_STATS(scm, n_rsperror); + break; + + case FW_SCSI_OVER_FLOW_ERR: + csio_warn(hw, + "Over-flow error,cmnd:0x%x expected len:0x%x" + " resid:0x%x\n", cmnd->cmnd[0], + scsi_bufflen(cmnd), scsi_get_resid(cmnd)); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_ovflerror); + break; + + case FW_SCSI_UNDER_FLOW_ERR: + csio_warn(hw, + "Under-flow error,cmnd:0x%x expected" + " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n", + cmnd->cmnd[0], scsi_bufflen(cmnd), + scsi_get_resid(cmnd), cmnd->device->lun, + rn->flowid); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_unflerror); + break; + + case FW_SCSI_ABORT_REQUESTED: + case FW_SCSI_ABORTED: + case FW_SCSI_CLOSE_REQUESTED: + csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd, + cmnd->cmnd[0], + (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? + "closed" : "aborted"); + /* + * csio_eh_abort_handler checks this value to + * succeed or fail the abort request. + */ + host_status = DID_REQUEUE; + if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) + CSIO_INC_STATS(scm, n_closed); + else + CSIO_INC_STATS(scm, n_aborted); + break; + + case FW_SCSI_ABORT_TIMEDOUT: + /* FW timed out the abort itself */ + csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", + req, cmnd, req->wr_status); + host_status = DID_ERROR; + CSIO_INC_STATS(scm, n_abrt_timedout); + break; + + case FW_RDEV_NOT_READY: + /* + * In firmware, a RDEV can get into this state + * temporarily, before moving into dissapeared/lost + * state. 
So, the driver should complete the request equivalent + * to device-disappeared! + */ + CSIO_INC_STATS(scm, n_rdev_nr_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_LOST: + CSIO_INC_STATS(scm, n_rdev_lost_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_LOGO: + CSIO_INC_STATS(scm, n_rdev_logo_error); + host_status = DID_ERROR; + break; + + case FW_ERR_RDEV_IMPL_LOGO: + host_status = DID_ERROR; + break; + + case FW_ERR_LINK_DOWN: + CSIO_INC_STATS(scm, n_link_down_error); + host_status = DID_ERROR; + break; + + case FW_FCOE_NO_XCHG: + CSIO_INC_STATS(scm, n_no_xchg_error); + host_status = DID_ERROR; + break; + + default: + csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", + req->wr_status, req, cmnd); + CSIO_DB_ASSERT(0); + + CSIO_INC_STATS(scm, n_unknown_error); + host_status = DID_ERROR; + break; + } + +out: + if (req->nsge > 0) { + scsi_dma_unmap(cmnd); + if (req->dcopy && (host_status == DID_OK)) + host_status = csio_scsi_copy_to_sgl(hw, req); + } + + cmnd->result = (((host_status) << 16) | scsi_status); + scsi_done(cmnd); + + /* Wake up waiting threads */ + csio_scsi_cmnd(req) = NULL; + complete(&req->cmplobj); +} + +/* + * csio_scsi_cbfn - SCSI callback function. + * @hw: HW module. + * @req: IO request. + * + */ +static void +csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + uint8_t scsi_status = SAM_STAT_GOOD; + uint32_t host_status = DID_OK; + + if (likely(req->wr_status == FW_SUCCESS)) { + if (req->nsge > 0) { + scsi_dma_unmap(cmnd); + if (req->dcopy) + host_status = csio_scsi_copy_to_sgl(hw, req); + } + + cmnd->result = (((host_status) << 16) | scsi_status); + scsi_done(cmnd); + csio_scsi_cmnd(req) = NULL; + CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); + } else { + /* Error handling */ + csio_scsi_err_handler(hw, req); + } +} + +/** + * csio_queuecommand - Entry point to kickstart an I/O request. + * @host: The scsi_host pointer. + * @cmnd: The I/O request from ML. + * + * This routine does the following: + * - Checks for HW and Rnode module readiness. + * - Gets a free ioreq structure (which is already initialized + * to uninit during its allocation). + * - Maps SG elements. + * - Initializes ioreq members. + * - Kicks off the SCSI state machine for this IO. + * - Returns busy status on error. + */ +static int +csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) +{ + struct csio_lnode *ln = shost_priv(host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + struct csio_ioreq *ioreq = NULL; + unsigned long flags; + int nsge = 0; + int rv = SCSI_MLQUEUE_HOST_BUSY, nr; + int retval; + struct csio_scsi_qset *sqset; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))]; + + nr = fc_remote_port_chkready(rport); + if (nr) { + cmnd->result = nr; + CSIO_INC_STATS(scsim, n_rn_nr_error); + goto err_done; + } + + if (unlikely(!csio_is_hw_ready(hw))) { + cmnd->result = (DID_REQUEUE << 16); + CSIO_INC_STATS(scsim, n_hw_nr_error); + goto err_done; + } + + /* Get req->nsge, if there are SG elements to be mapped */ + nsge = scsi_dma_map(cmnd); + if (unlikely(nsge < 0)) { + CSIO_INC_STATS(scsim, n_dmamap_error); + goto err; + } + + /* Do we support so many mappings? 
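(csio_queuecommand() acquires resources in a fixed order — readiness checks, the DMA mapping, an ioreq — and, as the labels below show, its failure paths unwind them in reverse with gotos. The shape of that pattern, reduced to standalone C with malloc() standing in for the real acquisitions:)

#include <stdlib.h>

static int kick_state_machine(void *req) { (void)req; return 0; }

static int start_io(void)
{
	void *mapping, *req;

	mapping = malloc(64);		/* stands in for scsi_dma_map() */
	if (!mapping)
		goto err;

	req = malloc(128);		/* stands in for ioreq allocation */
	if (!req)
		goto err_unmap;

	if (kick_state_machine(req))	/* stands in for csio_scsi_start_io() */
		goto err_put_req;

	return 0;			/* completion path releases both */

err_put_req:
	free(req);
err_unmap:
	free(mapping);
err:
	return -1;
}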
*/ + if (unlikely(nsge > scsim->max_sge)) { + csio_warn(hw, + "More SGEs than can be supported." + " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge); + CSIO_INC_STATS(scsim, n_unsupp_sge_error); + goto err_dma_unmap; + } + + /* Get a free ioreq structure - SM is already set to uninit */ + ioreq = csio_get_scsi_ioreq_lock(hw, scsim); + if (!ioreq) { + csio_err(hw, "Out of I/O request elements. Active #:%d\n", + scsim->stats.n_active); + CSIO_INC_STATS(scsim, n_no_req_error); + goto err_dma_unmap; + } + + ioreq->nsge = nsge; + ioreq->lnode = ln; + ioreq->rnode = rn; + ioreq->iq_idx = sqset->iq_idx; + ioreq->eq_idx = sqset->eq_idx; + ioreq->wr_status = 0; + ioreq->drv_status = 0; + csio_scsi_cmnd(ioreq) = (void *)cmnd; + ioreq->tmo = 0; + ioreq->datadir = cmnd->sc_data_direction; + + if (cmnd->sc_data_direction == DMA_TO_DEVICE) { + CSIO_INC_STATS(ln, n_output_requests); + ln->stats.n_output_bytes += scsi_bufflen(cmnd); + } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) { + CSIO_INC_STATS(ln, n_input_requests); + ln->stats.n_input_bytes += scsi_bufflen(cmnd); + } else + CSIO_INC_STATS(ln, n_control_requests); + + /* Set cbfn */ + ioreq->io_cbfn = csio_scsi_cbfn; + + /* Needed during abort */ + cmnd->host_scribble = (unsigned char *)ioreq; + csio_priv(cmnd)->fc_tm_flags = 0; + + /* Kick off SCSI IO SM on the ioreq */ + spin_lock_irqsave(&hw->lock, flags); + retval = csio_scsi_start_io(ioreq); + spin_unlock_irqrestore(&hw->lock, flags); + + if (retval != 0) { + csio_err(hw, "ioreq: %p couldn't be started, status:%d\n", + ioreq, retval); + CSIO_INC_STATS(scsim, n_busy_error); + goto err_put_req; + } + + return 0; + +err_put_req: + csio_put_scsi_ioreq_lock(hw, scsim, ioreq); +err_dma_unmap: + if (nsge > 0) + scsi_dma_unmap(cmnd); +err: + return rv; + +err_done: + scsi_done(cmnd); + return 0; +} + +static int +csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort) +{ + int rv; + int cpu = smp_processor_id(); + struct csio_lnode *ln = ioreq->lnode; + struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu]; + + ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; + /* + * Use current processor queue for posting the abort/close, but retain + * the ingress queue ID of the original I/O being aborted/closed - we + * need the abort/close completion to be received on the same queue + * as the original I/O. 
+ */ + ioreq->eq_idx = sqset->eq_idx; + + if (abort == SCSI_ABORT) + rv = csio_scsi_abort(ioreq); + else + rv = csio_scsi_close(ioreq); + + return rv; +} + +static int +csio_eh_abort_handler(struct scsi_cmnd *cmnd) +{ + struct csio_ioreq *ioreq; + struct csio_lnode *ln = shost_priv(cmnd->device->host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + int ready = 0, ret; + unsigned long tmo = 0; + int rv; + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + + ret = fc_block_scsi_eh(cmnd); + if (ret) + return ret; + + ioreq = (struct csio_ioreq *)cmnd->host_scribble; + if (!ioreq) + return SUCCESS; + + if (!rn) + return FAILED; + + csio_dbg(hw, + "Request to abort ioreq:%p cmd:%p cdb:%08llx" + " ssni:0x%x lun:%llu iq:0x%x\n", + ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, + cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); + + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) { + CSIO_INC_STATS(scsim, n_abrt_race_comp); + return SUCCESS; + } + + ready = csio_is_lnode_ready(ln); + tmo = CSIO_SCSI_ABRT_TMO_MS; + + reinit_completion(&ioreq->cmplobj); + spin_lock_irq(&hw->lock); + rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); + spin_unlock_irq(&hw->lock); + + if (rv != 0) { + if (rv == -EINVAL) { + /* Return success, if abort/close request issued on + * already completed IO + */ + return SUCCESS; + } + if (ready) + CSIO_INC_STATS(scsim, n_abrt_busy_error); + else + CSIO_INC_STATS(scsim, n_cls_busy_error); + + goto inval_scmnd; + } + + wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo)); + + /* FW didnt respond to abort within our timeout */ + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { + + csio_err(hw, "Abort timed out -- req: %p\n", ioreq); + CSIO_INC_STATS(scsim, n_abrt_timedout); + +inval_scmnd: + if (ioreq->nsge > 0) + scsi_dma_unmap(cmnd); + + spin_lock_irq(&hw->lock); + csio_scsi_cmnd(ioreq) = NULL; + spin_unlock_irq(&hw->lock); + + cmnd->result = (DID_ERROR << 16); + scsi_done(cmnd); + + return FAILED; + } + + /* FW successfully aborted the request */ + if (host_byte(cmnd->result) == DID_REQUEUE) { + csio_info(hw, + "Aborted SCSI command to (%d:%llu) tag %u\n", + cmnd->device->id, cmnd->device->lun, + scsi_cmd_to_rq(cmnd)->tag); + return SUCCESS; + } else { + csio_info(hw, + "Failed to abort SCSI command, (%d:%llu) tag %u\n", + cmnd->device->id, cmnd->device->lun, + scsi_cmd_to_rq(cmnd)->tag); + return FAILED; + } +} + +/* + * csio_tm_cbfn - TM callback function. + * @hw: HW module. + * @req: IO request. + * + * Cache the result in 'cmnd', since ioreq will be freed soon + * after we return from here, and the waiting thread shouldnt trust + * the ioreq contents. + */ +static void +csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); + struct csio_dma_buf *dma_buf; + uint8_t flags = 0; + struct fcp_resp_with_ext *fcp_resp; + struct fcp_resp_rsp_info *rsp_info; + + csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", + req, req->wr_status); + + /* Cache FW return status */ + csio_priv(cmnd)->wr_status = req->wr_status; + + /* Special handling based on FCP response */ + + /* + * FW returns us this error, if flags were set. FCP4 says + * FCP_RSP_LEN_VAL in flags shall be set for TM completions. + * So if a target were to set this bit, we expect that the + * rsp_code is set to FCP_TMF_CMPL for a successful TM + * completion. Any other rsp_code means TM operation failed. 
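(The abort handler above follows a common error-handler pattern: re-arm a completion, post the abort, block with a timeout, then decide by re-checking ownership of the command. A userspace model of the timed wait using a condition variable; the names and the decision test are illustrative:)

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct abort_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool fw_replied;	/* set by the completion callback */
};

/* Returns false when the FW never answered within tmo_ms, i.e. the
 * equivalent of eh_abort's "ioreq still owns the command" check. */
static bool wait_for_abort(struct abort_ctx *c, int tmo_ms)
{
	struct timespec ts;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += tmo_ms / 1000;
	ts.tv_nsec += (long)(tmo_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->fw_replied &&
	       pthread_cond_timedwait(&c->done, &c->lock, &ts) == 0)
		;
	ok = c->fw_replied;
	pthread_mutex_unlock(&c->lock);
	return ok;
}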
+ * If a target were to just ignore setting flags, we treat + * the TM operation as success, and FW returns FW_SUCCESS. + */ + if (req->wr_status == FW_SCSI_RSP_ERR) { + dma_buf = &req->dma_buf; + fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; + rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); + + flags = fcp_resp->resp.fr_flags; + + /* Modify return status if flags indicate success */ + if (flags & FCP_RSP_LEN_VAL) + if (rsp_info->rsp_code == FCP_TMF_CMPL) + csio_priv(cmnd)->wr_status = FW_SUCCESS; + + csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code); + } + + /* Wake up the TM handler thread */ + csio_scsi_cmnd(req) = NULL; +} + +static int +csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) +{ + struct csio_lnode *ln = shost_priv(cmnd->device->host); + struct csio_hw *hw = csio_lnode_to_hw(ln); + struct csio_scsim *scsim = csio_hw_to_scsim(hw); + struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); + struct csio_ioreq *ioreq = NULL; + struct csio_scsi_qset *sqset; + unsigned long flags; + int retval; + int count, ret; + LIST_HEAD(local_q); + struct csio_scsi_level_data sld; + + if (!rn) + goto fail; + + csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n", + cmnd->device->lun, rn->flowid, rn->scsi_id); + + if (!csio_is_lnode_ready(ln)) { + csio_err(hw, + "LUN reset cannot be issued on non-ready" + " local node vnpi:0x%x (LUN:%llu)\n", + ln->vnp_flowid, cmnd->device->lun); + goto fail; + } + + /* Lnode is ready, now wait on rport node readiness */ + ret = fc_block_scsi_eh(cmnd); + if (ret) + return ret; + + /* + * If we have blocked in the previous call, at this point, either the + * remote node has come back online, or device loss timer has fired + * and the remote node is destroyed. Allow the LUN reset only for + * the former case, since LUN reset is a TMF I/O on the wire, and we + * need a valid session to issue it. + */ + if (fc_remote_port_chkready(rn->rport)) { + csio_err(hw, + "LUN reset cannot be issued on non-ready" + " remote node ssni:0x%x (LUN:%llu)\n", + rn->flowid, cmnd->device->lun); + goto fail; + } + + /* Get a free ioreq structure - SM is already set to uninit */ + ioreq = csio_get_scsi_ioreq_lock(hw, scsim); + + if (!ioreq) { + csio_err(hw, "Out of IO request elements. Active # :%d\n", + scsim->stats.n_active); + goto fail; + } + + sqset = &hw->sqset[ln->portid][smp_processor_id()]; + ioreq->nsge = 0; + ioreq->lnode = ln; + ioreq->rnode = rn; + ioreq->iq_idx = sqset->iq_idx; + ioreq->eq_idx = sqset->eq_idx; + + csio_scsi_cmnd(ioreq) = cmnd; + cmnd->host_scribble = (unsigned char *)ioreq; + csio_priv(cmnd)->wr_status = 0; + + csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET; + ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000; + + /* + * FW times the LUN reset for ioreq->tmo, so we got to wait a little + * longer (10s for now) than that to allow FW to return the timed + * out command. 
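(The waiting arithmetic just described, restated as compile-checkable C using this driver's own constants from csio_scsi.h: a 60000 ms LUN reset budget becomes ioreq->tmo = 60 s, and the handler then polls for (60 + 10) * 1000 ms in 2000 ms steps, i.e. 35 polls.)

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define CSIO_SCSI_LUNRST_TMO_MS	60000	/* from csio_scsi.h */
#define CSIO_SCSI_TM_POLL_MS	2000	/* from csio_scsi.h */

static const int tmo_secs = CSIO_SCSI_LUNRST_TMO_MS / 1000;	/* 60 */
static const int count =
	DIV_ROUND_UP((CSIO_SCSI_LUNRST_TMO_MS / 1000 + 10) * 1000,
		     CSIO_SCSI_TM_POLL_MS);			/* 35 */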
+ */ + count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS); + + /* Set cbfn */ + ioreq->io_cbfn = csio_tm_cbfn; + + /* Save of the ioreq info for later use */ + sld.level = CSIO_LEV_LUN; + sld.lnode = ioreq->lnode; + sld.rnode = ioreq->rnode; + sld.oslun = cmnd->device->lun; + + spin_lock_irqsave(&hw->lock, flags); + /* Kick off TM SM on the ioreq */ + retval = csio_scsi_start_tm(ioreq); + spin_unlock_irqrestore(&hw->lock, flags); + + if (retval != 0) { + csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n", + ioreq, retval); + goto fail_ret_ioreq; + } + + csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n", + count * (CSIO_SCSI_TM_POLL_MS / 1000)); + /* Wait for completion */ + while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) + && count--) + msleep(CSIO_SCSI_TM_POLL_MS); + + /* LUN reset timed-out */ + if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { + csio_err(hw, "LUN reset (%d:%llu) timed out\n", + cmnd->device->id, cmnd->device->lun); + + spin_lock_irq(&hw->lock); + csio_scsi_drvcleanup(ioreq); + list_del_init(&ioreq->sm.sm_list); + spin_unlock_irq(&hw->lock); + + goto fail_ret_ioreq; + } + + /* LUN reset returned, check cached status */ + if (csio_priv(cmnd)->wr_status != FW_SUCCESS) { + csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n", + cmnd->device->id, cmnd->device->lun, + csio_priv(cmnd)->wr_status); + goto fail; + } + + /* LUN reset succeeded, Start aborting affected I/Os */ + /* + * Since the host guarantees during LUN reset that there + * will not be any more I/Os to that LUN, until the LUN reset + * completes, we gather pending I/Os after the LUN reset. + */ + spin_lock_irq(&hw->lock); + csio_scsi_gather_active_ios(scsim, &sld, &local_q); + + retval = csio_scsi_abort_io_q(scsim, &local_q, 30000); + spin_unlock_irq(&hw->lock); + + /* Aborts may have timed out */ + if (retval != 0) { + csio_err(hw, + "Attempt to abort I/Os during LUN reset of %llu" + " returned %d\n", cmnd->device->lun, retval); + /* Return I/Os back to active_q */ + spin_lock_irq(&hw->lock); + list_splice_tail_init(&local_q, &scsim->active_q); + spin_unlock_irq(&hw->lock); + goto fail; + } + + CSIO_INC_STATS(rn, n_lun_rst); + + csio_info(hw, "LUN reset occurred (%d:%llu)\n", + cmnd->device->id, cmnd->device->lun); + + return SUCCESS; + +fail_ret_ioreq: + csio_put_scsi_ioreq_lock(hw, scsim, ioreq); +fail: + CSIO_INC_STATS(rn, n_lun_rst_fail); + return FAILED; +} + +static int +csio_slave_alloc(struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + sdev->hostdata = *((struct csio_lnode **)(rport->dd_data)); + + return 0; +} + +static int +csio_slave_configure(struct scsi_device *sdev) +{ + scsi_change_queue_depth(sdev, csio_lun_qdepth); + return 0; +} + +static void +csio_slave_destroy(struct scsi_device *sdev) +{ + sdev->hostdata = NULL; +} + +static int +csio_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct csio_lnode *ln = shost_priv(shost); + int rv = 1; + + spin_lock_irq(shost->host_lock); + if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list)) + goto out; + + rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ, + csio_delta_scan_tmo * HZ); +out: + spin_unlock_irq(shost->host_lock); + + return rv; +} + +struct scsi_host_template csio_fcoe_shost_template = { + .module = THIS_MODULE, + .name = CSIO_DRV_DESC, + .proc_name = KBUILD_MODNAME, + .queuecommand = csio_queuecommand, + .cmd_size = sizeof(struct 
csio_cmd_priv), + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = csio_eh_abort_handler, + .eh_device_reset_handler = csio_eh_lun_reset_handler, + .slave_alloc = csio_slave_alloc, + .slave_configure = csio_slave_configure, + .slave_destroy = csio_slave_destroy, + .scan_finished = csio_scan_finished, + .this_id = -1, + .sg_tablesize = CSIO_SCSI_MAX_SGE, + .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, + .shost_groups = csio_fcoe_lport_groups, + .max_sectors = CSIO_MAX_SECTOR_SIZE, +}; + +struct scsi_host_template csio_fcoe_shost_vport_template = { + .module = THIS_MODULE, + .name = CSIO_DRV_DESC, + .proc_name = KBUILD_MODNAME, + .queuecommand = csio_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = csio_eh_abort_handler, + .eh_device_reset_handler = csio_eh_lun_reset_handler, + .slave_alloc = csio_slave_alloc, + .slave_configure = csio_slave_configure, + .slave_destroy = csio_slave_destroy, + .scan_finished = csio_scan_finished, + .this_id = -1, + .sg_tablesize = CSIO_SCSI_MAX_SGE, + .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, + .shost_groups = csio_fcoe_vport_groups, + .max_sectors = CSIO_MAX_SECTOR_SIZE, +}; + +/* + * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs. + * @scm: SCSI Module + * @hw: HW device. + * @buf_size: buffer size + * @num_buf : Number of buffers. + * + * This routine allocates DMA buffers required for SCSI Data xfer, if + * each SGL buffer for a SCSI Read request posted by SCSI midlayer are + * not virtually contiguous. + */ +static int +csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw, + int buf_size, int num_buf) +{ + int n = 0; + struct list_head *tmp; + struct csio_dma_buf *ddp_desc = NULL; + uint32_t unit_size = 0; + + if (!num_buf) + return 0; + + if (!buf_size) + return -EINVAL; + + INIT_LIST_HEAD(&scm->ddp_freelist); + + /* Align buf size to page size */ + buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK; + /* Initialize dma descriptors */ + for (n = 0; n < num_buf; n++) { + /* Set unit size to request size */ + unit_size = buf_size; + ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL); + if (!ddp_desc) { + csio_err(hw, + "Failed to allocate ddp descriptors," + " Num allocated = %d.\n", + scm->stats.n_free_ddp); + goto no_mem; + } + + /* Allocate Dma buffers for DDP */ + ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size, + &ddp_desc->paddr, GFP_KERNEL); + if (!ddp_desc->vaddr) { + csio_err(hw, + "SCSI response DMA buffer (ddp) allocation" + " failed!\n"); + kfree(ddp_desc); + goto no_mem; + } + + ddp_desc->len = unit_size; + + /* Added it to scsi ddp freelist */ + list_add_tail(&ddp_desc->list, &scm->ddp_freelist); + CSIO_INC_STATS(scm, n_free_ddp); + } + + return 0; +no_mem: + /* release dma descs back to freelist and free dma memory */ + list_for_each(tmp, &scm->ddp_freelist) { + ddp_desc = (struct csio_dma_buf *) tmp; + tmp = csio_list_prev(tmp); + dma_free_coherent(&hw->pdev->dev, ddp_desc->len, + ddp_desc->vaddr, ddp_desc->paddr); + list_del_init(&ddp_desc->list); + kfree(ddp_desc); + } + scm->stats.n_free_ddp = 0; + + return -ENOMEM; +} + +/* + * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs. + * @scm: SCSI Module + * @hw: HW device. + * + * This routine frees ddp buffers. 
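(The allocator above shows the usual build-a-freelist-or-unwind shape: round the buffer size up to a page, allocate descriptors plus backing memory one by one, and on the first failure free everything already on the list. A standalone model with aligned_alloc() standing in for dma_alloc_coherent(); PG and the types are illustrative:)

#include <stdlib.h>
#include <stddef.h>

#define PG	4096u

struct ddp_buf { void *va; size_t len; struct ddp_buf *next; };

static struct ddp_buf *alloc_pool(size_t buf_size, int n)
{
	struct ddp_buf *head = NULL, *d = NULL;

	/* Round the requested size up to a whole page. */
	buf_size = (buf_size + PG - 1) & ~(size_t)(PG - 1);

	while (n-- > 0) {
		d = malloc(sizeof(*d));
		if (d)
			d->va = aligned_alloc(PG, buf_size);
		if (!d || !d->va)
			goto unwind;
		d->len = buf_size;
		d->next = head;		/* push onto the freelist */
		head = d;
	}
	return head;

unwind:
	free(d);			/* descriptor without backing memory */
	while (head) {
		d = head;
		head = d->next;
		free(d->va);
		free(d);
	}
	return NULL;
}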
+ */ +static void +csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw) +{ + struct list_head *tmp; + struct csio_dma_buf *ddp_desc; + + /* release dma descs back to freelist and free dma memory */ + list_for_each(tmp, &scm->ddp_freelist) { + ddp_desc = (struct csio_dma_buf *) tmp; + tmp = csio_list_prev(tmp); + dma_free_coherent(&hw->pdev->dev, ddp_desc->len, + ddp_desc->vaddr, ddp_desc->paddr); + list_del_init(&ddp_desc->list); + kfree(ddp_desc); + } + scm->stats.n_free_ddp = 0; +} + +/** + * csio_scsim_init - Initialize SCSI Module + * @scm: SCSI Module + * @hw: HW module + * + */ +int +csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) +{ + int i; + struct csio_ioreq *ioreq; + struct csio_dma_buf *dma_buf; + + INIT_LIST_HEAD(&scm->active_q); + scm->hw = hw; + + scm->proto_cmd_len = sizeof(struct fcp_cmnd); + scm->proto_rsp_len = CSIO_SCSI_RSP_LEN; + scm->max_sge = CSIO_SCSI_MAX_SGE; + + spin_lock_init(&scm->freelist_lock); + + /* Pre-allocate ioreqs and initialize them */ + INIT_LIST_HEAD(&scm->ioreq_freelist); + for (i = 0; i < csio_scsi_ioreqs; i++) { + + ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); + if (!ioreq) { + csio_err(hw, + "I/O request element allocation failed, " + " Num allocated = %d.\n", + scm->stats.n_free_ioreq); + + goto free_ioreq; + } + + /* Allocate Dma buffers for Response Payload */ + dma_buf = &ioreq->dma_buf; + dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL, + &dma_buf->paddr); + if (!dma_buf->vaddr) { + csio_err(hw, + "SCSI response DMA buffer allocation" + " failed!\n"); + kfree(ioreq); + goto free_ioreq; + } + + dma_buf->len = scm->proto_rsp_len; + + /* Set state to uninit */ + csio_init_state(&ioreq->sm, csio_scsis_uninit); + INIT_LIST_HEAD(&ioreq->gen_list); + init_completion(&ioreq->cmplobj); + + list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); + CSIO_INC_STATS(scm, n_free_ioreq); + } + + if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs)) + goto free_ioreq; + + return 0; + +free_ioreq: + /* + * Free up existing allocations, since an error + * from here means we are returning for good + */ + while (!list_empty(&scm->ioreq_freelist)) { + struct csio_sm *tmp; + + tmp = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&tmp->sm_list); + ioreq = (struct csio_ioreq *)tmp; + + dma_buf = &ioreq->dma_buf; + dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ioreq); + } + + scm->stats.n_free_ioreq = 0; + + return -ENOMEM; +} + +/** + * csio_scsim_exit: Uninitialize SCSI Module + * @scm: SCSI Module + * + */ +void +csio_scsim_exit(struct csio_scsim *scm) +{ + struct csio_ioreq *ioreq; + struct csio_dma_buf *dma_buf; + + while (!list_empty(&scm->ioreq_freelist)) { + struct csio_sm *tmp; + + tmp = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&tmp->sm_list); + ioreq = (struct csio_ioreq *)tmp; + + dma_buf = &ioreq->dma_buf; + dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr, + dma_buf->paddr); + + kfree(ioreq); + } + + scm->stats.n_free_ioreq = 0; + + csio_scsi_free_ddp_bufs(scm, scm->hw); +} diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h new file mode 100644 index 000000000..39dda3c88 --- /dev/null +++ b/drivers/scsi/csiostor/csio_scsi.h @@ -0,0 +1,352 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. 
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_SCSI_H__
+#define __CSIO_SCSI_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/completion.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "csio_defs.h"
+#include "csio_wr.h"
+
+extern struct scsi_host_template csio_fcoe_shost_template;
+extern struct scsi_host_template csio_fcoe_shost_vport_template;
+
+extern int csio_scsi_eqsize;
+extern int csio_scsi_iqlen;
+extern int csio_scsi_ioreqs;
+extern uint32_t csio_max_scan_tmo;
+extern uint32_t csio_delta_scan_tmo;
+extern int csio_lun_qdepth;
+
+/*
+ **************************** NOTE *******************************
+ * How do we calculate the max number of FCoE SCSI SGEs? Here is the math:
+ * Max egress WR size = 512 bytes.
+ * One SCSI egress WR has the following fixed number of bytes:
+ *      48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
+ *    + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
+ *    ------
+ *      80
+ *    ------
+ * That leaves us with 512 - 80 = 432 bytes for the data SGE. Using
+ * struct ulptx_sgl header for the SGE consumes:
+ * - 4 bytes for cmnd_sge.
+ * - 12 bytes for the first SGL.
+ * That leaves us with 416 bytes for the remaining SGE pairs, which
+ * is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
+ * or 34 SGEs. Adding the first SGE gives us 35 SGEs.
+ */
+#define CSIO_SCSI_MAX_SGE		35
+#define CSIO_SCSI_ABRT_TMO_MS		60000
+#define CSIO_SCSI_LUNRST_TMO_MS		60000
+#define CSIO_SCSI_TM_POLL_MS		2000	/* should be less than
+						 * all TM timeouts.
+						 */
+#define CSIO_SCSI_IQ_WRSZ		128
+#define CSIO_SCSI_IQSIZE		(csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
+
+#define	CSIO_MAX_SNS_LEN		128
+#define	CSIO_SCSI_RSP_LEN	(FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
+
+/* Reference to scsi_cmnd */
+#define csio_scsi_cmnd(req)		((req)->scratch1)
+
+struct csio_scsi_stats {
+	uint64_t		n_tot_success;	/* Total number of good I/Os */
+	uint32_t		n_rn_nr_error;	/* No. of remote-node-not-
+						 * ready errors
+						 */
+	uint32_t		n_hw_nr_error;	/* No. of hw-module-not-
+						 * ready errors
+						 */
+	uint32_t		n_dmamap_error;	/* No. of DMA map errors */
+	uint32_t		n_unsupp_sge_error; /* No. of too-many-SGEs
+						     * errors.
+						     */
+	uint32_t		n_no_req_error;	/* No. of out-of-ioreqs errors */
+	uint32_t		n_busy_error;	/* No. of -EBUSY errors */
+	uint32_t		n_hosterror;	/* No. of FW_HOSTERROR I/Os */
+	uint32_t		n_rsperror;	/* No. of response errors */
+	uint32_t		n_autosense;	/* No. of auto sense replies */
+	uint32_t		n_ovflerror;	/* No. of overflow errors */
+	uint32_t		n_unflerror;	/* No. of underflow errors */
+	uint32_t		n_rdev_nr_error;/* No. of rdev-not-ready
+						 * errors
+						 */
+	uint32_t		n_rdev_lost_error;/* No. of rdev lost errors */
+	uint32_t		n_rdev_logo_error;/* No. of rdev logo errors */
+	uint32_t		n_link_down_error;/* No. of link down errors */
+	uint32_t		n_no_xchg_error; /* No. of no-exchange errors */
+	uint32_t		n_unknown_error;/* No. of unhandled errors */
+	uint32_t		n_aborted;	/* No. of aborted I/Os */
+	uint32_t		n_abrt_timedout; /* No. of abort timeouts */
+	uint32_t		n_abrt_fail;	/* No. of abort failures */
+	uint32_t		n_abrt_dups;	/* No. of duplicate aborts */
+	uint32_t		n_abrt_race_comp; /* No. of aborts that raced
+						   * with completions.
+						   */
+	uint32_t		n_abrt_busy_error;/* No. of abort failures
+						   * due to -EBUSY.
+						   */
+	uint32_t		n_closed;	/* No. of closed I/Os */
+	uint32_t		n_cls_busy_error; /* No. of close failures
+						   * due to -EBUSY.
+						   */
+	uint32_t		n_active;	/* No. of I/Os in active_q */
+	uint32_t		n_tm_active;	/* No. of TMs in active_q */
+	uint32_t		n_wcbfn;	/* No. of I/Os in worker
+						 * cbfn q
+						 */
+	uint32_t		n_free_ioreq;	/* No. of freelist entries */
+	uint32_t		n_free_ddp;	/* No. of DDP freelist entries */
+	uint32_t		n_unaligned;	/* No. of unaligned SGLs */
+	uint32_t		n_inval_cplop;	/* No. of invalid CPL ops in IQ */
+	uint32_t		n_inval_scsiop;	/* No. of invalid SCSI ops in IQ */
+};
+
+struct csio_scsim {
+	struct csio_hw		*hw;		/* Pointer to HW module */
+	uint8_t			max_sge;	/* Max SGE */
+	uint8_t			proto_cmd_len;	/* Proto specific SCSI
+						 * cmd length
+						 */
+	uint16_t		proto_rsp_len;	/* Proto specific SCSI
+						 * response length
+						 */
+	spinlock_t		freelist_lock;	/* Lock for ioreq freelist */
+	struct list_head	active_q;	/* Outstanding SCSI I/Os */
+	struct list_head	ioreq_freelist;	/* Free list of ioreq's */
+	struct list_head	ddp_freelist;	/* DDP descriptor freelist */
+	struct csio_scsi_stats	stats;		/* This module's statistics */
+};
+
+/* State machine defines */
+enum csio_scsi_ev {
+	CSIO_SCSIE_START_IO = 1,	/* Start a regular SCSI IO */
+	CSIO_SCSIE_START_TM,		/* Start a TM IO */
+	CSIO_SCSIE_COMPLETED,		/* IO Completed */
+	CSIO_SCSIE_ABORT,		/* Abort IO */
+	CSIO_SCSIE_ABORTED,		/* IO Aborted */
+	CSIO_SCSIE_CLOSE,		/* Close exchange */
+	CSIO_SCSIE_CLOSED,		/* Exchange closed */
+	CSIO_SCSIE_DRVCLEANUP,		/* Driver wants to manually
+					 * clean up this I/O.
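(The events above drive a small function-pointer state machine: each csio_scsis_*() handler in csio_scsi.c is a state, csio_set_state() swaps the pointer stored in the request, and csio_post_event() simply calls the current state with the event. A self-contained toy version of the convention, with made-up event numbers:)

#include <stdio.h>

struct sm;
typedef void (*state_fn)(struct sm *, int);
struct sm { state_fn state; int status; };

static void set_state(struct sm *s, state_fn st) { s->state = st; }
static void post_event(struct sm *s, int evt) { s->state(s, evt); }

static void st_active(struct sm *s, int evt);

static void st_uninit(struct sm *s, int evt)
{
	if (evt == 1)			/* START_IO */
		set_state(s, st_active);
	else
		s->status = -1;		/* unhandled event in this state */
}

static void st_active(struct sm *s, int evt)
{
	if (evt == 3)			/* COMPLETED */
		set_state(s, st_uninit);
}

int main(void)
{
	struct sm s = { st_uninit, 0 };

	post_event(&s, 1);		/* uninit -> io_active */
	post_event(&s, 3);		/* io_active -> uninit */
	return s.status;
}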
+ */ +}; + +enum csio_scsi_lev { + CSIO_LEV_ALL = 1, + CSIO_LEV_LNODE, + CSIO_LEV_RNODE, + CSIO_LEV_LUN, +}; + +struct csio_scsi_level_data { + enum csio_scsi_lev level; + struct csio_rnode *rnode; + struct csio_lnode *lnode; + uint64_t oslun; +}; + +struct csio_cmd_priv { + uint8_t fc_tm_flags; /* task management flags */ + uint16_t wr_status; +}; + +static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline struct csio_ioreq * +csio_get_scsi_ioreq(struct csio_scsim *scm) +{ + struct csio_sm *req; + + if (likely(!list_empty(&scm->ioreq_freelist))) { + req = list_first_entry(&scm->ioreq_freelist, + struct csio_sm, sm_list); + list_del_init(&req->sm_list); + CSIO_DEC_STATS(scm, n_free_ioreq); + return (struct csio_ioreq *)req; + } else + return NULL; +} + +static inline void +csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq) +{ + list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); + CSIO_INC_STATS(scm, n_free_ioreq); +} + +static inline void +csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist, + int n) +{ + list_splice_init(reqlist, &scm->ioreq_freelist); + scm->stats.n_free_ioreq += n; +} + +static inline struct csio_dma_buf * +csio_get_scsi_ddp(struct csio_scsim *scm) +{ + struct csio_dma_buf *ddp; + + if (likely(!list_empty(&scm->ddp_freelist))) { + ddp = list_first_entry(&scm->ddp_freelist, + struct csio_dma_buf, list); + list_del_init(&ddp->list); + CSIO_DEC_STATS(scm, n_free_ddp); + return ddp; + } else + return NULL; +} + +static inline void +csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp) +{ + list_add_tail(&ddp->list, &scm->ddp_freelist); + CSIO_INC_STATS(scm, n_free_ddp); +} + +static inline void +csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist, + int n) +{ + list_splice_tail_init(reqlist, &scm->ddp_freelist); + scm->stats.n_free_ddp += n; +} + +static inline void +csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED); + if (csio_list_deleted(&ioreq->sm.sm_list)) + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED); + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED); + list_add_tail(&ioreq->sm.sm_list, cbfn_q); +} + +static inline void +csio_scsi_drvcleanup(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP); +} + +/* + * csio_scsi_start_io - Kick starts the IO SM. + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_start_io(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO); + return ioreq->drv_status; +} + +/* + * csio_scsi_start_tm - Kicks off the Task management IO SM. + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_start_tm(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM); + return ioreq->drv_status; +} + +/* + * csio_scsi_abort - Abort an IO request + * @req: io request SM. + * + * needs to be called with lock held. 
+ */ +static inline int +csio_scsi_abort(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT); + return ioreq->drv_status; +} + +/* + * csio_scsi_close - Close an IO request + * @req: io request SM. + * + * needs to be called with lock held. + */ +static inline int +csio_scsi_close(struct csio_ioreq *ioreq) +{ + csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE); + return ioreq->drv_status; +} + +void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *); +int csio_scsim_cleanup_io(struct csio_scsim *, bool abort); +int csio_scsim_cleanup_io_lnode(struct csio_scsim *, + struct csio_lnode *); +struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, + void *, uint8_t **); +int csio_scsi_qconfig(struct csio_hw *); +int csio_scsim_init(struct csio_scsim *, struct csio_hw *); +void csio_scsim_exit(struct csio_scsim *); + +#endif /* __CSIO_SCSI_H__ */ diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c new file mode 100644 index 000000000..fe0355c96 --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.c @@ -0,0 +1,1720 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "t4_values.h"
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)				\
+	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
+							reg * sizeof(uint32_t));
+}
+
+/* Free list buffer size */
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+	return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+	/*
+	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
+	 * number of bytes in the freelist queue. This translates to at least
+	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
+	 */
+	if (flq->inc_idx >= 8) {
+		csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
+				  PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
+			      MYPF_REG(SGE_PF_KDOORBELL_A));
+		flq->inc_idx &= 7;
+	}
+}
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+	csio_wr_reg32(hw, CIDXINC_V(0)		|
+			  INGRESSQID_V(iqid)	|
+			  TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
+		      MYPF_REG(SGE_PF_GTS_A));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_sge *sge = &wrm->sge;
+	__be64 *d = (__be64 *)(flq->vstart);
+	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+	uint64_t paddr;
+	int sreg = flq->un.fl.sreg;
+	int n = flq->credits;
+
+	while (n--) {
+		buf->len = sge->sge_fl_buf_size[sreg];
+		buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+						&buf->paddr, GFP_KERNEL);
+		if (!buf->vaddr) {
+			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+			return -ENOMEM;
+		}
+
+		paddr = buf->paddr | (sreg & 0xF);
+
+		*d++ = cpu_to_be64(paddr);
+		buf++;
+	}
+
+	return 0;
+}
+
+/*
+ * csio_wr_update_fl - Update a freelist queue after posting buffers.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ * @n: Number of buffers posted.
+ *
+ * Advance the producer and incremental indices by @n, wrapping the
+ * producer index at the queue size.
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+	flq->inc_idx += n;
+	flq->pidx += n;
+	if (unlikely(flq->pidx >= flq->credits))
+		flq->pidx -= (uint16_t)flq->credits;
+
+	CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Size of WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: What is the FL buffer size register?
+ * @iq_intx_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle returned to the caller in the form of the queue's index
+ * into the q_arr array.
+ * If the user has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * idx in the ingress queue's flq.idx. This is how a freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+		uint16_t type, void *owner, uint32_t nflb, int sreg,
+		iq_handler_t iq_intx_handler)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	struct csio_q	*q, *flq;
+	int		free_idx = wrm->free_qidx;
+	int		ret_idx = free_idx;
+	uint32_t	qsz;
+	int flq_idx;
+
+	if (free_idx >= wrm->num_q) {
+		csio_err(hw, "No more free queues.\n");
+		return -1;
+	}
+
+	switch (type) {
+	case CSIO_EGRESS:
+		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+		break;
+	case CSIO_INGRESS:
+		switch (wrsize) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			break;
+		default:
+			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+				 wrsize);
+			return -1;
+		}
+
+		/*
+		 * Number of elements must be a multiple of 16
+		 * So this includes status page size
+		 */
+		qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+		break;
+	case CSIO_FREELIST:
+		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+		break;
+	default:
+		csio_err(hw, "Invalid queue type: 0x%x\n", type);
+		return -1;
+	}
+
+	q = wrm->q_arr[free_idx];
+
+	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+				       GFP_KERNEL);
+	if (!q->vstart) {
+		csio_err(hw,
+			 "Failed to allocate DMA memory for "
+			 "queue at id: %d size: %d\n", free_idx, qsize);
+		return -1;
+	}
+
+	q->type		= type;
+	q->owner	= owner;
+	q->pidx		= q->cidx = q->inc_idx = 0;
+	q->size		= qsz;
+	q->wr_sz	= wrsize;	/* If using fixed size WRs */
+
+	wrm->free_qidx++;
+
+	if (type == CSIO_INGRESS) {
+		/* Since queue area is set to zero */
+		q->un.iq.genbit	= 1;
+
+		/*
+		 * Ingress queue status page size is always the size of
+		 * the ingress queue entry.
+		 */
+		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
+		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
+							- q->wr_sz);
+
+		/* Allocate memory for FL if requested */
+		if (nflb > 0) {
+			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+						  sizeof(__be64), CSIO_FREELIST,
+						  owner, 0, sreg, NULL);
+			if (flq_idx == -1) {
+				csio_err(hw,
+					 "Failed to allocate FL queue"
+					 " for IQ idx:%d\n", free_idx);
+				return -1;
+			}
+
+			/* Associate the new FL with the ingress queue */
+			q->un.iq.flq_idx = flq_idx;
+
+			flq = wrm->q_arr[q->un.iq.flq_idx];
+			flq->un.fl.bufs = kcalloc(flq->credits,
+						  sizeof(struct csio_dma_buf),
+						  GFP_KERNEL);
+			if (!flq->un.fl.bufs) {
+				csio_err(hw,
+					 "Failed to allocate FL queue bufs"
+					 " for IQ idx:%d\n", free_idx);
+				return -1;
+			}
+
+			flq->un.fl.packen = 0;
+			flq->un.fl.offset = 0;
+			flq->un.fl.sreg = sreg;
+
+			/* Fill up the free list buffers */
+			if (csio_wr_fill_fl(hw, flq))
+				return -1;
+
+			/*
+			 * Make sure in a FLQ, at least 1 credit (8 FL buffers)
+			 * remains unpopulated, otherwise HW thinks
+			 * FLQ is empty.
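+			 * One queue credit is CSIO_QCREDIT_SZ (64) bytes,
+			 * i.e. 8 freelist pointers of 8 bytes each; this is
+			 * why pidx is primed to credits - 8 below, and why
+			 * csio_wr_ring_fldb() rings the doorbell in units
+			 * of 8 buffers.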
+ */ + flq->pidx = flq->inc_idx = flq->credits - 8; + } else { + q->un.iq.flq_idx = -1; + } + + /* Associate the IQ INTx handler. */ + q->un.iq.iq_intx_handler = iq_intx_handler; + + csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID; + + } else if (type == CSIO_EGRESS) { + q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ; + q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz + - csio_wr_qstat_pgsz(hw)); + csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID; + } else { /* Freelist */ + q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64); + q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz + - csio_wr_qstat_pgsz(hw)); + csio_q_flid(hw, ret_idx) = CSIO_MAX_QID; + } + + return ret_idx; +} + +/* + * csio_wr_iq_create_rsp - Response handler for IQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @iq_idx: Ingress queue that got created. + * + * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids. + */ +static int +csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) +{ + struct csio_iq_params iqp; + enum fw_retval retval; + uint32_t iq_id; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + + csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp); + + if (retval != FW_SUCCESS) { + csio_err(hw, "IQ cmd returned 0x%x!\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_q_iqid(hw, iq_idx) = iqp.iqid; + csio_q_physiqid(hw, iq_idx) = iqp.physiqid; + csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0; + csio_q_inc_idx(hw, iq_idx) = 0; + + /* Actual iq-id. */ + iq_id = iqp.iqid - hw->wrm.fw_iq_start; + + /* Set the iq-id to iq map table. */ + if (iq_id >= CSIO_MAX_IQ) { + csio_err(hw, + "Exceeding MAX_IQ(%d) supported!" + " iqid:%d rel_iqid:%d FW iq_start:%d\n", + CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + csio_q_set_intr_map(hw, iq_idx, iq_id); + + /* + * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE + * ingress context of this queue. This will block interrupts to + * this queue until the next GTS write. Therefore, we do a + * 0-cidx increment GTS write for this queue just to clear the + * interrupt_sent bit. This will re-enable interrupts to this + * queue. + */ + csio_wr_sge_intr_enable(hw, iqp.physiqid); + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) { + struct csio_q *flq = hw->wrm.q_arr[flq_idx]; + + csio_q_flid(hw, flq_idx) = iqp.fl0id; + csio_q_cidx(hw, flq_idx) = 0; + csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; + csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8; + + /* Now update SGE about the buffers allocated during init */ + csio_wr_ring_fldb(hw, flq); + } + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_wr_iq_create - Configure an Ingress queue with FW. + * @hw: The HW module. + * @priv: Private data object. + * @iq_idx: Ingress queue index in the WR module. + * @vec: MSIX vector. + * @portid: PCIE Channel to be associated with this queue. + * @async: Is this a FW asynchronous message handling queue? + * @cbfn: Completion callback. + * + * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox + * with alloc/write bits set. 
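+ *
+ * A hedged usage sketch (the sizes, vector and port are placeholders,
+ * not values from this driver); passing a NULL @cbfn makes the call
+ * synchronous, as the tail of this function shows:
+ *
+ *	iq_idx = csio_wr_alloc_q(hw, qsize, wrsize, CSIO_INGRESS,
+ *				 owner, nflb, sreg, intx_handler);
+ *	if (iq_idx != -1)
+ *		rv = csio_wr_iq_create(hw, NULL, iq_idx, vec, portid,
+ *				       false, NULL);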
+ */ +int +csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx, + uint32_t vec, uint8_t portid, bool async, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_mb *mbp; + struct csio_iq_params iqp; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + csio_q_portid(hw, iq_idx) = portid; + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "IQ command out of memory!\n"); + return -ENOMEM; + } + + switch (hw->intr_mode) { + case CSIO_IM_INTX: + case CSIO_IM_MSI: + /* For interrupt forwarding queue only */ + if (hw->intr_iq_idx == iq_idx) + iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; + else + iqp.iqandst = X_INTERRUPTDESTINATION_IQ; + iqp.iqandstindex = + csio_q_physiqid(hw, hw->intr_iq_idx); + break; + case CSIO_IM_MSIX: + iqp.iqandst = X_INTERRUPTDESTINATION_PCIE; + iqp.iqandstindex = (uint16_t)vec; + break; + case CSIO_IM_NONE: + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + /* Pass in the ingress queue cmd parameters */ + iqp.pfn = hw->pfn; + iqp.vfn = 0; + iqp.iq_start = 1; + iqp.viid = 0; + iqp.type = FW_IQ_TYPE_FL_INT_CAP; + iqp.iqasynch = async; + if (csio_intr_coalesce_cnt) + iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER; + else + iqp.iqanus = X_UPDATESCHEDULING_TIMER; + iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT; + iqp.iqpciech = portid; + iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg; + + switch (csio_q_wr_sz(hw, iq_idx)) { + case 16: + iqp.iqesize = 0; break; + case 32: + iqp.iqesize = 1; break; + case 64: + iqp.iqesize = 2; break; + case 128: + iqp.iqesize = 3; break; + } + + iqp.iqsize = csio_q_size(hw, iq_idx) / + csio_q_wr_sz(hw, iq_idx); + iqp.iqaddr = csio_q_pstart(hw, iq_idx); + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) { + enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id); + struct csio_q *flq = hw->wrm.q_arr[flq_idx]; + + iqp.fl0paden = 1; + iqp.fl0packen = flq->un.fl.packen ? 1 : 0; + iqp.fl0fbmin = X_FETCHBURSTMIN_64B; + iqp.fl0fbmax = ((chip == CHELSIO_T5) ? + X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B); + iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ; + iqp.fl0addr = csio_q_pstart(hw, flq_idx); + } + + csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of IQ cmd failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_iq_create_rsp(hw, mbp, iq_idx); +} + +/* + * csio_wr_eq_create_rsp - Response handler for EQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @eq_idx: Egress queue that got created. + * + * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids. + */ +static int +csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) +{ + struct csio_eq_params eqp; + enum fw_retval retval; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp); + + if (retval != FW_SUCCESS) { + csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid; + csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid; + csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0; + csio_q_inc_idx(hw, eq_idx) = 0; + + mempool_free(mbp, hw->mb_mempool); + + return 0; +} + +/* + * csio_wr_eq_create - Configure an Egress queue with FW. + * @hw: HW module. + * @priv: Private data. + * @eq_idx: Egress queue index in the WR module. 
+ * @iq_idx: Associated ingress queue index. + * @cbfn: Completion callback. + * + * This API configures a offload egress queue with FW by issuing a + * FW_EQ_OFLD_CMD (with alloc + write ) mailbox. + */ +int +csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx, + int iq_idx, uint8_t portid, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + struct csio_mb *mbp; + struct csio_eq_params eqp; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) { + csio_err(hw, "EQ command out of memory!\n"); + return -ENOMEM; + } + + eqp.pfn = hw->pfn; + eqp.vfn = 0; + eqp.eqstart = 1; + eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE; + eqp.iqid = csio_q_iqid(hw, iq_idx); + eqp.fbmin = X_FETCHBURSTMIN_64B; + eqp.fbmax = X_FETCHBURSTMAX_512B; + eqp.cidxfthresh = 0; + eqp.pciechn = portid; + eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ; + eqp.eqaddr = csio_q_pstart(hw, eq_idx); + + csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, + &eqp, cbfn); + + if (csio_mb_issue(hw, mbp)) { + csio_err(hw, "Issue of EQ OFLD cmd failed!\n"); + mempool_free(mbp, hw->mb_mempool); + return -EINVAL; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx); +} + +/* + * csio_wr_iq_destroy_rsp - Response handler for IQ removal. + * @hw: The HW module. + * @mbp: Mailbox. + * @iq_idx: Ingress queue that was freed. + * + * Handle FW_IQ_CMD (free) mailbox completion. + */ +static int +csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx) +{ + enum fw_retval retval = csio_mb_fw_retval(mbp); + int rv = 0; + + if (retval != FW_SUCCESS) + rv = -EINVAL; + + mempool_free(mbp, hw->mb_mempool); + + return rv; +} + +/* + * csio_wr_iq_destroy - Free an ingress queue. + * @hw: The HW module. + * @priv: Private data object. + * @iq_idx: Ingress queue index to destroy + * @cbfn: Completion callback. + * + * This API frees an ingress queue by issuing the FW_IQ_CMD + * with the free bit set. + */ +static int +csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx, + void (*cbfn)(struct csio_hw *, struct csio_mb *)) +{ + int rv = 0; + struct csio_mb *mbp; + struct csio_iq_params iqp; + int flq_idx; + + memset(&iqp, 0, sizeof(struct csio_iq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) + return -ENOMEM; + + iqp.pfn = hw->pfn; + iqp.vfn = 0; + iqp.iqid = csio_q_iqid(hw, iq_idx); + iqp.type = FW_IQ_TYPE_FL_INT_CAP; + + flq_idx = csio_q_iq_flq_idx(hw, iq_idx); + if (flq_idx != -1) + iqp.fl0id = csio_q_flid(hw, flq_idx); + else + iqp.fl0id = 0xFFFF; + + iqp.fl1id = 0xFFFF; + + csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn); + + rv = csio_mb_issue(hw, mbp); + if (rv != 0) { + mempool_free(mbp, hw->mb_mempool); + return rv; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx); +} + +/* + * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ creation. + * @hw: The HW module. + * @mbp: Mailbox. + * @eq_idx: Egress queue that was freed. + * + * Handle FW_OFLD_EQ_CMD (free) mailbox completion. + */ +static int +csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx) +{ + enum fw_retval retval = csio_mb_fw_retval(mbp); + int rv = 0; + + if (retval != FW_SUCCESS) + rv = -EINVAL; + + mempool_free(mbp, hw->mb_mempool); + + return rv; +} + +/* + * csio_wr_eq_destroy - Free an Egress queue. + * @hw: The HW module. + * @priv: Private data object. 
+ * @eq_idx: Egress queue index to destroy + * @cbfn: Completion callback. + * + * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD + * with the free bit set. + */ +static int +csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx, + void (*cbfn) (struct csio_hw *, struct csio_mb *)) +{ + int rv = 0; + struct csio_mb *mbp; + struct csio_eq_params eqp; + + memset(&eqp, 0, sizeof(struct csio_eq_params)); + + mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); + if (!mbp) + return -ENOMEM; + + eqp.pfn = hw->pfn; + eqp.vfn = 0; + eqp.eqid = csio_q_eqid(hw, eq_idx); + + csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn); + + rv = csio_mb_issue(hw, mbp); + if (rv != 0) { + mempool_free(mbp, hw->mb_mempool); + return rv; + } + + if (cbfn != NULL) + return 0; + + return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx); +} + +/* + * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page + * @hw: HW module + * @qidx: Egress queue index + * + * Cleanup the Egress queue status page. + */ +static void +csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx) +{ + struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx]; + struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; + + memset(stp, 0, sizeof(*stp)); +} + +/* + * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ + * @hw: HW module + * @qidx: Ingress queue index + * + * Cleanup the footer entries in the given ingress queue, + * set to 1 the internal copy of genbit. + */ +static void +csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + void *wr; + struct csio_iqwr_footer *ftr; + uint32_t i = 0; + + /* set to 1 since we are just about zero out genbit */ + q->un.iq.genbit = 1; + + for (i = 0; i < q->credits; i++) { + /* Get the WR */ + wr = (void *)((uintptr_t)q->vstart + + (i * q->wr_sz)); + /* Get the footer */ + ftr = (struct csio_iqwr_footer *)((uintptr_t)wr + + (q->wr_sz - sizeof(*ftr))); + /* Zero out footer */ + memset(ftr, 0, sizeof(*ftr)); + } +} + +int +csio_wr_destroy_queues(struct csio_hw *hw, bool cmd) +{ + int i, flq_idx; + struct csio_q *q; + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + int rv; + + for (i = 0; i < wrm->free_qidx; i++) { + q = wrm->q_arr[i]; + + switch (q->type) { + case CSIO_EGRESS: + if (csio_q_eqid(hw, i) != CSIO_MAX_QID) { + csio_wr_cleanup_eq_stpg(hw, i); + if (!cmd) { + csio_q_eqid(hw, i) = CSIO_MAX_QID; + continue; + } + + rv = csio_wr_eq_destroy(hw, NULL, i, NULL); + if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) + cmd = false; + + csio_q_eqid(hw, i) = CSIO_MAX_QID; + } + fallthrough; + case CSIO_INGRESS: + if (csio_q_iqid(hw, i) != CSIO_MAX_QID) { + csio_wr_cleanup_iq_ftr(hw, i); + if (!cmd) { + csio_q_iqid(hw, i) = CSIO_MAX_QID; + flq_idx = csio_q_iq_flq_idx(hw, i); + if (flq_idx != -1) + csio_q_flid(hw, flq_idx) = + CSIO_MAX_QID; + continue; + } + + rv = csio_wr_iq_destroy(hw, NULL, i, NULL); + if ((rv == -EBUSY) || (rv == -ETIMEDOUT)) + cmd = false; + + csio_q_iqid(hw, i) = CSIO_MAX_QID; + flq_idx = csio_q_iq_flq_idx(hw, i); + if (flq_idx != -1) + csio_q_flid(hw, flq_idx) = CSIO_MAX_QID; + } + break; + default: + break; + } + } + + hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED; + + return 0; +} + +/* + * csio_wr_get - Get requested size of WR entry/entries from queue. + * @hw: HW module. + * @qidx: Index of queue. + * @size: Cumulative size of Work request(s). + * @wrp: Work request pair. 
+ * + * If requested credits are available, return the start address of the + * work request in the work request pair. Set pidx accordingly and + * return. + * + * NOTE about WR pair: + * ================== + * A WR can start towards the end of a queue, and then continue at the + * beginning, since the queue is considered to be circular. This will + * require a pair of address/size to be passed back to the caller - + * hence Work request pair format. + */ +int +csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size, + struct csio_wr_pair *wrp) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + void *cwr = (void *)((uintptr_t)(q->vstart) + + (q->pidx * CSIO_QCREDIT_SZ)); + struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; + uint16_t cidx = q->cidx = ntohs(stp->cidx); + uint16_t pidx = q->pidx; + uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ); + int req_credits = req_sz / CSIO_QCREDIT_SZ; + int credits; + + CSIO_DB_ASSERT(q->owner != NULL); + CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); + CSIO_DB_ASSERT(cidx <= q->credits); + + /* Calculate credits */ + if (pidx > cidx) { + credits = q->credits - (pidx - cidx) - 1; + } else if (cidx > pidx) { + credits = cidx - pidx - 1; + } else { + /* cidx == pidx, empty queue */ + credits = q->credits; + CSIO_INC_STATS(q, n_qempty); + } + + /* + * Check if we have enough credits. + * credits = 1 implies queue is full. + */ + if (!credits || (req_credits > credits)) { + CSIO_INC_STATS(q, n_qfull); + return -EBUSY; + } + + /* + * If we are here, we have enough credits to satisfy the + * request. Check if we are near the end of q, and if WR spills over. + * If it does, use the first addr/size to cover the queue until + * the end. Fit the remainder portion of the request at the top + * of queue and return it in the second addr/len. Set pidx + * accordingly. + */ + if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) { + wrp->addr1 = cwr; + wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr); + wrp->addr2 = q->vstart; + wrp->size2 = req_sz - wrp->size1; + q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) / + CSIO_QCREDIT_SZ); + CSIO_INC_STATS(q, n_qwrap); + CSIO_INC_STATS(q, n_eq_wr_split); + } else { + wrp->addr1 = cwr; + wrp->size1 = req_sz; + wrp->addr2 = NULL; + wrp->size2 = 0; + q->pidx += (uint16_t)req_credits; + + /* We are the end of queue, roll back pidx to top of queue */ + if (unlikely(q->pidx == q->credits)) { + q->pidx = 0; + CSIO_INC_STATS(q, n_qwrap); + } + } + + q->inc_idx = (uint16_t)req_credits; + + CSIO_INC_STATS(q, n_tot_reqs); + + return 0; +} + +/* + * csio_wr_copy_to_wrp - Copies given data into WR. + * @data_buf - Data buffer + * @wrp - Work request pair. + * @wr_off - Work request offset. + * @data_len - Data length. + * + * Copies the given data in Work Request. Work request pair(wrp) specifies + * address information of Work request. + * Returns: none + */ +void +csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp, + uint32_t wr_off, uint32_t data_len) +{ + uint32_t nbytes; + + /* Number of space available in buffer addr1 of WRP */ + nbytes = ((wrp->size1 - wr_off) >= data_len) ? 
+ data_len : (wrp->size1 - wr_off); + + memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes); + data_len -= nbytes; + + /* Write the remaining data from the begining of circular buffer */ + if (data_len) { + CSIO_DB_ASSERT(data_len <= wrp->size2); + CSIO_DB_ASSERT(wrp->addr2 != NULL); + memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len); + } +} + +/* + * csio_wr_issue - Notify chip of Work request. + * @hw: HW module. + * @qidx: Index of queue. + * @prio: 0: Low priority, 1: High priority + * + * Rings the SGE Doorbell by writing the current producer index of the passed + * in queue into the register. + * + */ +int +csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *q = wrm->q_arr[qidx]; + + CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx)); + + wmb(); + /* Ring SGE Doorbell writing q->pidx into it */ + csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) | + PIDX_T5_V(q->inc_idx) | DBTYPE_F, + MYPF_REG(SGE_PF_KDOORBELL_A)); + q->inc_idx = 0; + + return 0; +} + +static inline uint32_t +csio_wr_avail_qcredits(struct csio_q *q) +{ + if (q->pidx > q->cidx) + return q->pidx - q->cidx; + else if (q->cidx > q->pidx) + return q->credits - (q->cidx - q->pidx); + else + return 0; /* cidx == pidx, empty queue */ +} + +/* + * csio_wr_inval_flq_buf - Invalidate a free list buffer entry. + * @hw: HW module. + * @flq: The freelist queue. + * + * Invalidate the driver's version of a freelist buffer entry, + * without freeing the associated the DMA memory. The entry + * to be invalidated is picked up from the current Free list + * queue cidx. + * + */ +static inline void +csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq) +{ + flq->cidx++; + if (flq->cidx == flq->credits) { + flq->cidx = 0; + CSIO_INC_STATS(flq, n_qwrap); + } +} + +/* + * csio_wr_process_fl - Process a freelist completion. + * @hw: HW module. + * @q: The ingress queue attached to the Freelist. + * @wr: The freelist completion WR in the ingress queue. + * @len_to_qid: The lower 32-bits of the first flit of the RSP footer + * @iq_handler: Caller's handler for this completion. + * @priv: Private pointer of caller + * + */ +static inline void +csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, + void *wr, uint32_t len_to_qid, + void (*iq_handler)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *priv) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + struct csio_fl_dma_buf flb; + struct csio_dma_buf *buf, *fbuf; + uint32_t bufsz, len, lastlen = 0; + int n; + struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; + + CSIO_DB_ASSERT(flq != NULL); + + len = len_to_qid; + + if (len & IQWRF_NEWBUF) { + if (flq->un.fl.offset > 0) { + csio_wr_inval_flq_buf(hw, flq); + flq->un.fl.offset = 0; + } + len = IQWRF_LEN_GET(len); + } + + CSIO_DB_ASSERT(len != 0); + + flb.totlen = len; + + /* Consume all freelist buffers used for len bytes */ + for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) { + buf = &flq->un.fl.bufs[flq->cidx]; + bufsz = csio_wr_fl_bufsz(sge, buf); + + fbuf->paddr = buf->paddr; + fbuf->vaddr = buf->vaddr; + + flb.offset = flq->un.fl.offset; + lastlen = min(bufsz, len); + fbuf->len = lastlen; + + len -= lastlen; + if (!len) + break; + csio_wr_inval_flq_buf(hw, flq); + } + + flb.defer_free = flq->un.fl.packen ? 
+			0 : 1;
+
+	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+		   &flb, priv);
+
+	if (flq->un.fl.packen)
+		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+	else
+		csio_wr_inval_flq_buf(hw, flq);
+
+}
+
+/*
+ * csio_is_new_iqwr - Is this a new Ingress queue entry?
+ * @q: Ingress queue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw:  HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+		   void (*iq_handler)(struct csio_hw *, void *,
+				      uint32_t, struct csio_fl_dma_buf *,
+				      void *),
+		   void *priv)
+{
+	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+	struct csio_iqwr_footer *ftr;
+	uint32_t wr_type, fw_qid, qid;
+	struct csio_q *q_completed;
+	struct csio_q *flq = csio_iq_has_fl(q) ?
+					wrm->q_arr[q->un.iq.flq_idx] : NULL;
+	int rv = 0;
+
+	/* Get the footer */
+	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+					  (q->wr_sz - sizeof(*ftr)));
+
+	/*
+	 * When q wrapped around last time, driver should have inverted
+	 * ic.genbit as well.
+	 */
+	while (csio_is_new_iqwr(q, ftr)) {
+
+		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+						(uintptr_t)q->vwrap);
+		rmb();
+		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+		switch (wr_type) {
+		case X_RSPD_TYPE_CPL:
+			/* Subtract footer from WR len */
+			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+			break;
+		case X_RSPD_TYPE_FLBUF:
+			csio_wr_process_fl(hw, q, wr,
+					   ntohl(ftr->pldbuflen_qid),
+					   iq_handler, priv);
+			break;
+		case X_RSPD_TYPE_INTR:
+			fw_qid = ntohl(ftr->pldbuflen_qid);
+			qid = fw_qid - wrm->fw_iq_start;
+			q_completed = hw->wrm.intr_map[qid];
+
+			if (unlikely(qid ==
+					csio_q_physiqid(hw, hw->intr_iq_idx))) {
+				/*
+				 * We are already servicing the forward
+				 * interrupt queue; do not service it
+				 * again.
+				 */
+			} else {
+				CSIO_DB_ASSERT(q_completed);
+				CSIO_DB_ASSERT(
+					q_completed->un.iq.iq_intx_handler);
+
+				/* Call the queue handler. */
+				q_completed->un.iq.iq_intx_handler(hw, NULL,
+						0, NULL, (void *)q_completed);
+			}
+			break;
+		default:
+			csio_warn(hw, "Unknown resp type 0x%x received\n",
+				  wr_type);
+			CSIO_INC_STATS(q, n_rsp_unknown);
+			break;
+		}
+
+		/*
+		 * Ingress *always* has fixed size WR entries. Therefore,
+		 * there should always be complete WRs towards the end of
+		 * queue.
+		 */
+		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+			/* Roll over to start of queue */
+			q->cidx = 0;
+			wr	= q->vstart;
+
+			/* Toggle genbit */
+			q->un.iq.genbit ^= 0x1;
+
+			CSIO_INC_STATS(q, n_qwrap);
+		} else {
+			q->cidx++;
+			wr	= (void *)((uintptr_t)(q->vstart) +
+					   (q->cidx * q->wr_sz));
+		}
+
+		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+						  (q->wr_sz - sizeof(*ftr)));
+		q->inc_idx++;
+
+	} /* while (csio_is_new_iqwr(q, ftr)) */
+
+	/*
+	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
+	 * especially in MSI-X mode. With INTx, this may be a common
+	 * occurrence.
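+	 * Even when no entry was consumed (inc_idx == 0), the GTS write
+	 * below still goes out with a zero CIDX increment, which re-arms
+	 * the interrupt for this queue without advancing its cidx.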
+ */ + if (unlikely(!q->inc_idx)) { + CSIO_INC_STATS(q, n_stray_comp); + rv = -EINVAL; + goto restart; + } + + /* Replenish free list buffers if pending falls below low water mark */ + if (flq) { + uint32_t avail = csio_wr_avail_qcredits(flq); + if (avail <= 16) { + /* Make sure in FLQ, atleast 1 credit (8 FL buffers) + * remains unpopulated otherwise HW thinks + * FLQ is empty. + */ + csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail); + csio_wr_ring_fldb(hw, flq); + } + } + +restart: + /* Now inform SGE about our incremental index value */ + csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) | + INGRESSQID_V(q->un.iq.physiqid) | + TIMERREG_V(csio_sge_timer_reg), + MYPF_REG(SGE_PF_GTS_A)); + q->stats.n_tot_rsps += q->inc_idx; + + q->inc_idx = 0; + + return rv; +} + +int +csio_wr_process_iq_idx(struct csio_hw *hw, int qidx, + void (*iq_handler)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *priv) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_q *iq = wrm->q_arr[qidx]; + + return csio_wr_process_iq(hw, iq, iq_handler, priv); +} + +static int +csio_closest_timer(struct csio_sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int +csio_closest_thresh(struct csio_sge *s, int cnt) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = cnt - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static void +csio_wr_fixup_host_params(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + uint32_t clsz = L1_CACHE_BYTES; + uint32_t s_hps = PAGE_SHIFT - 10; + uint32_t stat_len = clsz > 64 ? 128 : 64; + u32 fl_align = clsz < 32 ? 32 : clsz; + u32 pack_align; + u32 ingpad, ingpack; + + csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | + HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | + HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) | + HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps), + SGE_HOST_PAGE_SIZE_A); + + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. + */ + + /* We want the Packing Boundary to be based on the Cache Line + * Size in order to help avoid False Sharing performance + * issues between CPUs, etc. We also want the Packing + * Boundary to incorporate the PCI-E Maximum Payload Size. We + * get best performance when the Packing Boundary is a + * multiple of the Maximum Payload Size. 
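+ *
+ * Worked example (assumed values, for illustration): a PCIe Maximum
+ * Payload Size of 256 bytes is encoded as devctl[7:5] = 1, so below
+ * mps_log = 1 + 7 = 8 and mps = 1 << 8 = 256; with a 64-byte cache
+ * line this raises pack_align to 256, which encodes as an
+ * INGPACKBOUNDARY of fls(256) - 1 - INGPACKBOUNDARY_SHIFT_X.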
+ */ + pack_align = fl_align; + if (pci_is_pcie(hw->pdev)) { + u32 mps, mps_log; + u16 devctl; + + /* The PCIe Device Control Maximum Payload Size field + * [bits 7:5] encodes sizes as powers of 2 starting at + * 128 bytes. + */ + pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl); + mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; + mps = 1 << mps_log; + if (mps > pack_align) + pack_align = mps; + } + + /* T5/T6 have a special interpretation of the "0" + * value for the Packing Boundary. This corresponds to 16 + * bytes instead of the expected 32 bytes. + */ + if (pack_align <= 16) { + ingpack = INGPACKBOUNDARY_16B_X; + fl_align = 16; + } else if (pack_align == 32) { + ingpack = INGPACKBOUNDARY_64B_X; + fl_align = 64; + } else { + u32 pack_align_log = fls(pack_align) - 1; + + ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X; + fl_align = pack_align; + } + + /* Use the smallest Ingress Padding which isn't smaller than + * the Memory Controller Read/Write Size. We'll take that as + * being 8 bytes since we don't know of any system with a + * wider Memory Controller Bus Width. + */ + if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) + ingpad = INGPADBOUNDARY_32B_X; + else + ingpad = T6_INGPADBOUNDARY_8B_X; + + csio_set_reg_field(hw, SGE_CONTROL_A, + INGPADBOUNDARY_V(INGPADBOUNDARY_M) | + EGRSTATUSPAGESIZE_F, + INGPADBOUNDARY_V(ingpad) | + EGRSTATUSPAGESIZE_V(stat_len != 64)); + csio_set_reg_field(hw, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(ingpack)); + + /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ + csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A); + + /* + * If using hard params, the following will get set correctly + * in csio_wr_set_sge(). + */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) + + fl_align - 1) & ~(fl_align - 1), + SGE_FL_BUFFER_SIZE2_A); + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) + + fl_align - 1) & ~(fl_align - 1), + SGE_FL_BUFFER_SIZE3_A); + } + + sge->csio_fl_align = fl_align; + + csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A); + + /* default value of rx_dma_offset of the NIC driver */ + csio_set_reg_field(hw, SGE_CONTROL_A, + PKTSHIFT_V(PKTSHIFT_M), + PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET)); + + csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A, + CSUM_HAS_PSEUDO_HDR_F, 0); +} + +static void +csio_init_intr_coalesce_parms(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + + csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt); + if (csio_intr_coalesce_cnt) { + csio_sge_thresh_reg = 0; + csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER; + return; + } + + csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time); +} + +/* + * csio_wr_get_sge - Get SGE register values. + * @hw: HW module. + * + * Used by non-master functions and by master-functions relying on config file. 
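+ * It caches sge_control, the free list buffer sizes and the SGE
+ * timer/counter values so the fast path need not re-read registers,
+ * then derives the interrupt coalescing parameters from them.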
+ */ +static void +csio_wr_get_sge(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + uint32_t ingpad; + int i; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; + + sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); + + ingpad = INGPADBOUNDARY_G(sge->sge_control); + + switch (ingpad) { + case X_INGPCIEBOUNDARY_32B: + sge->csio_fl_align = 32; break; + case X_INGPCIEBOUNDARY_64B: + sge->csio_fl_align = 64; break; + case X_INGPCIEBOUNDARY_128B: + sge->csio_fl_align = 128; break; + case X_INGPCIEBOUNDARY_256B: + sge->csio_fl_align = 256; break; + case X_INGPCIEBOUNDARY_512B: + sge->csio_fl_align = 512; break; + case X_INGPCIEBOUNDARY_1024B: + sge->csio_fl_align = 1024; break; + case X_INGPCIEBOUNDARY_2048B: + sge->csio_fl_align = 2048; break; + case X_INGPCIEBOUNDARY_4096B: + sge->csio_fl_align = 4096; break; + } + + for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++) + csio_get_flbuf_size(hw, sge, i); + + timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A); + timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A); + timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A); + + sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE0_G(timer_value_0_and_1)); + sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE1_G(timer_value_0_and_1)); + sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE2_G(timer_value_2_and_3)); + sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE3_G(timer_value_2_and_3)); + sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE4_G(timer_value_4_and_5)); + sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw, + TIMERVALUE5_G(timer_value_4_and_5)); + + ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A); + sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); + sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); + sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); + sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); + + csio_init_intr_coalesce_parms(hw); +} + +/* + * csio_wr_set_sge - Initialize SGE registers + * @hw: HW module. + * + * Used by Master function to initialize SGE registers in the absence + * of a config file. + */ +static void +csio_wr_set_sge(struct csio_hw *hw) +{ + struct csio_wrm *wrm = csio_hw_to_wrm(hw); + struct csio_sge *sge = &wrm->sge; + int i; + + /* + * Set up our basic SGE mode to deliver CPL messages to our Ingress + * Queue and Packet Date to the Free List. + */ + csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F); + + sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A); + + /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */ + + /* + * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows + * and generate an interrupt when this occurs so we can recover. + */ + csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A, + LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), + LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); + csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A, + HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M), + HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH)); + + csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F, + ENABLE_DROP_F); + + /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). 
+ */
+
+	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
+		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
+	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
+		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
+	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+		csio_get_flbuf_size(hw, sge, i);
+
+	/* Initialize interrupt coalescing attributes */
+	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+	csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
+		      THRESHOLD_1_V(sge->counter_val[1]) |
+		      THRESHOLD_2_V(sge->counter_val[2]) |
+		      THRESHOLD_3_V(sge->counter_val[3]),
+		      SGE_INGRESS_RX_THRESHOLD_A);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+		   TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+		   SGE_TIMER_VALUE_0_AND_1_A);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+		   TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+		   SGE_TIMER_VALUE_2_AND_3_A);
+
+	csio_wr_reg32(hw,
+		   TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+		   TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+		   SGE_TIMER_VALUE_4_AND_5_A);
+
+	csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+	/*
+	 * If we are master and chip is not initialized:
+	 *    - If we plan to use the config file, we need to fix up some
+	 *      host specific registers, and read the rest of the SGE
+	 *      configuration.
+	 *    - If we don't plan to use the config file, we need to initialize
+	 *      SGE entirely, including fixing the host specific registers.
+	 * If we are master and chip is initialized, just read and work off of
+	 *      the already initialized SGE values.
+	 * If we aren't the master, we are only allowed to read and work off
+	 *      of the already initialized SGE values.
+	 *
+	 * Therefore, before calling this function, we assume that the
+	 * mastership of the card, its state and whether to use the config
+	 * file or not, have already been decided.
+	 */
+	if (csio_is_hw_master(hw)) {
+		if (hw->fw_state != CSIO_DEV_STATE_INIT)
+			csio_wr_fixup_host_params(hw);
+
+		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+			csio_wr_get_sge(hw);
+		else
+			csio_wr_set_sge(hw);
+	} else
+		csio_wr_get_sge(hw);
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
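+ *
+ * Minimal usage sketch (error handling elided; num_q must be set by
+ * the caller beforehand, as the check below enforces):
+ *
+ *	hw->wrm.num_q = nq;
+ *	rv = csio_wrm_init(&hw->wrm, hw);
+ *	if (rv)
+ *		return rv;
+ *	...
+ *	csio_wrm_exit(&hw->wrm, hw);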
+ */ +int +csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw) +{ + int i; + + if (!wrm->num_q) { + csio_err(hw, "Num queues is not set\n"); + return -EINVAL; + } + + wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL); + if (!wrm->q_arr) + goto err; + + for (i = 0; i < wrm->num_q; i++) { + wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL); + if (!wrm->q_arr[i]) { + while (--i >= 0) + kfree(wrm->q_arr[i]); + goto err_free_arr; + } + } + wrm->free_qidx = 0; + + return 0; + +err_free_arr: + kfree(wrm->q_arr); +err: + return -ENOMEM; +} + +/* + * csio_wrm_exit - Initialize Work request module. + * @wrm: WR module + * @hw: HW module + * + * Uninitialize WR module. Free q_arr and pointers in it. + * We have the additional job of freeing the DMA memory associated + * with the queues. + */ +void +csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw) +{ + int i; + uint32_t j; + struct csio_q *q; + struct csio_dma_buf *buf; + + for (i = 0; i < wrm->num_q; i++) { + q = wrm->q_arr[i]; + + if (wrm->free_qidx && (i < wrm->free_qidx)) { + if (q->type == CSIO_FREELIST) { + if (!q->un.fl.bufs) + continue; + for (j = 0; j < q->credits; j++) { + buf = &q->un.fl.bufs[j]; + if (!buf->vaddr) + continue; + dma_free_coherent(&hw->pdev->dev, + buf->len, buf->vaddr, + buf->paddr); + } + kfree(q->un.fl.bufs); + } + dma_free_coherent(&hw->pdev->dev, q->size, + q->vstart, q->pstart); + } + kfree(q); + } + + hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED; + + kfree(wrm->q_arr); +} diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h new file mode 100644 index 000000000..0c0dd9a65 --- /dev/null +++ b/drivers/scsi/csiostor/csio_wr.h @@ -0,0 +1,512 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CSIO_WR_H__ +#define __CSIO_WR_H__ + +#include <linux/cache.h> + +#include "csio_defs.h" +#include "t4fw_api.h" +#include "t4fw_api_stor.h" + +/* + * SGE register field values. 
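+ * The X_INGPCIEBOUNDARY_* values encode an ingress padding boundary
+ * of 32 << N bytes (32B for 0 up to 4096B for 7).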
+ */ +#define X_INGPCIEBOUNDARY_32B 0 +#define X_INGPCIEBOUNDARY_64B 1 +#define X_INGPCIEBOUNDARY_128B 2 +#define X_INGPCIEBOUNDARY_256B 3 +#define X_INGPCIEBOUNDARY_512B 4 +#define X_INGPCIEBOUNDARY_1024B 5 +#define X_INGPCIEBOUNDARY_2048B 6 +#define X_INGPCIEBOUNDARY_4096B 7 + +/* GTS register */ +#define X_TIMERREG_COUNTER0 0 +#define X_TIMERREG_COUNTER1 1 +#define X_TIMERREG_COUNTER2 2 +#define X_TIMERREG_COUNTER3 3 +#define X_TIMERREG_COUNTER4 4 +#define X_TIMERREG_COUNTER5 5 +#define X_TIMERREG_RESTART_COUNTER 6 +#define X_TIMERREG_UPDATE_CIDX 7 + +/* + * Egress Context field values + */ +#define X_FETCHBURSTMIN_16B 0 +#define X_FETCHBURSTMIN_32B 1 +#define X_FETCHBURSTMIN_64B 2 +#define X_FETCHBURSTMIN_128B 3 + +#define X_FETCHBURSTMAX_64B 0 +#define X_FETCHBURSTMAX_128B 1 +#define X_FETCHBURSTMAX_256B 2 +#define X_FETCHBURSTMAX_512B 3 + +#define X_HOSTFCMODE_NONE 0 +#define X_HOSTFCMODE_INGRESS_QUEUE 1 +#define X_HOSTFCMODE_STATUS_PAGE 2 +#define X_HOSTFCMODE_BOTH 3 + +/* + * Ingress Context field values + */ +#define X_UPDATESCHEDULING_TIMER 0 +#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1 + +#define X_UPDATEDELIVERY_NONE 0 +#define X_UPDATEDELIVERY_INTERRUPT 1 +#define X_UPDATEDELIVERY_STATUS_PAGE 2 +#define X_UPDATEDELIVERY_BOTH 3 + +#define X_INTERRUPTDESTINATION_PCIE 0 +#define X_INTERRUPTDESTINATION_IQ 1 + +#define X_RSPD_TYPE_FLBUF 0 +#define X_RSPD_TYPE_CPL 1 +#define X_RSPD_TYPE_INTR 2 + +/* WR status is at the same position as retval in a CMD header */ +#define csio_wr_status(_wr) \ + (FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo))) + +struct csio_hw; + +extern int csio_intr_coalesce_cnt; +extern int csio_intr_coalesce_time; + +/* Ingress queue params */ +struct csio_iq_params { + + uint8_t iq_start:1; + uint8_t iq_stop:1; + uint8_t pfn:3; + + uint8_t vfn; + + uint16_t physiqid; + uint16_t iqid; + + uint16_t fl0id; + uint16_t fl1id; + + uint8_t viid; + + uint8_t type; + uint8_t iqasynch; + uint8_t reserved4; + + uint8_t iqandst; + uint8_t iqanus; + uint8_t iqanud; + + uint16_t iqandstindex; + + uint8_t iqdroprss; + uint8_t iqpciech; + uint8_t iqdcaen; + + uint8_t iqdcacpu; + uint8_t iqintcntthresh; + uint8_t iqo; + + uint8_t iqcprio; + uint8_t iqesize; + + uint16_t iqsize; + + uint64_t iqaddr; + + uint8_t iqflintiqhsen; + uint8_t reserved5; + uint8_t iqflintcongen; + uint8_t iqflintcngchmap; + + uint32_t reserved6; + + uint8_t fl0hostfcmode; + uint8_t fl0cprio; + uint8_t fl0paden; + uint8_t fl0packen; + uint8_t fl0congen; + uint8_t fl0dcaen; + + uint8_t fl0dcacpu; + uint8_t fl0fbmin; + + uint8_t fl0fbmax; + uint8_t fl0cidxfthresho; + uint8_t fl0cidxfthresh; + + uint16_t fl0size; + + uint64_t fl0addr; + + uint64_t reserved7; + + uint8_t fl1hostfcmode; + uint8_t fl1cprio; + uint8_t fl1paden; + uint8_t fl1packen; + uint8_t fl1congen; + uint8_t fl1dcaen; + + uint8_t fl1dcacpu; + uint8_t fl1fbmin; + + uint8_t fl1fbmax; + uint8_t fl1cidxfthresho; + uint8_t fl1cidxfthresh; + + uint16_t fl1size; + + uint64_t fl1addr; +}; + +/* Egress queue params */ +struct csio_eq_params { + + uint8_t pfn; + uint8_t vfn; + + uint8_t eqstart:1; + uint8_t eqstop:1; + + uint16_t physeqid; + uint32_t eqid; + + uint8_t hostfcmode:2; + uint8_t cprio:1; + uint8_t pciechn:3; + + uint16_t iqid; + + uint8_t dcaen:1; + uint8_t dcacpu:5; + + uint8_t fbmin:3; + uint8_t fbmax:3; + + uint8_t cidxfthresho:1; + uint8_t cidxfthresh:3; + + uint16_t eqsize; + + uint64_t eqaddr; +}; + +struct csio_dma_buf { + struct list_head list; + void *vaddr; /* Virtual address */ + dma_addr_t paddr; /* Physical 
address */ + uint32_t len; /* Buffer size */ +}; + +/* Generic I/O request structure */ +struct csio_ioreq { + struct csio_sm sm; /* SM, List + * should be the first member + */ + int iq_idx; /* Ingress queue index */ + int eq_idx; /* Egress queue index */ + uint32_t nsge; /* Number of SG elements */ + uint32_t tmo; /* Driver timeout */ + uint32_t datadir; /* Data direction */ + struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */ + uint16_t wr_status; /* WR completion status */ + int16_t drv_status; /* Driver internal status */ + struct csio_lnode *lnode; /* Owner lnode */ + struct csio_rnode *rnode; /* Src/destination rnode */ + void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *); + /* completion callback */ + void *scratch1; /* Scratch area 1. + */ + void *scratch2; /* Scratch area 2. */ + struct list_head gen_list; /* Any list associated with + * this ioreq. + */ + uint64_t fw_handle; /* Unique handle passed + * to FW + */ + uint8_t dcopy; /* Data copy required */ + uint8_t reserved1; + uint16_t reserved2; + struct completion cmplobj; /* ioreq completion object */ +} ____cacheline_aligned_in_smp; + +/* + * Egress status page for egress cidx updates + */ +struct csio_qstatus_page { + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + + +enum { + CSIO_MAX_FLBUF_PER_IQWR = 4, + CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments + * in bytes + */ + CSIO_MAX_QID = 0xFFFF, + CSIO_MAX_IQ = 128, + + CSIO_SGE_NTIMERS = 6, + CSIO_SGE_NCOUNTERS = 4, + CSIO_SGE_FL_SIZE_REGS = 16, +}; + +/* Defines for type */ +enum { + CSIO_EGRESS = 1, + CSIO_INGRESS = 2, + CSIO_FREELIST = 3, +}; + +/* + * Structure for footer (last 2 flits) of Ingress Queue Entry. + */ +struct csio_iqwr_footer { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + } u; +}; + +#define IQWRF_NEWBUF (1 << 31) +#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU) +#define IQWRF_GEN_SHIFT 7 +#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U) + + +/* + * WR pair: + * ======== + * A WR can start towards the end of a queue, and then continue at the + * beginning, since the queue is considered to be circular. This will + * require a pair of address/len to be passed back to the caller - + * hence the Work request pair structure. + */ +struct csio_wr_pair { + void *addr1; + uint32_t size1; + void *addr2; + uint32_t size2; +}; + +/* + * The following structure is used by ingress processing to return the + * free list buffers to consumers. + */ +struct csio_fl_dma_buf { + struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR]; + /* Freelist DMA buffers */ + int offset; /* Offset within the + * first FL buf. + */ + uint32_t totlen; /* Total length */ + uint8_t defer_free; /* Free of buffer can + * deferred + */ +}; + +/* Data-types */ +typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t, + struct csio_fl_dma_buf *, void *); + +struct csio_iq { + uint16_t iqid; /* Queue ID */ + uint16_t physiqid; /* Physical Queue ID */ + uint16_t genbit; /* Generation bit, + * initially set to 1 + */ + int flq_idx; /* Freelist queue index */ + iq_handler_t iq_intx_handler; /* IQ INTx handler routine */ +}; + +struct csio_eq { + uint16_t eqid; /* Qid */ + uint16_t physeqid; /* Physical Queue ID */ + uint8_t wrap[512]; /* Temp area for q-wrap around*/ +}; + +struct csio_fl { + uint16_t flid; /* Qid */ + uint16_t packen; /* Packing enabled? 
*/ + int offset; /* Offset within FL buf */ + int sreg; /* Size register */ + struct csio_dma_buf *bufs; /* Free list buffer ptr array + * indexed using flq->cidx/pidx + */ +}; + +struct csio_qstats { + uint32_t n_tot_reqs; /* Total no. of Requests */ + uint32_t n_tot_rsps; /* Total no. of responses */ + uint32_t n_qwrap; /* Queue wraps */ + uint32_t n_eq_wr_split; /* Number of split EQ WRs */ + uint32_t n_qentry; /* Queue entry */ + uint32_t n_qempty; /* Queue empty */ + uint32_t n_qfull; /* Queue fulls */ + uint32_t n_rsp_unknown; /* Unknown response type */ + uint32_t n_stray_comp; /* Stray completion intr */ + uint32_t n_flq_refill; /* Number of FL refills */ +}; + +/* Queue metadata */ +struct csio_q { + uint16_t type; /* Type: Ingress/Egress/FL */ + uint16_t pidx; /* producer index */ + uint16_t cidx; /* consumer index */ + uint16_t inc_idx; /* Incremental index */ + uint32_t wr_sz; /* Size of all WRs in this q + * if fixed + */ + void *vstart; /* Base virtual address + * of queue + */ + void *vwrap; /* Virtual end address to + * wrap around at + */ + uint32_t credits; /* Size of queue in credits */ + void *owner; /* Owner */ + union { /* Queue contexts */ + struct csio_iq iq; + struct csio_eq eq; + struct csio_fl fl; + } un; + + dma_addr_t pstart; /* Base physical address of + * queue + */ + uint32_t portid; /* PCIE Channel */ + uint32_t size; /* Size of queue in bytes */ + struct csio_qstats stats; /* Statistics */ +} ____cacheline_aligned_in_smp; + +struct csio_sge { + uint32_t csio_fl_align; /* Calculated and cached + * for fast path + */ + uint32_t sge_control; /* padding, boundaries, + * lengths, etc. + */ + uint32_t sge_host_page_size; /* Host page size */ + uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS]; + /* free list buffer sizes */ + uint16_t timer_val[CSIO_SGE_NTIMERS]; + uint8_t counter_val[CSIO_SGE_NCOUNTERS]; +}; + +/* Work request module */ +struct csio_wrm { + int num_q; /* Number of queues */ + struct csio_q **q_arr; /* Array of queue pointers + * allocated dynamically + * based on configured values + */ + uint32_t fw_iq_start; /* Start ID of IQ for this fn*/ + uint32_t fw_eq_start; /* Start ID of EQ for this fn*/ + struct csio_q *intr_map[CSIO_MAX_IQ]; + /* IQ-id to IQ map table. 
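+						 * Indexed by the relative
+						 * IQ id (fw iqid minus
+						 * fw_iq_start); see
+						 * csio_q_set_intr_map().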
*/ + int free_qidx; /* queue idx of free queue */ + struct csio_sge sge; /* SGE params */ +}; + +#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx]) +#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type) +#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx) +#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx) +#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx) +#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart) +#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart) +#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size) +#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits) +#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid) +#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz) +#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid) +#define csio_q_physiqid(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid) +#define csio_q_iq_flq_idx(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx) +#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid) +#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid) + +#define csio_q_physeqid(__hw, __idx) \ + ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid) +#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1) + +#define csio_q_iq_to_flid(__hw, __iq_idx) \ + csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx) +#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \ + (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx) +#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap) + +struct csio_mb; + +int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t, + uint16_t, void *, uint32_t, int, iq_handler_t); +int csio_wr_iq_create(struct csio_hw *, void *, int, + uint32_t, uint8_t, bool, + void (*)(struct csio_hw *, struct csio_mb *)); +int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t, + void (*)(struct csio_hw *, struct csio_mb *)); +int csio_wr_destroy_queues(struct csio_hw *, bool cmd); + + +int csio_wr_get(struct csio_hw *, int, uint32_t, + struct csio_wr_pair *); +void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t); +int csio_wr_issue(struct csio_hw *, int, bool); +int csio_wr_process_iq(struct csio_hw *, struct csio_q *, + void (*)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *); +int csio_wr_process_iq_idx(struct csio_hw *, int, + void (*)(struct csio_hw *, void *, + uint32_t, struct csio_fl_dma_buf *, + void *), + void *); + +void csio_wr_sge_init(struct csio_hw *); +int csio_wrm_init(struct csio_wrm *, struct csio_hw *); +void csio_wrm_exit(struct csio_wrm *, struct csio_hw *); + +#endif /* ifndef __CSIO_WR_H__ */ diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h new file mode 100644 index 000000000..097e52c0f --- /dev/null +++ b/drivers/scsi/csiostor/t4fw_api_stor.h @@ -0,0 +1,539 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
new file mode 100644
index 000000000..097e52c0f
--- /dev/null
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -0,0 +1,539 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_API_STOR_H_
+#define _T4FW_API_STOR_H_
+
+/******************************************************************************
+ *  R E T U R N   V A L U E S
+ ********************************/
+
+enum fw_fcoe_link_sub_op {
+	FCOE_LINK_DOWN	= 0x0,
+	FCOE_LINK_UP	= 0x1,
+	FCOE_LINK_COND	= 0x2,
+};
+
+enum fw_fcoe_link_status {
+	FCOE_LINKDOWN	= 0x0,
+	FCOE_LINKUP	= 0x1,
+};
+
+enum fw_ofld_prot {
+	PROT_FCOE	= 0x1,
+	PROT_ISCSI	= 0x2,
+};
+
+enum rport_type_fcoe {
+	FLOGI_VFPORT	= 0x1,	/* 0xfffffe */
+	FDISC_VFPORT	= 0x2,	/* 0xfffffe */
+	NS_VNPORT	= 0x3,	/* 0xfffffc */
+	REG_FC4_VNPORT	= 0x4,	/* any FC4 type VN_PORT */
+	REG_VNPORT	= 0x5,	/* 0xfffxxx - non FC4 port in switch */
+	FDMI_VNPORT	= 0x6,	/* 0xfffffa */
+	FAB_CTLR_VNPORT	= 0x7,	/* 0xfffffd */
+};
+
+enum event_cause_fcoe {
+	PLOGI_ACC_RCVD		= 0x01,
+	PLOGI_RJT_RCVD		= 0x02,
+	PLOGI_RCVD		= 0x03,
+	PLOGO_RCVD		= 0x04,
+	PRLI_ACC_RCVD		= 0x05,
+	PRLI_RJT_RCVD		= 0x06,
+	PRLI_RCVD		= 0x07,
+	PRLO_RCVD		= 0x08,
+	NPORT_ID_CHGD		= 0x09,
+	FLOGO_RCVD		= 0x0a,
+	CLR_VIRT_LNK_RCVD	= 0x0b,
+	FLOGI_ACC_RCVD		= 0x0c,
+	FLOGI_RJT_RCVD		= 0x0d,
+	FDISC_ACC_RCVD		= 0x0e,
+	FDISC_RJT_RCVD		= 0x0f,
+	FLOGI_TMO_MAX_RETRY	= 0x10,
+	IMPL_LOGO_ADISC_ACC	= 0x11,
+	IMPL_LOGO_ADISC_RJT	= 0x12,
+	IMPL_LOGO_ADISC_CNFLT	= 0x13,
+	PRLI_TMO		= 0x14,
+	ADISC_TMO		= 0x15,
+	RSCN_DEV_LOST		= 0x16,
+	SCR_ACC_RCVD		= 0x17,
+	ADISC_RJT_RCVD		= 0x18,
+	LOGO_SNT		= 0x19,
+	PROTO_ERR_IMPL_LOGO	= 0x1a,
+};
+
+enum fcoe_cmn_type {
+	FCOE_ELS,
+	FCOE_CT,
+	FCOE_SCSI_CMD,
+	FCOE_UNSOL_ELS,
+};
+
+enum fw_wr_stor_opcodes {
+	FW_RDEV_WR		= 0x38,
+	FW_FCOE_ELS_CT_WR	= 0x30,
+	FW_SCSI_WRITE_WR	= 0x31,
+	FW_SCSI_READ_WR		= 0x32,
+	FW_SCSI_CMD_WR		= 0x33,
+	FW_SCSI_ABRT_CLS_WR	= 0x34,
+};
+
+struct fw_rdev_wr {
+	__be32 op_to_immdlen;
+	__be32 alloc_to_len16;
+	__be64 cookie;
+	u8     protocol;
+	u8     event_cause;
+	u8     cur_state;
+	u8     prev_state;
+	__be32 flags_to_assoc_flowid;
+	union rdev_entry {
+		struct fcoe_rdev_entry {
+			__be32 flowid;
+			u8     protocol;
+			u8     event_cause;
+			u8     flags;
+			u8     rjt_reason;
+			u8     cur_login_st;
+			u8     prev_login_st;
+			__be16 rcv_fr_sz;
+			u8     rd_xfer_rdy_to_rport_type;
+			u8     vft_to_qos;
+			u8     org_proc_assoc_to_acc_rsp_code;
+			u8     enh_disc_to_tgt;
+			u8     wwnn[8];
+			u8     wwpn[8];
+			__be16 iqid;
+			u8     fc_oui[3];
+			u8     r_id[3];
+		} fcoe_rdev;
+		struct iscsi_rdev_entry {
+			__be32 flowid;
+			u8     protocol;
+			u8     event_cause;
+			u8     flags;
+			u8     r3;
+			__be16 iscsi_opts;
+			__be16 tcp_opts;
+			__be16 ip_opts;
+			__be16 max_rcv_len;
+			__be16 max_snd_len;
+			__be16 first_brst_len;
+			__be16 max_brst_len;
+			__be16 r4;
+			__be16 def_time2wait;
+			__be16 def_time2ret;
+			__be16 nop_out_intrvl;
+			__be16 non_scsi_to;
+			__be16 isid;
+			__be16 tsid;
+			__be16 port;
+			__be16 tpgt;
+			u8     r5[6];
+			__be16 iqid;
+		} iscsi_rdev;
+	} u;
+};
+
+#define FW_RDEV_WR_FLOWID_GET(x)	(((x) >> 8) & 0xfffff)
+#define FW_RDEV_WR_ASSOC_FLOWID_GET(x)	(((x) >> 0) & 0xfffff)
+#define FW_RDEV_WR_RPORT_TYPE_GET(x)	(((x) >> 0) & 0x1f)
+#define FW_RDEV_WR_NPIV_GET(x)		(((x) >> 6) & 0x1)
+#define FW_RDEV_WR_CLASS_GET(x)		(((x) >> 4) & 0x3)
+#define FW_RDEV_WR_TASK_RETRY_ID_GET(x)	(((x) >> 5) & 0x1)
+#define FW_RDEV_WR_RETRY_GET(x)		(((x) >> 4) & 0x1)
+#define FW_RDEV_WR_CONF_CMPL_GET(x)	(((x) >> 3) & 0x1)
+#define FW_RDEV_WR_INI_GET(x)		(((x) >> 1) & 0x1)
+#define FW_RDEV_WR_TGT_GET(x)		(((x) >> 0) & 0x1)
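The *_GET macros above extract packed sub-fields from fw_rdev_wr. Which member each macro applies to is an assumption here, inferred from the <high-field>_to_<low-field> naming of the struct members, so the pairing in this sketch is illustrative rather than taken from driver code:

/* Sketch, not driver code: pull the remote-port type and role bits
 * out of a received FW_RDEV_WR's FCoE entry.
 */
static void example_parse_rdev(const struct fw_rdev_wr *wr)
{
	u8 rport_type = FW_RDEV_WR_RPORT_TYPE_GET(
			wr->u.fcoe_rdev.rd_xfer_rdy_to_rport_type);
	int ini = FW_RDEV_WR_INI_GET(wr->u.fcoe_rdev.enh_disc_to_tgt);
	int tgt = FW_RDEV_WR_TGT_GET(wr->u.fcoe_rdev.enh_disc_to_tgt);

	pr_info("rdev: rport type %#x, initiator %d, target %d\n",
		rport_type, ini, tgt);
}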
+
+struct fw_fcoe_els_ct_wr {
+	__be32 op_immdlen;
+	__be32 flowid_len16;
+	u64    cookie;
+	__be16 iqid;
+	u8     tmo_val;
+	u8     els_ct_type;
+	u8     ctl_pri;
+	u8     cp_en_class;
+	__be16 xfer_cnt;
+	u8     fl_to_sp;
+	u8     l_id[3];
+	u8     r5;
+	u8     r_id[3];
+	__be64 rsp_dmaaddr;
+	__be32 rsp_dmalen;
+	__be32 r6;
+};
+
+#define FW_FCOE_ELS_CT_WR_OPCODE(x)		((x) << 24)
+#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x)		(((x) >> 24) & 0xff)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN(x)		((x) << 0)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x)	(((x) >> 0) & 0xff)
+#define FW_FCOE_ELS_CT_WR_SP(x)			((x) << 0)
+
+struct fw_scsi_write_wr {
+	__be32 op_immdlen;
+	__be32 flowid_len16;
+	u64    cookie;
+	__be16 iqid;
+	u8     tmo_val;
+	u8     use_xfer_cnt;
+	union fw_scsi_write_priv {
+		struct fcoe_write_priv {
+			u8 ctl_pri;
+			u8 cp_en_class;
+			u8 r3_lo[2];
+		} fcoe;
+		struct iscsi_write_priv {
+			u8 r3[4];
+		} iscsi;
+	} u;
+	__be32 xfer_cnt;
+	__be32 ini_xfer_cnt;
+	__be64 rsp_dmaaddr;
+	__be32 rsp_dmalen;
+	__be32 r4;
+};
+
+#define FW_SCSI_WRITE_WR_IMMDLEN(x)	((x) << 0)
+
+struct fw_scsi_read_wr {
+	__be32 op_immdlen;
+	__be32 flowid_len16;
+	u64    cookie;
+	__be16 iqid;
+	u8     tmo_val;
+	u8     use_xfer_cnt;
+	union fw_scsi_read_priv {
+		struct fcoe_read_priv {
+			u8 ctl_pri;
+			u8 cp_en_class;
+			u8 r3_lo[2];
+		} fcoe;
+		struct iscsi_read_priv {
+			u8 r3[4];
+		} iscsi;
+	} u;
+	__be32 xfer_cnt;
+	__be32 ini_xfer_cnt;
+	__be64 rsp_dmaaddr;
+	__be32 rsp_dmalen;
+	__be32 r4;
+};
+
+#define FW_SCSI_READ_WR_IMMDLEN(x)	((x) << 0)
+
+struct fw_scsi_cmd_wr {
+	__be32 op_immdlen;
+	__be32 flowid_len16;
+	u64    cookie;
+	__be16 iqid;
+	u8     tmo_val;
+	u8     r3;
+	union fw_scsi_cmd_priv {
+		struct fcoe_cmd_priv {
+			u8 ctl_pri;
+			u8 cp_en_class;
+			u8 r4_lo[2];
+		} fcoe;
+		struct iscsi_cmd_priv {
+			u8 r4[4];
+		} iscsi;
+	} u;
+	u8     r5[8];
+	__be64 rsp_dmaaddr;
+	__be32 rsp_dmalen;
+	__be32 r6;
+};
+
+#define FW_SCSI_CMD_WR_IMMDLEN(x)	((x) << 0)
+
+#define SCSI_ABORT	0
+#define SCSI_CLOSE	1
+
+struct fw_scsi_abrt_cls_wr {
+	__be32 op_immdlen;
+	__be32 flowid_len16;
+	u64    cookie;
+	__be16 iqid;
+	u8     tmo_val;
+	u8     sub_opcode_to_chk_all_io;
+	u8     r3[4];
+	u64    t_cookie;
+};
+
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x)	((x) << 2)
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x)	(((x) >> 2) & 0x3f)
+#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x)	((x) << 0)
+
+enum fw_cmd_stor_opcodes {
+	FW_FCOE_RES_INFO_CMD	= 0x31,
+	FW_FCOE_LINK_CMD	= 0x32,
+	FW_FCOE_VNP_CMD		= 0x33,
+	FW_FCOE_SPARAMS_CMD	= 0x35,
+	FW_FCOE_STATS_CMD	= 0x37,
+	FW_FCOE_FCF_CMD		= 0x38,
+};
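fw_scsi_abrt_cls_wr packs both the abort-versus-close choice (SUB_OPCODE, bits 7:2) and the check-all-I/O flag (bit 0) into the single sub_opcode_to_chk_all_io byte. A minimal encoding sketch; the wrapper function and its arguments are illustrative, and the rest of the WR setup (opcode, flowid, cookie) is omitted:

/* Sketch: encode the sub-opcode byte of an abort/close work request. */
static void example_set_abrt_cls(struct fw_scsi_abrt_cls_wr *wr,
				 bool abort, bool chk_all_io)
{
	wr->sub_opcode_to_chk_all_io =
		FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort ? SCSI_ABORT : SCSI_CLOSE) |
		FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(chk_all_io ? 1 : 0);
}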
+
+struct fw_fcoe_res_info_cmd {
+	__be32 op_to_read;
+	__be32 retval_len16;
+	__be16 e_d_tov;
+	__be16 r_a_tov_seq;
+	__be16 r_a_tov_els;
+	__be16 r_r_tov;
+	__be32 max_xchgs;
+	__be32 max_ssns;
+	__be32 used_xchgs;
+	__be32 used_ssns;
+	__be32 max_fcfs;
+	__be32 max_vnps;
+	__be32 used_fcfs;
+	__be32 used_vnps;
+};
+
+struct fw_fcoe_link_cmd {
+	__be32 op_to_portid;
+	__be32 retval_len16;
+	__be32 sub_opcode_fcfi;
+	u8     r3;
+	u8     lstatus;
+	__be16 flags;
+	u8     r4;
+	u8     set_vlan;
+	__be16 vlan_id;
+	__be32 vnpi_pkd;
+	__be16 r6;
+	u8     phy_mac[6];
+	u8     vnport_wwnn[8];
+	u8     vnport_wwpn[8];
+};
+
+#define FW_FCOE_LINK_CMD_PORTID(x)	((x) << 0)
+#define FW_FCOE_LINK_CMD_PORTID_GET(x)	(((x) >> 0) & 0xf)
+#define FW_FCOE_LINK_CMD_SUB_OPCODE(x)	((x) << 24U)
+#define FW_FCOE_LINK_CMD_FCFI(x)	((x) << 0)
+#define FW_FCOE_LINK_CMD_FCFI_GET(x)	(((x) >> 0) & 0xffffff)
+#define FW_FCOE_LINK_CMD_VNPI_GET(x)	(((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_vnp_cmd {
+	__be32 op_to_fcfi;
+	__be32 alloc_to_len16;
+	__be32 gen_wwn_to_vnpi;
+	__be32 vf_id;
+	__be16 iqid;
+	u8     vnport_mac[6];
+	u8     vnport_wwnn[8];
+	u8     vnport_wwpn[8];
+	u8     cmn_srv_parms[16];
+	u8     clsp_word_0_1[8];
+};
+
+#define FW_FCOE_VNP_CMD_FCFI(x)		((x) << 0)
+#define FW_FCOE_VNP_CMD_ALLOC		(1U << 31)
+#define FW_FCOE_VNP_CMD_FREE		(1U << 30)
+#define FW_FCOE_VNP_CMD_MODIFY		(1U << 29)
+#define FW_FCOE_VNP_CMD_GEN_WWN		(1U << 22)
+#define FW_FCOE_VNP_CMD_VFID_EN		(1U << 20)
+#define FW_FCOE_VNP_CMD_VNPI(x)		((x) << 0)
+#define FW_FCOE_VNP_CMD_VNPI_GET(x)	(((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_sparams_cmd {
+	__be32 op_to_portid;
+	__be32 retval_len16;
+	u8     r3[7];
+	u8     cos;
+	u8     lport_wwnn[8];
+	u8     lport_wwpn[8];
+	u8     cmn_srv_parms[16];
+	u8     cls_srv_parms[16];
+};
+
+#define FW_FCOE_SPARAMS_CMD_PORTID(x)	((x) << 0)
+
+struct fw_fcoe_stats_cmd {
+	__be32 op_to_flowid;
+	__be32 free_to_len16;
+	union fw_fcoe_stats {
+		struct fw_fcoe_stats_ctl {
+			u8     nstats_port;
+			u8     port_valid_ix;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_fcoe_port_stats {
+			__be64 tx_bcast_bytes;
+			__be64 tx_bcast_frames;
+			__be64 tx_mcast_bytes;
+			__be64 tx_mcast_frames;
+			__be64 tx_ucast_bytes;
+			__be64 tx_ucast_frames;
+			__be64 tx_drop_frames;
+			__be64 tx_offload_bytes;
+			__be64 tx_offload_frames;
+			__be64 rx_bcast_bytes;
+			__be64 rx_bcast_frames;
+			__be64 rx_mcast_bytes;
+			__be64 rx_mcast_frames;
+			__be64 rx_ucast_bytes;
+			__be64 rx_ucast_frames;
+			__be64 rx_err_frames;
+		} port_stats;
+		struct fw_fcoe_fcf_stats {
+			__be32 fip_tx_bytes;
+			__be32 fip_tx_fr;
+			__be64 fcf_ka;
+			__be64 mcast_adv_rcvd;
+			__be16 ucast_adv_rcvd;
+			__be16 sol_sent;
+			__be16 vlan_req;
+			__be16 vlan_rpl;
+			__be16 clr_vlink;
+			__be16 link_down;
+			__be16 link_up;
+			__be16 logo;
+			__be16 flogi_req;
+			__be16 flogi_rpl;
+			__be16 fdisc_req;
+			__be16 fdisc_rpl;
+			__be16 fka_prd_chg;
+			__be16 fc_map_chg;
+			__be16 vfid_chg;
+			u8     no_fka_req;
+			u8     no_vnp;
+		} fcf_stats;
+		struct fw_fcoe_pcb_stats {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 rx_bytes;
+			__be64 rx_frames;
+			__be32 vnp_ka;
+			__be32 unsol_els_rcvd;
+			__be64 unsol_cmd_rcvd;
+			__be16 implicit_logo;
+			__be16 flogi_inv_sparm;
+			__be16 fdisc_inv_sparm;
+			__be16 flogi_rjt;
+			__be16 fdisc_rjt;
+			__be16 no_ssn;
+			__be16 mac_flt_fail;
+			__be16 inv_fr_rcvd;
+		} pcb_stats;
+		struct fw_fcoe_scb_stats {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 rx_bytes;
+			__be64 rx_frames;
+			__be32 host_abrt_req;
+			__be32 adap_auto_abrt;
+			__be32 adap_abrt_rsp;
+			__be32 host_ios_req;
+			__be16 ssn_offl_ios;
+			__be16 ssn_not_rdy_ios;
+			u8     rx_data_ddp_err;
+			u8     ddp_flt_set_err;
+			__be16 rx_data_fr_err;
+			u8     bad_st_abrt_req;
+			u8     no_io_abrt_req;
+			u8     abort_tmo;
+			u8     abort_tmo_2;
+			__be32 abort_req;
+			u8     no_ppod_res_tmo;
+			u8     bp_tmo;
+			u8     adap_auto_cls;
+			u8     no_io_cls_req;
+			__be32 host_cls_req;
+			__be64 unsol_cmd_rcvd;
+			__be32 plogi_req_rcvd;
+			__be32 prli_req_rcvd;
+			__be16 logo_req_rcvd;
+			__be16 prlo_req_rcvd;
+			__be16 plogi_rjt_rcvd;
+			__be16 prli_rjt_rcvd;
+			__be32 adisc_req_rcvd;
+			__be32 rscn_rcvd;
+			__be32 rrq_req_rcvd;
+			__be32 unsol_els_rcvd;
+			u8     adisc_rjt_rcvd;
+			u8     scr_rjt;
+			u8     ct_rjt;
+			u8     inval_bls_rcvd;
+			__be32 ba_rjt_rcvd;
+		} scb_stats;
+	} u;
+};
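Every counter in the stats response above is big-endian on the wire, so each field needs a be*_to_cpu conversion on the host side. A hedged sketch for two of the port counters; the function name and log format are illustrative:

/* Sketch: convert two big-endian port counters to host byte order. */
static void example_log_port_stats(const struct fw_fcoe_port_stats *ps)
{
	u64 tx_bytes = be64_to_cpu(ps->tx_ucast_bytes);
	u64 rx_bytes = be64_to_cpu(ps->rx_ucast_bytes);

	pr_info("port: %llu ucast bytes tx, %llu ucast bytes rx\n",
		(unsigned long long)tx_bytes, (unsigned long long)rx_bytes);
}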
+
+#define FW_FCOE_STATS_CMD_FLOWID(x)	((x) << 0)
+#define FW_FCOE_STATS_CMD_FREE		(1U << 30)
+#define FW_FCOE_STATS_CMD_NSTATS(x)	((x) << 4)
+#define FW_FCOE_STATS_CMD_PORT(x)	((x) << 0)
+#define FW_FCOE_STATS_CMD_PORT_VALID	(1U << 7)
+#define FW_FCOE_STATS_CMD_IX(x)		((x) << 0)
+
+struct fw_fcoe_fcf_cmd {
+	__be32 op_to_fcfi;
+	__be32 retval_len16;
+	__be16 priority_pkd;
+	u8     mac[6];
+	u8     name_id[8];
+	u8     fabric[8];
+	__be16 vf_id;
+	__be16 max_fcoe_size;
+	u8     vlan_id;
+	u8     fc_map[3];
+	__be32 fka_adv;
+	__be32 r6;
+	u8     r7_hi;
+	u8     fpma_to_portid;
+	u8     spma_mac[6];
+	__be64 r8;
+};
+
+#define FW_FCOE_FCF_CMD_FCFI(x)		((x) << 0)
+#define FW_FCOE_FCF_CMD_FCFI_GET(x)	(((x) >> 0) & 0xfffff)
+#define FW_FCOE_FCF_CMD_PRIORITY_GET(x)	(((x) >> 0) & 0xff)
+#define FW_FCOE_FCF_CMD_FPMA_GET(x)	(((x) >> 6) & 0x1)
+#define FW_FCOE_FCF_CMD_SPMA_GET(x)	(((x) >> 5) & 0x1)
+#define FW_FCOE_FCF_CMD_LOGIN_GET(x)	(((x) >> 4) & 0x1)
+#define FW_FCOE_FCF_CMD_PORTID_GET(x)	(((x) >> 0) & 0xf)
+
+#endif /* _T4FW_API_STOR_H_ */
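The FPMA/SPMA bits packed into fpma_to_portid report whether the FCF uses fabric-provided or server-provided MAC addressing. A closing decode sketch, illustrative only and not driver code; the function name is hypothetical:

/* Sketch: decode the FCF index and MAC-addressing mode from a response. */
static void example_parse_fcf(const struct fw_fcoe_fcf_cmd *rsp)
{
	u32 fcfi = FW_FCOE_FCF_CMD_FCFI_GET(be32_to_cpu(rsp->op_to_fcfi));

	if (FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid))
		pr_info("fcf %u: fabric-provided MAC addressing\n", fcfi);
	else if (FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid))
		pr_info("fcf %u: server-provided MAC addressing\n", fcfi);
}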