author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit     7f3a4257159dea8e7ef66d1a539dc6df708b8ed3
tree       bcc69b5f4609f348fac49e2f59e210b29eaea783 /drivers/scsi/lpfc
parent     Adding upstream version 6.9.12.
Adding upstream version 6.10.3.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi/lpfc')

-rw-r--r--  drivers/scsi/lpfc/lpfc.h            62
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       36
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c         3
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         24
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        43
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   135
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h         8
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      119
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  63
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c       27
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c       9
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       71
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h       32
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       233
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h     2

15 files changed, 446 insertions, 421 deletions
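Most of the lpfc churn in this delta comes from one refactor: phba->hba_flag changes from a uint32_t of #define bit masks updated under phba->hbalock into an unsigned long of enum bit numbers updated with the kernel's atomic bitops (test_bit(), set_bit(), clear_bit(), and test_and_set_bit()/test_and_clear_bit() where the read and the write must be a single step). That is why so many hunks below delete spin_lock_irq()/spin_unlock_irq() pairs around single-flag updates, and why format strings printing hba_flag change from x%x to x%lx. A minimal sketch of the before/after idiom, under the assumption of a stand-alone demo struct (demo_hba and DEMO_DEVLOSS_TMO are illustrative stand-ins, not driver code):

	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	struct demo_hba {
		spinlock_t hbalock;
		unsigned long hba_flag;	/* was: uint32_t hba_flag */
	};

	/* Bit numbers (as in the new enum lpfc_hba_flag), not masks. */
	enum { DEMO_DEVLOSS_TMO = 13 };

	static void demo_old_idiom(struct demo_hba *phba)
	{
		/* Before: read-modify-write of a plain u32 needed the lock. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & BIT(DEMO_DEVLOSS_TMO)) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		phba->hba_flag |= BIT(DEMO_DEVLOSS_TMO);
		spin_unlock_irq(&phba->hbalock);
	}

	static void demo_new_idiom(struct demo_hba *phba)
	{
		/* After: one atomic test-and-set, no hbalock involvement. */
		if (test_and_set_bit(DEMO_DEVLOSS_TMO, &phba->hba_flag))
			return;	/* flag was already set */
	}

The same contention-reduction motive explains the new rrq_list_lock (active_rrq_list traffic moves off the heavily used hbalock onto a dedicated spinlock), while the 32-byte-CDB support (struct fcp_cmnd32, LPFC_FCP_CDB_LEN_32) accounts for most of the remaining size changes below.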
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 98ca7df003..7c147d6ea8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -393,6 +393,37 @@ enum hba_state { LPFC_HBA_ERROR = -1 }; +enum lpfc_hba_flag { /* hba generic flags */ + HBA_ERATT_HANDLED = 0, /* This flag is set when eratt handled */ + DEFER_ERATT = 1, /* Deferred error attn in progress */ + HBA_FCOE_MODE = 2, /* HBA function in FCoE Mode */ + HBA_SP_QUEUE_EVT = 3, /* Slow-path qevt posted to worker thread*/ + HBA_POST_RECEIVE_BUFFER = 4, /* Rcv buffers need to be posted */ + HBA_PERSISTENT_TOPO = 5, /* Persistent topology support in hba */ + ELS_XRI_ABORT_EVENT = 6, /* ELS_XRI abort event was queued */ + ASYNC_EVENT = 7, + LINK_DISABLED = 8, /* Link disabled by user */ + FCF_TS_INPROG = 9, /* FCF table scan in progress */ + FCF_RR_INPROG = 10, /* FCF roundrobin flogi in progress */ + HBA_FIP_SUPPORT = 11, /* FIP support in HBA */ + HBA_DEVLOSS_TMO = 13, /* HBA in devloss timeout */ + HBA_RRQ_ACTIVE = 14, /* process the rrq active list */ + HBA_IOQ_FLUSH = 15, /* I/O queues being flushed */ + HBA_RECOVERABLE_UE = 17, /* FW supports recoverable UE */ + HBA_FORCED_LINK_SPEED = 18, /* + * Firmware supports Forced Link + * Speed capability + */ + HBA_FLOGI_ISSUED = 20, /* FLOGI was issued */ + HBA_DEFER_FLOGI = 23, /* Defer FLOGI till read_sparm cmpl */ + HBA_SETUP = 24, /* HBA setup completed */ + HBA_NEEDS_CFG_PORT = 25, /* SLI3: CONFIG_PORT mbox needed */ + HBA_HBEAT_INP = 26, /* mbox HBEAT is in progress */ + HBA_HBEAT_TMO = 27, /* HBEAT initiated after timeout */ + HBA_FLOGI_OUTSTANDING = 28, /* FLOGI is outstanding */ + HBA_RHBA_CMPL = 29, /* RHBA FDMI cmd is successful */ +}; + struct lpfc_trunk_link_state { enum hba_state state; uint8_t fault; @@ -1007,35 +1038,7 @@ struct lpfc_hba { #define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */ #define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */ - uint32_t hba_flag; /* hba generic flags */ -#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ -#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ -#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ -#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ -#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ -#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ -#define ELS_XRI_ABORT_EVENT 0x40 /* ELS_XRI abort event was queued */ -#define ASYNC_EVENT 0x80 -#define LINK_DISABLED 0x100 /* Link disabled by user */ -#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ -#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ -#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ -#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ -#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ -#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ -#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ -#define HBA_FORCED_LINK_SPEED 0x40000 /* - * Firmware supports Forced Link Speed - * capability - */ -#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ -#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ -#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ -#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */ -#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */ -#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */ -#define 
HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */ -#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */ + unsigned long hba_flag; /* hba generic flags */ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ @@ -1284,6 +1287,7 @@ struct lpfc_hba { uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; + spinlock_t rrq_list_lock; /* lock for active_rrq_list */ struct list_head active_rrq_list; spinlock_t hbalock; struct work_struct unblock_request_work; /* SCSI layer unblock IOs */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3c534b3cfe..0a9d6978cb 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -322,7 +322,7 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - if (phba->hba_flag & HBA_FIP_SUPPORT) + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) return scnprintf(buf, PAGE_SIZE, "1\n"); else return scnprintf(buf, PAGE_SIZE, "0\n"); @@ -1049,7 +1049,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, case LPFC_INIT_MBX_CMDS: case LPFC_LINK_DOWN: case LPFC_HBA_ERROR: - if (phba->hba_flag & LINK_DISABLED) + if (test_bit(LINK_DISABLED, &phba->hba_flag)) len += scnprintf(buf + len, PAGE_SIZE-len, "Link Down - User disabled\n"); else @@ -1292,7 +1292,7 @@ lpfc_issue_lip(struct Scsi_Host *shost) * it doesn't make any sense to allow issue_lip */ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || - (phba->hba_flag & LINK_DISABLED) || + test_bit(LINK_DISABLED, &phba->hba_flag) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; @@ -1907,6 +1907,11 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, /* Get transceiver information */ rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + if (!rdp_context) { + len = scnprintf(buf, PAGE_SIZE - len, + "SPF info NA: alloc failure\n"); + return len; + } rc = lpfc_get_sfp_info_wait(phba, rdp_context); if (rc) { @@ -3635,7 +3640,8 @@ lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; return scnprintf(buf, PAGE_SIZE, "%d\n", - (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); + test_bit(HBA_PERSISTENT_TOPO, + &phba->hba_flag) ? 
1 : 0); } static DEVICE_ATTR(pt, 0444, lpfc_pt_show, NULL); @@ -4205,8 +4211,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, &phba->sli4_hba.sli_intf); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); - if ((phba->hba_flag & HBA_PERSISTENT_TOPO || - (!phba->sli4_hba.pc_sli4_params.pls && + if ((test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag) || + (!phba->sli4_hba.pc_sli4_params.pls && (sli_family == LPFC_SLI_INTF_FAMILY_G6 || if_type == LPFC_SLI_INTF_IF_TYPE_6))) && val == 4) { @@ -4309,7 +4315,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 && - phba->hba_flag & HBA_FORCED_LINK_SPEED) + test_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag)) return -EPERM; if (!strncmp(buf, "nolip ", strlen("nolip "))) { @@ -6497,7 +6503,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost) struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) { + if ((lpfc_is_link_up(phba)) && + !test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch(phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -6533,7 +6540,8 @@ lpfc_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } - } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { + } else if (lpfc_is_link_up(phba) && + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (phba->fc_linkspeed) { case LPFC_ASYNC_LINK_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; @@ -6718,7 +6726,7 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count -= lso->invalid_crc_count; hs->error_frames -= lso->error_frames; - if (phba->hba_flag & HBA_FCOE_MODE) { + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { hs->lip_count = -1; hs->nos_count = (phba->link_events >> 1); hs->nos_count -= lso->link_events; @@ -6816,7 +6824,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; lso->error_frames = pmb->un.varRdLnk.crcCnt; - if (phba->hba_flag & HBA_FCOE_MODE) + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) lso->link_events = (phba->link_events >> 1); else lso->link_events = (phba->fc_eventTag >> 1); @@ -7161,11 +7169,11 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba) case PCI_DEVICE_ID_ZEPHYR_DCSP: case PCI_DEVICE_ID_TIGERSHARK: case PCI_DEVICE_ID_TOMCAT: - phba->hba_flag |= HBA_FCOE_MODE; + set_bit(HBA_FCOE_MODE, &phba->hba_flag); break; default: /* for others, clear the flag */ - phba->hba_flag &= ~HBA_FCOE_MODE; + clear_bit(HBA_FCOE_MODE, &phba->hba_flag); } } @@ -7236,7 +7244,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_get_hba_function_mode(phba); /* BlockGuard allowed for FC only. 
*/ - if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { + if (phba->cfg_enable_bg && test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0581 BlockGuard feature not supported\n"); /* If set, clear the BlockGuard support param */ diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 529df1768f..4156419c52 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -5002,7 +5002,8 @@ lpfc_forced_link_speed(struct bsg_job *job) goto job_error; } - forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) + forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED, + &phba->hba_flag) ? LPFC_FORCED_LINK_SPEED_SUPPORTED : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; job_error: diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 8cc08e58dc..376d0f958b 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -291,9 +291,9 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); if (ulp_status) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6438 Unsol CT: status:x%x/x%x did : x%x\n", - ulp_status, ulp_word4, did); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6438 Unsol CT: status:x%x/x%x did : x%x\n", + ulp_status, ulp_word4, did); return; } @@ -303,17 +303,17 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6439 Unsol CT: NDLP Not Found for DID : x%x", - did); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6439 Unsol CT: NDLP Not Found for DID : x%x", + did); return; } ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); + lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, + "6442 MI Cmd : x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, bf_get(wqe_ctxt_tag, &ctiocbq->wqe.xmit_els_rsp.wqe_com), @@ -2173,7 +2173,7 @@ lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask) struct lpfc_nodelist *ndlp; int i; - phba->hba_flag |= HBA_RHBA_CMPL; + set_bit(HBA_RHBA_CMPL, &phba->hba_flag); vports = lpfc_create_vport_work_array(phba); if (vports) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { @@ -2368,7 +2368,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * for the physical port completes successfully. * We may have to defer the RPRT accordingly. 
*/ - if (phba->hba_flag & HBA_RHBA_CMPL) { + if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); } else { lpfc_printf_vlog(vport, KERN_INFO, @@ -2785,7 +2785,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr) u32 tcfg; u8 i, cnt; - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { cnt = 0; if (phba->sli_rev == LPFC_SLI_REV4) { tcfg = phba->sli4_hba.conf_trunk; @@ -2859,7 +2859,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr) struct lpfc_hba *phba = vport->phba; u32 speeds = 0; - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (phba->fc_linkspeed) { case LPFC_LINK_SPEED_1GHZ: speeds = HBA_PORTSPEED_1GFC; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f7c28dc73b..c32bc773ab 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -189,11 +189,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp, * If this command is for fabric controller and HBA running * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. */ - if ((did == Fabric_DID) && - (phba->hba_flag & HBA_FIP_SUPPORT) && - ((elscmd == ELS_CMD_FLOGI) || - (elscmd == ELS_CMD_FDISC) || - (elscmd == ELS_CMD_LOGO))) + if (did == Fabric_DID && + test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && + (elscmd == ELS_CMD_FLOGI || + elscmd == ELS_CMD_FDISC || + elscmd == ELS_CMD_LOGO)) switch (elscmd) { case ELS_CMD_FLOGI: elsiocb->cmd_flag |= @@ -965,7 +965,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * In case of FIP mode, perform roundrobin FCF failover * due to new FCF discovery */ - if ((phba->hba_flag & HBA_FIP_SUPPORT) && + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && (phba->fcf.fcf_flag & FCF_DISCOVERY)) { if (phba->link_state < LPFC_LINK_UP) goto stop_rr_fcf_flogi; @@ -999,7 +999,7 @@ stop_rr_fcf_flogi: IOERR_LOOP_OPEN_FAILURE))) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2858 FLOGI failure Status:x%x/x%x TMO" - ":x%x Data x%x x%x\n", + ":x%x Data x%lx x%x\n", ulp_status, ulp_word4, tmo, phba->hba_flag, phba->fcf.fcf_flag); @@ -1119,7 +1119,7 @@ stop_rr_fcf_flogi: if (sp->cmn.fPort) rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, ulp_word4); - else if (!(phba->hba_flag & HBA_FCOE_MODE)) + else if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -1149,14 +1149,15 @@ stop_rr_fcf_flogi: lpfc_nlp_put(ndlp); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } if (!rc) { /* Mark the FCF discovery process done */ - if (phba->hba_flag & HBA_FIP_SUPPORT) + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, "2769 FLOGI to FCF (x%x) " @@ -1164,8 +1165,9 @@ stop_rr_fcf_flogi: phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); + clear_bit(HBA_DEVLOSS_TMO, &phba->hba_flag); phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } @@ -1202,7 +1204,7 @@ flogifail: } out: if 
(!flogi_in_retry) - phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); @@ -1372,11 +1374,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Avoid race with FLOGI completion and hba_flags. */ - phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + set_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + set_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + clear_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag); lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; @@ -1413,7 +1417,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3354 Xmit deferred FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%x\n", + " ox_id: x%x, hba_flag x%lx\n", phba->defer_flogi_acc_rx_id, phba->defer_flogi_acc_ox_id, phba->hba_flag); @@ -7415,7 +7419,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, goto error; } - if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->sli_rev < LPFC_SLI_REV4 || + test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { rjt_err = LSRJT_UNABLE_TPC; rjt_expl = LSEXP_REQ_UNSUPPORTED; goto error; @@ -7738,7 +7743,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } if (phba->sli_rev < LPFC_SLI_REV4 || - phba->hba_flag & HBA_FCOE_MODE || + test_bit(HBA_FCOE_MODE, &phba->hba_flag) || (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < LPFC_SLI_INTF_IF_TYPE_2)) { rjt_err = LSRJT_CMD_UNSUPPORTED; @@ -8443,7 +8448,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); /* Defer ACC response until AFTER we issue a FLOGI */ - if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { + if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) { phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com); phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid, @@ -8453,7 +8458,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3344 Deferring FLOGI ACC: rx_id: x%x," - " ox_id: x%x, hba_flag x%x\n", + " ox_id: x%x, hba_flag x%lx\n", phba->defer_flogi_acc_rx_id, phba->defer_flogi_acc_ox_id, phba->hba_flag); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e42fa9c822..13b08c8544 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -487,7 +487,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) recovering = true; } else { /* Physical port path. 
*/ - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + if (test_bit(HBA_FLOGI_OUTSTANDING, + &phba->hba_flag)) recovering = true; } break; @@ -652,14 +653,15 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, if (!fcf_inuse) return; - if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { + if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) && + !lpfc_fcf_inuse(phba)) { spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { - if (phba->hba_flag & HBA_DEVLOSS_TMO) { + if (test_and_set_bit(HBA_DEVLOSS_TMO, + &phba->hba_flag)) { spin_unlock_irq(&phba->hbalock); return; } - phba->hba_flag |= HBA_DEVLOSS_TMO; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2847 Last remote node (x%x) using " "FCF devloss tmo\n", nlp_did); @@ -671,8 +673,9 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, "in progress\n"); return; } - if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->hbalock); + if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) && + !test_bit(FCF_RR_INPROG, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2869 Devloss tmo to idle FIP engine, " "unreg in-use FCF and rescan.\n"); @@ -680,11 +683,10 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, lpfc_unregister_fcf_rescan(phba); return; } - spin_unlock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_TS_INPROG) + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2870 FCF table scan in progress\n"); - if (phba->hba_flag & FCF_RR_INPROG) + if (test_bit(FCF_RR_INPROG, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2871 FLOGI roundrobin FCF failover " "in progress\n"); @@ -978,18 +980,15 @@ lpfc_work_done(struct lpfc_hba *phba) /* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { - if (phba->hba_flag & HBA_RRQ_ACTIVE) + if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag)) lpfc_handle_rrq_active(phba); - if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag)) lpfc_sli4_els_xri_abort_event_proc(phba); - if (phba->hba_flag & ASYNC_EVENT) + if (test_bit(ASYNC_EVENT, &phba->hba_flag)) lpfc_sli4_async_event_proc(phba); - if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; - spin_unlock_irq(&phba->hbalock); + if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER, + &phba->hba_flag)) lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); - } if (phba->fcf.fcf_flag & FCF_REDISC_EVT) lpfc_sli4_fcf_redisc_event_proc(phba); } @@ -1035,11 +1034,11 @@ lpfc_work_done(struct lpfc_hba *phba) status >>= (4*LPFC_ELS_RING); if (pring && (status & HA_RXMASK || pring->flag & LPFC_DEFERRED_RING_EVENT || - phba->hba_flag & HBA_SP_QUEUE_EVT)) { + test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Preserve legacy behavior. 
*/ - if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) + if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag)) set_bit(LPFC_DATA_READY, &phba->data_flags); } else { /* Driver could have abort request completed in queue @@ -1420,7 +1419,8 @@ lpfc_linkup(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); /* reinitialize initial HBA flag */ - phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL); + clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag); + clear_bit(HBA_RHBA_CMPL, &phba->hba_flag); return 0; } @@ -1505,7 +1505,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* don't perform discovery for SLI4 loopback diagnostic test */ if ((phba->sli_rev == LPFC_SLI_REV4) && - !(phba->hba_flag & HBA_FCOE_MODE) && + !test_bit(HBA_FCOE_MODE, &phba->hba_flag) && (phba->link_flag & LS_LOOPBACK_MODE)) return; @@ -1548,7 +1548,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto sparam_out; } - phba->hba_flag |= HBA_DEFER_FLOGI; + set_bit(HBA_DEFER_FLOGI, &phba->hba_flag); } else { lpfc_initial_flogi(vport); } @@ -1617,27 +1617,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); /* If there is a pending FCoE event, restart FCF table scan. */ - if ((!(phba->hba_flag & FCF_RR_INPROG)) && - lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) + if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) && + lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) goto fail_out; /* Mark successful completion of FCF table scan */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); if (vport->port_state != LPFC_FLOGI) { - phba->hba_flag |= FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + set_bit(FCF_RR_INPROG, &phba->hba_flag); lpfc_issue_init_vfi(vport); - goto out; } - spin_unlock_irq(&phba->hbalock); goto out; fail_out: - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); out: mempool_free(mboxq, phba->mbox_mem_pool); } @@ -1867,32 +1863,31 @@ lpfc_register_fcf(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* If the FCF is not available do nothing. 
*/ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); return; } /* The FCF is already registered, start discovery */ if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); if (phba->pport->port_state != LPFC_FLOGI && test_bit(FC_FABRIC, &phba->pport->fc_flag)) { - phba->hba_flag |= FCF_RR_INPROG; - spin_unlock_irq(&phba->hbalock); + set_bit(FCF_RR_INPROG, &phba->hba_flag); lpfc_initial_flogi(phba->pport); return; } - spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!fcf_mbxq) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); return; } @@ -1901,9 +1896,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); mempool_free(fcf_mbxq, phba->mbox_mem_pool); } @@ -1956,7 +1950,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) return 0; - if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); @@ -2151,8 +2145,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2833 Stop FCF discovery process due to link " "state change (x%x)\n", phba->link_state); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } @@ -2380,9 +2375,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) int rc; if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_DEVLOSS_TMO) { - spin_unlock_irq(&phba->hbalock); + if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2872 Devloss tmo with no eligible " "FCF, unregister in-use FCF (x%x) " @@ -2392,8 +2385,9 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) goto stop_flogi_current_fcf; } /* Mark the end to FLOGI roundrobin failover */ - phba->hba_flag &= ~FCF_RR_INPROG; + clear_bit(FCF_RR_INPROG, &phba->hba_flag); /* Allow action to new fcf asynchronous event */ + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, @@ -2630,9 +2624,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ - 
spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } @@ -2873,10 +2865,10 @@ read_next_fcf: phba->fcoe_eventtag_at_fcf_scan, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_DEVLOSS_TMO) { - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + if (test_bit(HBA_DEVLOSS_TMO, + &phba->hba_flag)) { + clear_bit(FCF_TS_INPROG, + &phba->hba_flag); /* Unregister in-use FCF and rescan */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, @@ -2889,8 +2881,7 @@ read_next_fcf: /* * Let next new FCF event trigger fast failover */ - phba->hba_flag &= ~FCF_TS_INPROG; - spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_TS_INPROG, &phba->hba_flag); return; } /* @@ -2996,8 +2987,8 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (phba->link_state < LPFC_LINK_UP) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; - phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); + clear_bit(FCF_RR_INPROG, &phba->hba_flag); goto out; } @@ -3008,7 +2999,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record. " - "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, + "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag, phba->fcf.fcf_flag); lpfc_unregister_fcf_rescan(phba); goto out; @@ -3471,9 +3462,9 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Check if sending the FLOGI is being deferred to after we get * up to date CSPs from MBX_READ_SPARAM. */ - if (phba->hba_flag & HBA_DEFER_FLOGI) { + if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) { lpfc_initial_flogi(vport); - phba->hba_flag &= ~HBA_DEFER_FLOGI; + clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag); } return; @@ -3495,7 +3486,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) spin_lock_irqsave(&phba->hbalock, iflags); phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { case LPFC_LINK_SPEED_1GHZ: case LPFC_LINK_SPEED_2GHZ: @@ -3611,7 +3602,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) goto out; } - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!cfglink_mbox) goto out; @@ -3631,7 +3622,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) * is phase 1 implementation that support FCF index 0 and driver * defaults. */ - if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { + if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL); if (unlikely(!fcf_record)) { @@ -3661,12 +3652,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) * The driver is expected to do FIP/FCF. Call the port * and get the FCF Table. 
*/ - spin_lock_irqsave(&phba->hbalock, iflags); - if (phba->hba_flag & FCF_TS_INPROG) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) return; - } /* This is the initial FCF discovery scan */ + spin_lock_irqsave(&phba->hbalock, iflags); phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, @@ -5736,7 +5725,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) return ndlp; if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && - ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); } @@ -6997,11 +6986,11 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) * registered, do nothing. */ spin_lock_irq(&phba->hbalock); - if (!(phba->hba_flag & HBA_FCOE_MODE) || + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) || !(phba->fcf.fcf_flag & FCF_REGISTERED) || - !(phba->hba_flag & HBA_FIP_SUPPORT) || + !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) || (phba->fcf.fcf_flag & FCF_DISCOVERY) || - (phba->pport->port_state == LPFC_FLOGI)) { + phba->pport->port_state == LPFC_FLOGI) { spin_unlock_irq(&phba->hbalock); return; } diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 367e6b066d..500253007b 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -2146,6 +2146,14 @@ struct sli4_sge { /* SLI-4 */ uint32_t sge_len; }; +struct sli4_sge_le { + __le32 addr_hi; + __le32 addr_lo; + + __le32 word2; + __le32 sge_len; +}; + struct sli4_hybrid_sgl { struct list_head list_node; struct sli4_sge *dma_sgl; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f7a0aa3625..e1dfa96c2a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -567,7 +567,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ - phba->hba_flag &= ~HBA_ERATT_HANDLED; + clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag); /* Enable appropriate host interrupts */ if (lpfc_readl(phba->HCregaddr, &status)) { @@ -599,13 +599,14 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); - if (phba->hba_flag & LINK_DISABLED) { + if (test_bit(LINK_DISABLED, &phba->hba_flag)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2598 Adapter Link is disabled.\n"); lpfc_down_link(phba, pmb); @@ -925,9 +926,7 @@ lpfc_sli4_free_sp_events(struct lpfc_hba *phba) struct hbq_dmabuf *dmabuf; struct lpfc_cq_event *cq_event; - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_SP_QUEUE_EVT; - spin_unlock_irq(&phba->hbalock); + clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag); while (!list_empty(&phba->sli4_hba.sp_queue_event)) { /* Get the response iocb from the head of work queue */ @@ -1228,18 +1227,15 @@ static void lpfc_rrq_timeout(struct timer_list *t) { struct lpfc_hba *phba; - unsigned long iflag; phba = from_timer(phba, t, rrq_tmr); - spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - if (!test_bit(FC_UNLOADING, 
&phba->pport->load_flag)) - phba->hba_flag |= HBA_RRQ_ACTIVE; - else - phba->hba_flag &= ~HBA_RRQ_ACTIVE; - spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + return; + } - if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) - lpfc_worker_wake_up(phba); + set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); + lpfc_worker_wake_up(phba); } /** @@ -1261,11 +1257,8 @@ lpfc_rrq_timeout(struct timer_list *t) static void lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { - unsigned long drvr_flag; - - spin_lock_irqsave(&phba->hbalock, drvr_flag); - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); /* Check and reset heart-beat timer if necessary */ mempool_free(pmboxq, phba->mbox_mem_pool); @@ -1457,7 +1450,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba) int retval; /* Is a Heartbeat mbox already in progress */ - if (phba->hba_flag & HBA_HBEAT_INP) + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) return 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1473,7 +1466,7 @@ lpfc_issue_hb_mbox(struct lpfc_hba *phba) mempool_free(pmboxq, phba->mbox_mem_pool); return -ENXIO; } - phba->hba_flag |= HBA_HBEAT_INP; + set_bit(HBA_HBEAT_INP, &phba->hba_flag); return 0; } @@ -1493,7 +1486,7 @@ lpfc_issue_hb_tmo(struct lpfc_hba *phba) { if (phba->cfg_enable_hba_heartbeat) return; - phba->hba_flag |= HBA_HBEAT_TMO; + set_bit(HBA_HBEAT_TMO, &phba->hba_flag); } /** @@ -1565,7 +1558,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); - if (phba->hba_flag & HBA_HBEAT_INP) + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) tmo = (1000 * LPFC_HB_MBOX_TIMEOUT); else tmo = (1000 * LPFC_HB_MBOX_INTERVAL); @@ -1574,7 +1567,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_unlock_irq(&phba->pport->work_port_lock); /* Check if a MBX_HEARTBEAT is already in progress */ - if (phba->hba_flag & HBA_HBEAT_INP) { + if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) { /* * If heart beat timeout called with HBA_HBEAT_INP set * we need to give the hb mailbox cmd a chance to @@ -1611,7 +1604,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } } else { /* Check to see if we want to force a MBX_HEARTBEAT */ - if (phba->hba_flag & HBA_HBEAT_TMO) { + if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) { retval = lpfc_issue_hb_mbox(phba); if (retval) tmo = (1000 * LPFC_HB_MBOX_INTERVAL); @@ -1699,9 +1692,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * since we cannot communicate with the pci card anyway. */ if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); return; } @@ -1752,9 +1743,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag)) phba->work_hs = old_host_status & ~HS_FFER1; - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } @@ -1798,9 +1787,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * since we cannot communicate with the pci card anyway. 
*/ if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + clear_bit(DEFER_ERATT, &phba->hba_flag); return; } @@ -1811,7 +1798,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) /* Send an internal error event to mgmt application */ lpfc_board_errevt_to_mgmt(phba); - if (phba->hba_flag & DEFER_ERATT) + if (test_bit(DEFER_ERATT, &phba->hba_flag)) lpfc_handle_deferred_eratt(phba); if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { @@ -2026,7 +2013,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) return; - if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { + if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) { lpfc_sli4_offline_eratt(phba); return; } @@ -3319,9 +3306,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) del_timer_sync(&phba->hb_tmofunc); if (phba->sli_rev == LPFC_SLI_REV4) { del_timer_sync(&phba->rrq_tmr); - phba->hba_flag &= ~HBA_RRQ_ACTIVE; + clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); } - phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO); + clear_bit(HBA_HBEAT_INP, &phba->hba_flag); + clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); switch (phba->pci_dev_grp) { case LPFC_PCI_DEV_LP: @@ -4785,7 +4773,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; - shost->max_cmd_len = 16; + if (phba->sli_rev == LPFC_SLI_REV4) + shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; + else + shost->max_cmd_len = LPFC_FCP_CDB_LEN; if (phba->sli_rev == LPFC_SLI_REV4) { if (!phba->cfg_fcp_mq_threshold || @@ -4976,7 +4967,7 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) * Avoid reporting supported link speed for FCoE as it can't be * controlled via FCoE. */ - if (phba->hba_flag & HBA_FCOE_MODE) + if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) return; if (phba->lmt & LMT_256Gb) @@ -5490,7 +5481,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch * topology info. Note: Optional for non FC-AL ports. */ - if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_pmb; @@ -6025,7 +6016,7 @@ lpfc_cmf_timer(struct hrtimer *timer) */ if (phba->cmf_active_mode == LPFC_CFG_MANAGED && phba->link_state != LPFC_LINK_DOWN && - phba->hba_flag & HBA_SETUP) { + test_bit(HBA_SETUP, &phba->hba_flag)) { mbpi = phba->cmf_last_sync_bw; phba->cmf_last_sync_bw = 0; extra = 0; @@ -6778,11 +6769,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, } /* If the FCF discovery is in progress, do nothing. 
*/ - spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_TS_INPROG) { - spin_unlock_irq(&phba->hbalock); + if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) break; - } + spin_lock_irq(&phba->hbalock); /* If fast FCF failover rescan event is pending, do nothing */ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) { spin_unlock_irq(&phba->hbalock); @@ -7321,9 +7310,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) unsigned long iflags; /* First, declare the async event has been handled */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag &= ~ASYNC_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); + clear_bit(ASYNC_EVENT, &phba->hba_flag); /* Now, handle all the async events */ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); @@ -8247,7 +8234,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * our max amount and we need to limit lpfc_sg_seg_cnt * to minimize the risk of running out. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp) + max_buf_size; /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ @@ -8269,7 +8256,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * the FCP rsp, a SGE for each, and a SGE for up to * cfg_sg_seg_cnt data segments. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp) + ((phba->cfg_sg_seg_cnt + extra) * sizeof(struct sli4_sge)); @@ -8332,7 +8319,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->lpfc_cmd_rsp_buf_pool = dma_pool_create("lpfc_cmd_rsp_buf_pool", &phba->pcidev->dev, - sizeof(struct fcp_cmnd) + + sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp), i, 0); if (!phba->lpfc_cmd_rsp_buf_pool) { @@ -9869,41 +9856,38 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) return; } /* FW supports persistent topology - override module parameter value */ - phba->hba_flag |= HBA_PERSISTENT_TOPO; + set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); /* if ASIC_GEN_NUM >= 0xC) */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_6) || (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_FAMILY_G6)) { - if (!tf) { + if (!tf) phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) ? FLAGS_TOPOLOGY_MODE_LOOP : FLAGS_TOPOLOGY_MODE_PT_PT); - } else { - phba->hba_flag &= ~HBA_PERSISTENT_TOPO; - } + else + clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag); } else { /* G5 */ - if (tf) { + if (tf) /* If topology failover set - pt is '0' or '1' */ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : FLAGS_TOPOLOGY_MODE_LOOP_PT); - } else { + else phba->cfg_topology = ((pt == LINK_FLAGS_P2P) ? 
FLAGS_TOPOLOGY_MODE_PT_PT : FLAGS_TOPOLOGY_MODE_LOOP); - } } - if (phba->hba_flag & HBA_PERSISTENT_TOPO) { + if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag)) lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2020 Using persistent topology value [%s]", lpfc_topo_to_str[phba->cfg_topology]); - } else { + else lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "2021 Invalid topology values from FW " "Using driver parameter defined value [%s]", lpfc_topo_to_str[phba->cfg_topology]); - } } /** @@ -10146,7 +10130,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) forced_link_speed = bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); if (forced_link_speed) { - phba->hba_flag |= HBA_FORCED_LINK_SPEED; + set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag); switch (forced_link_speed) { case LINK_SPEED_1G: @@ -12241,7 +12225,7 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); if (retval) return intr_mode; - phba->hba_flag &= ~HBA_NEEDS_CFG_PORT; + clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag); if (cfg_mode == 2) { /* Now, try to enable MSI-X interrupt mode */ @@ -14812,6 +14796,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_unset_pci_mem_s4; } + spin_lock_init(&phba->rrq_list_lock); INIT_LIST_HEAD(&phba->active_rrq_list); INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); @@ -15528,7 +15513,7 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; if (phba->link_state == LPFC_HBA_ERROR && - phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) return PCI_ERS_RESULT_NEED_RESET; switch (phba->pci_dev_grp) { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index c4172791c2..f6a53446e5 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -47,6 +47,18 @@ #include "lpfc_debugfs.h" +/* Called to clear RSCN discovery flags when driver is unloading. */ +static bool +lpfc_check_unload_and_clr_rscn(unsigned long *fc_flag) +{ + /* If unloading, then clear the FC_RSCN_DEFERRED flag */ + if (test_bit(FC_UNLOADING, fc_flag)) { + clear_bit(FC_RSCN_DEFERRED, fc_flag); + return false; + } + return test_bit(FC_RSCN_DEFERRED, fc_flag); +} + /* Called to verify a rcv'ed ADISC was intended for us. */ static int lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, @@ -213,8 +225,10 @@ void lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { LIST_HEAD(abort_list); + LIST_HEAD(drv_cmpl_list); struct lpfc_sli_ring *pring; struct lpfc_iocbq *iocb, *next_iocb; + int retval = 0; pring = lpfc_phba_elsring(phba); @@ -250,11 +264,20 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort the targeted IOs and remove them from the abort list. 
*/ list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { - spin_lock_irq(&phba->hbalock); - list_del_init(&iocb->dlist); - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); - spin_unlock_irq(&phba->hbalock); + spin_lock_irq(&phba->hbalock); + list_del_init(&iocb->dlist); + retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); + spin_unlock_irq(&phba->hbalock); + + if (retval && test_bit(FC_UNLOADING, &phba->pport->load_flag)) { + list_del_init(&iocb->list); + list_add_tail(&iocb->list, &drv_cmpl_list); + } } + + lpfc_sli_cancel_iocbs(phba, &drv_cmpl_list, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); @@ -481,7 +504,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * must have ACCed the remote NPorts FLOGI to us * to make it here. */ - if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + if (test_bit(HBA_FLOGI_OUTSTANDING, &phba->hba_flag)) lpfc_els_abort_flogi(phba); ed_tov = be32_to_cpu(sp->cmn.e_d_tov); @@ -1604,10 +1627,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding PLOGI */ @@ -1790,10 +1811,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding ADISC */ @@ -2059,10 +2078,8 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; @@ -2375,10 +2392,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. */ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; /* software abort outstanding PRLI */ @@ -2894,10 +2909,8 @@ static uint32_t lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - /* Don't do anything that will mess up processing of the - * previous RSCN. - */ - if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) + /* Don't do anything that disrupts the RSCN unless lpfc is unloading. 
*/ + if (lpfc_check_unload_and_clr_rscn(&vport->fc_flag)) return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index c5792eaf3f..d70da2736c 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -95,7 +95,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, vport = lport->vport; if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); @@ -272,7 +272,7 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, remoteport = lpfc_rport->remoteport; if (!vport->localport || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -EINVAL; lport = vport->localport->private; @@ -569,7 +569,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_DID, ntype, nstate); return -ENODEV; } - if (vport->phba->hba_flag & HBA_IOQ_FLUSH) + if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; if (!vport->phba->sli4_hba.nvmels_wq) @@ -675,7 +675,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, vport = lport->vport; if (test_bit(FC_UNLOADING, &vport->load_flag) || - vport->phba->hba_flag & HBA_IOQ_FLUSH) + test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag)) return -ENODEV; atomic_inc(&lport->fc4NvmeLsRequests); @@ -1568,7 +1568,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, phba = vport->phba; if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) || - phba->hba_flag & HBA_IOQ_FLUSH) { + test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); atomic_inc(&lport->xmt_fcp_err); @@ -1909,24 +1909,19 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; } - /* Guard against IO completion being called at same time */ - spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); - - /* If the hba is getting reset, this flag is set. It is - * cleared when the reset is complete and rings reestablished. - */ - spin_lock(&phba->hbalock); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock(&phba->hbalock); - spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6139 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x\n", + "NVME Req now. 
hba_flag x%lx\n", phba->hba_flag); return; } + /* Guard against IO completion being called at same time */ + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); + spin_lock(&phba->hbalock); + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 561ced5503..5297cacc8b 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1811,7 +1811,9 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, ctxp->flag &= ~LPFC_NVME_XBUSY; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + spin_lock_irqsave(&phba->rrq_list_lock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflag); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); if (ndlp && (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || @@ -3393,14 +3395,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. */ - spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x oxid x%x\n", + "NVME Req now. hba_flag x%lx oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); @@ -3409,6 +3409,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 0; } + spin_lock_irqsave(&phba->hbalock, flags); /* Outstanding abort is in progress */ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 4a6e5223a2..98ce9d97a2 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -474,9 +474,11 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, ndlp = psb->rdata->pnode; else ndlp = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_lock_irqsave(&phba->rrq_list_lock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflag); if (ndlp && !offline) { lpfc_set_rrq_active(phba, ndlp, psb->cur_iocbq.sli4_lxritag, rxid, 1); @@ -598,7 +600,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, { struct lpfc_io_buf *lpfc_cmd; struct lpfc_sli4_hdw_queue *qp; - struct sli4_sge *sgl; + struct sli4_sge_le *sgl; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_fcp_cmd; uint32_t cpu, idx; @@ -649,23 +651,23 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * The balance are sg list bdes. Initialize the * first two and leave the rest for queuecommand. 
*/ - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl; pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + bf_set_le32(lpfc_sli4_sge_last, sgl, 0); + if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN) + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32)); + else + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + sgl++; /* Setup the physical region for the FCP RSP */ - pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32); sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); + bf_set_le32(lpfc_sli4_sge_last, sgl, 1); sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); if (lpfc_ndlp_check_qdepth(phba, ndlp)) { @@ -2606,7 +2608,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpLe = 1; fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + fcp_cmnd->fcpDl = cpu_to_be32(fcpdl); /* * Due to difference in data length between DIF/non-DIF paths, @@ -3223,14 +3225,18 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * explicitly reinitialized. * all iocb memory resources are reused. */ - fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN) + ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = + cpu_to_be32(scsi_bufflen(scsi_cmnd)); + else + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); /* Set first-burst provided it was successfully negotiated */ - if (!(phba->hba_flag & HBA_FCOE_MODE) && + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; - total_len = be32_to_cpu(fcp_cmnd->fcpDl); + total_len = scsi_bufflen(scsi_cmnd); init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ @@ -3418,15 +3424,18 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); + if (lpfc_cmd->pCmd->cmd_len > LPFC_FCP_CDB_LEN) + ((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = cpu_to_be32(fcpdl); + else + fcp_cmnd->fcpDl = cpu_to_be32(fcpdl); /* Set first-burst provided it was successfully negotiated */ - if (!(phba->hba_flag & HBA_FCOE_MODE) && + if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) && vport->cfg_first_burst_size && scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { u32 init_len, total_len; - total_len = be32_to_cpu(fcp_cmnd->fcpDl); + total_len = fcpdl; init_len = min(total_len, vport->cfg_first_burst_size); /* Word 4 & 5 */ @@ -3434,8 +3443,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, wqe->fcp_iwrite.total_xfer_len = total_len; } else { /* Word 4 */ - wqe->fcp_iwrite.total_xfer_len = - be32_to_cpu(fcp_cmnd->fcpDl); + wqe->fcp_iwrite.total_xfer_len = fcpdl; } /* @@ -3892,7 +3900,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, fcprsp->rspInfo3); scsi_set_resid(cmnd, 0); - fcpDl = be32_to_cpu(fcpcmd->fcpDl); + if (cmnd->cmd_len > LPFC_FCP_CDB_LEN) + 
@@ -3892,7 +3900,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 			 fcprsp->rspInfo3);
 
 	scsi_set_resid(cmnd, 0);
-	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
+	if (cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+		fcpDl = be32_to_cpu(((struct fcp_cmnd32 *)fcpcmd)->fcpDl);
+	else
+		fcpDl = be32_to_cpu(fcpcmd->fcpDl);
 
 	if (resp_info & RESID_UNDER) {
 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
@@ -4721,6 +4732,14 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
 			       LPFC_WQE_IOD_NONE);
 		}
+
+		/* Additional fcp cdb length field calculation.
+		 * LPFC_FCP_CDB_LEN_32 - normal 16 byte cdb length,
+		 * then divide by 4 for the word count.
+		 * shift 2 because of the RDDATA/WRDATA.
+		 */
+		if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+			fcp_cmnd->fcpCntl3 |= 4 << 2;
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
@@ -4741,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 
 		/* Word 3 */
 		bf_set(payload_offset_len, &wqe->fcp_icmd,
-		       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		       sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp));
 
 		/* Word 6 */
 		bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
@@ -4796,7 +4815,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
 		       &lpfc_cmd->fcp_cmnd->fcp_lun);
 
-	ptr = &fcp_cmnd->fcpCdb[0];
+	ptr = &((struct fcp_cmnd32 *)fcp_cmnd)->fcpCdb[0];
 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
 		ptr += scsi_cmnd->cmd_len;
@@ -5041,7 +5060,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
 
 		/* Check for valid Emulex Device ID */
 		if (phba->sli_rev != LPFC_SLI_REV4 ||
-		    phba->hba_flag & HBA_FCOE_MODE) {
+		    test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 					"8347 Incapable PCI reset device: "
 					"0x%04x\n", ptr->device);
@@ -5327,7 +5346,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 					 cmnd->cmnd[0],
 					 scsi_prot_ref_tag(cmnd),
 					 scsi_logical_block_count(cmnd),
-					 (cmnd->cmnd[1]>>5));
+					 scsi_get_prot_type(cmnd));
 		}
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
@@ -5518,7 +5537,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	spin_lock(&phba->hbalock);
 
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_IOQ_FLUSH) {
+	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3168 SCSI Layer abort requested I/O has been "
 			"flushed by LLD.\n");
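The new block in lpfc_scsi_prep_cmnd_buf_s4() encodes FCP's additional-CDB-length field: a 32-byte CDB carries 32 - 16 = 16 extra bytes, i.e. 4 words, and the field sits above the RDDATA/WRDATA bits, hence the shift by 2. A standalone check of that arithmetic (plain C; the helper name is hypothetical):

#include <assert.h>
#include <stdint.h>

#define CDB_LEN_BASE 16	/* fixed part of an FCP_CMND CDB */

/* Compute the fcpCntl3-style byte for an oversized CDB: the low two
 * bits stay reserved for WRDATA/RDDATA, the bits above them hold the
 * number of additional CDB words, (cdb_len - 16) / 4.
 */
static uint8_t additional_cdb_field(unsigned int cdb_len)
{
	unsigned int extra_words = (cdb_len - CDB_LEN_BASE) / 4;

	return (uint8_t)(extra_words << 2);
}

int main(void)
{
	/* 32-byte CDB: (32 - 16) / 4 = 4 words, and 4 << 2 = 0x10,
	 * matching the literal "4 << 2" in the hunk above.
	 */
	assert(additional_cdb_field(32) == 0x10);
	/* A standard 16-byte CDB contributes nothing. */
	assert(additional_cdb_field(16) == 0);
	return 0;
}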
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index eae56944f3..a05d203e47 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc and/or its subsidiaries.      *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -24,6 +24,7 @@ struct lpfc_hba;
 
 #define LPFC_FCP_CDB_LEN 16
+#define LPFC_FCP_CDB_LEN_32 32
 
 #define list_remove_head(list, entry, type, member)	\
 	do {	\
@@ -99,17 +100,11 @@ struct fcp_rsp {
 #define SNSCOD_BADCMD 0x20	/* sense code is byte 13 ([12]) */
 };
 
-struct fcp_cmnd {
-	struct scsi_lun  fcp_lun;
-
-	uint8_t fcpCntl0;	/* FCP_CNTL byte 0 (reserved) */
-	uint8_t fcpCntl1;	/* FCP_CNTL byte 1 task codes */
 #define  SIMPLE_Q        0x00
 #define  HEAD_OF_Q       0x01
 #define  ORDERED_Q       0x02
 #define  ACA_Q           0x04
 #define  UNTAGGED        0x05
-	uint8_t fcpCntl2;	/* FCP_CTL byte 2 task management codes */
 #define  FCP_ABORT_TASK_SET  0x02	/* Bit 1 */
 #define  FCP_CLEAR_TASK_SET  0x04	/* bit 2 */
 #define  FCP_BUS_RESET       0x08	/* bit 3 */
@@ -117,12 +112,31 @@ struct fcp_cmnd {
 #define  FCP_TARGET_RESET    0x20	/* bit 5 */
 #define  FCP_CLEAR_ACA       0x40	/* bit 6 */
 #define  FCP_TERMINATE_TASK  0x80	/* bit 7 */
-	uint8_t fcpCntl3;
 #define  WRITE_DATA      0x01	/* Bit 0 */
 #define  READ_DATA       0x02	/* Bit 1 */
 
+struct fcp_cmnd {
+	struct scsi_lun  fcp_lun;
+
+	uint8_t fcpCntl0;	/* FCP_CNTL byte 0 (reserved) */
+	uint8_t fcpCntl1;	/* FCP_CNTL byte 1 task codes */
+	uint8_t fcpCntl2;	/* FCP_CTL byte 2 task management codes */
+	uint8_t fcpCntl3;
+
 	uint8_t fcpCdb[LPFC_FCP_CDB_LEN];	/* SRB cdb field is copied here */
-	uint32_t fcpDl;		/* Total transfer length */
+	__be32 fcpDl;		/* Total transfer length */
+
+};
 
+struct fcp_cmnd32 {
+	struct scsi_lun  fcp_lun;
+
+	uint8_t fcpCntl0;	/* FCP_CNTL byte 0 (reserved) */
+	uint8_t fcpCntl1;	/* FCP_CNTL byte 1 task codes */
+	uint8_t fcpCntl2;	/* FCP_CTL byte 2 task management codes */
+	uint8_t fcpCntl3;
+
+	uint8_t fcpCdb[LPFC_FCP_CDB_LEN_32];	/* SRB cdb field is copied here */
+	__be32 fcpDl;		/* Total transfer length */
 };
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a028e008dd..3e55d5edd6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1024,9 +1024,9 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 	unsigned long iflags;
 	LIST_HEAD(send_rrq);
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 		if (time_after(jiffies, rrq->rrq_stop_time))
@@ -1034,7 +1034,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 		else if (time_before(rrq->rrq_stop_time, next_time))
 			next_time = rrq->rrq_stop_time;
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
 	if ((!list_empty(&phba->active_rrq_list)) &&
 	    (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
 		mod_timer(&phba->rrq_tmr, next_time);
@@ -1072,16 +1072,16 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
 	if (phba->sli_rev != LPFC_SLI_REV4)
 		return NULL;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 		if (rrq->vport == vport && rrq->xritag == xri &&
 		    rrq->nlp_DID == did){
 			list_del(&rrq->list);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
 			return rrq;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
 	return NULL;
 }
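lpfc_scsi.h now carries two command layouts that differ only in the length of the CDB array (and therefore in the offset of fcpDl), and every producer and consumer picks one by comparing the command's cmd_len against LPFC_FCP_CDB_LEN, as the lpfc_scsi.c hunks earlier did. A simplified sketch of that selection idiom, using demo types rather than the driver's:

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_CDB_LEN	16
#define DEMO_CDB_LEN_32	32

/* Two wire layouts that agree on every field up to the CDB array. */
struct demo_cmnd {
	u8     cntl[4];
	u8     cdb[DEMO_CDB_LEN];
	__be32 dl;
};

struct demo_cmnd32 {
	u8     cntl[4];
	u8     cdb[DEMO_CDB_LEN_32];
	__be32 dl;
};

/* Hypothetical helper mirroring the cmd_len checks in the hunks
 * above: write the transfer length into whichever layout matches the
 * CDB size.  The cast is only safe because the underlying buffer is
 * always allocated large enough for the 32-byte variant.
 */
static void demo_set_dl(struct demo_cmnd *cmd, unsigned int cdb_len,
			u32 len)
{
	if (cdb_len > DEMO_CDB_LEN)
		((struct demo_cmnd32 *)cmd)->dl = cpu_to_be32(len);
	else
		cmd->dl = cpu_to_be32(len);
}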
@@ -1109,7 +1109,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		lpfc_sli4_vport_delete_els_xri_aborted(vport);
 		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
 	}
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 		if (rrq->vport != vport)
 			continue;
@@ -1118,7 +1118,7 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			list_move(&rrq->list, &rrq_list);
 
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
 
 	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
 		list_del(&rrq->list);
@@ -1179,12 +1179,12 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	if (!phba->cfg_enable_rrq)
 		return -EINVAL;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
-		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-		goto out;
+		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
+		goto outnl;
 	}
 
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
 		goto out;
 
@@ -1213,16 +1213,18 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	rrq->nlp_DID = ndlp->nlp_DID;
 	rrq->vport = ndlp->vport;
 	rrq->rxid = rxid;
-	spin_lock_irqsave(&phba->hbalock, iflags);
+
+	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
 	empty = list_empty(&phba->active_rrq_list);
 	list_add_tail(&rrq->list, &phba->active_rrq_list);
-	phba->hba_flag |= HBA_RRQ_ACTIVE;
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
+	set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 	if (empty)
 		lpfc_worker_wake_up(phba);
 	return 0;
 out:
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+outnl:
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
 			" DID:0x%x Send:%d\n",
@@ -3937,7 +3939,7 @@ void lpfc_poll_eratt(struct timer_list *t)
 	uint64_t sli_intr, cnt;
 
 	phba = from_timer(phba, t, eratt_poll);
-	if (!(phba->hba_flag & HBA_SETUP))
+	if (!test_bit(HBA_SETUP, &phba->hba_flag))
 		return;
 
 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
@@ -4522,9 +4524,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 	unsigned long iflag;
 	int count = 0;
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
@@ -4681,10 +4681,8 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
 	uint32_t i;
 	struct lpfc_iocbq *piocb, *next_iocb;
 
-	spin_lock_irq(&phba->hbalock);
 	/* Indicate the I/O queues are flushed */
-	phba->hba_flag |= HBA_IOQ_FLUSH;
-	spin_unlock_irq(&phba->hbalock);
+	set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
 
 	/* Look on all the FCP Rings for the iotag */
 	if (phba->sli_rev >= LPFC_SLI_REV4) {
@@ -4762,7 +4760,7 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 	if (lpfc_readl(phba->HSregaddr, &status))
 		return 1;
 
-	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
 
 	/*
 	 * Check status register every 100ms for 5 retries, then every
@@ -4841,7 +4839,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
 	} else
 		phba->sli4_hba.intr_enable = 0;
 
-	phba->hba_flag &= ~HBA_SETUP;
+	clear_bit(HBA_SETUP, &phba->hba_flag);
 	return retval;
 }
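Hunks like the lpfc_sli_flush_io_rings() one above collapse a lock/modify/unlock triple into a single atomic bitop. That is sound only because hba_flag is now an unsigned long updated exclusively through bitops: a plain `|=` on a shared word is a read-modify-write that can silently lose a concurrent update, so it needed the lock; set_bit() does not. A sketch of the before/after shapes (demo names):

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define DEMO_IOQ_FLUSH 0

struct demo_hba {
	spinlock_t lock;
	unsigned long flags;	/* was: u32 flags + #define masks */
};

/* Old shape: the |= is a read-modify-write of the whole word, so it
 * needed the spinlock (and the irq disable that comes with it) to
 * avoid losing a concurrent flag update.
 */
static void demo_flush_locked(struct demo_hba *hba)
{
	spin_lock_irq(&hba->lock);
	hba->flags |= BIT(DEMO_IOQ_FLUSH);
	spin_unlock_irq(&hba->lock);
}

/* New shape: set_bit() is an atomic RMW on one bit, so the locking
 * disappears entirely -- provided every writer of the word goes
 * through bitops.
 */
static void demo_flush_atomic(struct demo_hba *hba)
{
	set_bit(DEMO_IOQ_FLUSH, &hba->flags);
}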
@@ -5093,7 +5091,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 	/* perform board reset */
 	phba->fc_eventTag = 0;
 	phba->link_events = 0;
-	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
 	if (phba->pport) {
 		phba->pport->fc_myDID = 0;
 		phba->pport->fc_prevDID = 0;
@@ -5153,7 +5151,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* Reset HBA */
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"0295 Reset HBA Data: x%x x%x x%x\n",
+			"0295 Reset HBA Data: x%x x%x x%lx\n",
 			phba->pport->port_state, psli->sli_flag,
 			phba->hba_flag);
 
@@ -5162,7 +5160,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
-	phba->hba_flag &= ~HBA_SETUP;
+	clear_bit(HBA_SETUP, &phba->hba_flag);
 
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
@@ -5406,7 +5404,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
 			return -EIO;
 	}
 
-	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
 
 	/* Clear all interrupt enable conditions */
 	writel(0, phba->HCregaddr);
@@ -5708,11 +5706,11 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 	int longs;
 
 	/* Enable ISR already does config_port because of config_msi mbx */
-	if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
+	if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
 		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
 		if (rc)
 			return -EIO;
-		phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
+		clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
 	}
 	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
 
@@ -7759,7 +7757,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 	snprintf(mbox->u.mqe.un.set_host_data.un.data,
 		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
 		 "Linux %s v"LPFC_DRIVER_VERSION,
-		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+		 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
 }
 
 int
@@ -8487,7 +8485,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			spin_unlock_irq(&phba->hbalock);
 		}
 	}
-	phba->hba_flag &= ~HBA_SETUP;
+	clear_bit(HBA_SETUP, &phba->hba_flag);
 
 	lpfc_sli4_dip(phba);
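Switching hba_flag to unsigned long also forces the format-string changes scattered through these hunks ("x%x" becoming "x%lx"): passing an unsigned long where %x expects an unsigned int is undefined behavior on 64-bit targets and warns under the kernel's -Wformat checking. A trivial userspace illustration:

#include <stdio.h>

int main(void)
{
	unsigned long flags = 0x1234beefUL;	/* stand-in for hba_flag */

	/* Wrong: %x expects unsigned int; on LP64 the compiler warns
	 * and the result is undefined.
	 * printf("flags x%x\n", flags);
	 */

	/* Right: the length modifier matches the argument's type. */
	printf("flags x%lx\n", flags);
	return 0;
}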
@@ -8516,25 +8514,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	mqe = &mboxq->u.mqe;
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
-		phba->hba_flag |= HBA_FCOE_MODE;
+		set_bit(HBA_FCOE_MODE, &phba->hba_flag);
 		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
 	} else {
-		phba->hba_flag &= ~HBA_FCOE_MODE;
+		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
 	}
 
 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
 		LPFC_DCBX_CEE_MODE)
-		phba->hba_flag |= HBA_FIP_SUPPORT;
+		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
 	else
-		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+		clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
 
-	phba->hba_flag &= ~HBA_IOQ_FLUSH;
+	clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
 
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 			"0376 READ_REV Error. SLI Level %d "
 			"FCoE enabled %d\n",
-			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+			phba->sli_rev,
+			test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
 		rc = -EIO;
 		kfree(vpd);
 		goto out_free_mbox;
@@ -8549,7 +8548,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	 * to read FCoE param config regions, only read parameters if the
 	 * board is FCoE
 	 */
-	if (phba->hba_flag & HBA_FCOE_MODE &&
+	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
 	    lpfc_sli4_read_fcoe_params(phba))
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
 			"2570 Failed to read FCoE parameters\n");
@@ -8626,7 +8625,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 		if (rc == MBX_SUCCESS) {
-			phba->hba_flag |= HBA_RECOVERABLE_UE;
+			set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
 			/* Set 1Sec interval to detect UE */
 			phba->eratt_poll_interval = 1;
 			phba->sli4_hba.ue_to_sr = bf_get(
@@ -8677,7 +8676,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	}
 
 	/* Performance Hints are ONLY for FCoE */
-	if (phba->hba_flag & HBA_FCOE_MODE) {
+	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
 		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
 			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
 		else
@@ -8936,7 +8935,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	}
 	lpfc_sli4_node_prep(phba);
 
-	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
 		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
 			/*
 			 * The FC Port needs to register FCFI (index 0)
@@ -9012,7 +9011,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	/* Start heart beat timer */
 	mod_timer(&phba->hb_tmofunc,
 		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
-	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
+	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
+	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
 	phba->last_completion_time = jiffies;
 
 	/* start eq_delay heartbeat */
@@ -9054,8 +9054,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	/* Setup CMF after HBA is initialized */
 	lpfc_cmf_setup(phba);
 
-	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
-	    (phba->hba_flag & LINK_DISABLED)) {
+	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
+	    test_bit(LINK_DISABLED, &phba->hba_flag)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 				"3103 Adapter Link is disabled.\n");
 		lpfc_down_link(phba, mboxq);
@@ -9079,7 +9079,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	/* Enable RAS FW log support */
 	lpfc_sli4_ras_setup(phba);
 
-	phba->hba_flag |= HBA_SETUP;
+	set_bit(HBA_SETUP, &phba->hba_flag);
 	return rc;
 
 out_io_buff_free:
@@ -9383,7 +9383,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 	}
 
 	/* If HBA has a deferred error attention, fail the iocb. */
-	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 		goto out_not_finished;
 	}
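One caveat the heartbeat hunk above illustrates: two clear_bit() calls are not equivalent to one masked clear under a lock. Each clear is individually atomic, but another CPU can observe the window between them. That is presumably acceptable here because readers treat HBA_HBEAT_INP and HBA_HBEAT_TMO independently; it would not be for bits that must transition together. A sketch of the distinction (demo names):

#include <linux/bitops.h>

#define DEMO_HBEAT_INP 0
#define DEMO_HBEAT_TMO 1

/* One locked RMW used to clear both bits in a single shot:
 *	flags &= ~(BIT(DEMO_HBEAT_INP) | BIT(DEMO_HBEAT_TMO));
 * With bitops each clear is atomic on its own, but a concurrent
 * reader can see INP already clear while TMO is still set.
 */
static void demo_clear_hbeat(unsigned long *flags)
{
	clear_bit(DEMO_HBEAT_INP, flags);
	clear_bit(DEMO_HBEAT_TMO, flags);
}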
@@ -10447,7 +10447,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 		return IOCB_ERROR;
 
 	/* If HBA has a deferred error attention, fail the iocb. */
-	if (unlikely(phba->hba_flag & DEFER_ERATT))
+	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
 		return IOCB_ERROR;
 
 	/*
@@ -10579,10 +10579,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
 	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
-	struct sli4_sge *sgl;
+	struct sli4_sge_le *sgl;
+	u32 type_size;
 
 	/* 128 byte wqe support here */
-	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
 
 	if (phba->fcp_embed_io) {
 		struct fcp_cmnd *fcp_cmnd;
@@ -10591,24 +10592,24 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 		fcp_cmnd = lpfc_cmd->fcp_cmnd;
 
 		/* Word 0-2 - FCP_CMND */
-		wqe->generic.bde.tus.f.bdeFlags =
-			BUFF_TYPE_BDE_IMMED;
-		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
+		type_size = le32_to_cpu(sgl->sge_len);
+		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
+		wqe->generic.bde.tus.w = type_size;
 		wqe->generic.bde.addrHigh = 0;
-		wqe->generic.bde.addrLow =  88;	/* Word 22 */
+		wqe->generic.bde.addrLow = 72;	/* Word 18 */
 
 		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
 
-		/* Word 22-29  FCP CMND Payload */
-		ptr = &wqe->words[22];
-		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		/* Word 18-29  FCP CMND Payload */
+		ptr = &wqe->words[18];
+		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
 	} else {
 		/* Word 0-2 - Inline BDE */
 		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
-		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
-		wqe->generic.bde.addrHigh = sgl->addr_hi;
-		wqe->generic.bde.addrLow = sgl->addr_lo;
+		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
+		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
+		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
 
 		/* Word 10 */
 		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
@@ -12361,10 +12362,10 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	/* ELS cmd tag <ulpIoTag> completes */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"0139 Ignoring ELS cmd code x%x completion Data: "
+			"0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
 			"x%x x%x x%x x%px\n",
-			ulp_command, ulp_status, ulp_word4, iotag,
-			cmdiocb->ndlp);
+			ulp_command, kref_read(&cmdiocb->ndlp->kref),
+			ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
 	/*
 	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
 	 * if exchange is busy.
@@ -12460,7 +12461,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		}
 	}
 
-	if (phba->link_state < LPFC_LINK_UP ||
+	/* Just close the exchange under certain conditions. */
+	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+	    phba->link_state < LPFC_LINK_UP ||
 	    (phba->sli_rev == LPFC_SLI_REV4 &&
 	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
 	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
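lpfc_prep_embed_io() now types the SGE as explicitly little-endian (struct sli4_sge_le) and converts each field exactly once with le32_to_cpu() at the point of use, replacing the old swap-in-place dance on word2; the embedded FCP_CMND payload also moves from WQE word 22 (byte 88) up to word 18 (byte 72), which appears to make room for the longer fcp_cmnd32 within the 128-byte WQE. A sketch of the LE-typed-descriptor pattern (demo struct, not the driver's SGE):

#include <linux/types.h>
#include <asm/byteorder.h>

/* A descriptor the hardware reads in little-endian byte order.
 * Typing the fields __le32 makes every access site state which way
 * the conversion goes, instead of the fragile in-place pattern:
 *	word2 = le32_to_cpu(word2); ...modify...; word2 = cpu_to_le32(word2);
 */
struct demo_sge_le {
	__le32 addr_hi;
	__le32 addr_lo;
	__le32 word2;
	__le32 sge_len;
};

static u64 demo_sge_dma_addr(const struct demo_sge_le *sge)
{
	/* Convert once, where the value is consumed. */
	return ((u64)le32_to_cpu(sge->addr_hi) << 32) |
	       le32_to_cpu(sge->addr_lo);
}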
@@ -12507,10 +12510,10 @@ abort_iotag_exit:
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 			 "0339 Abort IO XRI x%x, Original iotag x%x, "
 			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
-			 "retval x%x\n",
+			 "retval x%x : IA %d\n",
 			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
 			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
-			 retval);
+			 retval, ia);
 	if (retval) {
 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -12775,7 +12778,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
 	int i;
 
 	/* all I/Os are in process of being flushed */
-	if (phba->hba_flag & HBA_IOQ_FLUSH)
+	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
 		return errcnt;
 
 	for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -12845,15 +12848,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
 	bool ia;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-
 	/* all I/Os are in process of being flushed */
-	if (phba->hba_flag & HBA_IOQ_FLUSH) {
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
 		return 0;
-	}
+
 	sum = 0;
 
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	for (i = 1; i <= phba->sli.last_iotag; i++) {
 		iocbq = phba->sli.iocbq_lookup[i];
 
@@ -13385,7 +13386,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
 		if ((HS_FFER1 & phba->work_hs) &&
 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
-			phba->hba_flag |= DEFER_ERATT;
+			set_bit(DEFER_ERATT, &phba->hba_flag);
 			/* Clear all interrupt enable conditions */
 			writel(0, phba->HCregaddr);
 			readl(phba->HCregaddr);
@@ -13394,7 +13395,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
 		/* Set the driver HA work bitmap */
 		phba->work_ha |= HA_ERATT;
 		/* Indicate polling handles this ERATT */
-		phba->hba_flag |= HBA_ERATT_HANDLED;
+		set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 		return 1;
 	}
 	return 0;
@@ -13405,7 +13406,7 @@ unplug_err:
 	/* Set the driver HA work bitmap */
 	phba->work_ha |= HA_ERATT;
 	/* Indicate polling handles this ERATT */
-	phba->hba_flag |= HBA_ERATT_HANDLED;
+	set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 	return 1;
 }
 
@@ -13441,7 +13442,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 				&uerr_sta_hi)) {
 			phba->work_hs |= UNPLUG_ERR;
 			phba->work_ha |= HA_ERATT;
-			phba->hba_flag |= HBA_ERATT_HANDLED;
+			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 			return 1;
 		}
 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
@@ -13457,7 +13458,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 			phba->work_status[0] = uerr_sta_lo;
 			phba->work_status[1] = uerr_sta_hi;
 			phba->work_ha |= HA_ERATT;
-			phba->hba_flag |= HBA_ERATT_HANDLED;
+			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 			return 1;
 		}
 		break;
@@ -13469,7 +13470,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 				&portsmphr)){
 			phba->work_hs |= UNPLUG_ERR;
 			phba->work_ha |= HA_ERATT;
-			phba->hba_flag |= HBA_ERATT_HANDLED;
+			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 			return 1;
 		}
 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
@@ -13492,7 +13493,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 					phba->work_status[0],
 					phba->work_status[1]);
 			phba->work_ha |= HA_ERATT;
-			phba->hba_flag |= HBA_ERATT_HANDLED;
+			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
 			return 1;
 		}
 		break;
@@ -13529,22 +13530,18 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
 		return 0;
 
 	/* Check if interrupt handler handles this ERATT */
-	spin_lock_irq(&phba->hbalock);
-	if (phba->hba_flag & HBA_ERATT_HANDLED) {
+	if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
 		/* Interrupt handler has handled ERATT */
-		spin_unlock_irq(&phba->hbalock);
 		return 0;
-	}
 
 	/*
 	 * If there is deferred error attention, do not check for error
 	 * attention
 	 */
-	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-		spin_unlock_irq(&phba->hbalock);
+	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
 		return 0;
-	}
 
+	spin_lock_irq(&phba->hbalock);
 	/* If PCI channel is offline, don't process it */
 	if (unlikely(pci_channel_offline(phba->pcidev))) {
 		spin_unlock_irq(&phba->hbalock);
@@ -13666,19 +13663,17 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 			ha_copy &= ~HA_ERATT;
 		/* Check the need for handling ERATT in interrupt handler */
 		if (ha_copy & HA_ERATT) {
-			if (phba->hba_flag & HBA_ERATT_HANDLED)
+			if (test_and_set_bit(HBA_ERATT_HANDLED,
+					     &phba->hba_flag))
 				/* ERATT polling has handled ERATT */
 				ha_copy &= ~HA_ERATT;
-			else
-				/* Indicate interrupt handler handles ERATT */
-				phba->hba_flag |= HBA_ERATT_HANDLED;
 		}
 
 		/*
 		 * If there is deferred error attention, do not check for any
 		 * interrupt.
 		 */
-		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return IRQ_NONE;
 		}
@@ -13774,7 +13769,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				    ((HS_FFER2 | HS_FFER3 | HS_FFER4 |
 				      HS_FFER5 | HS_FFER6 | HS_FFER7 |
 				      HS_FFER8) & phba->work_hs)) {
-					phba->hba_flag |= DEFER_ERATT;
+					set_bit(DEFER_ERATT, &phba->hba_flag);
 					/* Clear all interrupt enable conditions */
 					writel(0, phba->HCregaddr);
 					readl(phba->HCregaddr);
@@ -13961,16 +13956,16 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
 		/* Need to read HA REG for FCP ring and other ring events */
 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
 			return IRQ_HANDLED;
-		/* Clear up only attention source related to fast-path */
-		spin_lock_irqsave(&phba->hbalock, iflag);
+
 		/*
 		 * If there is deferred error attention, do not check for
 		 * any interrupt.
 		 */
-		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflag);
+		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
 			return IRQ_NONE;
-		}
+
+		/* Clear up only attention source related to fast-path */
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
 			phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
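In the interrupt handlers above, a check-then-set pair on HBA_ERATT_HANDLED (formerly serialized by hbalock) becomes a single test_and_set_bit(): the read and the write are one atomic operation, so the error-attention poller and the interrupt handler can race on the bit and exactly one of them claims it. A sketch of the idiom (demo names):

#include <linux/bitops.h>

#define DEMO_ERATT_HANDLED 0

/* Claim responsibility for handling an error-attention event.
 * Returns true if this caller won the claim: test_and_set_bit() is an
 * atomic read-modify-write, so when poller and IRQ handler both run,
 * exactly one of them sees the bit as previously clear.
 */
static bool demo_claim_eratt(unsigned long *flags)
{
	return !test_and_set_bit(DEMO_ERATT_HANDLED, flags);
}

/* The old, lock-dependent shape this replaces:
 *	if (*flags & BIT(DEMO_ERATT_HANDLED))
 *		return false;
 *	*flags |= BIT(DEMO_ERATT_HANDLED);	// both racers can get here
 *	return true;
 */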
@@ -14053,18 +14048,15 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 			spin_unlock(&phba->hbalock);
 			return IRQ_NONE;
 		} else if (phba->ha_copy & HA_ERATT) {
-			if (phba->hba_flag & HBA_ERATT_HANDLED)
+			if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
 				/* ERATT polling has handled ERATT */
 				phba->ha_copy &= ~HA_ERATT;
-			else
-				/* Indicate interrupt handler handles ERATT */
-				phba->hba_flag |= HBA_ERATT_HANDLED;
 		}
 
 		/*
 		 * If there is deferred error attention, do not check for any interrupt.
 		 */
-		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
 			spin_unlock(&phba->hbalock);
 			return IRQ_NONE;
 		}
@@ -14135,9 +14127,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
 	unsigned long iflags;
 
 	/* First, declare the els xri abort event has been handled */
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
 
 	/* Now, handle all the els xri abort events */
 	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
@@ -14263,9 +14253,7 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
 
 	/* Set the async event flag */
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	phba->hba_flag |= ASYNC_EVENT;
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	set_bit(ASYNC_EVENT, &phba->hba_flag);
 
 	return true;
 }
@@ -14505,8 +14493,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	list_add_tail(&irspiocbq->cq_event.list,
 		      &phba->sli4_hba.sp_queue_event);
-	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 
 	return true;
 }
@@ -14580,7 +14568,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 		list_add_tail(&cq_event->list,
 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
 		/* Set the els xri abort event flag */
-		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+		set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
 		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
 				       iflags);
 		workposted = true;
@@ -14667,9 +14655,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		/* save off the frame for the work thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
 			      &phba->sli4_hba.sp_queue_event);
-		/* Frame received */
-		phba->hba_flag |= HBA_SP_QUEUE_EVT;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		/* Frame received */
+		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 		workposted = true;
 		break;
 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
@@ -14689,9 +14677,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
 		hrq->RQ_no_posted_buf++;
 		/* Post more buffers if possible */
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
 		workposted = true;
 		break;
 	case FC_STATUS_RQ_DMA_FAILURE:
@@ -19349,8 +19335,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	list_add_tail(&dmabuf->cq_event.list,
 		      &phba->sli4_hba.sp_queue_event);
-	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 	lpfc_worker_wake_up(phba);
 	return;
 }
@@ -20102,9 +20088,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 	mboxq->vport = phba->pport;
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
 
-	spin_lock_irq(&phba->hbalock);
-	phba->hba_flag |= FCF_TS_INPROG;
-	spin_unlock_irq(&phba->hbalock);
+	set_bit(FCF_TS_INPROG, &phba->hba_flag);
 
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
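A pattern worth noting in the slow-path hunks above: the event is still queued under hbalock, but HBA_SP_QUEUE_EVT is now raised after the unlock. The flag needs no lock for atomicity, and because the consumer clears the flag before draining the list, the worst case for a late set_bit() appears to be one spurious worker pass, never a lost event. A producer/consumer sketch of that ordering (demo names, simplified):

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#define DEMO_QUEUE_EVT 0

struct demo_hba {
	spinlock_t lock;
	struct list_head queue;
	unsigned long flags;
};

/* Producer: only the list needs the lock; the flag is atomic on its
 * own, so it is raised after the unlock (then the worker is woken).
 */
static void demo_post_event(struct demo_hba *hba, struct list_head *evt)
{
	unsigned long iflags;

	spin_lock_irqsave(&hba->lock, iflags);
	list_add_tail(evt, &hba->queue);
	spin_unlock_irqrestore(&hba->lock, iflags);
	set_bit(DEMO_QUEUE_EVT, &hba->flags);
}

/* Consumer: clearing the flag *before* draining means an event posted
 * concurrently either lands on the list being drained or re-raises
 * the flag for the next pass -- nothing is lost.
 */
static void demo_drain_events(struct demo_hba *hba)
{
	unsigned long iflags;

	clear_bit(DEMO_QUEUE_EVT, &hba->flags);
	spin_lock_irqsave(&hba->lock, iflags);
	while (!list_empty(&hba->queue)) {
		struct list_head *evt = hba->queue.next;

		list_del(evt);
		spin_unlock_irqrestore(&hba->lock, iflags);
		/* process evt here, outside the lock */
		spin_lock_irqsave(&hba->lock, iflags);
	}
	spin_unlock_irqrestore(&hba->lock, iflags);
}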
@@ -20120,9 +20104,7 @@ fail_fcf_scan:
 		if (mboxq)
 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		/* FCF scan failed, clear FCF_TS_INPROG flag */
-		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_TS_INPROG;
-		spin_unlock_irq(&phba->hbalock);
+		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
 	}
 	return error;
 }
@@ -20779,7 +20761,7 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
 
 			/* This HBA contains PORT_STE configured */
 			if (!rgn23_data[offset + 2])
-				phba->hba_flag |= LINK_DISABLED;
+				set_bit(LINK_DISABLED, &phba->hba_flag);
 
 			goto out;
 		}
@@ -22488,7 +22470,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 		}
 
 		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
-				sizeof(struct fcp_cmnd));
+				sizeof(struct fcp_cmnd32));
 
 		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
@@ -22593,12 +22575,13 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
 	u8 cmnd;
 	u32 *pcmd;
 	u32 if_type = 0;
-	u32 fip, abort_tag;
+	u32 abort_tag;
+	bool fip;
 	struct lpfc_nodelist *ndlp = NULL;
 	union lpfc_wqe128 *wqe = &job->wqe;
 	u8 command_type = ELS_COMMAND_NON_FIP;
 
-	fip = phba->hba_flag & HBA_FIP_SUPPORT;
+	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
 	/* The fcp commands will set command type */
 	if (job->cmd_flag & LPFC_IO_FCP)
 		command_type = FCP_COMMAND;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 915f2f11fb..f06087e478 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.1"
+#define LPFC_DRIVER_VERSION "14.4.0.2"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */