author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit     34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree       62db60558cbf089714b48daeabca82bf2b20b20e /drivers/scsi
parent     Adding debian version 6.8.12-1. (diff)
download   linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.tar.xz
           linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.zip
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 44
-rw-r--r--  drivers/scsi/3w-sas.c | 36
-rw-r--r--  drivers/scsi/3w-xxxx.c | 44
-rw-r--r--  drivers/scsi/53c700.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 14
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 6
-rw-r--r--  drivers/scsi/bfa/bfa.h | 21
-rw-r--r--  drivers/scsi/bfa/bfa_cs.h | 21
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 51
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h | 66
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h | 312
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_fcpim.c | 23
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 112
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c | 34
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 85
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 76
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 72
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 115
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 31
-rw-r--r--  drivers/scsi/ch.c | 47
-rw-r--r--  drivers/scsi/cxlflash/main.c | 17
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 49
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 84
-rw-r--r--  drivers/scsi/esp_scsi.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe_sysfs.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_attrs.c | 7
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 14
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 8
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 22
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 24
-rw-r--r--  drivers/scsi/isci/init.c | 2
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_encode.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 94
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 107
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 158
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 471
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 378
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 40
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 97
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 121
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 71
-rw-r--r--  drivers/scsi/mac53c94.c | 5
-rw-r--r--  drivers/scsi/megaraid.c | 2
-rw-r--r--  drivers/scsi/mesh.c | 7
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c | 62
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 118
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 11
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 58
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 24
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 6
-rw-r--r--  drivers/scsi/pmcraid.c | 20
-rw-r--r--  drivers/scsi/qedi/qedi_debugfs.c | 12
-rw-r--r--  drivers/scsi/qla1280.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 16
-rw-r--r--  drivers/scsi/scsi_debug.c | 297
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 6
-rw-r--r--  drivers/scsi/scsi_lib.c | 124
-rw-r--r--  drivers/scsi/scsi_lib_test.c | 330
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_proto_test.c | 56
-rw-r--r--  drivers/scsi/scsi_scan.c | 111
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 16
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 23
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 35
-rw-r--r--  drivers/scsi/sd.c | 358
-rw-r--r--  drivers/scsi/sd.h | 3
-rw-r--r--  drivers/scsi/ses.c | 66
-rw-r--r--  drivers/scsi/sg.c | 18
-rw-r--r--  drivers/scsi/sr.c | 38
-rw-r--r--  drivers/scsi/st.c | 4
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/sun_esp.c | 2
91 files changed, 3139 insertions, 2016 deletions
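
Most of the 3ware (3w-9xxx, 3w-sas, 3w-xxxx) and 53c700 hunks that follow apply one pattern: bounded snprintf() calls in sysfs show callbacks become sysfs_emit(). The snippet below is only a minimal sketch of that pattern, not code from any of these drivers; the example_* names and the count field are invented for illustration:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct example_host { int count; };   /* hypothetical per-device state */

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            struct example_host *h = dev_get_drvdata(dev);

            /* old style: return snprintf(buf, PAGE_SIZE, "%d\n", h->count); */
            /* sysfs_emit() already knows buf is a PAGE_SIZE sysfs page and  */
            /* clamps output to it, so the manual bound is dropped.          */
            return sysfs_emit(buf, "%d\n", h->count);
    }
    static DEVICE_ATTR_RO(example);
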
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index f925f8664c..6fb61c88ea 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -161,28 +161,28 @@ static ssize_t twa_show_stats(struct device *dev,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Current pending commands: %4d\n"
- "Max pending commands: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->pending_request_count,
- tw_dev->max_pending_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-9xxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End twa_show_stats() */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 9bdb75dfdc..caa6713a62 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -166,24 +166,24 @@ static ssize_t twl_show_stats(struct device *dev,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-sas Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End twl_show_stats() */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f39c9ec2e7..2c0fb6da0e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -496,28 +496,28 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
ssize_t len;
spin_lock_irqsave(tw_dev->host->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "3w-xxxx Driver version: %s\n"
- "Current commands posted: %4d\n"
- "Max commands posted: %4d\n"
- "Current pending commands: %4d\n"
- "Max pending commands: %4d\n"
- "Last sgl length: %4d\n"
- "Max sgl length: %4d\n"
- "Last sector count: %4d\n"
- "Max sector count: %4d\n"
- "SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
- TW_DRIVER_VERSION,
- tw_dev->posted_request_count,
- tw_dev->max_posted_request_count,
- tw_dev->pending_request_count,
- tw_dev->max_pending_request_count,
- tw_dev->sgl_entries,
- tw_dev->max_sgl_entries,
- tw_dev->sector_count,
- tw_dev->max_sector_count,
- tw_dev->num_resets,
- tw_dev->aen_count);
+ len = sysfs_emit(buf, "3w-xxxx Driver version: %s\n"
+ "Current commands posted: %4d\n"
+ "Max commands posted: %4d\n"
+ "Current pending commands: %4d\n"
+ "Max pending commands: %4d\n"
+ "Last sgl length: %4d\n"
+ "Max sgl length: %4d\n"
+ "Last sector count: %4d\n"
+ "Max sector count: %4d\n"
+ "SCSI Host Resets: %4d\n"
+ "AEN's: %4d\n",
+ TW_DRIVER_VERSION,
+ tw_dev->posted_request_count,
+ tw_dev->max_posted_request_count,
+ tw_dev->pending_request_count,
+ tw_dev->max_pending_request_count,
+ tw_dev->sgl_entries,
+ tw_dev->max_sgl_entries,
+ tw_dev->sector_count,
+ tw_dev->max_sector_count,
+ tw_dev->num_resets,
+ tw_dev->aen_count);
spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
return len;
} /* End tw_show_stats() */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 857be0f3ae..85439e9761 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -2071,7 +2071,7 @@ NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char
{
struct scsi_device *SDp = to_scsi_device(dev);
- return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
+ return sysfs_emit(buf, "%d\n", NCR_700_get_depth(SDp));
}
static struct device_attribute NCR_700_active_tags_attr = {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9ce2709272..634f2f501c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -67,6 +67,15 @@ config SCSI_PROC_FS
If unsure say Y.
+config SCSI_LIB_KUNIT_TEST
+ tristate "KUnit tests for SCSI Mid Layer's scsi_lib" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Run SCSI Mid Layer's KUnit tests for scsi_lib.
+
+ If unsure say N.
+
comment "SCSI support type (disk, tape, CD-ROM)"
depends on SCSI
@@ -232,6 +241,11 @@ config SCSI_SCAN_ASYNC
Note that this setting also affects whether resuming from
system suspend will be performed asynchronously.
+config SCSI_PROTO_TEST
+ tristate "scsi_proto.h unit tests" if !KUNIT_ALL_TESTS
+ depends on SCSI && KUNIT
+ default KUNIT_ALL_TESTS
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f055bfd54a..1313ddf2fd 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+obj-$(CONFIG_SCSI_PROTO_TEST) += scsi_proto_test.o
+
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 70e1cac197..b22857c6f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1099,7 +1099,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
sp[0] = INQD_PDT_DA;
sp[1] = scsicmd->cmnd[2];
sp[2] = 0;
- sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ sp[3] = scnprintf(sp+4, sizeof(sp)-4, "%08X",
le32_to_cpu(get_serial_reply->uid));
scsi_sg_copy_from_buffer(scsicmd, sp,
sizeof(sp));
@@ -1169,8 +1169,8 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
/*
* This breaks array migration.
*/
- return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
- le32_to_cpu(dev->adapter_info.serial[0]), cid);
+ return scnprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+ le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index f30fe324e6..4cb9249e58 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -215,8 +215,27 @@ struct bfa_faa_args_s {
bfa_boolean_t busy;
};
+/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+ IOCFC_E_INIT = 1, /* IOCFC init request */
+ IOCFC_E_START = 2, /* IOCFC mod start request */
+ IOCFC_E_STOP = 3, /* IOCFC stop request */
+ IOCFC_E_ENABLE = 4, /* IOCFC enable request */
+ IOCFC_E_DISABLE = 5, /* IOCFC disable request */
+ IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
+ IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
+ IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
+ IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
+ IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
+};
+
+struct bfa_iocfc_s;
+typedef void (*bfa_iocfs_fsm_t)(struct bfa_iocfc_s *, enum iocfc_event);
+
struct bfa_iocfc_s {
- bfa_fsm_t fsm;
+ bfa_iocfs_fsm_t fsm;
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 6b606bf589..6650b1dbb1 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -187,10 +187,10 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (_state))
#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm) ((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (_state))
/*
* For converting from state machine function to state encoding.
@@ -200,7 +200,7 @@ struct bfa_sm_table_s {
int state; /* state machine encoding */
char *name; /* state name for display */
};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM(_sm) (_sm)
/*
* State machine with entry actions.
@@ -218,24 +218,13 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ (_fsm)->fsm = (_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
-#define bfa_fsm_cmp_state(_fsm, _state) \
- ((_fsm)->fsm == (bfa_fsm_t)(_state))
-
-static inline int
-bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
-{
- int i = 0;
-
- while (smt[i].sm && smt[i].sm != sm)
- i++;
- return smt[i].state;
-}
+#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state))
/*
* @ Generic wait counter.
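
The bfa hunks in this header and the files that follow replace the generic bfa_sm_t / bfa_fsm_t handler pointers, and the casts they required, with per-object typedefs (bfa_iocfs_fsm_t, bfa_ioim_sm_t, and so on) so the compiler checks each state handler's signature. A minimal sketch of that pattern, using invented demo_* names rather than the driver's real types:

    enum demo_event { DEMO_E_START = 1, DEMO_E_STOP = 2 };

    struct demo_obj;                                    /* forward declaration */
    typedef void (*demo_sm_t)(struct demo_obj *, enum demo_event);

    struct demo_obj {
            demo_sm_t sm;                               /* was: bfa_sm_t sm;   */
    };

    static void demo_sm_stopped(struct demo_obj *obj, enum demo_event event);

    static void demo_sm_running(struct demo_obj *obj, enum demo_event event)
    {
            if (event == DEMO_E_STOP)
                    obj->sm = demo_sm_stopped;          /* no (bfa_sm_t) cast  */
    }

    static void demo_sm_stopped(struct demo_obj *obj, enum demo_event event)
    {
            if (event == DEMO_E_START)
                    obj->sm = demo_sm_running;
    }
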
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 7ad2228807..28ae4dc14d 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -65,21 +65,6 @@ enum bfa_ioim_lm_ua_status {
};
/*
- * itnim state machine event
- */
-enum bfa_itnim_event {
- BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
- BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
- BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
- BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
- BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
- BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
- BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
- BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
- BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
-};
-
-/*
* BFA IOIM related definitions
*/
#define bfa_ioim_move_to_comp_q(__ioim) do { \
@@ -98,30 +83,6 @@ enum bfa_itnim_event {
(__fcpim)->profile_start(__ioim); \
} while (0)
-/*
- * IO state machine events
- */
-enum bfa_ioim_event {
- BFA_IOIM_SM_START = 1, /* io start request from host */
- BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
- BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
- BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
- BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
- BFA_IOIM_SM_FREE = 6, /* io resource is freed */
- BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
- BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
- BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
- BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
- BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
- BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
- BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
- BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
- BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
- BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
- BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
- BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
-};
-
/*
* BFA TSKIM related definitions
@@ -141,18 +102,6 @@ enum bfa_ioim_event {
} while (0)
-enum bfa_tskim_event {
- BFA_TSKIM_SM_START = 1, /* TM command start */
- BFA_TSKIM_SM_DONE = 2, /* TM completion */
- BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
- BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
- BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
- BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
- BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
- BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
- BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
-};
-
/*
* forward declaration for BFA ITNIM functions
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 8bf0943354..4499f84c2d 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -155,11 +155,38 @@ struct bfa_fcp_mod_s {
};
/*
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+ BFA_IOIM_SM_START = 1, /* io start request from host */
+ BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
+ BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
+ BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
+ BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
+ BFA_IOIM_SM_FREE = 6, /* io resource is freed */
+ BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
+ BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
+ BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
+ BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
+ BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
+ BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
+ BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
+ BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
+ BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
+ BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
+ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
+ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+};
+
+struct bfa_ioim_s;
+typedef void (*bfa_ioim_sm_t)(struct bfa_ioim_s *, enum bfa_ioim_event);
+
+/*
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
struct list_head qe; /* queue elememt */
- bfa_sm_t sm; /* BFA ioim state machine */
+ bfa_ioim_sm_t sm; /* BFA ioim state machine */
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
@@ -186,12 +213,27 @@ struct bfa_ioim_sp_s {
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};
+enum bfa_tskim_event {
+ BFA_TSKIM_SM_START = 1, /* TM command start */
+ BFA_TSKIM_SM_DONE = 2, /* TM completion */
+ BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
+ BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
+ BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
+ BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
+ BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
+ BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
+ BFA_TSKIM_SM_UTAG = 10, /* TM completion unknown tag */
+};
+
+struct bfa_tskim_s;
+typedef void (*bfa_tskim_sm_t)(struct bfa_tskim_s *, enum bfa_tskim_event);
+
/*
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
struct list_head qe;
- bfa_sm_t sm;
+ bfa_tskim_sm_t sm;
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
@@ -209,11 +251,29 @@ struct bfa_tskim_s {
};
/*
+ * itnim state machine event
+ */
+enum bfa_itnim_event {
+ BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
+ BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
+ BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
+ BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
+ BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
+ BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
+ BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
+ BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
+ BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
+};
+
+struct bfa_itnim_s;
+typedef void (*bfa_itnim_sm_t)(struct bfa_itnim_s *, enum bfa_itnim_event);
+
+/*
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* i-t-n im BFA state machine */
+ bfa_itnim_sm_t sm; /* i-t-n im BFA state machine */
struct bfa_s *bfa; /* bfa instance */
struct bfa_rport_s *rport; /* bfa rport */
void *ditn; /* driver i-t-n structure */
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index c1baf5cd0d..9788354b90 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -20,22 +20,6 @@
#define BFA_FCS_OS_STR_LEN 64
/*
- * lps_pvt BFA LPS private functions
- */
-
-enum bfa_lps_event {
- BFA_LPS_SM_LOGIN = 1, /* login request from user */
- BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
- BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
- BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
- BFA_LPS_SM_DELETE = 5, /* lps delete from user */
- BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
- BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
- BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
-};
-
-
-/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
@@ -59,8 +43,30 @@ struct bfa_fcs_s;
#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
#define BFA_FCS_MAX_RPORT_LOGINS 1024
+/*
+ * VPort NS State Machine events
+ */
+enum vport_ns_event {
+ NSSM_EVENT_PORT_ONLINE = 1,
+ NSSM_EVENT_PORT_OFFLINE = 2,
+ NSSM_EVENT_PLOGI_SENT = 3,
+ NSSM_EVENT_RSP_OK = 4,
+ NSSM_EVENT_RSP_ERROR = 5,
+ NSSM_EVENT_TIMEOUT = 6,
+ NSSM_EVENT_NS_QUERY = 7,
+ NSSM_EVENT_RSPNID_SENT = 8,
+ NSSM_EVENT_RFTID_SENT = 9,
+ NSSM_EVENT_RFFID_SENT = 10,
+ NSSM_EVENT_GIDFT_SENT = 11,
+ NSSM_EVENT_RNNID_SENT = 12,
+ NSSM_EVENT_RSNN_NN_SENT = 13,
+};
+
+struct bfa_fcs_lport_ns_s;
+typedef void (*bfa_fcs_lport_ns_sm_t)(struct bfa_fcs_lport_ns_s *fsm, enum vport_ns_event);
+
struct bfa_fcs_lport_ns_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_ns_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -69,9 +75,23 @@ struct bfa_fcs_lport_ns_s {
u8 num_rsnn_nn_retries;
};
+/*
+ * VPort SCN State Machine events
+ */
+enum port_scn_event {
+ SCNSM_EVENT_PORT_ONLINE = 1,
+ SCNSM_EVENT_PORT_OFFLINE = 2,
+ SCNSM_EVENT_RSP_OK = 3,
+ SCNSM_EVENT_RSP_ERROR = 4,
+ SCNSM_EVENT_TIMEOUT = 5,
+ SCNSM_EVENT_SCR_SENT = 6,
+};
+
+struct bfa_fcs_lport_scn_s;
+typedef void (*bfa_fcs_lport_scn_sm_t)(struct bfa_fcs_lport_scn_s *fsm, enum port_scn_event);
struct bfa_fcs_lport_scn_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_scn_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -79,8 +99,25 @@ struct bfa_fcs_lport_scn_s {
};
+/*
+ * FDMI State Machine events
+ */
+enum port_fdmi_event {
+ FDMISM_EVENT_PORT_ONLINE = 1,
+ FDMISM_EVENT_PORT_OFFLINE = 2,
+ FDMISM_EVENT_RSP_OK = 4,
+ FDMISM_EVENT_RSP_ERROR = 5,
+ FDMISM_EVENT_TIMEOUT = 6,
+ FDMISM_EVENT_RHBA_SENT = 7,
+ FDMISM_EVENT_RPRT_SENT = 8,
+ FDMISM_EVENT_RPA_SENT = 9,
+};
+
+struct bfa_fcs_lport_fdmi_s;
+typedef void (*bfa_fcs_lport_fdmi_sm_t)(struct bfa_fcs_lport_fdmi_s *fsm, enum port_fdmi_event);
+
struct bfa_fcs_lport_fdmi_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_fdmi_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_ms_s *ms; /* parent ms */
struct bfa_fcxp_s *fcxp;
@@ -88,10 +125,24 @@ struct bfa_fcs_lport_fdmi_s {
u8 retry_cnt; /* retry count */
u8 rsvd[3];
};
+/*
+ * MS State Machine events
+ */
+enum port_ms_event {
+ MSSM_EVENT_PORT_ONLINE = 1,
+ MSSM_EVENT_PORT_OFFLINE = 2,
+ MSSM_EVENT_RSP_OK = 3,
+ MSSM_EVENT_RSP_ERROR = 4,
+ MSSM_EVENT_TIMEOUT = 5,
+ MSSM_EVENT_FCXP_SENT = 6,
+ MSSM_EVENT_PORT_FABRIC_RSCN = 7
+};
+struct bfa_fcs_lport_ms_s;
+typedef void (*bfa_fcs_lport_ms_sm_t)(struct bfa_fcs_lport_ms_s *fsm, enum port_ms_event);
struct bfa_fcs_lport_ms_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_ms_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
@@ -131,10 +182,25 @@ union bfa_fcs_lport_topo_u {
struct bfa_fcs_lport_n2n_s pn2n;
};
+/*
+ * fcs_port_sm FCS logical port state machine
+ */
+
+enum bfa_fcs_lport_event {
+ BFA_FCS_PORT_SM_CREATE = 1,
+ BFA_FCS_PORT_SM_ONLINE = 2,
+ BFA_FCS_PORT_SM_OFFLINE = 3,
+ BFA_FCS_PORT_SM_DELETE = 4,
+ BFA_FCS_PORT_SM_DELRPORT = 5,
+ BFA_FCS_PORT_SM_STOP = 6,
+};
+
+struct bfa_fcs_lport_s;
+typedef void (*bfa_fcs_lport_sm_t)(struct bfa_fcs_lport_s *fsm, enum bfa_fcs_lport_event);
struct bfa_fcs_lport_s {
struct list_head qe; /* used by port/vport */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_lport_sm_t sm; /* state machine */
struct bfa_fcs_fabric_s *fabric; /* parent fabric */
struct bfa_lport_cfg_s port_cfg; /* port configuration */
struct bfa_timer_s link_timer; /* timer for link offline */
@@ -171,10 +237,37 @@ enum bfa_fcs_fabric_type {
BFA_FCS_FABRIC_LOOP = 3,
};
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+ BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
+ BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
+ BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
+ BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
+ BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
+ BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
+ BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
+ BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
+ BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
+ BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
+ BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
+ BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
+ BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
+ BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
+ BFA_FCS_FABRIC_SM_START = 16, /* from driver */
+ BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
+ BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
+ BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
+};
+
+struct bfa_fcs_fabric_s;
+typedef void (*bfa_fcs_fabric_sm_t)(struct bfa_fcs_fabric_s *fsm, enum bfa_fcs_fabric_event);
struct bfa_fcs_fabric_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_fabric_sm_t sm; /* state machine */
struct bfa_fcs_s *fcs; /* FCS instance */
struct bfa_fcs_lport_s bport; /* base logical port */
enum bfa_fcs_fabric_type fab_type; /* fabric type */
@@ -344,9 +437,33 @@ void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_frame, u32 len);
void bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
+/*
+ * VPort State Machine events
+ */
+enum bfa_fcs_vport_event {
+ BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
+ BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
+ BFA_FCS_VPORT_SM_START = 3, /* vport start request */
+ BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
+ BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
+ BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
+ BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
+ BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
+ BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
+ BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
+ BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
+ BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
+ BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
+ BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
+ BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
+};
+
+struct bfa_fcs_vport_s;
+typedef void (*bfa_fcs_vport_sm_t)(struct bfa_fcs_vport_s *fsm, enum bfa_fcs_vport_event);
+
struct bfa_fcs_vport_s {
struct list_head qe; /* queue elem */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_vport_sm_t sm; /* state machine */
bfa_fcs_lport_t lport; /* logical port */
struct bfa_timer_s timer;
struct bfad_vport_s *vport_drv; /* Driver private */
@@ -397,9 +514,26 @@ struct bfa_fcs_itnim_s;
struct bfa_fcs_tin_s;
struct bfa_fcs_iprp_s;
+/*
+ * fcs_rport_ftrs_sm FCS rport state machine events
+ */
+
+enum rpf_event {
+ RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
+ RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
+ RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
+ RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
+ RPFSM_EVENT_RPSC_COMP = 5,
+ RPFSM_EVENT_RPSC_FAIL = 6,
+ RPFSM_EVENT_RPSC_ERROR = 7,
+};
+
+struct bfa_fcs_rpf_s;
+typedef void (*bfa_fcs_rpf_sm_t)(struct bfa_fcs_rpf_s *, enum rpf_event);
+
/* Rport Features (RPF) */
struct bfa_fcs_rpf_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_rpf_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent rport */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
@@ -414,6 +548,36 @@ struct bfa_fcs_rpf_s {
*/
};
+/*
+ * fcs_rport_sm FCS rport state machine events
+ */
+enum rport_event {
+ RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
+ RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
+ RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
+ RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
+ RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
+ RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
+ RPSM_EVENT_DELETE = 7, /* RPORT delete request */
+ RPSM_EVENT_FAB_SCN = 8, /* state change notification */
+ RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
+ RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
+ RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
+ RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
+ RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
+ RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
+ RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
+ RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
+ RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
+ RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
+ RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
+ RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
+ RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
+};
+
+struct bfa_fcs_rport_s;
+typedef void (*bfa_fcs_rport_sm_t)(struct bfa_fcs_rport_s *, enum rport_event);
+
struct bfa_fcs_rport_s {
struct list_head qe; /* used by port/vport */
struct bfa_fcs_lport_s *port; /* parent FCS port */
@@ -430,7 +594,7 @@ struct bfa_fcs_rport_s {
wwn_t pwwn; /* port wwn of rport */
wwn_t nwwn; /* node wwn of rport */
struct bfa_rport_symname_s psym_name; /* port symbolic name */
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_rport_sm_t sm; /* state machine */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
@@ -488,12 +652,34 @@ void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+ BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
+ BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
+ BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
+ BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
+ BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
+ BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
+ BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
+ BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
+ BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
+ BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+ BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
+};
+
+struct bfa_fcs_itnim_s;
+typedef void (*bfa_fcs_itnim_sm_t)(struct bfa_fcs_itnim_s *, enum bfa_fcs_itnim_event);
+
+/*
* forward declarations
*/
struct bfad_itnim_s;
struct bfa_fcs_itnim_s {
- bfa_sm_t sm; /* state machine */
+ bfa_fcs_itnim_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent remote rport */
struct bfad_itnim_s *itnim_drv; /* driver peer instance */
struct bfa_fcs_s *fcs; /* fcs instance */
@@ -703,78 +889,6 @@ struct bfa_fcs_s {
*/
/*
- * Fabric state machine events
- */
-enum bfa_fcs_fabric_event {
- BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */
- BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */
- BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */
- BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */
- BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */
- BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */
- BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */
- BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */
- BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */
- BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */
- BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */
- BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */
- BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */
- BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
- BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
- BFA_FCS_FABRIC_SM_START = 16, /* from driver */
- BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
- BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
- BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
-};
-
-/*
- * fcs_rport_sm FCS rport state machine events
- */
-
-enum rport_event {
- RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */
- RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */
- RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */
- RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */
- RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */
- RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */
- RPSM_EVENT_DELETE = 7, /* RPORT delete request */
- RPSM_EVENT_FAB_SCN = 8, /* state change notification */
- RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */
- RPSM_EVENT_FAILED = 10, /* Request to rport failed. */
- RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */
- RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */
- RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */
- RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */
- RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */
- RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
- RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
- RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
- RPSM_EVENT_SCN_OFFLINE = 19, /* loop scn offline */
- RPSM_EVENT_SCN_ONLINE = 20, /* loop scn online */
- RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
-};
-
-/*
- * fcs_itnim_sm FCS itnim state machine events
- */
-enum bfa_fcs_itnim_event {
- BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
- BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
- BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
- BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
- BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
- BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
- BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
- BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
- BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
- BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
- BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
- BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
- BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
-};
-
-/*
* bfa fcs API functions
*/
void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
@@ -831,9 +945,7 @@ void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
*/
struct bfad_port_s;
-struct bfad_vf_s;
struct bfad_vport_s;
-struct bfad_rport_s;
/*
* lport callbacks
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index c7de62baee..40e65ab285 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -16,6 +16,7 @@
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfad_im.h"
+#include "bfa_fcpim.h"
BFA_TRC_FILE(FCS, FCPIM);
@@ -52,7 +53,23 @@ static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
-static struct bfa_sm_table_s itnim_sm_table[] = {
+struct bfa_fcs_itnim_sm_table_s {
+ bfa_fcs_itnim_sm_t sm; /* state machine function */
+ enum bfa_itnim_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_itnim_state
+bfa_fcs_itnim_sm_to_state(struct bfa_fcs_itnim_sm_table_s *smt, bfa_fcs_itnim_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_itnim_sm_table_s itnim_sm_table[] = {
{BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
{BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
{BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
@@ -665,7 +682,7 @@ bfa_status_t
bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
{
bfa_trc(itnim->fcs, itnim->rport->pid);
- switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
+ switch (bfa_fcs_itnim_sm_to_state(itnim_sm_table, itnim->sm)) {
case BFA_ITNIM_ONLINE:
case BFA_ITNIM_INITIATIOR:
return BFA_STATUS_OK;
@@ -765,7 +782,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
if (itnim == NULL)
return BFA_STATUS_NO_FCPIM_NEXUS;
- attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
+ attr->state = bfa_fcs_itnim_sm_to_state(itnim_sm_table, itnim->sm);
attr->retry = itnim->seq_rec;
attr->rec_support = itnim->rec_support;
attr->conf_comp = itnim->conf_comp;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 008afd8170..966bf6cc6d 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -103,19 +103,6 @@ static struct {
},
};
-/*
- * fcs_port_sm FCS logical port state machine
- */
-
-enum bfa_fcs_lport_event {
- BFA_FCS_PORT_SM_CREATE = 1,
- BFA_FCS_PORT_SM_ONLINE = 2,
- BFA_FCS_PORT_SM_OFFLINE = 3,
- BFA_FCS_PORT_SM_DELETE = 4,
- BFA_FCS_PORT_SM_DELRPORT = 5,
- BFA_FCS_PORT_SM_STOP = 6,
-};
-
static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
@@ -1426,20 +1413,6 @@ u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
* fcs_fdmi_sm FCS FDMI state machine
*/
-/*
- * FDMI State Machine events
- */
-enum port_fdmi_event {
- FDMISM_EVENT_PORT_ONLINE = 1,
- FDMISM_EVENT_PORT_OFFLINE = 2,
- FDMISM_EVENT_RSP_OK = 4,
- FDMISM_EVENT_RSP_ERROR = 5,
- FDMISM_EVENT_TIMEOUT = 6,
- FDMISM_EVENT_RHBA_SENT = 7,
- FDMISM_EVENT_RPRT_SENT = 8,
- FDMISM_EVENT_RPA_SENT = 9,
-};
-
static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_sending_rhba(
@@ -2863,19 +2836,6 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
* fcs_ms_sm FCS MS state machine
*/
-/*
- * MS State Machine events
- */
-enum port_ms_event {
- MSSM_EVENT_PORT_ONLINE = 1,
- MSSM_EVENT_PORT_OFFLINE = 2,
- MSSM_EVENT_RSP_OK = 3,
- MSSM_EVENT_RSP_ERROR = 4,
- MSSM_EVENT_TIMEOUT = 5,
- MSSM_EVENT_FCXP_SENT = 6,
- MSSM_EVENT_PORT_FABRIC_RSCN = 7
-};
-
static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
@@ -3644,25 +3604,6 @@ static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
* fcs_ns_sm FCS nameserver interface state machine
*/
-/*
- * VPort NS State Machine events
- */
-enum vport_ns_event {
- NSSM_EVENT_PORT_ONLINE = 1,
- NSSM_EVENT_PORT_OFFLINE = 2,
- NSSM_EVENT_PLOGI_SENT = 3,
- NSSM_EVENT_RSP_OK = 4,
- NSSM_EVENT_RSP_ERROR = 5,
- NSSM_EVENT_TIMEOUT = 6,
- NSSM_EVENT_NS_QUERY = 7,
- NSSM_EVENT_RSPNID_SENT = 8,
- NSSM_EVENT_RFTID_SENT = 9,
- NSSM_EVENT_RFFID_SENT = 10,
- NSSM_EVENT_GIDFT_SENT = 11,
- NSSM_EVENT_RNNID_SENT = 12,
- NSSM_EVENT_RSNN_NN_SENT = 13,
-};
-
static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
@@ -5239,18 +5180,6 @@ static void bfa_fcs_lport_scn_timeout(void *arg);
* fcs_scm_sm FCS SCN state machine
*/
-/*
- * VPort SCN State Machine events
- */
-enum port_scn_event {
- SCNSM_EVENT_PORT_ONLINE = 1,
- SCNSM_EVENT_PORT_OFFLINE = 2,
- SCNSM_EVENT_RSP_OK = 3,
- SCNSM_EVENT_RSP_ERROR = 4,
- SCNSM_EVENT_TIMEOUT = 5,
- SCNSM_EVENT_SCR_SENT = 6,
-};
-
static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_sending_scr(
@@ -5989,27 +5918,6 @@ static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
* fcs_vport_sm FCS virtual port state machine
*/
-/*
- * VPort State Machine events
- */
-enum bfa_fcs_vport_event {
- BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
- BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
- BFA_FCS_VPORT_SM_START = 3, /* vport start request */
- BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
- BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
- BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
- BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
- BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
- BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
- BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
- BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
- BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/
- BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
- BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */
- BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
-};
-
static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
@@ -6037,7 +5945,23 @@ static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
-static struct bfa_sm_table_s vport_sm_table[] = {
+struct bfa_fcs_vport_sm_table_s {
+ bfa_fcs_vport_sm_t sm; /* state machine function */
+ enum bfa_vport_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_vport_state
+bfa_vport_sm_to_state(struct bfa_fcs_vport_sm_table_s *smt, bfa_fcs_vport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_vport_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
@@ -6864,7 +6788,7 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
memset(attr, 0, sizeof(struct bfa_vport_attr_s));
bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
- attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
+ attr->vport_state = bfa_vport_sm_to_state(vport_sm_table, vport->sm);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index c21aa37b8a..ce52a9c88a 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -136,7 +136,23 @@ static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
-static struct bfa_sm_table_s rport_sm_table[] = {
+struct bfa_fcs_rport_sm_table_s {
+ bfa_fcs_rport_sm_t sm; /* state machine function */
+ enum bfa_rport_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_rport_state
+bfa_rport_sm_to_state(struct bfa_fcs_rport_sm_table_s *smt, bfa_fcs_rport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcs_rport_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
@@ -2964,7 +2980,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
int
bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
{
- return bfa_sm_to_state(rport_sm_table, rport->sm);
+ return bfa_rport_sm_to_state(rport_sm_table, rport->sm);
}
@@ -3107,20 +3123,6 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
static void bfa_fcs_rpf_timeout(void *arg);
-/*
- * fcs_rport_ftrs_sm FCS rport state machine events
- */
-
-enum rpf_event {
- RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
- RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
- RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
- RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
- RPFSM_EVENT_RPSC_COMP = 5,
- RPFSM_EVENT_RPSC_FAIL = 6,
- RPFSM_EVENT_RPSC_ERROR = 7,
-};
-
static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index e1ed1424fd..ea2f107f56 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -114,21 +114,6 @@ static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
/*
* IOC state machine definitions/declarations
*/
-enum ioc_event {
- IOC_E_RESET = 1, /* IOC reset request */
- IOC_E_ENABLE = 2, /* IOC enable request */
- IOC_E_DISABLE = 3, /* IOC disable request */
- IOC_E_DETACH = 4, /* driver detach cleanup */
- IOC_E_ENABLED = 5, /* f/w enabled */
- IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
- IOC_E_DISABLED = 7, /* f/w disabled */
- IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
- IOC_E_HBFAIL = 9, /* heartbeat failure */
- IOC_E_HWERROR = 10, /* hardware error interrupt */
- IOC_E_TIMEOUT = 11, /* timeout */
- IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
-};
-
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
@@ -140,7 +125,13 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-static struct bfa_sm_table_s ioc_sm_table[] = {
+struct bfa_ioc_sm_table {
+ bfa_ioc_sm_t sm; /* state machine function */
+ enum bfa_ioc_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static struct bfa_ioc_sm_table ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
@@ -153,6 +144,16 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
+static inline enum bfa_ioc_state
+bfa_ioc_sm_to_state(struct bfa_ioc_sm_table *smt, bfa_ioc_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
/*
* IOCPF state machine definitions/declarations
*/
@@ -179,24 +180,6 @@ static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
- * IOCPF state machine events
- */
-enum iocpf_event {
- IOCPF_E_ENABLE = 1, /* IOCPF enable request */
- IOCPF_E_DISABLE = 2, /* IOCPF disable request */
- IOCPF_E_STOP = 3, /* stop on driver detach */
- IOCPF_E_FWREADY = 4, /* f/w initialization done */
- IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
- IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
- IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
- IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
- IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
- IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
- IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
- IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
-};
-
-/*
* IOCPF states
*/
enum bfa_iocpf_state {
@@ -228,7 +211,23 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
-static struct bfa_sm_table_s iocpf_sm_table[] = {
+struct bfa_iocpf_sm_table {
+ bfa_iocpf_sm_t sm; /* state machine function */
+ enum bfa_iocpf_state state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_iocpf_state
+bfa_iocpf_sm_to_state(struct bfa_iocpf_sm_table *smt, bfa_iocpf_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_iocpf_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
@@ -2815,12 +2814,12 @@ enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
enum bfa_iocpf_state iocpf_st;
- enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_ioc_state ioc_st = bfa_ioc_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = bfa_iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
@@ -5805,18 +5804,6 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
}
}
-/*
- * DCONF state machine events
- */
-enum bfa_dconf_event {
- BFA_DCONF_SM_INIT = 1, /* dconf Init */
- BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
- BFA_DCONF_SM_WR = 3, /* binding change, map */
- BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
- BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
- BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
-};
-
/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
enum bfa_dconf_event event);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 5e568d6d7b..3ec10503ca 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -260,6 +260,24 @@ struct bfa_ioc_cbfn_s {
/*
* IOC event notification mechanism.
*/
+enum ioc_event {
+ IOC_E_RESET = 1, /* IOC reset request */
+ IOC_E_ENABLE = 2, /* IOC enable request */
+ IOC_E_DISABLE = 3, /* IOC disable request */
+ IOC_E_DETACH = 4, /* driver detach cleanup */
+ IOC_E_ENABLED = 5, /* f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
+ IOC_E_DISABLED = 7, /* f/w disabled */
+ IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /* heartbeat failure */
+ IOC_E_HWERROR = 10, /* hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /* timeout */
+ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
+};
+
+struct bfa_ioc_s;
+typedef void (*bfa_ioc_sm_t)(struct bfa_ioc_s *fsm, enum ioc_event);
+
enum bfa_ioc_event_e {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -282,8 +300,29 @@ struct bfa_ioc_notify_s {
(__notify)->cbarg = (__cbarg); \
} while (0)
+/*
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /* IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /* IOCPF disable request */
+ IOCPF_E_STOP = 3, /* stop on driver detach */
+ IOCPF_E_FWREADY = 4, /* f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */
+ IOCPF_E_FAIL = 7, /* failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
+};
+
+struct bfa_iocpf_s;
+typedef void (*bfa_iocpf_sm_t)(struct bfa_iocpf_s *fsm, enum iocpf_event);
+
struct bfa_iocpf_s {
- bfa_fsm_t fsm;
+ bfa_iocpf_sm_t fsm;
struct bfa_ioc_s *ioc;
bfa_boolean_t fw_mismatch_notified;
bfa_boolean_t auto_recover;
@@ -291,7 +330,7 @@ struct bfa_iocpf_s {
};
struct bfa_ioc_s {
- bfa_fsm_t fsm;
+ bfa_ioc_sm_t fsm;
struct bfa_s *bfa;
struct bfa_pcidev_s pcidev;
struct bfa_timer_mod_s *timer_mod;
@@ -380,22 +419,6 @@ struct bfa_cb_qe_s {
};
/*
- * IOCFC state machine definitions/declarations
- */
-enum iocfc_event {
- IOCFC_E_INIT = 1, /* IOCFC init request */
- IOCFC_E_START = 2, /* IOCFC mod start request */
- IOCFC_E_STOP = 3, /* IOCFC stop request */
- IOCFC_E_ENABLE = 4, /* IOCFC enable request */
- IOCFC_E_DISABLE = 5, /* IOCFC disable request */
- IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
- IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
- IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
- IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
- IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
-};
-
-/*
* ASIC block configurtion related
*/
@@ -779,8 +802,23 @@ struct bfa_dconf_s {
};
#pragma pack()
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+ BFA_DCONF_SM_INIT = 1, /* dconf Init */
+ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
+ BFA_DCONF_SM_WR = 3, /* binding change, map */
+ BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
+ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
+ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
+};
+
+struct bfa_dconf_mod_s;
+typedef void (*bfa_dconf_sm_t)(struct bfa_dconf_mod_s *fsm, enum bfa_dconf_event);
+
struct bfa_dconf_mod_s {
- bfa_sm_t sm;
+ bfa_dconf_sm_t sm;
u8 instance;
bfa_boolean_t read_data_valid;
bfa_boolean_t min_cfg;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index c9745c0b4e..9f33aa303b 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -41,36 +41,6 @@ BFA_TRC_FILE(HAL, FCXP);
(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
- * BFA port state machine events
- */
-enum bfa_fcport_sm_event {
- BFA_FCPORT_SM_START = 1, /* start port state machine */
- BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
- BFA_FCPORT_SM_ENABLE = 3, /* enable port */
- BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
- BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
- BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
- BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
- BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
- BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
- BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
- BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
- BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */
- BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
- BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
-};
-
-/*
- * BFA port link notification state machine events
- */
-
-enum bfa_fcport_ln_sm_event {
- BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
- BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
- BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
-};
-
-/*
* RPORT related definitions
*/
#define bfa_rport_offline_cb(__rp) do { \
@@ -201,7 +171,23 @@ static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
enum bfa_fcport_ln_sm_event event);
-static struct bfa_sm_table_s hal_port_sm_table[] = {
+struct bfa_fcport_sm_table_s {
+ bfa_fcport_sm_t sm; /* state machine function */
+ enum bfa_port_states state; /* state machine encoding */
+ char *name; /* state name for display */
+};
+
+static inline enum bfa_port_states
+bfa_fcport_sm_to_state(struct bfa_fcport_sm_table_s *smt, bfa_fcport_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
+
+static struct bfa_fcport_sm_table_s hal_port_sm_table[] = {
{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
@@ -3545,7 +3531,7 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
fcport->event_arg.i2hmsg = i2hmsg;
bfa_trc(bfa, msg->mhdr.msg_id);
- bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
+ bfa_trc(bfa, bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm));
switch (msg->mhdr.msg_id) {
case BFI_FCPORT_I2H_ENABLE_RSP:
@@ -3980,7 +3966,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
- attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
+ attr->port_state = bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm);
attr->fec_state = fcport->fec_state;
@@ -4062,7 +4048,7 @@ bfa_fcport_is_disabled(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DISABLED;
}
@@ -4072,7 +4058,7 @@ bfa_fcport_is_dport(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DPORT);
}
@@ -4081,7 +4067,7 @@ bfa_fcport_is_ddport(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+ return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
BFA_PORT_ST_DDPORT);
}
@@ -5641,20 +5627,6 @@ enum bfa_dport_test_state_e {
BFA_DPORT_ST_NOTSTART = 4, /*!< test not start dport is enabled */
};
-/*
- * BFA DPORT state machine events
- */
-enum bfa_dport_sm_event {
- BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
- BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
- BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
- BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
- BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
- BFA_DPORT_SM_START = 6, /* re-start dport test */
- BFA_DPORT_SM_REQFAIL = 7, /* request failure */
- BFA_DPORT_SM_SCN = 8, /* state change notify from fw */
-};
-
static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
enum bfa_dport_sm_event event);
static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
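
In bfa_svc.c the generic bfa_sm_to_state()/bfa_sm_table_s pair is replaced by a driver-local struct bfa_fcport_sm_table_s plus bfa_fcport_sm_to_state(), which walks the table until it finds the entry whose handler matches the port's current sm pointer and returns that entry's state encoding (the final NULL entry acts as the fallback). A small, runnable sketch of the same lookup, with hypothetical port_* names:

    #include <stdio.h>

    struct port;
    enum port_state { PORT_ST_UNINIT, PORT_ST_ENABLED, PORT_ST_UNKNOWN };
    typedef void (*port_sm_t)(struct port *p, int event);

    static void port_sm_uninit(struct port *p, int event)  { (void)p; (void)event; }
    static void port_sm_enabled(struct port *p, int event) { (void)p; (void)event; }

    struct port_sm_table {
        port_sm_t sm;                       /* state handler */
        enum port_state state;              /* external encoding of that state */
    };

    static const struct port_sm_table port_sm_table[] = {
        { port_sm_uninit,  PORT_ST_UNINIT  },
        { port_sm_enabled, PORT_ST_ENABLED },
        { NULL,            PORT_ST_UNKNOWN },   /* sentinel / fallback entry */
    };

    static enum port_state port_sm_to_state(const struct port_sm_table *t, port_sm_t sm)
    {
        int i = 0;

        while (t[i].sm && t[i].sm != sm)
            i++;
        return t[i].state;                  /* sentinel's state if no match */
    }

    int main(void)
    {
        printf("%d\n", port_sm_to_state(port_sm_table, port_sm_enabled));
        return 0;
    }
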
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 9c83109574..26eeee82be 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -226,22 +226,6 @@ struct bfa_fcxp_wqe_s {
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-/*
- * RPORT related defines
- */
-enum bfa_rport_event {
- BFA_RPORT_SM_CREATE = 1, /* rport create event */
- BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
- BFA_RPORT_SM_ONLINE = 3, /* rport is online */
- BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
- BFA_RPORT_SM_FWRSP = 5, /* firmware response */
- BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
- BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
- BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
- BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
-};
-
#define BFA_RPORT_MIN 4
struct bfa_rport_mod_s {
@@ -285,11 +269,29 @@ struct bfa_rport_info_s {
};
/*
+ * RPORT related defines
+ */
+enum bfa_rport_event {
+ BFA_RPORT_SM_CREATE = 1, /* rport create event */
+ BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
+ BFA_RPORT_SM_ONLINE = 3, /* rport is online */
+ BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
+ BFA_RPORT_SM_FWRSP = 5, /* firmware response */
+ BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
+ BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
+ BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
+ BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
+};
+
+struct bfa_rport_s;
+typedef void (*bfa_rport_sm_t)(struct bfa_rport_s *, enum bfa_rport_event);
+
+/*
* BFA rport data structure
*/
struct bfa_rport_s {
struct list_head qe; /* queue element */
- bfa_sm_t sm; /* state machine */
+ bfa_rport_sm_t sm; /* state machine */
struct bfa_s *bfa; /* backpointer to BFA */
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
@@ -378,12 +380,30 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
/*
+ * lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+ BFA_LPS_SM_LOGIN = 1, /* login request from user */
+ BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
+ BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
+ BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
+ BFA_LPS_SM_DELETE = 5, /* lps delete from user */
+ BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
+ BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
+ BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */
+};
+
+struct bfa_lps_s;
+typedef void (*bfa_lps_sm_t)(struct bfa_lps_s *, enum bfa_lps_event);
+
+/*
* LPS - bfa lport login/logout service interface
*/
struct bfa_lps_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* parent bfa instance */
- bfa_sm_t sm; /* finite state machine */
+ bfa_lps_sm_t sm; /* finite state machine */
u8 bfa_tag; /* lport tag */
u8 fw_tag; /* lport fw tag */
u8 reqq; /* lport request queue */
@@ -440,11 +460,24 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
/*
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+ BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
+ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
+ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
+};
+
+struct bfa_fcport_ln_s;
+typedef void (*bfa_fcport_ln_sm_t)(struct bfa_fcport_ln_s *, enum bfa_fcport_ln_sm_event);
+
+/*
* Link notification data structure
*/
struct bfa_fcport_ln_s {
struct bfa_fcport_s *fcport;
- bfa_sm_t sm;
+ bfa_fcport_ln_sm_t sm;
struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
enum bfa_port_linkstate ln_event; /* ln event for callback */
};
@@ -454,11 +487,34 @@ struct bfa_fcport_trunk_s {
};
/*
+ * BFA port state machine events
+ */
+enum bfa_fcport_sm_event {
+ BFA_FCPORT_SM_START = 1, /* start port state machine */
+ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
+ BFA_FCPORT_SM_ENABLE = 3, /* enable port */
+ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
+ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
+ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
+ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
+ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
+ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+ BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
+ BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
+ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguration */
+ BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
+ BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
+};
+
+struct bfa_fcport_s;
+typedef void (*bfa_fcport_sm_t)(struct bfa_fcport_s *, enum bfa_fcport_sm_event);
+
+/*
* BFA FC port data structure
*/
struct bfa_fcport_s {
struct bfa_s *bfa; /* parent BFA instance */
- bfa_sm_t sm; /* port state machine */
+ bfa_fcport_sm_t sm; /* port state machine */
wwn_t nwwn; /* node wwn of physical port */
 wwn_t pwwn; /* port wwn of physical port */
enum bfa_port_speed speed_sup;
@@ -706,9 +762,26 @@ struct bfa_fcdiag_lb_s {
u32 status;
};
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+ BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
+ BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
+ BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
+ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
+ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
+ BFA_DPORT_SM_START = 6, /* re-start dport test */
+ BFA_DPORT_SM_REQFAIL = 7, /* request failure */
+ BFA_DPORT_SM_SCN = 8, /* state change notify from fw */
+};
+
+struct bfa_dport_s;
+typedef void (*bfa_dport_sm_t)(struct bfa_dport_s *, enum bfa_dport_sm_event);
+
struct bfa_dport_s {
struct bfa_s *bfa; /* Back pointer to BFA */
- bfa_sm_t sm; /* finite state machine */
+ bfa_dport_sm_t sm; /* finite state machine */
struct bfa_reqq_wait_s reqq_wait;
bfa_cb_diag_t cbfn;
void *cbarg;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7682cfa342..da42e32612 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -175,11 +175,27 @@ union bfad_tmp_buf {
wwn_t wwn[BFA_FCS_MAX_LPORTS];
};
+/* BFAD state machine events */
+enum bfad_sm_event {
+ BFAD_E_CREATE = 1,
+ BFAD_E_KTHREAD_CREATE_FAILED = 2,
+ BFAD_E_INIT = 3,
+ BFAD_E_INIT_SUCCESS = 4,
+ BFAD_E_HAL_INIT_FAILED = 5,
+ BFAD_E_INIT_FAILED = 6,
+ BFAD_E_FCS_EXIT_COMP = 7,
+ BFAD_E_EXIT_COMP = 8,
+ BFAD_E_STOP = 9
+};
+
+struct bfad_s;
+typedef void (*bfad_sm_t)(struct bfad_s *, enum bfad_sm_event);
+
/*
* BFAD (PCI function) data structure
*/
struct bfad_s {
- bfa_sm_t sm; /* state machine */
+ bfad_sm_t sm; /* state machine */
struct list_head list_entry;
struct bfa_s bfa;
struct bfa_fcs_s bfa_fcs;
@@ -226,19 +242,6 @@ struct bfad_s {
struct list_head vport_list;
};
-/* BFAD state machine events */
-enum bfad_sm_event {
- BFAD_E_CREATE = 1,
- BFAD_E_KTHREAD_CREATE_FAILED = 2,
- BFAD_E_INIT = 3,
- BFAD_E_INIT_SUCCESS = 4,
- BFAD_E_HAL_INIT_FAILED = 5,
- BFAD_E_INIT_FAILED = 6,
- BFAD_E_FCS_EXIT_COMP = 7,
- BFAD_E_EXIT_COMP = 8,
- BFAD_E_STOP = 9
-};
-
/*
* RPORT data structure
*/
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2b864061e0..fa07a6f540 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -102,7 +102,9 @@ do { \
#define MAX_RETRIES 1
-static struct class * ch_sysfs_class;
+static const struct class ch_sysfs_class = {
+ .name = "scsi_changer",
+};
typedef struct {
struct kref ref;
@@ -113,7 +115,6 @@ typedef struct {
struct scsi_device **dt; /* ptrs to data transfer elements */
u_int firsts[CH_TYPES];
u_int counts[CH_TYPES];
- u_int unit_attention;
u_int voltags;
struct mutex lock;
} scsi_changer;
@@ -186,17 +187,29 @@ static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
void *buffer, unsigned int buflength, enum req_op op)
{
- int errno, retries = 0, timeout, result;
+ int errno = 0, timeout, result;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 3,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
- retry:
- errno = 0;
result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
timeout * HZ, MAX_RETRIES, &exec_args);
if (result < 0)
@@ -205,14 +218,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
-
- switch(sshdr.sense_key) {
- case UNIT_ATTENTION:
- ch->unit_attention = 1;
- if (retries++ < 3)
- goto retry;
- break;
- }
}
return errno;
}
@@ -927,7 +932,7 @@ static int ch_probe(struct device *dev)
mutex_init(&ch->lock);
kref_init(&ch->ref);
ch->device = sd;
- class_dev = device_create(ch_sysfs_class, dev,
+ class_dev = device_create(&ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
if (IS_ERR(class_dev)) {
@@ -952,7 +957,7 @@ static int ch_probe(struct device *dev)
return 0;
destroy_dev:
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
put_device:
scsi_device_put(sd);
remove_idr:
@@ -971,7 +976,7 @@ static int ch_remove(struct device *dev)
dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
- device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
@@ -1000,11 +1005,9 @@ static int __init init_ch_module(void)
int rc;
printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
- ch_sysfs_class = class_create("scsi_changer");
- if (IS_ERR(ch_sysfs_class)) {
- rc = PTR_ERR(ch_sysfs_class);
+ rc = class_register(&ch_sysfs_class);
+ if (rc)
return rc;
- }
rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops);
if (rc < 0) {
printk("Unable to get major %d for SCSI-Changer\n",
@@ -1019,7 +1022,7 @@ static int __init init_ch_module(void)
fail2:
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
fail1:
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
return rc;
}
@@ -1027,7 +1030,7 @@ static void __exit exit_ch_module(void)
{
scsi_unregister_driver(&ch_template.gendrv);
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
- class_destroy(ch_sysfs_class);
+ class_unregister(&ch_sysfs_class);
idr_destroy(&ch_index_idr);
}
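
The ch changes above do two things: the class from class_create() becomes a const, statically defined struct class handled with class_register()/class_unregister(), and the hand-rolled UNIT ATTENTION retry loop in ch_do_scsi() becomes a declarative struct scsi_failures table passed to scsi_execute_cmd() via scsi_exec_args, with .allowed = 3 preserving the old retry cap (the apparently now-unused unit_attention counter is dropped). A hedged, kernel-style sketch of the retry-table pattern; my_tur(), its timeout and the exact include list are illustrative, not taken from the driver:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_proto.h>

    static int my_tur(struct scsi_device *sdev)     /* hypothetical helper */
    {
        unsigned char cmd[6] = { TEST_UNIT_READY };
        struct scsi_failure failure_defs[] = {
            {
                .sense = UNIT_ATTENTION,
                .asc = SCMD_FAILURE_ASC_ANY,        /* any additional sense code */
                .ascq = SCMD_FAILURE_ASCQ_ANY,
                .allowed = 3,                       /* same cap as the old goto-retry loop */
                .result = SAM_STAT_CHECK_CONDITION,
            },
            {}                                      /* terminator */
        };
        struct scsi_failures failures = {
            .failure_definitions = failure_defs,
        };
        const struct scsi_exec_args exec_args = {
            .failures = &failures,                  /* the midlayer performs the retries */
        };

        return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
                                10 * HZ, 1, &exec_args);
    }
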
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index debd369741..e8382cc5cf 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -28,7 +28,12 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
-static struct class *cxlflash_class;
+static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
+static const struct class cxlflash_class = {
+ .name = "cxlflash",
+ .devnode = cxlflash_devnode,
+};
+
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
@@ -3602,7 +3607,7 @@ static int init_chrdev(struct cxlflash_cfg *cfg)
goto err1;
}
- char_dev = device_create(cxlflash_class, NULL, devno,
+ char_dev = device_create(&cxlflash_class, NULL, devno,
NULL, "cxlflash%d", minor);
if (IS_ERR(char_dev)) {
rc = PTR_ERR(char_dev);
@@ -3880,14 +3885,12 @@ static int cxlflash_class_init(void)
cxlflash_major = MAJOR(devno);
- cxlflash_class = class_create("cxlflash");
- if (IS_ERR(cxlflash_class)) {
- rc = PTR_ERR(cxlflash_class);
+ rc = class_register(&cxlflash_class);
+ if (rc) {
pr_err("%s: class_create failed rc=%d\n", __func__, rc);
goto err;
}
- cxlflash_class->devnode = cxlflash_devnode;
out:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
@@ -3903,7 +3906,7 @@ static void cxlflash_class_exit(void)
{
dev_t devno = MKDEV(cxlflash_major, 0);
- class_destroy(cxlflash_class);
+ class_unregister(&cxlflash_class);
unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}
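
cxlflash gets the same class conversion: instead of patching cxlflash_class->devnode after class_create(), the class is a const static object whose .devnode is fixed at build time, and init/exit reduce to class_register()/class_unregister(). A hedged sketch of the pattern with a hypothetical "mydrv" class:

    #include <linux/device.h>
    #include <linux/device/class.h>

    static char *mydrv_devnode(const struct device *dev, umode_t *mode)
    {
        return NULL;                        /* keep the default /dev name and mode */
    }

    static const struct class mydrv_class = {
        .name    = "mydrv",
        .devnode = mydrv_devnode,
    };

    static int mydrv_class_init(void)
    {
        return class_register(&mydrv_class);    /* paired with class_unregister() on exit */
    }
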
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 944ea4e0cc..b6eaf49dfb 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -46,9 +46,6 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
int ret = SCSI_DH_IO;
switch (sshdr->sense_key) {
- case UNIT_ATTENTION:
- ret = SCSI_DH_IMM_RETRY;
- break;
case NOT_READY:
if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
/*
@@ -85,11 +82,24 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
int ret, res;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SCMD_FAILURE_NO_LIMIT,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
-retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
if (res > 0 && scsi_sense_valid(&sshdr)) {
@@ -104,9 +114,6 @@ retry:
ret = SCSI_DH_IO;
}
- if (ret == SCSI_DH_IMM_RETRY)
- goto retry;
-
return ret;
}
@@ -122,14 +129,31 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev = h->sdev;
int res, rc;
- int retry_cnt = HP_SW_RETRIES;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ .sense = NOT_READY,
+ .asc = 0x04,
+ .ascq = 0x03,
+ .allowed = HP_SW_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
-retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
if (!res) {
@@ -144,13 +168,6 @@ retry:
switch (sshdr.sense_key) {
case NOT_READY:
if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
- /*
- * LUN not ready - manual intervention required
- *
- * Switch-over in progress, retry.
- */
- if (--retry_cnt)
- goto retry;
rc = SCSI_DH_RETRY;
break;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1ac2ae17e8..f8a09e3eba 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -485,43 +485,17 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
static int mode_select_handle_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sense_hdr)
{
- int err = SCSI_DH_IO;
struct rdac_dh_data *h = sdev->handler_data;
if (!scsi_sense_valid(sense_hdr))
- goto done;
-
- switch (sense_hdr->sense_key) {
- case NO_SENSE:
- case ABORTED_COMMAND:
- case UNIT_ATTENTION:
- err = SCSI_DH_RETRY;
- break;
- case NOT_READY:
- if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
- /* LUN Not Ready and is in the Process of Becoming
- * Ready
- */
- err = SCSI_DH_RETRY;
- break;
- case ILLEGAL_REQUEST:
- if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
- /*
- * Command Lock contention
- */
- err = SCSI_DH_IMM_RETRY;
- break;
- default:
- break;
- }
+ return SCSI_DH_IO;
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"MODE_SELECT returned with sense %02x/%02x/%02x",
(char *) h->ctlr->array_name, h->ctlr->index,
sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
-done:
- return err;
+ return SCSI_DH_IO;
}
static void send_mode_select(struct work_struct *work)
@@ -530,7 +504,7 @@ static void send_mode_select(struct work_struct *work)
container_of(work, struct rdac_controller, ms_work);
struct scsi_device *sdev = ctlr->ms_sdev;
struct rdac_dh_data *h = sdev->handler_data;
- int rc, err, retry_cnt = RDAC_RETRY_COUNT;
+ int rc, err;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
unsigned char cdb[MAX_COMMAND_SIZE];
@@ -538,8 +512,49 @@ static void send_mode_select(struct work_struct *work)
unsigned int data_size;
blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = NO_SENSE,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ABORTED_COMMAND,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* LUN Not Ready and is in the Process of Becoming Ready */
+ {
+ .sense = NOT_READY,
+ .asc = 0x04,
+ .ascq = 0x01,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Command Lock contention */
+ {
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x91,
+ .ascq = 0x36,
+ .allowed = SCMD_FAILURE_NO_LIMIT,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = RDAC_RETRY_COUNT,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
spin_lock(&ctlr->ms_lock);
@@ -548,15 +563,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- retry:
memset(cdb, 0, sizeof(cdb));
data_size = rdac_failover_get(ctlr, &list, cdb);
- RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
- "%s MODE_SELECT command",
- (char *) h->ctlr->array_name, h->ctlr->index,
- (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, queueing MODE_SELECT command",
+ (char *)h->ctlr->array_name, h->ctlr->index);
rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
@@ -570,10 +582,6 @@ static void send_mode_select(struct work_struct *work)
err = SCSI_DH_IO;
} else {
err = mode_select_handle_sense(sdev, &sshdr);
- if (err == SCSI_DH_RETRY && retry_cnt--)
- goto retry;
- if (err == SCSI_DH_IMM_RETRY)
- goto retry;
}
list_for_each_entry_safe(qdata, tmp, &list, entry) {
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 97816a0e62..0175d2282b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2753,7 +2753,7 @@ static void __exit esp_exit(void)
}
MODULE_DESCRIPTION("ESP SCSI driver core");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index c64a085a7e..453665ac60 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -597,7 +597,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] = {
NULL,
};
-static struct bus_type fcoe_bus_type;
+static const struct bus_type fcoe_bus_type;
static int fcoe_bus_match(struct device *dev,
struct device_driver *drv)
@@ -664,7 +664,7 @@ static struct attribute *fcoe_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(fcoe_bus);
-static struct bus_type fcoe_bus_type = {
+static const struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
.bus_groups = fcoe_bus_groups,
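
fcoe_bus_type is only read by the driver core, so it can be const-qualified and live in read-only data; in this kernel the registration helpers already take a const pointer, so nothing else changes. A hedged sketch with a hypothetical "mybus" bus type:

    #include <linux/device.h>
    #include <linux/device/bus.h>

    static int mybus_match(struct device *dev, struct device_driver *drv)
    {
        return 1;                           /* illustrative: accept every pairing */
    }

    static const struct bus_type mybus_type = {
        .name  = "mybus",
        .match = mybus_match,
    };

    /* registered with bus_register(&mybus_type), torn down with bus_unregister() */
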
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index a61e0c5e65..0c5e57c7e3 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -14,13 +14,13 @@ static ssize_t fnic_show_state(struct device *dev,
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic = lport_priv(lp);
- return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
+ return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
static ssize_t fnic_show_drv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buf, "%s\n", DRV_VERSION);
}
static ssize_t fnic_show_link_state(struct device *dev,
@@ -28,8 +28,7 @@ static ssize_t fnic_show_link_state(struct device *dev,
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
- ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
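
The fnic attribute hunks swap snprintf(buf, PAGE_SIZE, ...) for sysfs_emit(), which already knows the sysfs buffer is a full page and adds its own sanity checks, so show() callbacks no longer spell out the bound. A hedged sketch of a show() callback using it (the my_state attribute is hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t my_state_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%s\n", "online");   /* bounded to the sysfs page */
    }
    static DEVICE_ATTR_RO(my_state);
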
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index fc4cee91b1..2ba61dba45 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1961,8 +1961,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "Issuing host reset due to out of order IO\n");
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Issuing host reset due to out of order IO\n");
ret = FAILED;
goto fnic_abort_cmd_end;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 05c38e43f1..35f8e00850 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1507,7 +1507,12 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
- del_timer_sync(&hisi_hba->timer);
+ /*
+ * hisi_hba->timer is only used by v1/v2 hw; check hw->sht, which is
+ * likewise only set for v1/v2 hw, so v3 hw skips this
+ */
+ if (hisi_hba->hw->sht)
+ del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@@ -1573,7 +1578,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
return -EPERM;
}
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ if (hisi_sas_debugfs_enable)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
return 0;
@@ -1961,7 +1966,7 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_internal_abort_data *timeout = data;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
+ if (hisi_sas_debugfs_enable) {
/*
* If timeout occurs in device gone scenario, to avoid
* circular dependency like:
@@ -2625,7 +2630,8 @@ static __exit void hisi_sas_exit(void)
{
sas_release_transport(hisi_sas_stt);
- debugfs_remove(hisi_sas_debugfs_dir);
+ if (hisi_sas_debugfs_enable)
+ debugfs_remove(hisi_sas_debugfs_dir);
}
module_init(hisi_sas_init);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 86112f2347..34f96cc353 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -4910,7 +4910,8 @@ err_out_unregister_ha:
err_out_remove_host:
scsi_remove_host(shost);
err_out_undo_debugfs:
- debugfs_exit_v3_hw(hisi_hba);
+ if (hisi_sas_debugfs_enable)
+ debugfs_exit_v3_hw(hisi_hba);
err_out_free_host:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
@@ -4942,7 +4943,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct Scsi_Host *shost = sha->shost;
pm_runtime_get_noresume(dev);
- del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
@@ -4950,7 +4950,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
hisi_sas_free(hisi_hba);
- debugfs_exit_v3_hw(hisi_hba);
+ if (hisi_sas_debugfs_enable)
+ debugfs_exit_v3_hw(hisi_hba);
+
scsi_host_put(shost);
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 445f4a220d..2d92549e52 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -372,7 +372,7 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
-static struct device_type scsi_host_type = {
+static const struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
};
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 46d0b3a0e1..05b126bfd1 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3482,8 +3482,7 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.partition_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name);
}
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
@@ -3492,8 +3491,7 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.device_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name);
}
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
@@ -3502,8 +3500,7 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.port_loc_code);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code);
}
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
@@ -3512,8 +3509,7 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.drc_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name);
}
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
@@ -3521,7 +3517,8 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
+ return sysfs_emit(buf, "%d\n",
+ be32_to_cpu(vhost->login_buf->resp.version));
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
@@ -3529,7 +3526,8 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
+ return sysfs_emit(buf, "%llx\n",
+ be64_to_cpu(vhost->login_buf->resp.capabilities));
}
/**
@@ -3550,7 +3548,7 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+ len = sysfs_emit(buf, "%d\n", vhost->log_level);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
@@ -3589,7 +3587,7 @@ static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
+ len = sysfs_emit(buf, "%d\n", scsi->desired_queues);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 4dc411a581..68b99924ee 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1551,18 +1551,18 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (vscsi->client_data.partition_number == 0)
vscsi->client_data.partition_number =
be32_to_cpu(info->partition_number);
- strncpy(vscsi->client_data.srp_version, info->srp_version,
+ strscpy(vscsi->client_data.srp_version, info->srp_version,
sizeof(vscsi->client_data.srp_version));
- strncpy(vscsi->client_data.partition_name, info->partition_name,
+ strscpy(vscsi->client_data.partition_name, info->partition_name,
sizeof(vscsi->client_data.partition_name));
vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
vscsi->client_data.os_type = be32_to_cpu(info->os_type);
/* Copy our info */
- strncpy(info->srp_version, SRP_VERSION,
- sizeof(info->srp_version));
- strncpy(info->partition_name, vscsi->dds.partition_name,
- sizeof(info->partition_name));
+ strscpy_pad(info->srp_version, SRP_VERSION,
+ sizeof(info->srp_version));
+ strscpy_pad(info->partition_name, vscsi->dds.partition_name,
+ sizeof(info->partition_name));
info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
info->mad_version = cpu_to_be32(MAD_VERSION_1);
info->os_type = cpu_to_be32(LINUX);
@@ -1645,8 +1645,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer),
vscsi->dds.window[LOCAL].liobn, token);
if (rc == H_SUCCESS) {
- strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
- SRP_MAX_LOC_LEN);
+ strscpy_pad(cap->name, dev_name(&vscsi->dma_dev->dev),
+ sizeof(cap->name));
len = olen - min_len;
status = VIOSRP_MAD_SUCCESS;
@@ -3616,13 +3616,13 @@ static void ibmvscsis_remove(struct vio_dev *vdev)
static ssize_t system_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+ return sysfs_emit(buf, "%s\n", system_id);
}
static ssize_t partition_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+ return sysfs_emit(buf, "%x\n", partition_number);
}
static ssize_t unit_address_show(struct device *dev,
@@ -3630,7 +3630,7 @@ static ssize_t unit_address_show(struct device *dev,
{
struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
- return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
+ return sysfs_emit(buf, "%x\n", vscsi->dma_dev->unit_address);
}
static int ibmvscsis_get_system_info(void)
@@ -3650,7 +3650,7 @@ static int ibmvscsis_get_system_info(void)
name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (name)
- strncpy(partition_name, name, sizeof(partition_name));
+ strscpy(partition_name, name, sizeof(partition_name));
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
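
The ibmvscsi_tgt hunks replace strncpy() with strscpy()/strscpy_pad(): strncpy neither guarantees NUL termination when the source fills the buffer nor reports truncation, while strscpy always terminates and returns an error on truncation, and strscpy_pad additionally zero-fills the rest of the destination (useful when the buffer is handed to the client as-is). A runnable userspace sketch of the semantics; my_strscpy() is a stand-in written for illustration, not the kernel implementation:

    #include <stdio.h>
    #include <string.h>

    /* Copy at most size-1 bytes, always NUL-terminate, return -1 on truncation
     * (the in-kernel strscpy() returns -E2BIG instead). */
    static long my_strscpy(char *dst, const char *src, size_t size)
    {
        size_t len = strnlen(src, size);

        if (len == size) {                  /* src does not fit */
            if (size == 0)
                return -1;
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return -1;
        }
        memcpy(dst, src, len + 1);          /* includes the terminating NUL */
        return (long)len;
    }

    int main(void)
    {
        char buf[8];

        printf("%ld \"%s\"\n", my_strscpy(buf, "short", sizeof(buf)), buf);
        printf("%ld \"%s\"\n", my_strscpy(buf, "0123456789", sizeof(buf)), buf);
        return 0;
    }
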
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 6277162a02..c582a3932c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -137,7 +137,7 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
- return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+ return sysfs_emit(buf, "%d\n", ihost->id);
}
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 494a671fb5..fb04b0b515 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -204,6 +204,6 @@ static struct platform_driver esp_jazz_driver = {
module_platform_driver(esp_jazz_driver);
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
-MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 7dcac3b6ba..6b7e4ca6b7 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -136,22 +136,24 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
break;
case FC_NS_RSPN_ID:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ len = strnlen(fc_host_symbolic_name(lport->host),
+ FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
- strncpy(ct->payload.spn.fr_name,
- fc_host_symbolic_name(lport->host), len);
+ memcpy(ct->payload.spn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
ct->payload.spn.fr_name_len = len;
break;
case FC_NS_RSNN_NN:
- len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ len = strnlen(fc_host_symbolic_name(lport->host),
+ FC_SYMBOLIC_NAME_SIZE);
ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
FC_FST_DIR, FC_NS_SUBTYPE);
put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
- strncpy(ct->payload.snn.fr_name,
- fc_host_symbolic_name(lport->host), len);
+ memcpy(ct->payload.snn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
ct->payload.snn.fr_name_len = len;
break;
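
In fc_encode.h the symbolic name goes into a counted CT payload field: the length travels in fr_name_len and the bytes need not be NUL-terminated, so strncpy() was the wrong tool (and trips -Wstringop-truncation). The new code bounds strnlen() by FC_SYMBOLIC_NAME_SIZE instead of a bare 255 and copies exactly len bytes with memcpy(). A runnable sketch of filling such a counted field, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    #define NAME_SIZE 16                    /* stand-in for FC_SYMBOLIC_NAME_SIZE */

    struct counted_name {
        unsigned char len;
        char          name[NAME_SIZE];      /* not NUL-terminated on the wire */
    };

    static void set_name(struct counted_name *rec, const char *src)
    {
        size_t len = strnlen(src, NAME_SIZE);   /* bound the scan, never overread */

        memcpy(rec->name, src, len);            /* copy exactly len bytes, no NUL */
        rec->len = (unsigned char)len;
    }

    int main(void)
    {
        struct counted_name rec = { 0 };

        set_name(&rec, "host.example");
        printf("len=%u name=%.*s\n", (unsigned)rec.len, (int)rec.len, rec.name);
        return 0;
    }
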
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9670cb2bf1..98ca7df003 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -535,6 +535,44 @@ struct lpfc_cgn_acqe_stat {
atomic64_t warn;
};
+enum lpfc_fc_flag {
+ /* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
+ */
+ FC_PT2PT, /* pt2pt with no fabric */
+ FC_PT2PT_PLOGI, /* pt2pt initiate PLOGI */
+ FC_DISC_TMO, /* Discovery timer running */
+ FC_PUBLIC_LOOP, /* Public loop */
+ FC_LBIT, /* LOGIN bit in loopinit set */
+ FC_RSCN_MODE, /* RSCN cmd rcv'ed */
+ FC_NLP_MORE, /* More node to process in node tbl */
+ FC_OFFLINE_MODE, /* Interface is offline for diag */
+ FC_FABRIC, /* We are fabric attached */
+ FC_VPORT_LOGO_RCVD, /* LOGO received on vport */
+ FC_RSCN_DISCOVERY, /* Auth all devices after RSCN */
+ FC_LOGO_RCVD_DID_CHNG, /* FDISC on phys port detect DID chng */
+ FC_PT2PT_NO_NVME, /* Don't send NVME PRLI */
+ FC_SCSI_SCAN_TMO, /* scsi scan timer running */
+ FC_ABORT_DISCOVERY, /* we want to abort discovery */
+ FC_NDISC_ACTIVE, /* NPort discovery active */
+ FC_BYPASSED_MODE, /* NPort is in bypassed mode */
+ FC_VPORT_NEEDS_REG_VPI, /* Needs to have its vpi registered */
+ FC_RSCN_DEFERRED, /* A deferred RSCN being processed */
+ FC_VPORT_NEEDS_INIT_VPI, /* Need to INIT_VPI before FDISC */
+ FC_VPORT_CVL_RCVD, /* VLink failed due to CVL */
+ FC_VFI_REGISTERED, /* VFI is registered */
+ FC_FDISC_COMPLETED, /* FDISC completed */
+ FC_DISC_DELAYED, /* Delay NPort discovery */
+};
+
+enum lpfc_load_flag {
+ FC_LOADING, /* HBA in process of loading drvr */
+ FC_UNLOADING, /* HBA in process of unloading drvr */
+ FC_ALLOW_FDMI, /* port is ready for FDMI requests */
+ FC_ALLOW_VMID, /* Allow VMID I/Os */
+ FC_DEREGISTER_ALL_APP_ID /* Deregister all VMIDs */
+};
+
struct lpfc_vport {
struct lpfc_hba *phba;
struct list_head listentry;
@@ -549,34 +587,7 @@ struct lpfc_vport {
uint8_t vpi_state;
#define LPFC_VPI_REGISTERED 0x1
- uint32_t fc_flag; /* FC flags */
-/* Several of these flags are HBA centric and should be moved to
- * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
- */
-#define FC_PT2PT 0x1 /* pt2pt with no fabric */
-#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
-#define FC_DISC_TMO 0x4 /* Discovery timer running */
-#define FC_PUBLIC_LOOP 0x8 /* Public loop */
-#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
-#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
-#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
-#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
-#define FC_FABRIC 0x100 /* We are fabric attached */
-#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
-#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
-#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
-#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
-#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
-#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
-#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
-#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
-#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
-#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
-#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
-#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
-#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
-#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
-#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
+ unsigned long fc_flag; /* FC flags */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -587,16 +598,18 @@ struct lpfc_vport {
#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
struct list_head fc_nodes;
+ spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
/* Keep counters for the number of entries in each list. */
- uint16_t fc_plogi_cnt;
- uint16_t fc_adisc_cnt;
- uint16_t fc_reglogin_cnt;
- uint16_t fc_prli_cnt;
- uint16_t fc_unmap_cnt;
- uint16_t fc_map_cnt;
- uint16_t fc_npr_cnt;
- uint16_t fc_unused_cnt;
+ atomic_t fc_plogi_cnt;
+ atomic_t fc_adisc_cnt;
+ atomic_t fc_reglogin_cnt;
+ atomic_t fc_prli_cnt;
+ atomic_t fc_unmap_cnt;
+ atomic_t fc_map_cnt;
+ atomic_t fc_npr_cnt;
+ atomic_t fc_unused_cnt;
+
struct serv_parm fc_sparam; /* buffer for our service parameters */
uint32_t fc_myDID; /* fibre channel S_ID */
@@ -642,12 +655,7 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- uint8_t load_flag;
-#define FC_LOADING 0x1 /* HBA in process of loading drvr */
-#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
-#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */
-#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */
-#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */
+ unsigned long load_flag;
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
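
The lpfc.h rework turns the fc_flag and load_flag bit masks into enum bit numbers kept in an unsigned long, so the following hunks can use set_bit()/clear_bit()/test_bit() for atomic flag updates without holding shost->host_lock (fc_flag consequently prints with %lx instead of %x); the per-state node counters become atomic_t and a new fc_nodes_list_lock protects the fc_nodes list. A hedged, kernel-style sketch of the flag pattern with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum my_fc_flag {
        MY_FC_FABRIC,                       /* bit numbers, not masks */
        MY_FC_OFFLINE_MODE,
    };

    struct my_vport {
        unsigned long fc_flag;              /* one bit per enum my_fc_flag value */
    };

    static bool my_vport_is_offline(struct my_vport *vp)
    {
        return test_bit(MY_FC_OFFLINE_MODE, &vp->fc_flag);
    }

    static void my_vport_go_offline(struct my_vport *vp)
    {
        set_bit(MY_FC_OFFLINE_MODE, &vp->fc_flag);  /* atomic read-modify-write */
        clear_bit(MY_FC_FABRIC, &vp->fc_flag);
    }
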
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 6f97a04171..3c534b3cfe 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,6 +344,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_fc4_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
+ unsigned long iflags;
char *statep;
int i;
int len = 0;
@@ -543,7 +544,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
@@ -617,7 +618,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto unlock_buf_done;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
if (!lport)
goto buffer_done;
@@ -681,7 +682,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
unlock_buf_done:
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -1091,14 +1092,14 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
break;
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- if (vport->fc_flag & FC_PUBLIC_LOOP)
+ if (test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
phba->sli4_hba.fawwpn_flag &
@@ -1260,7 +1261,8 @@ lpfc_num_discovered_ports_show(struct device *dev,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
return scnprintf(buf, PAGE_SIZE, "%d\n",
- vport->fc_map_cnt + vport->fc_unmap_cnt);
+ atomic_read(&vport->fc_map_cnt) +
+ atomic_read(&vport->fc_unmap_cnt));
}
/**
@@ -1289,7 +1291,7 @@ lpfc_issue_lip(struct Scsi_Host *shost)
* If the link is offline, disabled or BLOCK_MGMT_IO
* it doesn't make any sense to allow issue_lip
*/
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
(phba->hba_flag & LINK_DISABLED) ||
(phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
return -EPERM;
@@ -1303,8 +1305,8 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
- if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
- vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
+ clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
@@ -1494,7 +1496,8 @@ lpfc_reset_pci_bus(struct lpfc_hba *phba)
if (shost) {
phba_other =
((struct lpfc_vport *)shost->hostdata)->phba;
- if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE,
+ &phba_other->pport->fc_flag)) {
lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
"8349 WWPN = 0x%02x%02x%02x%02x"
"%02x%02x%02x%02x is not "
@@ -1549,7 +1552,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return -EACCES;
- if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag)) {
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@@ -1688,7 +1691,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
- uint32_t before_fc_flag;
+ unsigned long before_fc_flag;
uint32_t sriov_nr_virtfn;
uint32_t reg_val;
int status = 0, rc = 0;
@@ -1759,7 +1762,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &before_fc_flag)) {
if (phba->fw_dump_cmpl)
phba->fw_dump_cmpl = NULL;
goto out;
@@ -2097,7 +2100,7 @@ board_mode_out:
*board_mode_str = '\0';
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3097 Failed \"%s\", status(%d), "
- "fc_flag(x%x)\n",
+ "fc_flag(x%lx)\n",
buf, status, phba->pport->fc_flag);
return status;
}
@@ -2156,7 +2159,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
pmb->mbxOwner = OWN_HOST;
pmboxq->ctx_buf = NULL;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
rc = MBX_NOT_FINISHED;
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3764,15 +3767,14 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport = NULL;
#endif
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
@@ -3787,7 +3789,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
vport->cfg_devloss_tmo);
#endif
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
/**
@@ -3973,8 +3975,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
static int
lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
return -EINVAL;
@@ -3982,14 +3984,13 @@ lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
if (val == vport->cfg_tgt_queue_depth)
return 0;
- spin_lock_irq(shost->host_lock);
vport->cfg_tgt_queue_depth = val;
/* Next loop thru nodelist and change cmd_qdepth */
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
-
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
@@ -5235,8 +5236,8 @@ lpfc_vport_param_show(max_scsicmpl_time);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ unsigned long iflags;
if (val == vport->cfg_max_scsicmpl_time)
return 0;
@@ -5244,13 +5245,13 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
return -EINVAL;
vport->cfg_max_scsicmpl_time = val;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
@@ -6200,7 +6201,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
return -EINVAL;
- if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return -EPERM;
spin_lock_irq(&phba->hbalock);
@@ -6429,26 +6430,22 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
if (vport->port_type == LPFC_NPIV_PORT) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- if (vport->fc_flag & FC_PUBLIC_LOOP)
+ if (test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
} else {
- if (vport->fc_flag & FC_FABRIC)
+ if (test_bit(FC_FABRIC, &vport->fc_flag))
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
}
} else
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6461,9 +6458,7 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
switch (phba->link_state) {
@@ -6490,8 +6485,6 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
break;
}
}
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6504,8 +6497,6 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- spin_lock_irq(shost->host_lock);
-
if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
switch(phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
@@ -6568,8 +6559,6 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
}
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
-
- spin_unlock_irq(shost->host_lock);
}
/**
@@ -6583,19 +6572,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
struct lpfc_hba *phba = vport->phba;
u64 node_name;
- spin_lock_irq(shost->host_lock);
-
- if ((vport->port_state > LPFC_FLOGI) &&
- ((vport->fc_flag & FC_FABRIC) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (vport->fc_flag & FC_PUBLIC_LOOP))))
+ if (vport->port_state > LPFC_FLOGI &&
+ (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
node_name = 0;
- spin_unlock_irq(shost->host_lock);
-
fc_host_fabric_name(shost) = node_name;
}
@@ -6646,7 +6631,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6699,7 +6684,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if (vport->fc_flag & FC_OFFLINE_MODE) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6786,8 +6771,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
+ !(psli->sli_flag & LPFC_SLI_ACTIVE)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6808,8 +6793,8 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmboxq->ctx_buf = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
+ !(psli->sli_flag & LPFC_SLI_ACTIVE)) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -6868,17 +6853,19 @@ lpfc_get_node_by_target(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
return ndlp;
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index c305d16cfa..529df1768f 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1977,7 +1977,7 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
@@ -2513,7 +2513,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENOMEM;
}
- dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ dmabuff = mbox->ctx_buf;
mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
@@ -3376,7 +3376,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- dd_data = pmboxq->ctx_ndlp;
+ dd_data = pmboxq->ctx_u.dd_data;
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3448,7 +3448,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_RUN_DIAGS:
case MBX_RESTART:
case MBX_SET_MASK:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2743 Command 0x%x is illegal in on-line "
"state\n",
@@ -3553,7 +3553,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- dd_data = pmboxq->ctx_buf;
+ dd_data = pmboxq->ctx_u.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3940,7 +3940,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4112,7 +4112,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4460,7 +4460,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->ctx_buf = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4747,7 +4747,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
- pmboxq->ctx_buf = ext;
+ pmboxq->ext_buf = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
@@ -4875,7 +4875,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
- pmboxq->ctx_ndlp = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4886,7 +4886,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index baae1f8279..8cc08e58dc 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -265,7 +265,7 @@ ct_free_mp:
kfree(mp);
ct_exit:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "6440 Unsol CT: Rsp err %d Data: x%x\n",
+ "6440 Unsol CT: Rsp err %d Data: x%lx\n",
rc, vport->fc_flag);
}
@@ -298,7 +298,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
}
/* Ignore traffic received during vport shutdown */
- if (vport->fc_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp = lpfc_findnode_did(vport, did);
@@ -723,7 +723,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
if (ndlp) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Parse GID_FTrsp: did:x%x flg:x%x x%x",
+ "Parse GID_FTrsp: did:x%x flg:x%lx x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
/* By default, the driver expects to support FCP FC4 */
@@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
- "Data: x%x x%x x%x x%x x%x\n", Did,
+ "Data: x%x x%x x%x x%lx x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
ndlp->nlp_state, vport->fc_flag,
vport->fc_rscn_id_cnt);
@@ -751,20 +751,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
}
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x x%px\n",
+ "Data: x%lx x%x x%px\n",
Did, vport->fc_flag,
vport->fc_rscn_id_cnt, ndlp);
}
} else {
- if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
lpfc_rscn_payload_check(vport, Did)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Query GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
/*
@@ -787,12 +787,12 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_setup_disc_node(vport, Did);
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
- "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ "Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0245 Skip x%06x NameServer Rsp "
- "Data: x%x x%x\n", Did,
+ "Data: x%lx x%x\n", Did,
vport->fc_flag,
vport->fc_rscn_id_cnt);
}
@@ -914,7 +914,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
@@ -943,9 +942,9 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
- if (vport->fc_flag & FC_RSCN_MODE)
+ /* Skip processing response on pport if unloading */
+ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
goto out;
}
@@ -953,7 +952,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
@@ -961,22 +960,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event: "
- "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_RSCN_DEFERRED) {
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
-
+ if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
/* This is a GID_FT completing so the gidft_inp counter was
* incremented before the GID_FT was issued to the wire.
*/
@@ -988,13 +983,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0151 Process Deferred RSCN Data: x%x x%x\n",
+ "0151 Process Deferred RSCN Data: x%lx x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);
goto out;
}
- spin_unlock_irq(shost->host_lock);
if (ulp_status) {
/* Check for retry */
@@ -1018,7 +1012,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->gidft_inp--;
}
}
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1031,7 +1025,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x x%x "
+ "0208 NameServer Rsp Data: x%lx x%x "
"x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
@@ -1051,7 +1045,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0269 No NameServer Entries "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1066,7 +1060,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0240 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1084,7 +1078,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0241 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@@ -1113,14 +1107,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
- }
- else
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
+ } else {
lpfc_els_flush_rscn(vport);
+ }
}
lpfc_disc_start(vport);
@@ -1136,7 +1129,6 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
@@ -1166,9 +1158,9 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- /* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
- if (vport->fc_flag & FC_RSCN_MODE)
+ /* Skip processing response on pport if unloading */
+ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
goto out;
}
@@ -1176,7 +1168,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4108 Link event during NS query\n");
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
@@ -1184,22 +1176,18 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4166 NS query failed due to link event: "
- "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
"port_state x%x gidft_inp x%x\n",
ulp_status, ulp_word4, vport->fc_flag,
vport->port_state, vport->gidft_inp);
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
if (vport->gidft_inp)
vport->gidft_inp--;
goto out;
}
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_RSCN_DEFERRED) {
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
-
+ if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
/* This is a GID_PT completing so the gidft_inp counter was
* incremented before the GID_PT was issued to the wire.
*/
@@ -1211,13 +1199,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* Re-issue the NS cmd
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "4167 Process Deferred RSCN Data: x%x x%x\n",
+ "4167 Process Deferred RSCN Data: x%lx x%x\n",
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_els_handle_rscn(vport);
goto out;
}
- spin_unlock_irq(shost->host_lock);
if (ulp_status) {
/* Check for retry */
@@ -1237,7 +1224,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->gidft_inp--;
}
}
- if (vport->fc_flag & FC_RSCN_MODE)
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1250,7 +1237,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
SLI_CT_RESPONSE_FS_ACC) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x "
+ "4105 NameServer Rsp Data: x%lx x%x "
"x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
@@ -1270,7 +1257,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(
vport, KERN_INFO, LOG_DISCOVERY,
"4106 No NameServer Entries "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1286,7 +1273,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(
vport, KERN_INFO, LOG_DISCOVERY,
"4107 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1303,7 +1290,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NameServer Rsp Error */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4109 NameServer Rsp Error "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%lx\n",
be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
(uint32_t)CTrsp->ReasonCode,
(uint32_t)CTrsp->Explanation,
@@ -1333,11 +1320,10 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
} else {
lpfc_els_flush_rscn(vport);
}
@@ -1355,7 +1341,6 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
@@ -1445,7 +1430,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
- "x%x Error (%d %d) Data: x%x x%x\n",
+ "x%x Error (%d %d) Data: x%lx x%x\n",
did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
@@ -1455,13 +1440,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
- "NameServer Rsp Data: x%x x%x x%x\n",
+ "NameServer Rsp Data: x%x x%lx x%x\n",
did, ndlp->nlp_flag, vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0243 Skip x%x GFF "
- "NameServer Rsp Data: x%x x%x\n", did,
+ "NameServer Rsp Data: x%lx x%x\n", did,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
out:
@@ -1480,14 +1465,13 @@ out:
* current driver state.
*/
if (vport->port_state >= LPFC_DISC_AUTH) {
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_els_flush_rscn(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
- spin_unlock_irq(shost->host_lock);
- }
- else
+ /* RSCN still */
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
+ } else {
lpfc_els_flush_rscn(vport);
+ }
}
lpfc_disc_start(vport);
}
@@ -1853,11 +1837,10 @@ static uint32_t
lpfc_find_map_node(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
- struct Scsi_Host *shost;
+ unsigned long iflags;
uint32_t cnt = 0;
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_type & NLP_FABRIC)
continue;
@@ -1865,7 +1848,7 @@ lpfc_find_map_node(struct lpfc_vport *vport)
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
cnt++;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return cnt;
}
@@ -1950,7 +1933,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
/* NameServer Req */
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
- "0236 NameServer Req Data: x%x x%x x%x x%x\n",
+ "0236 NameServer Req Data: x%x x%lx x%x x%x\n",
cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
context);
@@ -2167,7 +2150,8 @@ ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+ "0266 Issue NameServer Req x%x err %d Data: x%lx "
+ "x%x\n",
cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
return 1;
}
@@ -2453,7 +2437,7 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
return;
/* Must be connected to a Fabric */
- if (!(vport->fc_flag & FC_FABRIC))
+ if (!test_bit(FC_FABRIC, &vport->fc_flag))
return;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
@@ -2569,9 +2553,9 @@ lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
* 64 bytes or less.
*/
- strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
len = strnlen(ae->value_string, sizeof(ae->value_string));
- /* round string length to a 32bit boundary. Ensure there's a NULL */
+ /* round string length to a 32bit boundary */
len += (len & 3) ? (4 - (len & 3)) : 4;
/* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
@@ -3233,7 +3217,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+ "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n",
cmdcode, new_mask, vport->fdmi_port_mask,
vport->fc_flag, vport->port_state);
@@ -3470,15 +3454,8 @@ lpfc_delayed_disc_tmo(struct timer_list *t)
void
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- if (!(vport->fc_flag & FC_DISC_DELAYED)) {
- spin_unlock_irq(shost->host_lock);
+ if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
return;
- }
- vport->fc_flag &= ~FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
lpfc_do_scr_ns_plogi(vport->phba, vport);
}
@@ -3606,7 +3583,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
- vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
+ set_bit(FC_DEREGISTER_ALL_APP_ID,
+ &vport->load_flag);
goto free_res;
}
}
@@ -3662,7 +3640,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!hash_empty(vport->hash_table))
hash_for_each(vport->hash_table, bucket, cur, hnode)
hash_del(&cur->hnode);
- vport->load_flag |= FC_ALLOW_VMID;
+ set_bit(FC_ALLOW_VMID, &vport->load_flag);
break;
default:
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
@@ -3729,7 +3707,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport,
INIT_LIST_HEAD(&bmp->list);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3275 VMID Request Data: x%x x%x x%x\n",
+ "3275 VMID Request Data: x%lx x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
ctreq = (struct lpfc_sli_ct_request *)mp->virt;
data = mp->virt;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 20662b4f33..a2d2b02b34 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -806,10 +806,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
int len = 0;
int i, iocnt, outio, cnt;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
unsigned char *statep;
+ unsigned long iflags;
struct nvme_fc_local_port *localport;
struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
@@ -818,7 +818,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
outio = 0;
len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
iocnt = 0;
if (!cnt) {
@@ -908,7 +908,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_defer_did);
len += scnprintf(buf+len, size-len, "\n");
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
len += scnprintf(buf + len, size - len,
"\nOutstanding IO x%x\n", outio);
@@ -940,8 +940,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
if (!localport)
goto out_exit;
- spin_lock_irq(shost->host_lock);
-
/* Port state is only one of two values for now. */
if (localport->port_id)
statep = "ONLINE";
@@ -953,6 +951,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
localport->port_id, statep);
len += scnprintf(buf + len, size - len, "\tRport List:\n");
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
spin_lock(&ndlp->lock);
@@ -1006,8 +1005,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
/* Terminate the string. */
len += scnprintf(buf + len, size - len, "\n");
}
-
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
out_exit:
return len;
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 26736122bd..f7c28dc73b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -93,7 +93,6 @@ static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t ha_copy;
@@ -121,9 +120,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* will cleanup any left over in-progress discovery
* events.
*/
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
if (phba->link_state != LPFC_CLEAR_LA)
lpfc_issue_clear_la(phba, vport);
@@ -301,7 +298,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x\n",
+ "rpi x%x fc_flag:x%lx\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
vport->fc_flag);
@@ -310,7 +307,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x "
- "port_state x%x rpi x%x fc_flag x%x\n",
+ "port_state x%x rpi x%x fc_flag x%lx\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
@@ -452,7 +449,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(phba->link_flag & LS_LOOPBACK_MODE) &&
- !(vport->fc_flag & FC_PT2PT)) {
+ !test_bit(FC_PT2PT, &vport->fc_flag)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
rc = -ENODEV;
@@ -467,7 +464,8 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
}
/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
- if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
rc = lpfc_mbox_rsrc_prep(phba, mboxq);
if (rc) {
rc = -ENOMEM;
@@ -520,7 +518,6 @@ int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost;
LPFC_MBOXQ_t *mboxq;
int rc;
@@ -546,10 +543,7 @@ lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
return -EIO;
}
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
return 0;
}
@@ -577,7 +571,6 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
uint8_t fabric_param_changed = 0;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((vport->fc_prevDID != vport->fc_myDID) ||
memcmp(&vport->fabric_portname, &sp->portName,
@@ -599,11 +592,8 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* - lpfc_delay_discovery module parameter is set.
*/
if (fabric_param_changed && !sp->cmn.clean_address_bit &&
- (vport->fc_prevDID || phba->cfg_delay_discovery)) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
- }
+ (vport->fc_prevDID || phba->cfg_delay_discovery))
+ set_bit(FC_DISC_DELAYED, &vport->fc_flag);
return fabric_param_changed;
}
@@ -633,15 +623,12 @@ static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, uint32_t ulp_word4)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
uint8_t fabric_param_changed;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_FABRIC;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_FABRIC, &vport->fc_flag);
phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -650,11 +637,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_edtovResol = sp->cmn.edtovResolution;
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
- }
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+ set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
@@ -728,12 +712,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unregister_fcf_prep(phba);
/* This should just update the VFI CSPs*/
- if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
lpfc_issue_reg_vfi(vport);
}
if (fabric_param_changed &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed.
@@ -753,20 +737,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
}
/*
* For SLI3 and SLI4, the VPI needs to be reregistered in
* response to this fabric parameter change event.
*/
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
@@ -779,18 +759,18 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev < LPFC_SLI_REV4) {
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
- vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_issue_fabric_reglogin(vport);
} else {
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
- (vport->vpi_state & LPFC_VPI_REGISTERED)) {
+ if ((!test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) &&
+ (vport->vpi_state & LPFC_VPI_REGISTERED)) {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
- } else if (vport->fc_flag & FC_VFI_REGISTERED)
+ } else if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
lpfc_issue_init_vpi(vport);
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -826,15 +806,13 @@ static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- vport->fc_flag |= FC_PT2PT;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
+ set_bit(FC_PT2PT, &vport->fc_flag);
/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -842,10 +820,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
-
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &vport->fc_flag);
phba->fc_topology_changed = 0;
}
@@ -854,9 +829,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc >= 0) {
/* This side will initiate the PLOGI */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
/*
* N_Port ID cannot be 0, set our Id to LocalID
@@ -953,7 +926,6 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
IOCB_t *irsp;
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
@@ -1069,10 +1041,9 @@ stop_rr_fcf_flogi:
}
/* FLOGI failed, so there is no fabric */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
- FC_PT2PT_NO_NVME);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
+ clear_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
/* If private loop, then allow max outstanding els to be
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -1081,15 +1052,14 @@ stop_rr_fcf_flogi:
if (phba->alpa_map[0] == 0)
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (!test_bit(FC_VFI_REGISTERED, &vport->fc_flag) ||
(vport->fc_prevDID != vport->fc_myDID) ||
phba->fc_topology_changed)) {
- if (vport->fc_flag & FC_VFI_REGISTERED) {
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) {
if (phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED,
+ &vport->fc_flag);
phba->fc_topology_changed = 0;
} else {
lpfc_sli4_unreg_all_rpis(vport);
@@ -1104,10 +1074,8 @@ stop_rr_fcf_flogi:
}
goto flogifail;
}
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
- vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
+ clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
/*
* The FLOGI succeeded. Sync the data for the CPU before
@@ -1123,7 +1091,7 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x "
- "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%lx x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
@@ -1202,7 +1170,7 @@ stop_rr_fcf_flogi:
goto out;
}
} else if (vport->port_state > LPFC_FLOGI &&
- vport->fc_flag & FC_PT2PT) {
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
/*
* In a p2p topology, it is possible that discovery has
* already progressed, and this completion can be ignored.
@@ -1506,8 +1474,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
if (ulp_command == CMD_ELS_REQUEST64_CR) {
ndlp = iocb->ndlp;
if (ndlp && ndlp->nlp_DID == Fabric_DID) {
- if ((phba->pport->fc_flag & FC_PT2PT) &&
- !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
+ !test_bit(FC_PT2PT_PLOGI,
+ &phba->pport->fc_flag))
iocb->fabric_cmd_cmpl =
lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
@@ -1562,7 +1531,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
}
/* Reset the Fabric flag, topology change may have happened */
- vport->fc_flag &= ~FC_FABRIC;
+ clear_bit(FC_FABRIC, &vport->fc_flag);
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
/* A node reference should be retained while registered with a
* transport or dev-loss-evt work is pending.
@@ -1645,11 +1614,12 @@ lpfc_more_plogi(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> PLOGIs to go */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0232 Continue discovery with %d PLOGIs to go "
- "Data: x%x x%x x%x\n",
- vport->num_disc_nodes, vport->fc_plogi_cnt,
+ "Data: x%x x%lx x%x\n",
+ vport->num_disc_nodes,
+ atomic_read(&vport->fc_plogi_cnt),
vport->fc_flag, vport->port_state);
/* Check to see if there are more PLOGIs to be sent */
- if (vport->fc_flag & FC_NLP_MORE)
+ if (test_bit(FC_NLP_MORE, &vport->fc_flag))
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
lpfc_els_disc_plogi(vport);
@@ -1696,18 +1666,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct serv_parm *sp;
uint8_t name[sizeof(struct lpfc_name)];
uint32_t keepDID = 0, keep_nlp_flag = 0;
+ int rc;
uint32_t keep_new_nlp_flag = 0;
uint16_t keep_nlp_state;
u32 keep_nlp_fc4_type = 0;
struct lpfc_nvme_rport *keep_nrport = NULL;
unsigned long *active_rrqs_xri_bitmap = NULL;
- /* Fabric nodes can have the same WWPN so we don't bother searching
- * by WWPN. Just return the ndlp that was given to us.
- */
- if (ndlp->nlp_type & NLP_FABRIC)
- return ndlp;
-
sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
@@ -1717,15 +1682,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
/* return immediately if the WWPN matches ndlp */
- if (!new_ndlp || (new_ndlp == ndlp))
+ if (new_ndlp == ndlp)
return ndlp;
- /*
- * Unregister from backend if not done yet. Could have been skipped
- * due to ADISC
- */
- lpfc_nlp_unreg_node(vport, new_ndlp);
-
if (phba->sli_rev == LPFC_SLI_REV4) {
active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
GFP_KERNEL);
@@ -1742,18 +1701,44 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
(new_ndlp ? new_ndlp->nlp_flag : 0),
(new_ndlp ? new_ndlp->nlp_fc4_type : 0));
- keepDID = new_ndlp->nlp_DID;
+ if (!new_ndlp) {
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
+ if (!rc) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
+ if (!new_ndlp) {
+ if (active_rrqs_xri_bitmap)
+ mempool_free(active_rrqs_xri_bitmap,
+ phba->active_rrq_pool);
+ return ndlp;
+ }
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ active_rrqs_xri_bitmap)
+ memcpy(active_rrqs_xri_bitmap,
+ new_ndlp->active_rrqs_xri_bitmap,
+ phba->cfg_rrq_xri_bitmap_sz);
- if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
- memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
- phba->cfg_rrq_xri_bitmap_sz);
+ /*
+ * Unregister from backend if not done yet. Could have been
+ * skipped due to ADISC
+ */
+ lpfc_nlp_unreg_node(vport, new_ndlp);
+ }
+
+ keepDID = new_ndlp->nlp_DID;
/* At this point in this routine, we know new_ndlp will be
* returned. however, any previous GID_FTs that were done
* would have updated nlp_fc4_type in ndlp, so we must ensure
* new_ndlp has the right value.
*/
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
}
@@ -1914,21 +1899,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
/*
* Check to see if more RSCNs came in while we were
* processing this one.
*/
if (vport->fc_rscn_id_cnt ||
- (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
lpfc_els_handle_rscn(vport);
- else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
- }
+ else
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
}
}
@@ -2015,7 +1996,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
@@ -2162,9 +2142,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
@@ -2226,7 +2204,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
*/
if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) &&
((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- !(vport->fc_flag & FC_OFFLINE_MODE)) {
+ !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
"on NPort x%x rpi x%x flg x%x Data:"
@@ -2258,7 +2236,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
* If we are a N-port connected to a Fabric, fix-up paramm's so logins
* to device on remote loops work.
*/
- if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+ if (test_bit(FC_FABRIC, &vport->fc_flag) &&
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
sp->cmn.altBbCredit = 1;
if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -2382,8 +2361,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If we don't send GFT_ID to Fabric, a PRLI error
* could be expected.
*/
- if ((vport->fc_flag & FC_FABRIC) ||
- (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag) ||
+ vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) {
mode = KERN_ERR;
loglevel = LOG_TRACE_EVENT;
} else {
@@ -2424,7 +2403,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* For P2P topology, retain the node so that PLOGI can be
* attempted on it again.
*/
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
goto out;
/* As long as this node is not registered with the SCSI
@@ -2500,7 +2479,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* the remote NPort beng a NVME Target.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & FC_RSCN_MODE &&
+ test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
vport->nvmei_support)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
@@ -2677,7 +2656,7 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
/* RSCN discovery */
/* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
if (lpfc_els_disc_plogi(vport))
return;
@@ -2697,7 +2676,6 @@ lpfc_rscn_disc(struct lpfc_vport *vport)
static void
lpfc_adisc_done(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
/*
@@ -2705,7 +2683,7 @@ lpfc_adisc_done(struct lpfc_vport *vport)
* and continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_RSCN_MODE) &&
+ !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
/*
@@ -2734,15 +2712,13 @@ lpfc_adisc_done(struct lpfc_vport *vport)
if (vport->port_state < LPFC_VPORT_READY) {
/* If we get here, there is nothing to ADISC */
lpfc_issue_clear_la(phba, vport);
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
vport->num_disc_nodes = 0;
/* go thru NPR list, issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -2769,11 +2745,12 @@ lpfc_more_adisc(struct lpfc_vport *vport)
/* Continue discovery with <num_disc_nodes> ADISCs to go */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0210 Continue discovery with %d ADISCs to go "
- "Data: x%x x%x x%x\n",
- vport->num_disc_nodes, vport->fc_adisc_cnt,
+ "Data: x%x x%lx x%x\n",
+ vport->num_disc_nodes,
+ atomic_read(&vport->fc_adisc_cnt),
vport->fc_flag, vport->port_state);
/* Check to see if there are more ADISCs to be sent */
- if (vport->fc_flag & FC_NLP_MORE) {
+ if (test_bit(FC_NLP_MORE, &vport->fc_flag)) {
lpfc_set_disctmo(vport);
/* go thru NPR nodes and issue any remaining ELS ADISCs */
lpfc_els_disc_adisc(vport);
@@ -3618,10 +3595,10 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
/* Not supported for private loop */
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
- !(vport->fc_flag & FC_PUBLIC_LOOP))
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))
return 1;
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* find any mapped nport - that would be the other nport */
ndlp = lpfc_findnode_mapped(vport);
if (!ndlp)
@@ -4399,7 +4376,6 @@ try_rdf:
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_work_evt *evtp;
if (!(nlp->nlp_flag & NLP_DELAY_TMO))
@@ -4427,9 +4403,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE,
+ &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -4546,7 +4521,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ if (!test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
@@ -4784,7 +4759,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Added for Vendor specifc support
* Just keep retrying for these Rsn / Exp codes
*/
- if ((vport->fc_flag & FC_PT2PT) &&
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
cmd == ELS_CMD_NVMEPRLI) {
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
@@ -4797,7 +4772,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"support NVME, disabling NVME\n",
stat.un.b.lsRjtRsnCode);
retry = 0;
- vport->fc_flag |= FC_PT2PT_NO_NVME;
+ set_bit(FC_PT2PT_NO_NVME, &vport->fc_flag);
goto out_retry;
}
}
@@ -4989,7 +4964,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 0;
}
- if ((vport->load_flag & FC_UNLOADING) != 0)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
retry = 0;
out_retry:
@@ -5020,7 +4995,7 @@ out_retry:
/* If discovery / RSCN timer is running, reset it */
if (timer_pending(&vport->fc_disctmo) ||
- (vport->fc_flag & FC_RSCN_MODE))
+ test_bit(FC_RSCN_MODE, &vport->fc_flag))
lpfc_set_disctmo(vport);
}
@@ -5406,7 +5381,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ulp_status == 0
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
- (!(vport->fc_flag & FC_PT2PT))) {
+ !test_bit(FC_PT2PT, &vport->fc_flag)) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state ==
NLP_STE_REG_LOGIN_ISSUE) {
@@ -5778,7 +5753,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x refcnt %d\n",
+ "RPI: x%x, fc_flag x%lx refcnt %d\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
@@ -5984,7 +5959,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
+ "RPI: x%x, fc_flag x%lx\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, vport->fc_flag);
@@ -6551,7 +6526,6 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
int sentadisc = 0;
@@ -6586,18 +6560,13 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
vport->num_disc_nodes++;
if (vport->num_disc_nodes >=
vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_NLP_MORE, &vport->fc_flag);
break;
}
}
- if (sentadisc == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- }
+ if (sentadisc == 0)
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
return sentadisc;
}
@@ -6623,7 +6592,6 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
int sentplogi = 0;
@@ -6640,26 +6608,20 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
vport->num_disc_nodes++;
if (vport->num_disc_nodes >=
vport->cfg_discovery_threads) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_NLP_MORE, &vport->fc_flag);
break;
}
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6452 Discover PLOGI %d flag x%x\n",
+ "6452 Discover PLOGI %d flag x%lx\n",
sentplogi, vport->fc_flag);
- if (sentplogi) {
+ if (sentplogi)
lpfc_set_disctmo(vport);
- }
- else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(shost->host_lock);
- }
+ else
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
return sentplogi;
}
@@ -7070,7 +7032,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
{
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
- if (vport->fc_flag & FC_FABRIC) {
+ if (test_bit(FC_FABRIC, &vport->fc_flag)) {
memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
sizeof(desc->port_names.wwnn));
@@ -7276,7 +7238,7 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
@@ -7328,7 +7290,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
@@ -7336,7 +7298,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
mbox->vport = phba->pport;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (rc == MBX_NOT_FINISHED) {
@@ -7345,7 +7306,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4)
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
+ mp = mbox->ctx_buf;
else
mp = mpsave;
@@ -7388,7 +7349,7 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE;
mbox->mbox_offset_word = 5;
- mbox->ctx_buf = virt;
+ mbox->ext_buf = virt;
} else {
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
@@ -7396,7 +7357,6 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
rc = 1;
@@ -7538,9 +7498,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc;
mb = &pmb->u.mb;
- lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
+ lcb_context = pmb->ctx_u.lcb;
ndlp = lcb_context->ndlp;
- pmb->ctx_ndlp = NULL;
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_buf = NULL;
shdr = (union lpfc_sli4_cfg_shdr *)
@@ -7680,7 +7640,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
LPFC_SLI4_MBX_EMBED);
- mbox->ctx_ndlp = (void *)lcb_context;
+ mbox->ctx_u.lcb = lcb_context;
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_els_lcb_rsp;
bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -7854,9 +7814,10 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
vport->fc_rscn_id_list[i] = NULL;
}
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->fc_rscn_id_cnt = 0;
- vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
/* Indicate we are done walking this fc_rscn_id_list */
@@ -7891,7 +7852,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
return 0;
/* If we are doing a FULL RSCN rediscovery, match everything */
- if (vport->fc_flag & FC_RSCN_DISCOVERY)
+ if (test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag))
return did;
spin_lock_irq(shost->host_lock);
@@ -8070,7 +8031,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
payload_len -= sizeof(uint32_t); /* take off word 0 */
/* RSCN received */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0214 RSCN received Data: x%x x%x x%x x%x\n",
+ "0214 RSCN received Data: x%lx x%x x%x x%x\n",
vport->fc_flag, payload_len, *lp,
vport->fc_rscn_id_cnt);
@@ -8082,10 +8043,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
FCH_EVT_RSCN, lp[i]);
/* Check if RSCN is coming from a direct-connected remote NPort */
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* If so, just ACC it, no other action needed for now */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "2024 pt2pt RSCN %08x Data: x%x x%x\n",
+ "2024 pt2pt RSCN %08x Data: x%lx x%x\n",
*lp, vport->fc_flag, payload_len);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -8129,7 +8090,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* ALL NPortIDs in RSCN are on HBA */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0219 Ignore RSCN "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%lx x%x x%x x%x\n",
vport->fc_flag, payload_len,
*lp, vport->fc_rscn_id_cnt);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -8140,7 +8101,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
/* Restart disctmo if its already running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
jiffies +
@@ -8153,8 +8114,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
if (vport->fc_rscn_flush) {
/* Another thread is walking fc_rscn_id_list on this vport */
- vport->fc_flag |= FC_RSCN_DISCOVERY;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0;
@@ -8167,24 +8128,23 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
*/
- if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_DEFERRED;
+ set_bit(FC_RSCN_DEFERRED, &vport->fc_flag);
/* Restart disctmo if its already running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
jiffies + msecs_to_jiffies(1000 * tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
- !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
- vport->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
if (rscn_cnt) {
cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
@@ -8206,16 +8166,15 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0235 Deferred RSCN "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%lx x%x\n",
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
} else {
- vport->fc_flag |= FC_RSCN_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
/* ReDiscovery RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0234 ReDiscovery RSCN "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%lx x%x\n",
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
}
@@ -8231,9 +8190,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"RCV RSCN: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_RSCN_MODE, &vport->fc_flag);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/* Indicate we are done walking fc_rscn_id_list on this vport */
vport->fc_rscn_flush = 0;
@@ -8273,7 +8230,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
/* Ignore RSCN if the port is being torn down. */
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
lpfc_els_flush_rscn(vport);
return 0;
}
@@ -8283,7 +8240,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%lx x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
vport->port_state, vport->num_disc_nodes,
vport->gidft_inp);
@@ -8372,7 +8329,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
int rc;
- uint32_t fc_flag = 0;
+ unsigned long fc_flag = 0;
uint32_t port_state = 0;
/* Clear external loopback plug detected flag */
@@ -8442,9 +8399,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
} else if (rc > 0) { /* greater than */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
/* If we have the high WWPN we can assign our own
* myDID; otherwise, we have to WAIT for a PLOGI
@@ -8463,17 +8418,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
fc_flag = vport->fc_flag;
port_state = vport->port_state;
- vport->fc_flag |= FC_PT2PT;
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
-
/* Acking an unsol FLOGI. Count 1 for link bounce
* work-around.
*/
vport->rcv_flogi_cnt++;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PT2PT, &vport->fc_flag);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
- "fc_flag x%x new fc_flag x%x\n",
+ "fc_flag x%lx new fc_flag x%lx\n",
port_state, vport->port_state,
fc_flag, vport->fc_flag);
@@ -8682,9 +8637,9 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb = &pmb->u.mb;
ndlp = pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_buf = NULL;
+ rxid = (uint16_t)(pmb->ctx_u.ox_rx_id & 0xffff);
+ oxid = (uint16_t)((pmb->ctx_u.ox_rx_id >> 16) & 0xffff);
+ memset(&pmb->ctx_u, 0, sizeof(pmb->ctx_u));
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -8788,8 +8743,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- (ox_id << 16 | ctx));
+ mbox->ctx_u.ox_rx_id = ox_id << 16 | ctx;
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
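
The two RLS hunks above stop overloading the mailbox ctx_buf pointer to carry the packed OX_ID/RX_ID pair and store it in a dedicated ctx_u union member instead, so no pointer casting is needed on the completion side. A minimal sketch of the pack/unpack step, using a hypothetical union rather than the driver's real LPFC_MBOXQ_t layout:

#include <linux/types.h>

/* Stand-in for the mailbox context union; the field name mirrors the
 * diff, but this type is illustrative only.
 */
union demo_mbox_ctx {
	unsigned long ox_rx_id;	/* OX_ID in bits 31:16, RX_ID in bits 15:0 */
};

static void pack_ox_rx(union demo_mbox_ctx *ctx, u16 ox_id, u16 rx_id)
{
	ctx->ox_rx_id = ((unsigned long)ox_id << 16) | rx_id;
}

static void unpack_ox_rx(const union demo_mbox_ctx *ctx, u16 *ox_id, u16 *rx_id)
{
	*rx_id = (u16)(ctx->ox_rx_id & 0xffff);
	*ox_id = (u16)((ctx->ox_rx_id >> 16) & 0xffff);
}
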
@@ -9492,11 +9446,11 @@ lpfc_els_timeout(struct timer_list *t)
spin_lock_irqsave(&vport->work_port_lock, iflag);
tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
- if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
vport->work_port_events |= WORKER_ELS_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+ if (!tmo_posted && !test_bit(FC_UNLOADING, &vport->load_flag))
lpfc_worker_wake_up(phba);
return;
}
@@ -9532,7 +9486,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
spin_lock_irq(&phba->hbalock);
@@ -9608,7 +9562,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
lpfc_issue_hb_tmo(phba);
if (!list_empty(&pring->txcmplq))
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
}
@@ -10116,6 +10070,9 @@ lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
cnt = be32_to_cpu(pc->pname_count);
+ /* Capture FPIN frequency */
+ phba->cgn_fpin_frequency = be32_to_cpu(pc->event_period);
+
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
"4684 FPIN Peer Congestion %s (x%x) "
"Duration %d mSecs "
@@ -10404,12 +10361,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
/* Ignore traffic received during vport shutdown. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
goto dropit;
/* If NPort discovery is delayed drop incoming ELS */
- if ((vport->fc_flag & FC_DISC_DELAYED) &&
- (cmd != ELS_CMD_PLOGI))
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag) &&
+ cmd != ELS_CMD_PLOGI)
goto dropit;
ndlp = lpfc_findnode_did(vport, did);
@@ -10453,14 +10410,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
- "refcnt %d Data: x%x x%x x%x x%x\n",
+ "refcnt %d Data: x%x x%lx x%x x%x\n",
cmd, did, kref_read(&ndlp->kref), vport->port_state,
vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
(cmd != ELS_CMD_FLOGI) &&
- !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
+ !((cmd == ELS_CMD_PLOGI) && test_bit(FC_PT2PT, &vport->fc_flag))) {
rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
goto lsrjt;
@@ -10475,7 +10432,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4 &&
- (phba->pport->fc_flag & FC_PT2PT)) {
+ test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
vport->fc_prevDID = vport->fc_myDID;
/* Our DID needs to be updated before registering
* the vfi. This is done in lpfc_rcv_plogi but
@@ -10493,15 +10450,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
/* If Nport discovery is delayed, reject PLOGIs */
- if (vport->fc_flag & FC_DISC_DELAYED) {
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
if (vport->port_state < LPFC_DISC_AUTH) {
- if (!(phba->pport->fc_flag & FC_PT2PT) ||
- (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (!test_bit(FC_PT2PT, &phba->pport->fc_flag) ||
+ test_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
@@ -10527,7 +10484,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* bounce the link. There is some discrepancy.
*/
if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
- vport->fc_flag & FC_PT2PT &&
+ test_bit(FC_PT2PT, &vport->fc_flag) &&
vport->rcv_flogi_cnt >= 1) {
rjt_err = LSRJT_LOGICAL_BSY;
rjt_exp = LSEXP_NOTHING_MORE;
@@ -10650,7 +10607,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPRLI++;
if ((vport->port_state < LPFC_DISC_AUTH) &&
- (vport->fc_flag & FC_FABRIC)) {
+ test_bit(FC_FABRIC, &vport->fc_flag)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
@@ -10825,7 +10782,7 @@ lsrjt:
return;
dropit:
- if (vport && !(vport->load_flag & FC_UNLOADING))
+ if (vport && !test_bit(FC_UNLOADING, &vport->load_flag))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
"Data: x%x x%x x%x x%x\n",
@@ -10979,16 +10936,13 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/*
* If lpfc_delay_discovery parameter is set and the clean address
* bit is cleared and fc fabric parameters changed, delay FC NPort
* discovery.
*/
- spin_lock_irq(shost->host_lock);
- if (vport->fc_flag & FC_DISC_DELAYED) {
- spin_unlock_irq(shost->host_lock);
+ if (test_bit(FC_DISC_DELAYED, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
@@ -10996,7 +10950,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
}
- spin_unlock_irq(shost->host_lock);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -11025,8 +10978,8 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
if ((phba->cfg_enable_SmartSAN ||
- (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
- (vport->load_flag & FC_ALLOW_FDMI))
+ phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) &&
+ test_bit(FC_ALLOW_FDMI, &vport->load_flag))
lpfc_start_fdmi(vport);
}
@@ -11046,14 +10999,12 @@ static void
lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -11070,16 +11021,13 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
case 0x9602: /* Link event since CLEAR_LA */
/* giving up on vport registration */
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
lpfc_can_disctmo(vport);
break;
/* If reg_vpi fail with invalid VPI status, re-init VPI */
case 0x20:
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
lpfc_init_vpi(phba, pmb, vport->vpi);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
@@ -11100,13 +11048,11 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (mb->mbxStatus == MBX_NOT_FINISHED)
break;
if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
- !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+ !test_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_init_vfi(vport);
else
@@ -11167,7 +11113,6 @@ void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
LPFC_MBOXQ_t *mbox;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11202,9 +11147,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox_err_exit:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
return;
}
@@ -11319,7 +11262,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
@@ -11367,13 +11309,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_check_nlp_post_devloss(vport, ndlp);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
- vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
- vport->fc_flag |= FC_FABRIC;
+ clear_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
+ clear_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
+ set_bit(FC_FABRIC, &vport->fc_flag);
if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
@@ -11390,7 +11330,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
memcpy(&vport->fabric_nodename, &sp->nodeName,
sizeof(struct lpfc_name));
if (fabric_param_changed &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
* issue unreg_vpi.
@@ -11411,15 +11351,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
if (phba->sli_rev == LPFC_SLI_REV4)
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
else
- vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag);
} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag)) {
/*
* Driver needs to re-reg VPI in order for f/w
* to update the MAC address.
@@ -11429,9 +11367,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag))
lpfc_issue_init_vpi(vport);
- else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ else if (test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
@@ -11584,7 +11522,6 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
u32 ulp_status, ulp_word4, did, tmo;
ndlp = cmdiocb->ndlp;
@@ -11615,10 +11552,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->fc4_xpt_flags);
if (ulp_status == IOSTAT_SUCCESS) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- vport->fc_flag &= ~FC_FABRIC;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
@@ -12076,7 +12011,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
* node and the vport is unloading, the xri aborted wcqe
* likely isn't coming back. Just release the sgl.
*/
- if ((vport->load_flag & FC_UNLOADING) &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
ndlp->nlp_DID == Fabric_DID) {
list_del(&sglq_entry->list);
sglq_entry->state = SGL_FREED;
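
Across the lpfc_els.c hunks above, vport->fc_flag and vport->load_flag are treated as unsigned long bitmaps driven by the atomic bitops API instead of u32 fields modified under shost->host_lock, which is also why the log format specifiers move from x%x to x%lx. A minimal, self-contained sketch of the before/after pattern (bit numbers and the surrounding struct are illustrative, not the driver's real definitions):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* In the driver the FC_* macros become bit numbers; values here are
 * illustrative only.
 */
#define DEMO_FC_PT2PT		0
#define DEMO_FC_RSCN_MODE	1

struct demo_vport {
	spinlock_t host_lock;		/* stands in for shost->host_lock */
	unsigned long fc_flag;		/* previously a u32 updated under host_lock */
};

/* Old pattern: take the host lock around a read-modify-write of a mask. */
static void rscn_mode_set_locked(struct demo_vport *v)
{
	spin_lock_irq(&v->host_lock);
	v->fc_flag |= BIT(DEMO_FC_RSCN_MODE);
	spin_unlock_irq(&v->host_lock);
}

/* New pattern: each bit is updated atomically on its own, no lock needed. */
static void rscn_mode_set_atomic(struct demo_vport *v)
{
	set_bit(DEMO_FC_RSCN_MODE, &v->fc_flag);
}

/* Tests and clears follow the same shape. */
static bool pt2pt_active(struct demo_vport *v)
{
	return test_bit(DEMO_FC_PT2PT, &v->fc_flag);
}

static void pt2pt_clear(struct demo_vport *v)
{
	clear_bit(DEMO_FC_PT2PT, &v->fc_flag);
}
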
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index da3aee0f63..e42fa9c822 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -169,13 +169,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %u state %d xpt x%x\n",
+ "load_flag x%lx refcnt %u state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref),
ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down. */
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
@@ -265,7 +265,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
} else {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3188 worker thread is stopped %s x%06x, "
- " rport x%px flg x%x load_flag x%x refcnt "
+ " rport x%px flg x%x load_flag x%lx refcnt "
"%d\n", __func__, ndlp->nlp_DID,
ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref));
@@ -910,7 +910,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
free_evt = 0;
break;
case LPFC_EVT_RESET_HBA:
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_reset_hba(phba);
break;
}
@@ -1148,7 +1148,6 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -1179,9 +1178,7 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
}
}
@@ -1209,7 +1206,7 @@ void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
fc_host_post_event(shost, fc_get_event_number(),
@@ -1222,9 +1219,7 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
/* Stop delayed Nport discovery */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
del_timer_sync(&vport->delayed_disc_tmo);
if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1239,7 +1234,7 @@ int
lpfc_linkdown(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
@@ -1272,9 +1267,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_LBIT;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LBIT, &phba->pport->fc_flag);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -1312,7 +1305,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
skip_unreg_did:
/* Setup myDID for link up if we are in pt2pt mode */
- if (phba->pport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
@@ -1323,8 +1316,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
mempool_free(mb, phba->mbox_mem_pool);
}
}
+ clear_bit(FC_PT2PT, &phba->pport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
}
@@ -1363,7 +1357,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- if ((vport->load_flag & FC_UNLOADING) != 0)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -1375,19 +1369,22 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
- fc_host_post_event(shost, fc_get_event_number(),
- FCH_EVT_LINKUP, 0);
+ if (phba->defer_flogi_acc_flag) {
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ } else {
+ clear_bit(FC_PT2PT, &vport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ }
+ set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
- if (phba->defer_flogi_acc_flag)
- vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
- else
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
- FC_ABORT_DISCOVERY | FC_RSCN_MODE |
- FC_NLP_MORE | FC_RSCN_DISCOVERY);
- vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
lpfc_setup_fdmi_mask(vport);
@@ -1438,7 +1435,6 @@ static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t control;
@@ -1477,9 +1473,7 @@ out:
"0225 Device Discovery completes\n");
mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
lpfc_can_disctmo(vport);
@@ -1516,8 +1510,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
- vport->fc_flag & FC_PUBLIC_LOOP &&
- !(vport->fc_flag & FC_LBIT)) {
+ test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
+ !test_bit(FC_LBIT, &vport->fc_flag)) {
/* Need to wait for FAN - use discovery timer
* for timeout. port_state is identically
* LPFC_LOCAL_CFG_LINK while waiting for FAN
@@ -1559,7 +1553,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_initial_flogi(vport);
}
} else {
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
lpfc_disc_start(vport);
}
return;
@@ -1883,7 +1877,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_TS_INPROG;
if (phba->pport->port_state != LPFC_FLOGI &&
- phba->pport->fc_flag & FC_FABRIC) {
+ test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
phba->hba_flag |= FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
@@ -2741,7 +2735,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 New FCF matches in-use "
"FCF (x%x), port_state:x%x, "
- "fc_flag:x%x\n",
+ "fc_flag:x%lx\n",
phba->fcf.current_rec.fcf_indx,
phba->pport->port_state,
phba->pport->fc_flag);
@@ -3217,7 +3211,6 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -3227,9 +3220,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
/* If this port is physical port or FDISC is done, do reg_vpi */
if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
@@ -3327,7 +3318,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
FC_VPORT_LINKDOWN);
continue;
}
- if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI,
+ &vports[i]->fc_flag)) {
lpfc_issue_init_vpi(vports[i]);
continue;
}
@@ -3379,17 +3371,17 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* Unless this was a VFI update and we are in PT2PT mode, then
* we should drop through to set the port state to ready.
*/
- if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
if (!(phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & FC_PT2PT))
+ test_bit(FC_PT2PT, &vport->fc_flag)))
goto out_free_mem;
/* The VPI is implicitly registered when the VFI is registered */
+ set_bit(FC_VFI_REGISTERED, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VFI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
/* In case SLI4 FC loopback test, we are ready */
@@ -3400,8 +3392,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
- "alpacnt:%d LinkState:%x topology:%x\n",
+ "3313 cmpl reg vfi port_state:%x fc_flag:%lx "
+ "myDid:%x alpacnt:%d LinkState:%x topology:%x\n",
vport->port_state, vport->fc_flag, vport->fc_myDID,
vport->phba->alpa_map[0],
phba->link_state, phba->fc_topology);
@@ -3411,14 +3403,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* For private loop or for NPort pt2pt,
* just start discovery and we are done.
*/
- if ((vport->fc_flag & FC_PT2PT) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
vport->port_state = LPFC_VPORT_READY;
else
lpfc_disc_start(vport);
@@ -3436,7 +3428,7 @@ static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct serv_parm *sp = &vport->fc_sparam;
@@ -3495,11 +3487,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
- struct Scsi_Host *shost;
int i;
int rc;
struct fcf_record *fcf_record;
- uint32_t fc_flags = 0;
unsigned long iflags;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -3536,7 +3526,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
- shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -3549,7 +3538,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la))
- fc_flags |= FC_LBIT;
+ set_bit(FC_LBIT, &vport->fc_flag);
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3598,16 +3587,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
- fc_flags |= FC_LBIT;
+ set_bit(FC_LBIT, &vport->fc_flag);
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (fc_flags) {
- spin_lock_irqsave(shost->host_lock, iflags);
- vport->fc_flag |= fc_flags;
- spin_unlock_irqrestore(shost->host_lock, iflags);
- }
-
lpfc_linkup(phba);
sparam_mbox = NULL;
@@ -3750,13 +3733,11 @@ void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_mbx_read_top *la;
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
uint8_t attn_type;
- unsigned long iflags;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3778,12 +3759,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irqsave(shost->host_lock, iflags);
if (bf_get(lpfc_mbx_read_top_pb, la))
- vport->fc_flag |= FC_BYPASSED_MODE;
+ set_bit(FC_BYPASSED_MODE, &vport->fc_flag);
else
- vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ clear_bit(FC_BYPASSED_MODE, &vport->fc_flag);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
@@ -3831,20 +3810,20 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%x x%lx\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%lx x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%lx x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_fa, la));
@@ -3871,8 +3850,8 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
/* The driver calls the state machine with the pmb pointer
* but wants to make sure a stale ctx_buf isn't acted on.
@@ -3944,13 +3923,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
vport->vpi, mb->mbxStatus);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_workq_post_event(phba, NULL, NULL,
LPFC_EVT_RESET_HBA);
}
+
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
@@ -3958,7 +3938,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
*/
- if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+ if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport)
scsi_host_put(shost);
}
@@ -4001,9 +3981,8 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"0912 cmpl_reg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = 0;
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
@@ -4016,19 +3995,17 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto out;
}
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
vport->num_disc_nodes = 0;
/* go thru NPR list and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
vport->port_state = LPFC_VPORT_READY;
@@ -4088,7 +4065,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
* the dump routine is a single-use construct.
*/
if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->ctx_buf = NULL;
@@ -4111,7 +4088,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -4191,8 +4168,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- struct Scsi_Host *shost;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
@@ -4231,14 +4207,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* when physical port receives logo do not start
* vport discovery */
- if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag))
lpfc_start_fdiscs(phba);
- else {
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
- spin_unlock_irq(shost->host_lock);
- }
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -4336,7 +4306,7 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
int rc;
@@ -4460,7 +4430,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
@@ -4519,7 +4489,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
@@ -4599,40 +4569,35 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long iflags;
-
- spin_lock_irqsave(shost->host_lock, iflags);
switch (state) {
case NLP_STE_UNUSED_NODE:
- vport->fc_unused_cnt += count;
+ atomic_add(count, &vport->fc_unused_cnt);
break;
case NLP_STE_PLOGI_ISSUE:
- vport->fc_plogi_cnt += count;
+ atomic_add(count, &vport->fc_plogi_cnt);
break;
case NLP_STE_ADISC_ISSUE:
- vport->fc_adisc_cnt += count;
+ atomic_add(count, &vport->fc_adisc_cnt);
break;
case NLP_STE_REG_LOGIN_ISSUE:
- vport->fc_reglogin_cnt += count;
+ atomic_add(count, &vport->fc_reglogin_cnt);
break;
case NLP_STE_PRLI_ISSUE:
- vport->fc_prli_cnt += count;
+ atomic_add(count, &vport->fc_prli_cnt);
break;
case NLP_STE_UNMAPPED_NODE:
- vport->fc_unmap_cnt += count;
+ atomic_add(count, &vport->fc_unmap_cnt);
break;
case NLP_STE_MAPPED_NODE:
- vport->fc_map_cnt += count;
+ atomic_add(count, &vport->fc_map_cnt);
break;
case NLP_STE_NPR_NODE:
- if (vport->fc_npr_cnt == 0 && count == -1)
- vport->fc_npr_cnt = 0;
+ if (!atomic_read(&vport->fc_npr_cnt) && count == -1)
+ atomic_set(&vport->fc_npr_cnt, 0);
else
- vport->fc_npr_cnt += count;
+ atomic_add(count, &vport->fc_npr_cnt);
break;
}
- spin_unlock_irqrestore(shost->host_lock, iflags);
}
/* Register a node with backend if not already done */
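
The lpfc_nlp_counters() hunk above removes the shost->host_lock critical section and converts the per-state node counters to atomic_t, so readers elsewhere in the diff (lpfc_disc_start(), lpfc_can_disctmo(), lpfc_mbx_cmpl_reg_vpi()) switch to atomic_read(). A minimal sketch of the counter idiom, with a hypothetical struct standing in for lpfc_vport:

#include <linux/atomic.h>

struct demo_vport {
	atomic_t fc_plogi_cnt;	/* previously a plain counter under host_lock */
	atomic_t fc_npr_cnt;
};

static void plogi_counter_adjust(struct demo_vport *v, int count)
{
	/* atomic_add() takes a signed delta, so one helper handles both
	 * the +1 and -1 cases without any locking.
	 */
	atomic_add(count, &v->fc_plogi_cnt);
}

static void npr_counter_adjust(struct demo_vport *v, int count)
{
	/* Mirror the NPR case in the diff: never let the count go negative. */
	if (!atomic_read(&v->fc_npr_cnt) && count == -1)
		atomic_set(&v->fc_npr_cnt, 0);
	else
		atomic_add(count, &v->fc_npr_cnt);
}
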
@@ -4864,10 +4829,10 @@ void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
char name1[16], name2[16];
+ unsigned long iflags;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0904 NPort state transition x%06x, %s -> %s\n",
@@ -4894,9 +4859,9 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
} else if (old_state)
lpfc_nlp_counters(vport, old_state, -1);
@@ -4908,26 +4873,26 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
}
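
In lpfc_nlp_set_state(), lpfc_enqueue_node() and lpfc_dequeue_node() above, the vport's fc_nodes list is no longer serialized by the Scsi_Host lock; a vport-private fc_nodes_list_lock is taken in its irqsave form instead. A minimal sketch of the list-protection pattern (the struct and node types are hypothetical; the lock and list field names follow the diff):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_vport {
	spinlock_t fc_nodes_list_lock;	/* protects fc_nodes and nothing else */
	struct list_head fc_nodes;
};

struct demo_node {
	struct list_head nlp_listp;
};

static void enqueue_node(struct demo_vport *v, struct demo_node *n)
{
	unsigned long iflags;

	/* irqsave form, since the list is also touched outside process context */
	spin_lock_irqsave(&v->fc_nodes_list_lock, iflags);
	if (list_empty(&n->nlp_listp))
		list_add_tail(&n->nlp_listp, &v->fc_nodes);
	spin_unlock_irqrestore(&v->fc_nodes_list_lock, iflags);
}

static void dequeue_node(struct demo_vport *v, struct demo_node *n)
{
	unsigned long iflags;

	spin_lock_irqsave(&v->fc_nodes_list_lock, iflags);
	list_del_init(&n->nlp_listp);
	spin_unlock_irqrestore(&v->fc_nodes_list_lock, iflags);
}
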
@@ -5002,7 +4967,6 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo;
@@ -5024,17 +4988,16 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
}
mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
vport->port_state, tmo,
- (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ (unsigned long)&vport->fc_disctmo,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return;
}
@@ -5045,7 +5008,6 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long iflags;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -5053,11 +5015,9 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO ||
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
timer_pending(&vport->fc_disctmo)) {
- spin_lock_irqsave(shost->host_lock, iflags);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ clear_bit(FC_DISC_TMO, &vport->fc_flag);
del_timer_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
@@ -5067,9 +5027,10 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0248 Cancel Discovery Timer state x%x "
- "Data: x%x x%x x%x\n",
+ "Data: x%lx x%x x%x\n",
vport->port_state, vport->fc_flag,
- vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return 0;
}
@@ -5212,7 +5173,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
+ ndlp = pmb->ctx_ndlp;
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
@@ -5273,13 +5234,13 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
+ !test_bit(FC_UNLOADING, &vport->load_flag) &&
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) &&
(kref_read(&ndlp->kref) > 0)) {
mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
} else {
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
if (phba->sli_rev == LPFC_SLI_REV4) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
@@ -5355,7 +5316,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
ndlp->nlp_flag |= NLP_UNREG_INP;
lpfc_printf_vlog(vport, KERN_INFO,
@@ -5387,7 +5348,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* will issue a LOGO here and keep the rpi alive if
* not unloading.
*/
- if (!(vport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag)) {
ndlp->nlp_flag &= ~NLP_UNREG_INP;
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = ndlp->nlp_state;
@@ -5423,8 +5384,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int i;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
if (!vports) {
@@ -5433,17 +5394,18 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
return;
}
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
/* The mempool_alloc might sleep */
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
lpfc_unreg_rpi(vports[i], ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
+ iflags);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
lpfc_destroy_vport_work_array(phba, vports);
}
@@ -5533,7 +5495,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
@@ -5544,7 +5506,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
- (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
+ (ndlp != mb->ctx_ndlp))
continue;
mb->ctx_ndlp = NULL;
@@ -5554,7 +5516,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
list_del(&mb->list);
lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
@@ -5685,12 +5647,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
struct lpfc_nodelist *
lpfc_findnode_mapped(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
uint32_t data1;
unsigned long iflags;
- spin_lock_irqsave(shost->host_lock, iflags);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
@@ -5699,7 +5660,8 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
((uint32_t)ndlp->nlp_xri << 16) |
((uint32_t)ndlp->nlp_type << 8) |
((uint32_t)ndlp->nlp_rpi & 0xff));
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"2025 FIND node DID MAPPED "
"Data: x%px x%x x%x x%x x%px\n",
@@ -5709,7 +5671,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
return ndlp;
}
}
- spin_unlock_irqrestore(shost->host_lock, iflags);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
/* FIND node did <did> NOT FOUND */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5726,7 +5688,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp) {
if (vport->phba->nvmet_support)
return NULL;
- if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
ndlp = lpfc_nlp_init(vport, did);
@@ -5736,7 +5698,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6453 Setup New Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5750,8 +5712,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
* The goal is to allow the target to reset its state and clear
* pending IO in preparation for the initiator to recover.
*/
- if ((vport->fc_flag & FC_RSCN_MODE) &&
- !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
+ !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
if (lpfc_rscn_payload_check(vport, did)) {
/* Since this node is marked for discovery,
@@ -5761,7 +5723,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6455 Setup RSCN Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5773,14 +5735,6 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return ndlp;
- /* If we've already received a PLOGI from this NPort
- * we don't need to try to discover it again.
- */
- if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
- !(ndlp->nlp_type &
- (NLP_FCP_TARGET | NLP_NVME_TARGET)))
- return NULL;
-
if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -5793,7 +5747,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6456 Skip Setup RSCN Node x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
@@ -5801,7 +5755,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"6457 Setup Active Node 2B_DISC x%x "
- "Data:x%x x%x x%x\n",
+ "Data:x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, vport->fc_flag);
@@ -5929,7 +5883,6 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
void
lpfc_disc_start(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
@@ -5957,9 +5910,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0202 Start Discovery port state x%x "
- "flg x%x Data: x%x x%x x%x\n",
- vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt, vport->fc_npr_cnt);
+ "flg x%lx Data: x%x x%x x%x\n",
+ vport->port_state, vport->fc_flag,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt),
+ atomic_read(&vport->fc_npr_cnt));
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5969,8 +5924,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Register the VPI for SLI3, NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_RSCN_MODE) &&
+ !test_bit(FC_PT2PT, &vport->fc_flag) &&
+ !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
@@ -5985,16 +5940,14 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* If we get here, there is nothing to ADISC */
lpfc_issue_clear_la(phba, vport);
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
vport->num_disc_nodes = 0;
/* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
}
@@ -6006,18 +5959,17 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
/* Check to see if more RSCNs came in while we
* were processing this one.
*/
- if ((vport->fc_rscn_id_cnt == 0) &&
- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ if (vport->fc_rscn_id_cnt == 0 &&
+ !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
lpfc_can_disctmo(vport);
- } else
+ } else {
lpfc_els_handle_rscn(vport);
+ }
}
}
return;
@@ -6084,7 +6036,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_hba *phba = vport->phba;
- if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ if (atomic_read(&vport->fc_plogi_cnt) ||
+ atomic_read(&vport->fc_adisc_cnt)) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
@@ -6165,20 +6118,15 @@ lpfc_disc_timeout(struct timer_list *t)
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
- if (!(vport->fc_flag & FC_DISC_TMO))
+ if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag))
return;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"disc timeout: state:x%x rtry:x%x flg:x%x",
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
@@ -6332,7 +6280,7 @@ restart_disc:
break;
case LPFC_VPORT_READY:
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
@@ -6408,7 +6356,7 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
pmb->ctx_ndlp = NULL;
@@ -6749,7 +6697,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i, ret = 0;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
@@ -6758,24 +6706,23 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
return 1;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
/*
* IF the CVL_RCVD bit is not set then we have sent the
* flogi.
* If dev_loss fires while we are waiting we do not want to
* unreg the fcf.
*/
- if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
- spin_unlock_irq(shost->host_lock);
+ if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) {
ret = 1;
goto out;
}
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
@@ -6787,7 +6734,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
ndlp->nlp_flag);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
out:
lpfc_destroy_vport_work_array(phba, vports);
@@ -6805,7 +6752,6 @@ void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -6813,9 +6759,7 @@ lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
}
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
@@ -6879,9 +6823,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
- vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag);
}
lpfc_destroy_vport_work_array(phba, vports);
if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
@@ -6894,9 +6838,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(phba->pport);
shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag);
}
/* Cleanup any outstanding ELS commands */
@@ -6980,8 +6924,8 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
* If driver is not unloading, check if there is any other
* FCF record that can be used for discovery.
*/
- if ((phba->pport->load_flag & FC_UNLOADING) ||
- (phba->link_state < LPFC_LINK_UP))
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
+ phba->link_state < LPFC_LINK_UP)
return;
/* This is considered as the initial FCF discovery scan */
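
A recurring simplification in the lpfc_hbadisc.c hunks above is collapsing "test a flag, then clear it under the host lock" into a single test_and_clear_bit(), as done for FC_DISC_TMO in lpfc_disc_timeout_handler() and FC_LOGO_RCVD_DID_CHNG in lpfc_mbx_cmpl_fabric_reg_login(). A minimal sketch of that idiom (flag numbering illustrative):

#include <linux/bitops.h>

#define DEMO_FC_DISC_TMO	2	/* illustrative bit number */

static int handle_disc_timeout(unsigned long *fc_flag)
{
	/* Atomically returns the previous bit value and clears it, replacing
	 * "if (flag & FC_DISC_TMO) { lock; flag &= ~FC_DISC_TMO; unlock; }".
	 */
	if (!test_and_clear_bit(DEMO_FC_DISC_TMO, fc_flag))
		return 0;	/* nothing to do: the timer wasn't armed */

	/* ... discovery-timeout handling would run here ... */
	return 1;
}
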
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5d4f9f2708..367e6b066d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -4069,7 +4069,6 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_GRP5 0x5
#define LPFC_TRAILER_CODE_FC 0x10
#define LPFC_TRAILER_CODE_SLI 0x11
-#define LPFC_TRAILER_CODE_CMSTAT 0x13
};
struct lpfc_acqe_link {
@@ -4339,6 +4338,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
+#define LPFC_SLI_EVENT_TYPE_RESET_CM_STATS 0x13
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 858ca395c0..f7a0aa3625 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -94,6 +94,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
@@ -459,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
* longer needed. Prevent unintended ctx_buf access as the mbox is
@@ -891,7 +892,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
readl(phba->HCregaddr); /* flush */
}
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_cleanup_discovery_resources(phba->pport);
else {
vports = lpfc_create_vport_work_array(phba);
@@ -1231,13 +1232,13 @@ lpfc_rrq_timeout(struct timer_list *t)
phba = from_timer(phba, t, rrq_tmr);
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
phba->hba_flag |= HBA_RRQ_ACTIVE;
else
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_worker_wake_up(phba);
}
@@ -1268,9 +1269,9 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
- if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
- !(phba->link_state == LPFC_HBA_ERROR) &&
- !(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
+ !(phba->link_state == LPFC_HBA_ERROR) &&
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
@@ -1297,11 +1298,11 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
u32 i, idle_percent;
u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (phba->link_state == LPFC_HBA_ERROR ||
- phba->pport->fc_flag & FC_OFFLINE_MODE ||
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
phba->cmf_active_mode != LPFC_CFG_OFF)
goto requeue;
@@ -1358,11 +1359,12 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
uint32_t usdelay;
int i;
- if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+ if (!phba->cfg_auto_imax ||
+ test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (phba->link_state == LPFC_HBA_ERROR ||
- phba->pport->fc_flag & FC_OFFLINE_MODE)
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
goto requeue;
ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
@@ -1533,9 +1535,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
- if ((phba->link_state == LPFC_HBA_ERROR) ||
- (phba->pport->load_flag & FC_UNLOADING) ||
- (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
if (phba->elsbuf_cnt &&
@@ -1736,7 +1738,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
break;
}
/* If driver is unloading let the worker thread continue */
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
phba->work_hs = 0;
break;
}
@@ -1747,7 +1749,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* first write to the host attention register clear the
* host status register.
*/
- if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
+ if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
phba->work_hs = old_host_status & ~HS_FFER1;
spin_lock_irq(&phba->hbalock);
@@ -2215,7 +2217,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -3085,7 +3087,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
* The flush here is only when the pci slot
* is offline.
*/
- if (vport->load_flag & FC_UNLOADING &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
pci_channel_offline(phba->pcidev))
lpfc_sli_flush_io_rings(vport->phba);
@@ -3411,7 +3413,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
return;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- if (vports[i]->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
continue;
list_for_each_entry_safe(ndlp, next_ndlp,
@@ -3611,7 +3613,7 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli_flush_io_rings(phba);
hwq_count = phba->cfg_hdw_queue;
@@ -3697,7 +3699,7 @@ lpfc_online(struct lpfc_hba *phba)
return 0;
vport = phba->pport;
- if (!(vport->fc_flag & FC_OFFLINE_MODE))
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return 0;
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -3737,20 +3739,18 @@ lpfc_online(struct lpfc_hba *phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- struct Scsi_Host *shost;
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
+ clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
- vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_VPORT_NEEDS_REG_VPI,
+ &vports[i]->fc_flag);
if (phba->sli_rev == LPFC_SLI_REV4) {
- vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI,
+ &vports[i]->fc_flag);
if ((vpis_cleared) &&
(vports[i]->port_type !=
LPFC_PHYSICAL_PORT))
vports[i]->vpi = 0;
}
- spin_unlock_irq(shost->host_lock);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3805,7 +3805,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
int offline;
bool hba_pci_err;
- if (vport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
return;
lpfc_block_mgmt_io(phba, mbx_action);
@@ -3819,16 +3819,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- if (vports[i]->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
continue;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
- vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
+ clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);
- shost = lpfc_shost_from_vport(vports[i]);
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
@@ -3910,7 +3909,7 @@ lpfc_offline(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
/* stop port and all timers associated with this hba */
@@ -3941,14 +3940,14 @@ lpfc_offline(struct lpfc_hba *phba)
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->work_port_events = 0;
- vports[i]->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
}
lpfc_destroy_vport_work_array(phba, vports);
/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
* in hba_unset
*/
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
__lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
@@ -4766,9 +4765,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport = (struct lpfc_vport *) shost->hostdata;
vport->phba = phba;
- vport->load_flag |= FC_LOADING;
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ set_bit(FC_LOADING, &vport->load_flag);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
vport->fc_rscn_flush = 0;
+ atomic_set(&vport->fc_plogi_cnt, 0);
+ atomic_set(&vport->fc_adisc_cnt, 0);
+ atomic_set(&vport->fc_reglogin_cnt, 0);
+ atomic_set(&vport->fc_prli_cnt, 0);
+ atomic_set(&vport->fc_unmap_cnt, 0);
+ atomic_set(&vport->fc_map_cnt, 0);
+ atomic_set(&vport->fc_npr_cnt, 0);
+ atomic_set(&vport->fc_unused_cnt, 0);
lpfc_get_vport_cfgparam(vport);
/* Adjust value in vport */
@@ -4824,6 +4831,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
+ spin_lock_init(&vport->fc_nodes_list_lock);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
@@ -4921,7 +4929,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
spin_lock_irq(shost->host_lock);
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
stat = 1;
goto finished;
}
@@ -4945,7 +4953,8 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
goto finished;
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
- if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
+ if (!atomic_read(&vport->fc_map_cnt) &&
+ time < msecs_to_jiffies(2 * 1000))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@@ -5034,9 +5043,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
fc_host_active_fc4s(shost)[7] = 1;
fc_host_max_npiv_vports(shost) = phba->max_vpi;
- spin_lock_irq(shost->host_lock);
- vport->load_flag &= ~FC_LOADING;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LOADING, &vport->load_flag);
}
/**
@@ -5172,7 +5179,7 @@ lpfc_vmid_poll(struct timer_list *t)
/* Is the vmid inactivity timer enabled */
if (phba->pport->vmid_inactivity_timeout ||
- phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
+ test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
wake_up = 1;
phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
}
@@ -5447,7 +5454,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6340,7 +6347,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
+ lpfc_read_topology(phba, pmb, pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6636,6 +6643,11 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
acqe_sli->event_data1, acqe_sli->event_data2,
acqe_sli->event_data3);
break;
+ case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS:
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "2905 Reset CM statistics\n");
+ lpfc_sli4_async_cmstat_evt(phba);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -6689,9 +6701,7 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
return NULL;
lpfc_linkdown_port(vport);
lpfc_cleanup_pending_mbox(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_CVL_RCVD;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
return ndlp;
}
@@ -6888,9 +6898,9 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) {
- if ((!(vports[i]->fc_flag &
- FC_VPORT_CVL_RCVD)) &&
- (vports[i]->port_state > LPFC_FDISC)) {
+ if (!test_bit(FC_VPORT_CVL_RCVD,
+ &vports[i]->fc_flag) &&
+ vports[i]->port_state > LPFC_FDISC) {
active_vlink_present = 1;
break;
}
@@ -6903,8 +6913,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* If we are here first then vport_delete is going to wait
* for discovery to complete.
*/
- if (!(vport->load_flag & FC_UNLOADING) &&
- active_vlink_present) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -7346,9 +7356,6 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
case LPFC_TRAILER_CODE_SLI:
lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
break;
- case LPFC_TRAILER_CODE_CMSTAT:
- lpfc_sli4_async_cmstat_evt(phba);
- break;
default:
lpfc_printf_log(phba, KERN_ERR,
LOG_TRACE_EVENT,
@@ -9088,7 +9095,7 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- vport->load_flag |= FC_ALLOW_FDMI;
+ set_bit(FC_ALLOW_FDMI, &vport->load_flag);
if (phba->cfg_enable_SmartSAN ||
phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
/* Setup appropriate attribute masks */
@@ -12774,7 +12781,8 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (phba->pport &&
+ test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
return;
__lpfc_cpuhp_remove(phba);
@@ -12799,7 +12807,7 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
*retval = -EAGAIN;
return true;
}
@@ -13319,12 +13327,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
- struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_UNLOADING, &phba->pport->load_flag);
kfree(phba->vpi_bmask);
kfree(phba->vpi_ids);
@@ -14116,9 +14119,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
struct lpfc_hba *phba = vport->phba;
int i;
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
lpfc_free_sysfs_attr(vport);
@@ -14961,9 +14962,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
int i;
/* Mark the device unloading flag */
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
if (phba->cgn_i)
lpfc_unreg_congestion_buf(phba);
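Besides the flag conversions, the lpfc_init.c hunks above change how the per-vport discovery counters are kept: lpfc_create_port() now seeds fc_plogi_cnt, fc_map_cnt and the rest with atomic_set(), and readers such as the patched lpfc_scan_finished() use atomic_read() instead of loading a plain integer under host_lock. The userspace sketch below uses C11 stdatomic as a stand-in for the kernel atomic_t API; all names are invented for illustration.

/* demo_counters.c - modeling the move to atomic per-vport node counters */
#include <stdatomic.h>
#include <stdio.h>

struct demo_vport {
	atomic_int fc_map_cnt;		/* hypothetical mirror of vport->fc_map_cnt */
	atomic_int fc_npr_cnt;		/* hypothetical mirror of vport->fc_npr_cnt */
};

static void demo_vport_init(struct demo_vport *v)
{
	/* corresponds to the block of atomic_set() calls added to lpfc_create_port() */
	atomic_init(&v->fc_map_cnt, 0);
	atomic_init(&v->fc_npr_cnt, 0);
}

static void demo_node_mapped(struct demo_vport *v)
{
	atomic_fetch_add(&v->fc_map_cnt, 1);	/* lock-free increment */
}

static int demo_have_mapped_nodes(struct demo_vport *v)
{
	/* readers take a consistent snapshot without holding host_lock */
	return atomic_load(&v->fc_map_cnt) != 0;
}

int main(void)
{
	struct demo_vport v;

	demo_vport_init(&v);
	printf("mapped nodes? %d\n", demo_have_mapped_nodes(&v));
	demo_node_mapped(&v);
	printf("mapped nodes? %d\n", demo_have_mapped_nodes(&v));
	return 0;
}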
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index cadcd16494..e98f1c2b22 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -102,7 +102,7 @@ lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
{
struct lpfc_dmabuf *mp;
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mp = mbox->ctx_buf;
mbox->ctx_buf = NULL;
/* Release the generic BPL buffer memory. */
@@ -204,10 +204,8 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
uint16_t region_id)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -219,7 +217,6 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
@@ -236,11 +233,8 @@ void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
- void *ctx;
mb = &pmb->u.mb;
- /* Save context so that we can restore after memset */
- ctx = pmb->ctx_buf;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -254,7 +248,6 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
- pmb->ctx_buf = ctx;
return;
}
@@ -372,7 +365,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
/* Save address for later completion and set the owner to host so that
* the FW knows this mailbox is available for processing.
*/
- pmb->ctx_buf = (uint8_t *)mp;
+ pmb->ctx_buf = mp;
mb->mbxOwner = OWN_HOST;
return (0);
}
@@ -949,7 +942,7 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
* Set the re-reg VPI bit for f/w to update the MAC address.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+ !test_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag))
mb->un.varRegVpi.upd = 1;
mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
@@ -1816,7 +1809,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/* Reinitialize the context pointers to avoid stale usage. */
mbox->ctx_buf = NULL;
- mbox->context3 = NULL;
+ memset(&mbox->ctx_u, 0, sizeof(mbox->ctx_u));
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
@@ -2244,7 +2237,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
/* Only FC supports upd bit */
if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
- (vport->fc_flag & FC_VFI_REGISTERED) &&
+ test_bit(FC_VFI_REGISTERED, &vport->fc_flag) &&
(!phba->fc_topology_changed))
bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
@@ -2271,8 +2264,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
- " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
- " port_state:x%x topology chg:%d bbscn_fabric :%d\n",
+ "vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%lx "
+ "port_state:x%x topology chg:%d bbscn_fabric :%d\n",
vport->fc_myDID,
phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[vport->vfi],
@@ -2366,8 +2359,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
MAILBOX_t *mb;
int rc = FAILURE;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
+ struct lpfc_rdp_context *rdp_context = mboxq->ctx_u.rdp;
mb = &mboxq->u.mb;
if (mb->mbxStatus)
@@ -2385,9 +2377,8 @@ mbx_failed:
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error_mbox_free;
@@ -2401,7 +2392,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
/* Save the dma buffer for cleanup in the final completion. */
mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
goto error_mbox_free;
@@ -2416,9 +2407,8 @@ void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
int rc;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- struct lpfc_rdp_context *rdp_context =
- (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
+ struct lpfc_dmabuf *mp = mbox->ctx_buf;
+ struct lpfc_rdp_context *rdp_context = mbox->ctx_u.rdp;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
goto error;
@@ -2448,7 +2438,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
- mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
+ mbox->ctx_u.rdp = rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto error;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b147304b01..c4172791c2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -300,7 +300,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
int rc;
ndlp = login_mbox->ctx_ndlp;
- save_iocb = login_mbox->context3;
+ save_iocb = login_mbox->ctx_u.save_iocb;
if (mb->mbxStatus == MBX_SUCCESS) {
/* Now that REG_RPI completed successfully,
@@ -382,7 +382,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0114 PLOGI chkparm OK Data: x%x x%x x%x "
- "x%x x%x x%x\n",
+ "x%x x%x x%lx\n",
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi, vport->port_state,
vport->fc_flag);
@@ -434,7 +434,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (nlp_portwwn != 0 &&
nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0143 PLOGI recv'd from DID: x%x "
"WWPN changed: old %llx new %llx\n",
ndlp->nlp_DID,
@@ -464,8 +464,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
save_iocb = NULL;
/* Check for Nport to NPort pt2pt protocol */
- if ((vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ !test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
/* rcv'ed PLOGI decides what our NPortId will be */
if (phba->sli_rev == LPFC_SLI_REV4) {
vport->fc_myDID = bf_get(els_rsp64_sid,
@@ -580,7 +580,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This only applies to a fabric environment.
*/
if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
- (vport->fc_flag & FC_FABRIC)) {
+ test_bit(FC_FABRIC, &vport->fc_flag)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -640,7 +640,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!login_mbox->ctx_ndlp)
goto out;
- login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+ login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
@@ -682,8 +682,8 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_nodelist *ndlp;
uint32_t cmd;
- elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
- ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
+ elsiocb = mboxq->ctx_u.save_iocb;
+ ndlp = mboxq->ctx_ndlp;
vport = mboxq->vport;
cmd = elsiocb->drvrTimeout;
@@ -804,7 +804,6 @@ static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_vport **vports;
int i, active_vlink_present = 0 ;
@@ -837,19 +836,17 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC ||
- vport->fc_flag & FC_PT2PT)
+ test_bit(FC_PT2PT, &vport->fc_flag))
goto out;
lpfc_linkdown_port(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_LOGO_RCVD;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_LOGO_RCVD, &vport->fc_flag);
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) {
- if ((!(vports[i]->fc_flag &
- FC_VPORT_LOGO_RCVD)) &&
- (vports[i]->port_state > LPFC_FDISC)) {
+ if (!test_bit(FC_VPORT_LOGO_RCVD,
+ &vports[i]->fc_flag) &&
+ vports[i]->port_state > LPFC_FDISC) {
active_vlink_present = 1;
break;
}
@@ -862,8 +859,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* If we are here first then vport_delete is going to wait
* for discovery to complete.
*/
- if (!(vport->load_flag & FC_UNLOADING) &&
- active_vlink_present) {
+ if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
+ active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
@@ -876,23 +873,21 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_LOGO_RCVD_DID_CHNG, &phba->pport->fc_flag);
lpfc_retry_pport_discovery(phba);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NODE | LOG_ELS | LOG_DISCOVERY,
"3203 LOGO recover nport x%06x state x%x "
- "ntype x%x fc_flag x%x\n",
+ "ntype x%x fc_flag x%lx\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_type, vport->fc_flag);
/* Special cases for rports that recover post LOGO. */
if ((!(ndlp->nlp_type == NLP_FABRIC) &&
(ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
- vport->fc_flag & FC_PT2PT)) ||
+ test_bit(FC_PT2PT, &vport->fc_flag))) ||
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
@@ -1057,9 +1052,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
- if (!(vport->fc_flag & FC_PT2PT)) {
+ if (!test_bit(FC_PT2PT, &vport->fc_flag)) {
/* Check config parameter use-adisc or FCP-2 */
- if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
+ if (vport->cfg_use_adisc &&
+ (test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
(ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(&ndlp->lock);
@@ -1123,7 +1119,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
}
if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
- (!(vport->fc_flag & FC_OFFLINE_MODE)))
+ (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)))
ndlp->nlp_flag |= NLP_UNREG_INP;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1149,9 +1145,8 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba = vport->phba;
/* Release the RPI if reglogin completing */
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
- (evt == NLP_EVT_CMPL_REG_LOGIN) &&
- (!pmb->u.mb.mbxStatus)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
+ evt == NLP_EVT_CMPL_REG_LOGIN && !pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
@@ -1246,7 +1241,6 @@ static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
@@ -1281,9 +1275,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
lpfc_end_rscn(vport);
}
@@ -1423,8 +1415,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- if ((vport->fc_flag & FC_PT2PT) &&
- (vport->fc_flag & FC_PT2PT_PLOGI)) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_bit(FC_PT2PT_PLOGI, &vport->fc_flag)) {
ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) {
/* E_D_TOV ticks are in nanoseconds */
@@ -1578,8 +1570,8 @@ lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
phba = vport->phba;
/* Release the RPI */
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
- !mb->mbxStatus) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
+ !mb->mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
lpfc_release_rpi(phba, vport, ndlp, rpi);
}
@@ -1615,7 +1607,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PLOGI */
@@ -1801,7 +1793,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding ADISC */
@@ -1883,7 +1875,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
mb->ctx_ndlp = NULL;
@@ -1894,7 +1886,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+ (ndlp == mb->ctx_ndlp)) {
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
@@ -1991,13 +1983,13 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* know what PRLI to send yet. Figure that out now and
* call PRLI depending on the outcome.
*/
- if (vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag)) {
/* If we are pt2pt, there is no Fabric to determine
* the FC4 type of the remote nport. So if NVME
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ if ((!test_bit(FC_PT2PT_NO_NVME, &vport->fc_flag)) &&
(vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
@@ -2029,7 +2021,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
} else {
- if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
+ if (test_bit(FC_PT2PT, &vport->fc_flag) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
/* Only Fabric ports should transition. NVME target
@@ -2070,7 +2062,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
@@ -2386,7 +2378,7 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
/* software abort outstanding PRLI */
@@ -2830,13 +2822,10 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
/* For the fabric port just clear the fc flags. */
if (ndlp->nlp_DID == Fabric_DID) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
}
lpfc_unreg_rpi(vport, ndlp);
return ndlp->nlp_state;
@@ -2908,7 +2897,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Don't do anything that will mess up processing of the
* previous RSCN.
*/
- if (vport->fc_flag & FC_RSCN_DEFERRED)
+ if (test_bit(FC_RSCN_DEFERRED, &vport->fc_flag))
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 47218cf4d1..c5792eaf3f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -94,7 +94,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
- if (!vport || vport->load_flag & FC_UNLOADING ||
+ if (!vport || test_bit(FC_UNLOADING, &vport->load_flag) ||
vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
@@ -674,7 +674,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING ||
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
@@ -765,7 +765,7 @@ lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
struct lpfc_nvme_lport *lport;
int rc;
- if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
return -ENODEV;
lport = (struct lpfc_nvme_lport *)localport->private;
@@ -810,7 +810,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
return;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
@@ -1567,7 +1567,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ if ((unlikely(test_bit(FC_UNLOADING, &vport->load_flag))) ||
phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
@@ -1886,7 +1886,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (unlikely(!freqpriv))
return;
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
/* Announce entry to new IO submit field. */
@@ -2263,7 +2263,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (!vport->localport ||
test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
phba->link_state == LPFC_HBA_ERROR ||
- vport->load_flag & FC_UNLOADING)
+ test_bit(FC_UNLOADING, &vport->load_flag))
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2625,7 +2625,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
unlikely(vport->phba->link_state == LPFC_HBA_ERROR))
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
@@ -2644,7 +2644,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"port_state x%x\n",
ret, remoteport->port_state);
- if (vport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag)) {
/* Only 1 thread can drop the initial node
* reference. Check if another thread has set
* NLP_DROPPED.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d41fea53e4..561ced5503 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -872,7 +872,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct ulp_bde64 bpl;
int rc;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -984,7 +984,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
int rc;
- if (axchg->phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag))
return -ENODEV;
rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
@@ -1022,7 +1022,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
int id;
#endif
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
rc = -ENODEV;
goto aerr;
}
@@ -1145,7 +1145,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct lpfc_queue *wq;
unsigned long flags;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
if (!ctxp->hdwq)
@@ -1317,7 +1317,7 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
return -EINVAL;
phba = lpfc_nvmet->phba;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return -EINVAL;
hstate = atomic_read(&lpfc_nvmet->state);
@@ -1353,7 +1353,7 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
int ret;
phba = lpfc_nvmet->phba;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
ndlp = (struct lpfc_nodelist *)hosthandle;
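The lpfc_nvme.c and lpfc_nvmet.c hunks above apply the same fc_flag/load_flag conversion to the NVMe paths; note that the hot I/O submit check keeps its unlikely() annotation around the new test_bit(FC_UNLOADING, ...) call, hinting to the compiler that the unload case is the cold branch. A small userspace sketch of that idiom, with locally defined stand-ins for the kernel macros:

/* demo_unlikely.c - branch-prediction hint on a rare fast-path rejection */
#include <stdio.h>

/* The kernel's likely()/unlikely() wrap __builtin_expect; these local copies
 * are for illustration only. */
#define demo_likely(x)		__builtin_expect(!!(x), 1)
#define demo_unlikely(x)	__builtin_expect(!!(x), 0)

static int demo_submit_io(unsigned long load_flag, int io_id)
{
	/* Mirrors the shape of the patched submit-path check: unloading is
	 * rare, so the common path is laid out as the fall-through branch. */
	if (demo_unlikely(load_flag & 1UL)) {
		fprintf(stderr, "io %d rejected: unloading\n", io_id);
		return -1;
	}

	printf("io %d submitted\n", io_id);
	return 0;
}

int main(void)
{
	demo_submit_io(0UL, 1);		/* common path */
	demo_submit_io(1UL, 2);		/* rare rejection path */
	return 0;
}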
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index cf506556f3..4a6e5223a2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2723,14 +2723,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
sgde = scsi_sglist(cmd);
blksize = scsi_prot_interval(cmd);
data_src = (uint8_t *)sg_virt(sgde);
- data_len = sgde->length;
+ data_len = sg_dma_len(sgde);
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
start_ref_tag = scsi_prot_ref_tag(cmd);
start_app_tag = src->app_tag;
- len = sgpe->length;
+ len = sg_dma_len(sgpe);
while (src && protsegcnt) {
while (len) {
@@ -2795,7 +2795,7 @@ skipit:
goto out;
data_src = (uint8_t *)sg_virt(sgde);
- data_len = sgde->length;
+ data_len = sg_dma_len(sgde);
if ((data_len & (blksize - 1)) == 0)
chk_guard = 1;
}
@@ -2805,7 +2805,7 @@ skipit:
sgpe = sg_next(sgpe);
if (sgpe) {
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- len = sgpe->length;
+ len = sg_dma_len(sgpe);
} else {
src = NULL;
}
@@ -5331,16 +5331,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
- if (vport->phba->cfg_enable_bg) {
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_SCSI_CMD,
- "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
- "x%x reftag x%x cnt %u pt %x\n",
- cmnd->cmnd[0],
- scsi_prot_ref_tag(cmnd),
- scsi_logical_block_count(cmnd),
- (cmnd->cmnd[1]>>5));
- }
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
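Two things happen in the lpfc_scsi.c hunks above: the BlockGuard error-recalculation walk in lpfc_calc_bg_err() now takes its segment lengths from sg_dma_len() rather than the scatterlist entry's length field, and the queuecommand path drops the per-command "9038 BLKGRD: rcvd PROT_NORMAL cmd" informational message. The per-segment chk_guard decision that the walk relies on is the usual power-of-two alignment mask, sketched below in plain C; the helper and values are illustrative, and the sketch assumes the block size is a power of two (512 and 4096 being the typical protection intervals).

/* demo_blkalign.c - the alignment test behind the chk_guard decision */
#include <stdio.h>

/* Returns 1 when len is a whole number of blocks; blksize must be a power of
 * two for the mask trick to hold. */
static int demo_block_aligned(unsigned int len, unsigned int blksize)
{
	return (len & (blksize - 1)) == 0;
}

int main(void)
{
	const unsigned int blksize = 512;
	const unsigned int lens[] = { 4096, 4608, 1000, 512 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> %s guard check\n", lens[i],
		       demo_block_aligned(lens[i], blksize) ? "do" : "skip");
	return 0;
}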
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e182107255..a028e008dd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1036,7 +1036,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if ((!list_empty(&phba->active_rrq_list)) &&
- (!(phba->pport->load_flag & FC_UNLOADING)))
+ (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
mod_timer(&phba->rrq_tmr, next_time);
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
list_del(&rrq->list);
@@ -1180,12 +1180,12 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
return -EINVAL;
spin_lock_irqsave(&phba->hbalock, iflags);
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
goto out;
}
- if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
goto out;
if (!ndlp->active_rrqs_xri_bitmap)
@@ -1732,7 +1732,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(ulp_command != CMD_ABORT_XRI_CN) &&
(ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
- if (!(piocb->vport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
@@ -2830,7 +2830,7 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
- pmbox_done = (struct completion *)pmboxq->context3;
+ pmbox_done = pmboxq->ctx_u.mbox_wait;
if (pmbox_done)
complete(pmbox_done);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -2882,10 +2882,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
*/
- if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
if (mp) {
pmb->ctx_buf = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2904,22 +2904,22 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
- !(phba->pport->load_flag & FC_UNLOADING) &&
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
!pmb->u.mb.mbxStatus) {
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
/* Check to see if there are any deferred events to process */
if (ndlp) {
@@ -2927,7 +2927,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
+ "on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did,
ndlp, vport->load_flag, kref_read(&ndlp->kref));
@@ -2952,7 +2952,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* This nlp_put pairs with lpfc_sli4_resume_rpi */
if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
}
@@ -3235,7 +3235,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
- if (phba->pport->load_flag & FC_UNLOADING) {
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
failwhy = "Driver Unloading";
} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
failwhy = "NVME FC4 Disabled";
@@ -3940,7 +3940,7 @@ void lpfc_poll_eratt(struct timer_list *t)
if (!(phba->hba_flag & HBA_SETUP))
return;
- if (phba->pport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
/* Here we will also keep track of interrupts per sec of the hba */
@@ -5819,7 +5819,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
goto out_free_mboxq;
}
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -7582,7 +7582,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
- int status, total_cnt;
+ int status = 0, total_cnt;
int post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sgl_list);
@@ -8766,7 +8766,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
if (rc == MBX_SUCCESS) {
memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
rc = 0;
@@ -9548,8 +9548,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
- lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
+ lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
(uint8_t *)phba->mbox_ext,
pmbox->in_ext_byte_len);
}
@@ -9562,10 +9562,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
+ if (pmbox->in_ext_byte_len && pmbox->ext_buf)
lpfc_memcpy_to_slim(phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
- pmbox->ctx_buf, pmbox->in_ext_byte_len);
+ pmbox->ext_buf, pmbox->in_ext_byte_len);
if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* copy command data into host mbox for cmpl */
@@ -9688,9 +9688,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
- pmbox->ctx_buf,
+ pmbox->ext_buf,
pmbox->out_ext_byte_len);
}
} else {
@@ -9698,9 +9698,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
lpfc_memcpy_from_slim(
- pmbox->ctx_buf,
+ pmbox->ext_buf,
phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
pmbox->out_ext_byte_len);
@@ -10888,7 +10888,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
* all ELS pt2pt protocol traffic as well.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- (vport->fc_flag & FC_PT2PT)) {
+ test_bit(FC_PT2PT, &vport->fc_flag)) {
if (expect_rsp) {
bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
@@ -12428,7 +12428,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
*/
- if ((vport->load_flag & FC_UNLOADING) &&
+ if (test_bit(FC_UNLOADING, &vport->load_flag) &&
pring->ringno == LPFC_ELS_RING) {
if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
@@ -13262,9 +13262,9 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
- /* setup context3 field to pass wait_queue pointer to wake function */
+ /* setup ctx_u field to pass wait_queue pointer to wake function */
init_completion(&mbox_done);
- pmboxq->context3 = &mbox_done;
+ pmboxq->ctx_u.mbox_wait = &mbox_done;
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
@@ -13272,7 +13272,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
- pmboxq->context3 = NULL;
+ pmboxq->ctx_u.mbox_wait = NULL;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
@@ -13813,10 +13813,10 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
if (pmb->out_ext_byte_len &&
- pmb->ctx_buf)
+ pmb->ext_buf)
lpfc_sli_pcimem_bcopy(
phba->mbox_ext,
- pmb->ctx_buf,
+ pmb->ext_buf,
pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -13830,10 +13830,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
pmbox->un.varWords[0], 0);
if (!pmbox->mbxStatus) {
- mp = (struct lpfc_dmabuf *)
- (pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)
- pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was
* successful. new lets get
@@ -14340,8 +14338,8 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
mcqe_status,
pmbox->un.varWords[0], 0);
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ mp = pmb->ctx_buf;
+ ndlp = pmb->ctx_ndlp;
/* Reg_LOGIN of dflt RPI was successful. Mark the
* node as having an UNREG_LOGIN in progress to stop
@@ -14658,7 +14656,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Handle MDS Loopback frames */
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli4_handle_mds_loopback(phba->pport,
dma_buf);
else
@@ -18552,8 +18550,8 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
if (did == Fabric_DID)
return phba->pport;
- if ((phba->pport->fc_flag & FC_PT2PT) &&
- !(phba->link_state == LPFC_HBA_READY))
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
+ phba->link_state != LPFC_HBA_READY)
return phba->pport;
vports = lpfc_create_vport_work_array(phba);
@@ -18933,7 +18931,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
"oxid:x%x SID:x%x\n", oxid, sid);
return;
}
- /* Put ndlp onto pport node list */
+ /* Put ndlp onto vport node list */
lpfc_enqueue_node(vport, ndlp);
}
@@ -18953,7 +18951,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
return;
}
- ctiocb->vport = phba->pport;
+ ctiocb->vport = vport;
ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
@@ -19040,6 +19038,16 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
ctiocb->ndlp = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
+
+ /* if only usage of this nodelist is BLS response, release initial ref
+ * to free ndlp when transmit completes
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
+ !(ndlp->nlp_flag & NLP_DROPPED) &&
+ !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ lpfc_nlp_put(ndlp);
+ }
}
/**
@@ -19447,7 +19455,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
vport = phba->pport;
/* Handle MDS Loopback frames */
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_sli4_handle_mds_loopback(vport, dmabuf);
else
lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -19497,8 +19505,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
* The pt2pt protocol allows for discovery frames
* to be received without a registered VPI.
*/
- if (!(vport->fc_flag & FC_PT2PT) ||
- (phba->link_state == LPFC_HBA_READY)) {
+ if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
+ phba->link_state == LPFC_HBA_READY) {
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
@@ -19813,14 +19821,15 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
* lpfc_sli4_resume_rpi - Remove the rpi bitmask region
* @ndlp: pointer to lpfc nodelist data structure.
* @cmpl: completion call-back.
- * @arg: data to load as MBox 'caller buffer information'
+ * @iocbq: data to load as mbox ctx_u information
*
* This routine is invoked to remove the memory region that
* provided rpi via a bitmask.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
- void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
@@ -19849,7 +19858,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
- mboxq->ctx_buf = arg;
+ mboxq->ctx_u.save_iocb = iocbq;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mboxq->ctx_ndlp = ndlp;
@@ -20666,7 +20675,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
goto out;
mqe = &mboxq->u.mqe;
- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
+ mp = mboxq->ctx_buf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc)
goto out;
@@ -21025,7 +21034,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ act_mbx_ndlp = mb->ctx_ndlp;
/* This reference is local to this routine. The
* reference is removed at routine exit.
@@ -21054,7 +21063,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
restart_loop = 1;
@@ -21074,7 +21083,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+ ndlp = mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
spin_lock(&ndlp->lock);
@@ -22656,7 +22665,7 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
if_type = bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf);
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (job->vport->fc_flag & FC_PT2PT) {
+ if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
job->vport->fc_myDID);
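The lpfc_sli.c hunks above continue the driver-wide move from open-coded `fc_flag & FC_FOO` mask tests, which needed the host lock, to per-bit atomic helpers on an `unsigned long` word. A rough, self-contained sketch of that pattern (the structure and flag names below are invented for illustration; they are not lpfc's):

#include <linux/bitops.h>
#include <linux/types.h>

/* Invented flag bits; the real definitions live in lpfc.h. */
enum { EX_FLAG_UNLOADING, EX_FLAG_PT2PT };

struct ex_port {
        unsigned long flags;    /* previously a plain u32 guarded by a spinlock */
};

static bool ex_port_usable(struct ex_port *p)
{
        /* set_bit()/test_bit() are atomic per bit, so no lock is needed here. */
        if (test_bit(EX_FLAG_UNLOADING, &p->flags))
                return false;
        set_bit(EX_FLAG_PT2PT, &p->flags);
        return true;
}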
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index c911a39cb4..cf7c42ec03 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -182,11 +182,29 @@ typedef struct lpfcMboxq {
struct lpfc_mqe mqe;
} u;
struct lpfc_vport *vport; /* virtual port pointer */
- void *ctx_ndlp; /* an lpfc_nodelist pointer */
- void *ctx_buf; /* an lpfc_dmabuf pointer */
- void *context3; /* a generic pointer. Code must
- * accommodate the actual datatype.
- */
+ struct lpfc_nodelist *ctx_ndlp; /* caller ndlp pointer */
+ struct lpfc_dmabuf *ctx_buf; /* caller buffer information */
+ void *ext_buf; /* extended buffer for extended mbox
+ * cmds. Not a generic pointer.
+ * Use for storing virtual address.
+ */
+
+ /* Pointers that are seldom used during mbox execution, but require
+ * a saved context.
+ */
+ union {
+ unsigned long ox_rx_id; /* Used in els_rsp_rls_acc */
+ struct lpfc_rdp_context *rdp; /* Used in get_rdp_info */
+ struct lpfc_lcb_context *lcb; /* Used in set_beacon */
+ struct completion *mbox_wait; /* Used in issue_mbox_wait */
+ struct bsg_job_data *dd_data; /* Used in bsg_issue_mbox_cmpl
+ * and
+ * bsg_issue_mbox_ext_handle_job
+ */
+ struct lpfc_iocbq *save_iocb; /* Used in defer_plogi_acc and
+ * lpfc_mbx_cmpl_resume_rpi
+ */
+ } ctx_u;
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
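The lpfcMboxq change above replaces untyped `void *` context slots with typed members plus a `ctx_u` union keyed by completion path, so each handler reads its saved pointer without casts. A minimal sketch of the same idea with invented types:

struct ex_rdp_context;
struct ex_iocb;

struct ex_mbox {
        union {
                unsigned long ox_rx_id;         /* small scalar payload */
                struct ex_rdp_context *rdp;     /* one typed pointer ...   */
                struct ex_iocb *save_iocb;      /* ... per completion path */
        } ctx_u;
};

static struct ex_iocb *ex_cmpl_get_iocb(struct ex_mbox *mb)
{
        return mb->ctx_u.save_iocb;     /* no (struct ex_iocb *) cast needed */
}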
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 2541a8fba0..c1e9ec0243 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1118,8 +1118,9 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
- void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
+ struct lpfc_iocbq *iocbq);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index aba1c1cee8..915f2f11fb 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.17"
+#define LPFC_DRIVER_VERSION "14.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9e0e9e02d2..4439167a51 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -166,7 +166,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
}
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ mp = pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
@@ -238,13 +238,9 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
- uint32_t wait_flags = 0;
unsigned long wait_time_max;
unsigned long start_time;
- wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
- FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
-
/*
* The time constraint on this loop is a balance between the
* fabric RA_TOV value and dev_loss tmo. The driver's
@@ -255,14 +251,19 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
if ((vport->num_disc_nodes > 0) ||
- (vport->fc_flag & wait_flags) ||
+ test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
+ test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag) ||
+ test_bit(FC_NLP_MORE, &vport->fc_flag) ||
+ test_bit(FC_RSCN_DEFERRED, &vport->fc_flag) ||
+ test_bit(FC_NDISC_ACTIVE, &vport->fc_flag) ||
+ test_bit(FC_DISC_TMO, &vport->fc_flag) ||
((vport->port_state > LPFC_VPORT_FAILED) &&
(vport->port_state < LPFC_VPORT_READY))) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
- "1833 Vport discovery quiesce Wait:"
- " state x%x fc_flags x%x"
- " num_nodes x%x, waiting 1000 msecs"
- " total wait msecs x%x\n",
+ "1833 Vport discovery quiesce Wait: "
+ "state x%x fc_flags x%lx "
+ "num_nodes x%x, waiting 1000 msecs "
+ "total wait msecs x%x\n",
vport->port_state, vport->fc_flag,
vport->num_disc_nodes,
jiffies_to_msecs(jiffies - start_time));
@@ -270,9 +271,9 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
} else {
/* Base case. Wait variants satisfied. Break out */
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
- "1834 Vport discovery quiesced:"
- " state x%x fc_flags x%x"
- " wait msecs x%x\n",
+ "1834 Vport discovery quiesced: "
+ "state x%x fc_flags x%lx "
+ "wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies
- start_time));
@@ -283,7 +284,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
if (time_after(jiffies, wait_time_max))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1835 Vport discovery quiesce failed:"
- " state x%x fc_flags x%x wait msecs x%x\n",
+ " state x%x fc_flags x%lx wait msecs x%x\n",
vport->port_state, vport->fc_flag,
jiffies_to_msecs(jiffies - start_time));
}
@@ -407,7 +408,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fc_vport = fc_vport;
/* At this point we are fully registered with SCSI Layer. */
- vport->load_flag |= FC_ALLOW_FDMI;
+ set_bit(FC_ALLOW_FDMI, &vport->load_flag);
if (phba->cfg_enable_SmartSAN ||
(phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
/* Setup appropriate attribute masks */
@@ -420,7 +421,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* by the port.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pport->fc_flag & FC_VFI_REGISTERED)) {
+ test_bit(FC_VFI_REGISTERED, &pport->fc_flag)) {
rc = lpfc_sli4_init_vpi(vport);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -435,7 +436,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* Driver cannot INIT_VPI now. Set the flags to
* init_vpi when reg_vfi complete.
*/
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
rc = VPORT_OK;
goto out;
@@ -535,10 +536,9 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Can't disable during an outstanding delete. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return 0;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
@@ -556,11 +556,8 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
- if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
- }
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -574,7 +571,6 @@ enable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
@@ -582,16 +578,13 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_LOADING;
- if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_LOADING, &vport->load_flag);
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag)) {
lpfc_issue_init_vpi(vport);
goto out;
}
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
@@ -643,22 +636,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* If the vport is a static vport fail the deletion. */
if ((vport->vport_flag & STATIC_VPORT) &&
- !(phba->pport->load_flag & FC_UNLOADING)) {
+ !test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1837 vport_delete failed: Cannot delete "
"static vport.\n");
return VPORT_ERROR;
}
- spin_lock_irq(&phba->hbalock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FC_UNLOADING, &vport->load_flag);
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
*/
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
int check_count = 0;
while (check_count < ((phba->fc_ratov * 3) + 3) &&
vport->port_state > LPFC_VPORT_FAILED &&
@@ -721,7 +712,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
goto skip_logo;
}
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_discovery_wait(vport);
skip_logo:
@@ -736,7 +727,7 @@ skip_logo:
lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
lpfc_unreg_all_rpis(vport);
lpfc_unreg_default_rpis(vport);
/*
@@ -773,7 +764,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
return NULL;
spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
- if (port_iterator->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &port_iterator->load_flag))
continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
lpfc_printf_vlog(port_iterator, KERN_ERR,
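lpfc_discovery_wait() above now polls individual fc_flag bits inside a jiffies-bounded loop. The general shape of such a wait, reduced to a self-contained sketch (the names are illustrative and the flag word stands in for vport->fc_flag):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

struct ex_vport {
        unsigned long fc_flag;
};

enum { EX_FC_DISC_TMO };        /* invented bit number */

static void ex_discovery_wait(struct ex_vport *vp, unsigned int max_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(max_ms);

        while (time_before(jiffies, deadline)) {
                if (!test_bit(EX_FC_DISC_TMO, &vp->fc_flag))
                        return;         /* discovery has quiesced */
                msleep(1000);           /* lpfc also re-checks once per second */
        }
}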
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 6a01913210..377dcab32c 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -508,7 +508,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
return rc;
}
-static int mac53c94_remove(struct macio_dev *mdev)
+static void mac53c94_remove(struct macio_dev *mdev)
{
struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
struct Scsi_Host *host = fp->host;
@@ -526,11 +526,8 @@ static int mac53c94_remove(struct macio_dev *mdev)
scsi_host_put(host);
macio_release_resources(mdev);
-
- return 0;
}
-
static struct of_device_id mac53c94_match[] =
{
{
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 66a30a3e6c..38976f9445 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -219,7 +219,7 @@ mega_query_adapter(adapter_t *adapter)
raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
/* Issue a blocking command to the card */
- if ((retval = issue_scb_block(adapter, raw_mbox))) {
+ if (issue_scb_block(adapter, raw_mbox)) {
/* the adapter does not support 40ld */
mraid_ext_inquiry *ext_inq;
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e276583c59..1c15cac41d 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -54,7 +54,7 @@
#define KERN_DEBUG KERN_WARNING
#endif
-MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
+MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
MODULE_LICENSE("GPL");
@@ -1986,7 +1986,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
return -ENODEV;
}
-static int mesh_remove(struct macio_dev *mdev)
+static void mesh_remove(struct macio_dev *mdev)
{
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
struct Scsi_Host *mesh_host = ms->host;
@@ -2013,11 +2013,8 @@ static int mesh_remove(struct macio_dev *mdev)
macio_release_resources(mdev);
scsi_host_put(mesh_host);
-
- return 0;
}
-
static struct of_device_id mesh_match[] =
{
{
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 55d590b919..6a3db7032c 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2158,10 +2158,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(persistent_id);
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+ if (!sdev_priv_data)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
static struct attribute *mpi3mr_dev_attrs[] = {
&dev_attr_sas_address.attr,
&dev_attr_device_handle.attr,
&dev_attr_persistent_id.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
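The two attributes added above follow the usual sdev sysfs pattern: emit the current value with sysfs_emit() and parse writes with kstrtobool(). A generic sketch of that shape (the attribute name and backing flag are placeholders; note that the store above returns strlen(buf), while returning count is the more common idiom):

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static bool ex_enabled;         /* placeholder for per-device state */

static ssize_t ex_enable_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", ex_enabled);
}

static ssize_t ex_enable_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        bool val;

        if (kstrtobool(buf, &val))
                return -EINVAL;
        ex_enabled = val;
        return count;
}
static DEVICE_ATTR_RW(ex_enable);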
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bffd629c1..73c831a97d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -8,11 +8,12 @@
*/
#include "mpi3mr.h"
+#include <linux/idr.h>
/* global driver scop variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
-static int mrioc_ids;
+static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;
@@ -5072,7 +5073,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
mrioc = shost_priv(shost);
- mrioc->id = mrioc_ids++;
+ retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL);
+ if (retval < 0)
+ goto id_alloc_failed;
+ mrioc->id = (u8)retval;
sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
INIT_LIST_HEAD(&mrioc->list);
@@ -5222,9 +5226,11 @@ init_ioc_failed:
resource_alloc_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
+ ida_free(&mrioc_ida, mrioc->id);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
+id_alloc_failed:
scsi_host_put(shost);
shost_failed:
return retval;
@@ -5310,6 +5316,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mrioc->sas_hba.num_phys = 0;
}
+ ida_free(&mrioc_ida, mrioc->id);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
@@ -5525,6 +5532,7 @@ static void __exit mpi3mr_exit(void)
&driver_attr_event_counter);
pci_unregister_driver(&mpi3mr_pci_driver);
sas_release_transport(mpi3mr_transport_template);
+ ida_destroy(&mrioc_ida);
}
module_init(mpi3mr_init);
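mpi3mr_os.c above swaps a bare `mrioc_ids++` counter for an IDA so controller ids are unique, bounded, and reclaimed on remove. A sketch of the same allocator pattern (the [1, 255] range mirrors the U8_MAX bound used above; names are illustrative):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(ex_ida);

static int ex_alloc_id(void)
{
        /* Unique id in [1, 255], or a negative errno on failure. */
        return ida_alloc_range(&ex_ida, 1, 255, GFP_KERNEL);
}

static void ex_free_id(int id)
{
        ida_free(&ex_ida, id);  /* the id becomes reusable immediately */
}

/* ida_destroy(&ex_ida) belongs in module exit, as mpi3mr_exit() does above. */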
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b8120ca93c..86f553c617 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5481,7 +5481,7 @@ mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
* mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
*
* @ioc : per adapter object
- * @*sas_addr : return sas address
+ * @sas_addr : return sas address
* Return: 0 for success, non-zero for failure.
*/
static int
@@ -7916,26 +7916,22 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
}
/**
- * _base_diag_reset - the "big hammer" start of day reset
- * @ioc: per adapter object
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
-{
- u32 host_diagnostic;
- u32 ioc_state;
- u32 count;
- u32 hcb_size;
-
- ioc_info(ioc, "sending diag reset !!\n");
-
- pci_cfg_access_lock(ioc->pdev);
+* mpt3sas_base_unlock_and_get_host_diagnostic- enable Host Diagnostic Register writes
+* @ioc: per adapter object
+* @host_diagnostic: host diagnostic register content
+*
+* Return: 0 for success, non-zero for failure.
+*/
- drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+int
+mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
+ u32 *host_diagnostic)
+{
+ u32 count;
+ *host_diagnostic = 0;
count = 0;
+
do {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
@@ -7954,30 +7950,67 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20) {
ioc_info(ioc,
- "Stop writing magic sequence after 20 retries\n");
+ "Stop writing magic sequence after 20 retries\n");
_base_dump_reg_set(ioc);
- goto out;
+ return -EFAULT;
}
- host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
+ *host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
- ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
- count, host_diagnostic));
+ ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ count, *host_diagnostic));
- } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+ } while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+ return 0;
+}
- hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
+/**
+ * mpt3sas_base_lock_host_diagnostic: Disable Host Diagnostic Register writes
+ * @ioc: per adapter object
+ */
+void
+mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
+{
+ drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ ioc_info(ioc, "sending diag reset !!\n");
+
+ pci_cfg_access_lock(ioc->pdev);
+
+ drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+
+ mutex_lock(&ioc->hostdiag_unlock_mutex);
+ if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
+ goto out;
+
+ hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
&ioc->chip->HostDiagnostic);
- /*This delay allows the chip PCIe hardware time to finish reset tasks*/
+ /* This delay allows the chip PCIe hardware time to finish reset tasks */
msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
/* Approximately 300 second max wait */
for (count = 0; count < (300000000 /
- MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
+ MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
@@ -7990,13 +8023,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
- msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
+ /* Wait to pass the second read delay window */
+ msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000);
}
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
drsprintk(ioc,
- ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
+ ioc_info(ioc, "restart the adapter assuming the\n"
+ "HCB Address points to good F/W\n"));
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
@@ -8010,9 +8045,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
&ioc->chip->HostDiagnostic);
- drsprintk(ioc,
- ioc_info(ioc, "disable writes to the diagnostic register\n"));
- writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ mpt3sas_base_lock_host_diagnostic(ioc);
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
@@ -8030,6 +8064,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
out:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
@@ -8477,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pd_handles_sz++;
+ /*
+ * pd_handles_sz should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory touch may occur.
+ */
+ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
GFP_KERNEL);
if (!ioc->pd_handles) {
@@ -8494,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pend_os_device_add_sz++;
+
+ /*
+ * pend_os_device_add_sz should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory may occur.
+ */
+ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+ sizeof(unsigned long));
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
if (!ioc->pend_os_device_add) {
@@ -8785,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
if (ioc->facts.MaxDevHandle % 8)
pd_handles_sz++;
+ /*
+ * pd_handles should have, at least, the minimal room for
+ * set_bit()/test_bit(), otherwise out-of-memory touch may
+ * occur.
+ */
+ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
GFP_KERNEL);
if (!pd_handles) {
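The ALIGN() hunks above all address the same issue: a bitmap sized as MaxDevHandle/8 bytes can end mid-word, while set_bit()/test_bit() operate on whole unsigned longs. Rounding the byte count up to the word size, or sizing by bits with bitmap_zalloc(), keeps the bitops in bounds. A hedged sketch with an invented handle count:

#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static unsigned long *ex_alloc_handle_map(unsigned int max_handle)
{
        /* Byte-count sizing, rounded up as the driver now does ... */
        size_t sz = ALIGN(DIV_ROUND_UP(max_handle, 8), sizeof(unsigned long));
        unsigned long *map = kzalloc(sz, GFP_KERNEL);

        /* ... which is equivalent to the bit-count helper:
         * map = bitmap_zalloc(max_handle, GFP_KERNEL);
         */
        return map;
}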
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 6d0bc8c667..fe1e96fda2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 43
+#define MPT3SAS_DRIVER_VERSION "48.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 48
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1366,6 +1366,7 @@ struct MPT3SAS_ADAPTER {
u8 got_task_abort_from_ioctl;
struct mutex reset_in_progress_mutex;
+ struct mutex hostdiag_unlock_mutex;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
@@ -1790,6 +1791,9 @@ void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
+ u32 *host_diagnostic);
+void mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -2044,9 +2048,6 @@ void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 147cb7088d..87784c9624 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2543,6 +2543,56 @@ out:
return 0;
}
+/**
+ * _ctl_enable_diag_sbr_reload - enable sbr reload bit
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ *
+ * Enable the SBR reload bit
+ */
+static int
+_ctl_enable_diag_sbr_reload(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ u32 ioc_state, host_diagnostic;
+
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host)
+ return -EAGAIN;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL)
+ return -EFAULT;
+
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic & MPI2_DIAG_SBR_RELOAD)
+ return 0;
+
+ if (mutex_trylock(&ioc->hostdiag_unlock_mutex)) {
+ if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) {
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+ return -EFAULT;
+ }
+ } else
+ return -EAGAIN;
+
+ host_diagnostic |= MPI2_DIAG_SBR_RELOAD;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ mpt3sas_base_lock_host_diagnostic(ioc);
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+ if (!(host_diagnostic & MPI2_DIAG_SBR_RELOAD)) {
+ ioc_err(ioc, "%s: Failed to set Diag SBR Reload Bit\n", __func__);
+ return -EFAULT;
+ }
+
+ ioc_info(ioc, "%s: Successfully set the Diag SBR Reload Bit\n", __func__);
+ return 0;
+}
+
#ifdef CONFIG_COMPAT
/**
* _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
@@ -2719,6 +2769,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
ret = _ctl_addnl_diag_query(ioc, arg);
break;
+ case MPT3ENABLEDIAGSBRRELOAD:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_enable_diag_sbr_reload))
+ ret = _ctl_enable_diag_sbr_reload(ioc, arg);
+ break;
default:
dctlprintk(ioc,
ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
@@ -4034,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
@@ -4069,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
if (kstrtobool(buf, &ncq_prio_enable))
return -EINVAL;
- if (!scsih_ncq_prio_supp(sdev))
+ if (!sas_ata_ncq_prio_supported(sdev))
return -EINVAL;
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
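_ctl_enable_diag_sbr_reload() above serializes against the diag-reset path with the new hostdiag_unlock_mutex, but uses mutex_trylock() so the ioctl backs off with -EAGAIN instead of sleeping behind a reset. The control flow, reduced to a sketch (the hardware step is elided and the names are placeholders):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(ex_hostdiag_mutex);

static int ex_poke_host_diag(void)
{
        if (!mutex_trylock(&ex_hostdiag_mutex))
                return -EAGAIN;         /* a reset currently owns the register */

        /* ... unlock the write sequence, set the bit, relock ... */

        mutex_unlock(&ex_hostdiag_mutex);
        return 0;
}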
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 8f6ffb4026..171709e910 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -98,6 +98,8 @@
struct mpt3_diag_read_buffer)
#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \
struct mpt3_addnl_diag_query)
+#define MPT3ENABLEDIAGSBRRELOAD _IOWR(MPT3_MAGIC_NUMBER, 33, \
+ struct mpt3_enable_diag_sbr_reload)
/* Trace Buffer default UniqueId */
#define MPT2DIAGBUFFUNIQUEID (0x07075900)
@@ -448,4 +450,12 @@ struct mpt3_addnl_diag_query {
uint32_t reserved2[2];
};
+/**
+ * struct mpt3_enable_diag_sbr_reload - enable sbr reload
+ * @hdr - generic header
+ */
+struct mpt3_enable_diag_sbr_reload {
+ struct mpt3_ioctl_header hdr;
+};
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 51b5788da0..7e923e0249 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12240,6 +12240,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
+ mutex_init(&ioc->hostdiag_unlock_mutex);
/* initializing pci_access_mutex lock */
mutex_init(&ioc->pci_access_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -12572,29 +12573,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
- struct scsi_vpd *vpd;
- bool ncq_prio_supp = false;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
-
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
-
- return ncq_prio_supp;
-}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 5c26a13ffb..7b27618fd7 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -880,9 +880,9 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
pm8001_ha->fw_status = FLASH_OK;
- return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
- flash_error_table[i].err_code,
- flash_error_table[i].reason);
+ return sysfs_emit(buf, "status=%x %s\n",
+ flash_error_table[i].err_code,
+ flash_error_table[i].reason);
}
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e8bcc3a887..0614b7e366 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -61,7 +61,9 @@ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
* pmcraid_minor - minor number(s) to use
*/
static unsigned int pmcraid_major;
-static struct class *pmcraid_class;
+static const struct class pmcraid_class = {
+ .name = PMCRAID_DEVFILE,
+};
static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
@@ -4723,7 +4725,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
if (error)
pmcraid_release_minor(minor);
else
- device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
+ device_create(&pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
NULL, "%s%u", PMCRAID_DEVFILE, minor);
return error;
}
@@ -4739,7 +4741,7 @@ static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
{
pmcraid_release_minor(MINOR(pinstance->cdev.dev));
- device_destroy(pmcraid_class,
+ device_destroy(&pmcraid_class,
MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
cdev_del(&pinstance->cdev);
}
@@ -5390,10 +5392,10 @@ static int __init pmcraid_init(void)
}
pmcraid_major = MAJOR(dev);
- pmcraid_class = class_create(PMCRAID_DEVFILE);
- if (IS_ERR(pmcraid_class)) {
- error = PTR_ERR(pmcraid_class);
+ error = class_register(&pmcraid_class);
+
+ if (error) {
pmcraid_err("failed to register with sysfs, error = %x\n",
error);
goto out_unreg_chrdev;
@@ -5402,7 +5404,7 @@ static int __init pmcraid_init(void)
error = pmcraid_netlink_init();
if (error) {
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
goto out_unreg_chrdev;
}
@@ -5413,7 +5415,7 @@ static int __init pmcraid_init(void)
pmcraid_err("failed to register pmcraid driver, error = %x\n",
error);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
pmcraid_netlink_release();
out_unreg_chrdev:
@@ -5432,7 +5434,7 @@ static void __exit pmcraid_exit(void)
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
- class_destroy(pmcraid_class);
+ class_unregister(&pmcraid_class);
}
module_init(pmcraid_init);
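pmcraid above moves from class_create()/class_destroy() to a statically defined `const struct class` registered with class_register(), the currently preferred form. Minimal sketch (the class name is a placeholder):

#include <linux/device.h>
#include <linux/init.h>

static const struct class ex_class = {
        .name = "ex_devfile",
};

static int __init ex_init(void)
{
        return class_register(&ex_class);       /* 0 on success, -errno otherwise */
}

static void __exit ex_exit(void)
{
        class_unregister(&ex_class);
}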
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 8deb2001dc..37eed6a278 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -120,15 +120,11 @@ static ssize_t
qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- size_t cnt = 0;
-
- if (*ppos)
- return 0;
+ char buf[64];
+ int len;
- cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
static int
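The qedi change above replaces a hand-rolled *ppos dance, which also wrote into the user pointer directly, with simple_read_from_buffer(): format into a kernel buffer and let the helper handle the copy, offset and short-read bookkeeping. A sketch of the idiom with a placeholder value (scnprintf() is used here for bounds safety; the patch itself uses sprintf() into a 64-byte buffer, which is large enough for its message):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static int ex_value;    /* placeholder for qedi_do_not_recover */

static ssize_t ex_read(struct file *filp, char __user *ubuf,
                       size_t count, loff_t *ppos)
{
        char buf[64];
        int len = scnprintf(buf, sizeof(buf), "value=%d\n", ex_value);

        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}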
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 27bce80262..8958547ac1 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2478,7 +2478,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
/* Load return mailbox registers. */
optr = mb;
iptr = (uint16_t *) &ha->mailbox_out[0];
- mr = MAILBOX_REGISTER_COUNT;
memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
if (ha->flags.reset_marker)
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d903563e96..7627fd807b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.09.100-k"
+#define QLA2XXX_VERSION "10.02.09.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 9
-#define QLA_DRIVER_BETA_VER 100
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 5d560d9b89..6177f4798f 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1468,7 +1468,7 @@ static struct platform_driver qpti_sbus_driver = {
module_platform_driver(qpti_sbus_driver);
MODULE_DESCRIPTION("QlogicISP SBUS driver");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 8cad9792a5..ee69bd3588 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
if (result < SCSI_VPD_HEADER_SIZE)
return 0;
+ if (result > sizeof(vpd)) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: long VPD page 0 length: %d bytes\n",
+ __func__, result);
+ result = sizeof(vpd);
+ }
+
result -= SCSI_VPD_HEADER_SIZE;
if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
return 0;
@@ -517,6 +524,8 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
if (vpd_buf->data[i] == 0xb2)
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
+ if (vpd_buf->data[i] == 0xb7)
+ scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
}
kfree(vpd_buf);
}
@@ -664,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
sdev->use_10_for_rw = 0;
sdev->cdl_supported = 1;
+
+ /*
+ * If the device supports CDL, make sure that the current drive
+ * feature status is consistent with the user controlled
+ * cdl_enable state.
+ */
+ scsi_cdl_enable(sdev, sdev->cdl_enable);
} else {
sdev->cdl_supported = 0;
}
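The scsi_get_vpd_size() hunk above guards against a device reporting a VPD page 0 length larger than the buffer that was actually read, clamping before the memchr() scan. The same defensive shape in isolation (the buffer size and warning text are placeholders):

#include <linux/device.h>

static int ex_clamp_reported_len(struct device *dev, int reported, int bufsz)
{
        if (reported > bufsz) {
                dev_warn_once(dev, "long VPD page 0 length: %d bytes\n",
                              reported);
                reported = bufsz;       /* never scan past what was fetched */
        }
        return reported;
}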
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d03d66f114..acf0592d63 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -43,6 +43,7 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
+#include <linux/cleanup.h>
#include <net/checksum.h>
@@ -532,6 +533,8 @@ static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -606,6 +609,9 @@ static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
+ {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
@@ -896,10 +902,12 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
+static atomic_long_t writes_by_group_number[64];
+
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
-static struct bus_type pseudo_lld_bus;
+static const struct bus_type pseudo_lld_bus;
static struct device_driver sdebug_driverfs_driver = {
.name = sdebug_proc_name,
@@ -1867,6 +1875,19 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
return 0x3c;
}
+#define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
+
+enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
+
+/* Block limits extension VPD page (SBC-4) */
+static int inquiry_vpd_b7(unsigned char *arrb4)
+{
+ memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
+ arrb4[1] = 1; /* Reduced stream control support (RSCS) */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
+ return SDEBUG_BLE_LEN_AFTER_B4;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1903,7 +1924,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
-
+
+ arr[1] = cmd[2];
port_group_id = (((host_no + 1) & 0x7f) << 8) +
(devip->channel & 0x7f);
if (sdebug_vpd_use_hostno == 0)
@@ -1914,7 +1936,6 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
(devip->target * 1000) - 3;
len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
if (0 == cmd[2]) { /* supported vital product data pages */
- arr[1] = cmd[2]; /*sanity */
n = 4;
arr[n++] = 0x0; /* this page */
arr[n++] = 0x80; /* unit serial number */
@@ -1932,26 +1953,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0xb2; /* LB Provisioning */
if (is_zbc)
arr[n++] = 0xb6; /* ZB dev. char. */
+ arr[n++] = 0xb7; /* Block limits extension */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
- arr[1] = cmd[2]; /*sanity */
arr[3] = len;
memcpy(&arr[4], lu_id_str, len);
} else if (0x83 == cmd[2]) { /* device identification */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
target_dev_id, lu_id_num,
lu_id_str, len,
&devip->lu_name);
} else if (0x84 == cmd[2]) { /* Software interface ident. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_84(&arr[4]);
} else if (0x85 == cmd[2]) { /* Management network addresses */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_85(&arr[4]);
} else if (0x86 == cmd[2]) { /* extended inquiry */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
@@ -1959,33 +1976,32 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
else
arr[4] = 0x0; /* no protection stuff */
- arr[5] = 0x7; /* head of q, ordered + simple q's */
+ /*
+ * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
+ * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
+ */
+ arr[5] = 0x17;
} else if (0x87 == cmd[2]) { /* mode page policy */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x8; /* number of following entries */
arr[4] = 0x2; /* disconnect-reconnect mp */
arr[6] = 0x80; /* mlus, shared */
arr[8] = 0x18; /* protocol specific lu */
arr[10] = 0x82; /* mlus, per initiator port */
} else if (0x88 == cmd[2]) { /* SCSI Ports */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
- arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b6(devip, &arr[4]);
+ } else if (cmd[2] == 0xb7) { /* block limits extension page */
+ arr[3] = inquiry_vpd_b7(&arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -2554,6 +2570,40 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
return sizeof(ctrl_m_pg);
}
+/* IO Advice Hints Grouping mode page */
+static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
+{
+ /* IO Advice Hints Grouping mode page */
+ struct grouping_m_pg {
+ u8 page_code; /* OR 0x40 when subpage_code > 0 */
+ u8 subpage_code;
+ __be16 page_length;
+ u8 reserved[12];
+ struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
+ };
+ static const struct grouping_m_pg gr_m_pg = {
+ .page_code = 0xa | 0x40,
+ .subpage_code = 5,
+ .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
+ .descr = {
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 0 },
+ }
+ };
+
+ BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
+ 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
+ memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
+ if (1 == pcontrol) {
+ /* There are no changeable values so clear from byte 4 on. */
+ memset(p + 4, 0, sizeof(gr_m_pg) - 4);
+ }
+ return sizeof(gr_m_pg);
+}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
@@ -2627,7 +2677,8 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
-#define SDEBUG_MAX_MSENSE_SZ 256
+/* PAGE_SIZE is more than necessary but provides room for future expansion. */
+#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
static int resp_mode_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
@@ -2638,10 +2689,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
+ unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ if (!arr)
+ return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
@@ -2699,45 +2753,63 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
ap = arr + offset;
}
- if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
- /* TODO: Control Extension page */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
- bad_pcode = false;
-
+ /*
+ * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
+ * len += resp_*_pg(ap + len, pcontrol, target);
+ */
switch (pcode) {
case 0x1: /* Read-Write error recovery page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_err_recov_pg(ap, pcontrol, target);
offset += len;
break;
case 0x2: /* Disconnect-Reconnect page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3: /* Format device page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0x8: /* Caching page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0xa: /* Control Mode page, all devices */
- len = resp_ctrl_m_pg(ap, pcontrol, target);
+ switch (subpcode) {
+ case 0:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ break;
+ case 0x05:
+ len = resp_grouping_m_pg(ap, pcontrol, target);
+ break;
+ case 0xff:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ break;
+ default:
+ goto bad_subpcode;
+ }
offset += len;
break;
case 0x19: /* if spc==1 then sas phy, control+discover */
- if ((subpcode > 0x2) && (subpcode < 0xff)) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
+ if (subpcode > 0x2 && subpcode < 0xff)
+ goto bad_subpcode;
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2749,49 +2821,50 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
offset += len;
break;
case 0x1c: /* Informational Exceptions Mode page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_iec_m_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3f: /* Read all Mode pages */
- if ((0 == subpcode) || (0xff == subpcode)) {
- len = resp_err_recov_pg(ap, pcontrol, target);
- len += resp_disconnect_pg(ap + len, pcontrol, target);
- if (is_disk) {
- len += resp_format_pg(ap + len, pcontrol,
- target);
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- } else if (is_zbc) {
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- }
- len += resp_ctrl_m_pg(ap + len, pcontrol, target);
- len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
- if (0xff == subpcode) {
- len += resp_sas_pcd_m_spg(ap + len, pcontrol,
- target, target_dev_id);
- len += resp_sas_sha_m_spg(ap + len, pcontrol);
- }
- len += resp_iec_m_pg(ap + len, pcontrol, target);
- offset += len;
- } else {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ len += resp_disconnect_pg(ap + len, pcontrol, target);
+ if (is_disk) {
+ len += resp_format_pg(ap + len, pcontrol, target);
+ len += resp_caching_pg(ap + len, pcontrol, target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol, target);
}
+ len += resp_ctrl_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode)
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode) {
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
+ target_dev_id);
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ }
+ len += resp_iec_m_pg(ap + len, pcontrol, target);
+ offset += len;
break;
default:
- bad_pcode = true;
- break;
- }
- if (bad_pcode) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
- return check_condition_result;
+ goto bad_pcode;
}
if (msense_6)
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+
+bad_subpcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -3306,7 +3379,8 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
- u32 sg_skip, u64 lba, u32 num, bool do_write)
+ u32 sg_skip, u64 lba, u32 num, bool do_write,
+ u8 group_number)
{
int ret;
u64 block, rest = 0;
@@ -3325,6 +3399,10 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
return 0;
if (scp->sc_data_direction != dir)
return -1;
+
+ if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
+ atomic_long_inc(&writes_by_group_number[group_number]);
+
fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
@@ -3698,7 +3776,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, false);
+ ret = do_device_access(sip, scp, 0, lba, num, false, 0);
sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3883,6 +3961,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
u32 num;
+ u8 group = 0;
u32 ei_lba;
int ret;
u64 lba;
@@ -3894,11 +3973,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be64(cmd + 2);
num = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
check_prot = true;
break;
case WRITE_10:
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x3f;
num = get_unaligned_be16(cmd + 7);
check_prot = true;
break;
@@ -3913,15 +3994,18 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
num = get_unaligned_be32(cmd + 6);
+ group = cmd[6] & 0x3f;
check_prot = true;
break;
case 0x53: /* XDWRITEREAD(10) */
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x1f;
num = get_unaligned_be16(cmd + 7);
check_prot = false;
break;
default: /* assume WRITE(32) */
+ group = cmd[6] & 0x3f;
lba = get_unaligned_be64(cmd + 12);
ei_lba = get_unaligned_be32(cmd + 20);
num = get_unaligned_be32(cmd + 28);
@@ -3976,7 +4060,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true, group);
if (unlikely(scsi_debug_lbp()))
map_region(sip, lba, num);
/* If ZBC zone then bump its write pointer */
@@ -4028,12 +4112,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
+ u8 group;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
if (cmd[0] == VARIABLE_LENGTH_CMD) {
is_16 = false;
+ group = cmd[6] & 0x3f;
wrprotect = (cmd[10] >> 5) & 0x7;
lbdof = get_unaligned_be16(cmd + 12);
num_lrd = get_unaligned_be16(cmd + 16);
@@ -4044,6 +4130,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
lbdof = get_unaligned_be16(cmd + 4);
num_lrd = get_unaligned_be16(cmd + 8);
bt_len = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
if (unlikely(have_dif_prot)) {
if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
wrprotect) {
@@ -4132,7 +4219,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
@@ -4507,6 +4594,51 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u16 starting_stream_id, stream_id;
+ const u8 *cmd = scp->cmnd;
+ u32 alloc_len, offset;
+ u8 arr[256] = {};
+ struct scsi_stream_status_header *h = (void *)arr;
+
+ starting_stream_id = get_unaligned_be16(cmd + 4);
+ alloc_len = get_unaligned_be32(cmd + 10);
+
+ if (alloc_len < 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
+ return check_condition_result;
+ }
+
+ if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
+ return check_condition_result;
+ }
+
+ /*
+ * The GET STREAM STATUS command only reports status information
+ * about open streams. Treat the non-permanent stream as open.
+ */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
+ &h->number_of_open_streams);
+
+ for (offset = 8, stream_id = starting_stream_id;
+ offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
+ stream_id < MAXIMUM_NUMBER_OF_STREAMS;
+ offset += 8, stream_id++) {
+ struct scsi_stream_status *stream_status = (void *)arr + offset;
+
+ stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
+ put_unaligned_be16(stream_id,
+ &stream_status->stream_identifier);
+ stream_status->rel_lifetime = stream_id + 1;
+ }
+ put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
+
+ return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
+}
+
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -7182,6 +7314,30 @@ static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
+static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
+{
+ char *p = buf, *end = buf + PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ p += scnprintf(p, end - p, "%d %ld\n", i,
+ atomic_long_read(&writes_by_group_number[i]));
+
+ return p - buf;
+}
+
+static ssize_t group_number_stats_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ atomic_long_set(&writes_by_group_number[i], 0);
+
+ return count;
+}
+static DRIVER_ATTR_RW(group_number_stats);
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -7228,6 +7384,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_cdb_len.attr,
&driver_attr_tur_ms_to_ready.attr,
&driver_attr_zbc.attr,
+ &driver_attr_group_number_stats.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -8405,7 +8562,7 @@ static void sdebug_driver_remove(struct device *dev)
scsi_host_put(sdbg_host->shost);
}
-static struct bus_type pseudo_lld_bus = {
+static const struct bus_type pseudo_lld_bus = {
.name = "pseudo",
.probe = sdebug_driver_probe,
.remove = sdebug_driver_remove,
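The scsi_debug hunks above pick up the six-bit GROUP NUMBER from a different CDB byte depending on the write opcode and count each write in writes_by_group_number[]. A minimal sketch of that opcode-to-offset mapping for the unambiguous cases, using the hypothetical helper name sdebug_group_number() (not part of this patch):

/*
 * Sketch only: mirrors the GROUP NUMBER offsets used by the
 * resp_write_dt0() hunks above for WRITE(10), WRITE(16) and WRITE(32).
 * sdebug_group_number() is a hypothetical helper name.
 */
#include <linux/types.h>
#include <scsi/scsi_proto.h>

static u8 sdebug_group_number(const u8 *cmd)
{
	switch (cmd[0]) {
	case WRITE_10:
		return cmd[6] & 0x3f;		/* byte 6, bits 5:0 */
	case WRITE_16:
		return cmd[14] & 0x3f;		/* byte 14, bits 5:0 */
	case VARIABLE_LENGTH_CMD:		/* WRITE(32) */
		return cmd[6] & 0x3f;		/* byte 6, bits 5:0 */
	default:
		return 0;			/* no group number */
	}
}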
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3fcaf10a9d..ba7237e838 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -551,9 +551,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
if (model)
strflags = strsep(&next, next_check);
if (!model || !strflags) {
- printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
- " '%s'\n", __func__, vendor, model,
- strflags);
+ pr_err("%s: bad dev info string '%s' '%s' '%s'\n",
+ __func__, vendor, model ? model : "",
+ strflags ? strflags : "");
res = -EINVAL;
} else
res = scsi_dev_info_list_add(0 /* compatible */, vendor,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 189dfeb378..5b3230ef51 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,6 +184,92 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+void scsi_failures_reset_retries(struct scsi_failures *failures)
+{
+ struct scsi_failure *failure;
+
+ failures->total_retries = 0;
+
+ for (failure = failures->failure_definitions; failure->result;
+ failure++)
+ failure->retries = 0;
+}
+EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);
+
+/**
+ * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
+ * @scmd: scsi_cmnd to check.
+ * @failures: scsi_failures struct that lists failures to check for.
+ *
+ * Returns -EAGAIN if the caller should retry, else 0.
+ */
+static int scsi_check_passthrough(struct scsi_cmnd *scmd,
+ struct scsi_failures *failures)
+{
+ struct scsi_failure *failure;
+ struct scsi_sense_hdr sshdr;
+ enum sam_status status;
+
+ if (!failures)
+ return 0;
+
+ for (failure = failures->failure_definitions; failure->result;
+ failure++) {
+ if (failure->result == SCMD_FAILURE_RESULT_ANY)
+ goto maybe_retry;
+
+ if (host_byte(scmd->result) &&
+ host_byte(scmd->result) == host_byte(failure->result))
+ goto maybe_retry;
+
+ status = status_byte(scmd->result);
+ if (!status)
+ continue;
+
+ if (failure->result == SCMD_FAILURE_STAT_ANY &&
+ !scsi_status_is_good(scmd->result))
+ goto maybe_retry;
+
+ if (status != status_byte(failure->result))
+ continue;
+
+ if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION ||
+ failure->sense == SCMD_FAILURE_SENSE_ANY)
+ goto maybe_retry;
+
+ if (!scsi_command_normalize_sense(scmd, &sshdr))
+ return 0;
+
+ if (failure->sense != sshdr.sense_key)
+ continue;
+
+ if (failure->asc == SCMD_FAILURE_ASC_ANY)
+ goto maybe_retry;
+
+ if (failure->asc != sshdr.asc)
+ continue;
+
+ if (failure->ascq == SCMD_FAILURE_ASCQ_ANY ||
+ failure->ascq == sshdr.ascq)
+ goto maybe_retry;
+ }
+
+ return 0;
+
+maybe_retry:
+ if (failure->allowed) {
+ if (failure->allowed == SCMD_FAILURE_NO_LIMIT ||
+ ++failure->retries <= failure->allowed)
+ return -EAGAIN;
+ } else {
+ if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT ||
+ ++failures->total_retries <= failures->total_allowed)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
/**
* scsi_execute_cmd - insert request and wait for the result
* @sdev: scsi_device
@@ -192,7 +278,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @buffer: data buffer
* @bufflen: len of buffer
* @timeout: request timeout in HZ
- * @retries: number of times to retry request
+ * @ml_retries: number of times SCSI midlayer will retry request
* @args: Optional args. See struct definition for field descriptions
*
* Returns the scsi_cmnd result field if a command was executed, or a negative
@@ -200,7 +286,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
*/
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
blk_opf_t opf, void *buffer, unsigned int bufflen,
- int timeout, int retries,
+ int timeout, int ml_retries,
const struct scsi_exec_args *args)
{
static const struct scsi_exec_args default_args;
@@ -214,6 +300,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
args->sense_len != SCSI_SENSE_BUFFERSIZE))
return -EINVAL;
+retry:
req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -227,7 +314,7 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
scmd = blk_mq_rq_to_pdu(req);
scmd->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
- scmd->allowed = retries;
+ scmd->allowed = ml_retries;
scmd->flags |= args->scmd_flags;
req->timeout = timeout;
req->rq_flags |= RQF_QUIET;
@@ -237,6 +324,11 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
*/
blk_execute_rq(req, true);
+ if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
+ blk_mq_free_request(req);
+ goto retry;
+ }
+
/*
* Some devices (USB mass-storage in particular) may transfer
* garbage data together with a residue indicating that the data
@@ -2171,11 +2263,25 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
unsigned char cmd[12];
int use_10_for_ms;
int header_length;
- int result, retry_count = retries;
+ int result;
struct scsi_sense_hdr my_sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = retries,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
/* caller might not be interested in sense, but we need it */
.sshdr = sshdr ? : &my_sshdr,
+ .failures = &failures,
};
memset(data, 0, sizeof(*data));
@@ -2237,12 +2343,6 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
goto retry;
}
}
- if (scsi_status_is_check_condition(result) &&
- sshdr->sense_key == UNIT_ATTENTION &&
- retry_count) {
- retry_count--;
- goto retry;
- }
}
return -EIO;
}
@@ -3335,3 +3435,7 @@ void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
scmd->result = SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(scsi_build_sense);
+
+#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
+#include "scsi_lib_test.c"
+#endif
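The scsi_failures/scsi_check_passthrough() machinery added above lets passthrough callers declare which command outcomes are retryable instead of open-coding retry loops around scsi_execute_cmd(). A minimal caller sketch, assuming the struct scsi_failure/scsi_failures and SCMD_FAILURE_* definitions are visible through <scsi/scsi_cmnd.h>, with a hypothetical example_tur() wrapper; the scsi_scan.c, sd.c, ses.c and spi transport hunks later in this patch apply the same pattern:

/*
 * Sketch only: retry TEST UNIT READY up to three times on any
 * UNIT ATTENTION. example_tur() is a hypothetical function name.
 */
#include <linux/blk_types.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

static int example_tur(struct scsi_device *sdev)
{
	static const u8 cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.allowed = 3,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	/*
	 * The ml_retries argument (3) sets scmd->allowed for mid-layer
	 * error handling; retries on the failures above are driven by
	 * scsi_check_passthrough() resubmitting the request inside
	 * scsi_execute_cmd().
	 */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				30 * HZ, 3, &exec_args);
}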
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
new file mode 100644
index 0000000000..99834426a1
--- /dev/null
+++ b/drivers/scsi/scsi_lib_test.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for scsi_lib.c.
+ *
+ * Copyright (C) 2023, Oracle Corporation
+ */
+#include <kunit/test.h>
+
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#define SCSI_LIB_TEST_MAX_ALLOWED 3
+#define SCSI_LIB_TEST_TOTAL_MAX_ALLOWED 5
+
+static void scsi_lib_test_multiple_sense(struct kunit *test)
+{
+ struct scsi_failure multiple_sense_failure_defs[] = {
+ {
+ .sense = DATA_PROTECT,
+ .asc = 0x1,
+ .ascq = 0x1,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x11,
+ .ascq = 0x0,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x11,
+ .ascq = 0x22,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ABORTED_COMMAND,
+ .asc = 0x11,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = HARDWARE_ERROR,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = ILLEGAL_REQUEST,
+ .asc = 0x91,
+ .ascq = 0x36,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = multiple_sense_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /* Match end of array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* Basic match in array */
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x11, 0x0);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* No matching sense entry */
+ scsi_build_sense(&sc, 0, MISCOMPARE, 0x11, 0x11);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Match using SCMD_FAILURE_ASCQ_ANY */
+ scsi_build_sense(&sc, 0, ABORTED_COMMAND, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* Fail to match */
+ scsi_build_sense(&sc, 0, ABORTED_COMMAND, 0x22, 0x22);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Match using SCMD_FAILURE_ASC_ANY */
+ scsi_build_sense(&sc, 0, HARDWARE_ERROR, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ /* No matching status entry */
+ sc.result = SAM_STAT_RESERVATION_CONFLICT;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ /* Test hitting allowed limit */
+ scsi_build_sense(&sc, 0, NOT_READY, 0x11, 0x22);
+ for (i = 0; i < SCSI_LIB_TEST_MAX_ALLOWED; i++)
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ /* reset retries so we can retest */
+ failures.failure_definitions = multiple_sense_failure_defs;
+ scsi_failures_reset_retries(&failures);
+
+ /* Test no retries allowed */
+ scsi_build_sense(&sc, 0, DATA_PROTECT, 0x1, 0x1);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_sense(struct kunit *test)
+{
+ struct scsi_failure any_sense_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_SENSE_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_sense_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Match using SCMD_FAILURE_SENSE_ANY */
+ failures.failure_definitions = any_sense_failure_defs;
+ scsi_build_sense(&sc, 0, MEDIUM_ERROR, 0x11, 0x22);
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_host(struct kunit *test)
+{
+ struct scsi_failure retryable_host_failure_defs[] = {
+ {
+ .result = DID_TRANSPORT_DISRUPTED << 16,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {
+ .result = DID_TIME_OUT << 16,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = retryable_host_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* No matching host byte entry */
+ failures.failure_definitions = retryable_host_failure_defs;
+ sc.result = DID_NO_CONNECT << 16;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ /* Matching host byte entry */
+ sc.result = DID_TIME_OUT << 16;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_failure(struct kunit *test)
+{
+ struct scsi_failure any_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Match SCMD_FAILURE_RESULT_ANY */
+ failures.failure_definitions = any_failure_defs;
+ sc.result = DID_TRANSPORT_FAILFAST << 16;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_any_status(struct kunit *test)
+{
+ struct scsi_failure any_status_failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_STAT_ANY,
+ .allowed = SCSI_LIB_TEST_MAX_ALLOWED,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = any_status_failure_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+
+ /* Test any status handling */
+ failures.failure_definitions = any_status_failure_defs;
+ sc.result = SAM_STAT_RESERVATION_CONFLICT;
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_total_allowed(struct kunit *test)
+{
+ struct scsi_failure total_allowed_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Fail all CCs except the UA above */
+ {
+ .sense = SCMD_FAILURE_SENSE_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other errors not listed above */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = total_allowed_defs,
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /* Test total_allowed */
+ failures.failure_definitions = total_allowed_defs;
+ scsi_failures_reset_retries(&failures);
+ failures.total_allowed = SCSI_LIB_TEST_TOTAL_MAX_ALLOWED;
+
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ sc.result = DID_TIME_OUT << 16;
+ /* We have now hit the total_allowed limit so no more retries */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_mixed_total(struct kunit *test)
+{
+ struct scsi_failure mixed_total_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x28,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .allowed = 1,
+ .result = DID_TIME_OUT << 16,
+ },
+ {}
+ };
+ u8 sense[SCSI_SENSE_BUFFERSIZE] = {};
+ struct scsi_failures failures = {
+ .failure_definitions = mixed_total_defs,
+ };
+ struct scsi_cmnd sc = {
+ .sense_buffer = sense,
+ };
+ int i;
+
+ /*
+ * Test total_allowed when there is a mix of per failure allowed
+ * and total_allowed limits.
+ */
+ failures.failure_definitions = mixed_total_defs;
+ scsi_failures_reset_retries(&failures);
+ failures.total_allowed = SCSI_LIB_TEST_TOTAL_MAX_ALLOWED;
+
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ /* Do not retry since we are now over total_allowed limit */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+
+ scsi_failures_reset_retries(&failures);
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x28, 0x0);
+ for (i = 0; i < SCSI_LIB_TEST_TOTAL_MAX_ALLOWED; i++)
+ /* Retry since we are under the total_allowed limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc,
+ &failures));
+ sc.result = DID_TIME_OUT << 16;
+ /* Retry because this failure has a per failure limit */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
+ scsi_build_sense(&sc, 0, UNIT_ATTENTION, 0x29, 0x0);
+ /* total_allowed is now hit so no more retries */
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+}
+
+static void scsi_lib_test_check_passthough(struct kunit *test)
+{
+ scsi_lib_test_multiple_sense(test);
+ scsi_lib_test_any_sense(test);
+ scsi_lib_test_host(test);
+ scsi_lib_test_any_failure(test);
+ scsi_lib_test_any_status(test);
+ scsi_lib_test_total_allowed(test);
+ scsi_lib_test_mixed_total(test);
+}
+
+static struct kunit_case scsi_lib_test_cases[] = {
+ KUNIT_CASE(scsi_lib_test_check_passthough),
+ {}
+};
+
+static struct kunit_suite scsi_lib_test_suite = {
+ .name = "scsi_lib",
+ .test_cases = scsi_lib_test_cases,
+};
+
+kunit_test_suite(scsi_lib_test_suite);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 1fbfe1b52c..9fc397a9ce 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -156,7 +156,7 @@ extern void scsi_sysfs_device_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
-extern struct bus_type scsi_bus_type;
+extern const struct bus_type scsi_bus_type;
extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c
new file mode 100644
index 0000000000..7fa0a78a2a
--- /dev/null
+++ b/drivers/scsi/scsi_proto_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Google LLC
+ */
+#include <kunit/test.h>
+#include <asm-generic/unaligned.h>
+#include <scsi/scsi_proto.h>
+
+static void test_scsi_proto(struct kunit *test)
+{
+ static const union {
+ struct scsi_io_group_descriptor desc;
+ u8 arr[sizeof(struct scsi_io_group_descriptor)];
+ } d = { .arr = { 0x45, 0, 0, 0, 0xb0, 0xe4, 0xe3 } };
+ KUNIT_EXPECT_EQ(test, d.desc.io_advice_hints_mode + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.st_enble + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.cs_enble + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.ic_enable + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.acdlu + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.rlbsr + 0, 3);
+ KUNIT_EXPECT_EQ(test, d.desc.lbm_descriptor_type + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.params[0] + 0, 0xe4);
+ KUNIT_EXPECT_EQ(test, d.desc.params[1] + 0, 0xe3);
+
+ static const union {
+ struct scsi_stream_status s;
+ u8 arr[sizeof(struct scsi_stream_status)];
+ } ss = { .arr = { 0x80, 0, 0x12, 0x34, 0x3f } };
+ KUNIT_EXPECT_EQ(test, ss.s.perm + 0, 1);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&ss.s.stream_identifier),
+ 0x1234);
+ KUNIT_EXPECT_EQ(test, ss.s.rel_lifetime + 0, 0x3f);
+
+ static const union {
+ struct scsi_stream_status_header h;
+ u8 arr[sizeof(struct scsi_stream_status_header)];
+ } sh = { .arr = { 1, 2, 3, 4, 0, 0, 5, 6 } };
+ KUNIT_EXPECT_EQ(test, get_unaligned_be32(&sh.h.len), 0x1020304);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&sh.h.number_of_open_streams),
+ 0x506);
+}
+
+static struct kunit_case scsi_proto_test_cases[] = {
+ KUNIT_CASE(test_scsi_proto),
+ {}
+};
+
+static struct kunit_suite scsi_proto_test_suite = {
+ .name = "scsi_proto",
+ .test_cases = scsi_proto_test_cases,
+};
+kunit_test_suite(scsi_proto_test_suite);
+
+MODULE_DESCRIPTION("<scsi/scsi_proto.h> unit tests");
+MODULE_AUTHOR("Bart Van Assche");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ca99be7341..ffd7e7e729 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -332,7 +332,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
- q = blk_mq_init_queue(&sdev->host->tag_set);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@@ -412,7 +412,7 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
-static struct device_type scsi_target_type = {
+static const struct device_type scsi_target_type = {
.name = "scsi_target",
.release = scsi_target_dev_release,
};
@@ -626,6 +626,7 @@ void scsi_sanitize_inquiry_string(unsigned char *s, int len)
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
+
/**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
* @sdev: scsi_device to probe
@@ -647,10 +648,36 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
int pass, count, result, resid;
- struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /*
+ * not-ready to ready transition [asc/ascq=0x28/0x0] or
+ * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
+ * should not yield UNIT_ATTENTION but many buggy devices do
+ * so anyway.
+ */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x28,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .allowed = 1,
+ .result = DID_TIME_OUT << 16,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
.resid = &resid,
+ .failures = &failures,
};
*bflags = 0;
@@ -668,6 +695,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
pass, try_inquiry_len));
/* Each pass gets up to three chances to ignore Unit Attention */
+ scsi_failures_reset_retries(&failures);
+
for (count = 0; count < 3; ++count) {
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
@@ -684,22 +713,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result > 0) {
- /*
- * not-ready to ready transition [asc/ascq=0x28/0x0]
- * or power-on, reset [asc/ascq=0x29/0x0], continue.
- * INQUIRY should not yield UNIT_ATTENTION
- * but many buggy devices do so anyway.
- */
- if (scsi_status_is_check_condition(result) &&
- scsi_sense_valid(&sshdr)) {
- if ((sshdr.sense_key == UNIT_ATTENTION) &&
- ((sshdr.asc == 0x28) ||
- (sshdr.asc == 0x29)) &&
- (sshdr.ascq == 0))
- continue;
- }
- } else if (result == 0) {
+ if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
@@ -1402,14 +1416,34 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
unsigned int length;
u64 lun;
unsigned int num_luns;
- unsigned int retries;
int result;
struct scsi_lun *lunp, *lun_data;
- struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Fail all CCs except the UA above */
+ {
+ .sense = SCMD_FAILURE_SENSE_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other errors not listed above */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
int ret = 0;
@@ -1480,29 +1514,18 @@ retry:
* should come through as a check condition, and will not generate
* a retry.
*/
- for (retries = 0; retries < 3; retries++) {
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: Sending REPORT LUNS to (try %d)\n",
- retries));
-
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
- lun_data, length,
- SCSI_REPORT_LUNS_TIMEOUT, 3,
- &exec_args);
+ scsi_failures_reset_retries(&failures);
- SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
- "scsi scan: REPORT LUNS"
- " %s (try %d) result 0x%x\n",
- result ? "failed" : "successful",
- retries, result));
- if (result == 0)
- break;
- else if (scsi_sense_valid(&sshdr)) {
- if (sshdr.sense_key != UNIT_ATTENTION)
- break;
- }
- }
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: Sending REPORT LUNS\n"));
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
+ length, SCSI_REPORT_LUNS_TIMEOUT, 3,
+ &exec_args);
+
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUNS %s result 0x%x\n",
+ result ? "failed" : "successful", result));
if (result) {
/*
* The device probably does not support a REPORT LUN command
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 24f6eefb68..775df00021 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -27,7 +27,7 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
-static struct device_type scsi_dev_type;
+static const struct device_type scsi_dev_type;
static const struct {
enum scsi_device_state value;
@@ -449,6 +449,7 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
+ struct scsi_vpd *vpd_pgb7 = NULL;
unsigned long flags;
might_sleep();
@@ -494,6 +495,8 @@ static void scsi_device_dev_release(struct device *dev)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb7 = rcu_replace_pointer(sdev->vpd_pgb7, vpd_pgb7,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -510,6 +513,8 @@ static void scsi_device_dev_release(struct device *dev)
kfree_rcu(vpd_pgb1, rcu);
if (vpd_pgb2)
kfree_rcu(vpd_pgb2, rcu);
+ if (vpd_pgb7)
+ kfree_rcu(vpd_pgb7, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -549,7 +554,7 @@ static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return 0;
}
-struct bus_type scsi_bus_type = {
+const struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
@@ -921,6 +926,7 @@ sdev_vpd_pg_attr(pg89);
sdev_vpd_pg_attr(pgb0);
sdev_vpd_pg_attr(pgb1);
sdev_vpd_pg_attr(pgb2);
+sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -1295,6 +1301,9 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
return 0;
+ if (attr == &dev_attr_vpd_pgb7 && !sdev->vpd_pgb7)
+ return 0;
+
return S_IRUGO;
}
@@ -1347,6 +1356,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pgb0,
&dev_attr_vpd_pgb1,
&dev_attr_vpd_pgb2,
+ &dev_attr_vpd_pgb7,
&dev_attr_inquiry,
NULL
};
@@ -1626,7 +1636,7 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
return 0;
}
-static struct device_type scsi_dev_type = {
+static const struct device_type scsi_dev_type = {
.name = "scsi_device",
.release = scsi_device_dev_release,
.groups = scsi_sdev_attr_groups,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 3075b2ddf7..af3ac63467 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1201,7 +1201,7 @@ static const struct device_type iscsi_flashnode_conn_dev_type = {
.release = iscsi_flashnode_conn_release,
};
-static struct bus_type iscsi_flashnode_bus;
+static const struct bus_type iscsi_flashnode_bus;
int iscsi_flashnode_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev,
}
EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
-static struct bus_type iscsi_flashnode_bus = {
+static const struct bus_type iscsi_flashnode_bus = {
.name = "iscsi_flashnode",
.match = &iscsi_flashnode_bus_match,
};
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index d704c484a2..7fdd2b61fe 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
}
EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for SCSI devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+ struct scsi_vpd *vpd;
+ bool ncq_prio_supported = false;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd && vpd->len >= 214)
+ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+ rcu_read_unlock();
+
+ return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
/*
* SAS Phy attributes
*/
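sas_ata_ncq_prio_supported() is exported so libsas LLDs can report NCQ priority capability without duplicating the VPD page 89h check. A sketch of a sysfs show callback built on it, assuming the matching declaration is visible through <scsi/scsi_transport_sas.h>; the attribute name is illustrative and follows the style of the mpt3sas/pm8001 users of this helper:

/*
 * Sketch only: expose the helper through a SCSI device attribute.
 */
#include <linux/device.h>
#include <linux/sysfs.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_sas.h>

static ssize_t sas_ncq_prio_supported_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);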
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f668c1c0a9..64852e6df3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -108,29 +108,30 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
enum req_op op, void *buffer, unsigned int bufflen,
struct scsi_sense_hdr *sshdr)
{
- int i, result;
- struct scsi_sense_hdr sshdr_tmp;
blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = DV_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
+ /* bypass the SDEV_QUIESCE state with BLK_MQ_REQ_PM */
.req_flags = BLK_MQ_REQ_PM,
- .sshdr = sshdr ? : &sshdr_tmp,
+ .sshdr = sshdr,
+ .failures = &failures,
};
- sshdr = exec_args.sshdr;
-
- for(i = 0; i < DV_RETRIES; i++) {
- /*
- * The purpose of the RQF_PM flag below is to bypass the
- * SDEV_QUIESCE state.
- */
- result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen,
- DV_TIMEOUT, 1, &exec_args);
- if (result < 0 || !scsi_sense_valid(sshdr) ||
- sshdr->sense_key != UNIT_ATTENTION)
- break;
- }
- return result;
+ return scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1,
+ &exec_args);
}
static struct {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 35200a7a73..a4638ea925 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/rw_hint.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -62,6 +63,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
@@ -1080,12 +1082,38 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
+/**
+ * sd_group_number() - Compute the GROUP NUMBER field
+ * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER
+ * field.
+ *
+ * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
+ * 0: no relative lifetime.
+ * 1: shortest relative lifetime.
+ * 2: second shortest relative lifetime.
+ * 3 - 0x3d: intermediate relative lifetimes.
+ * 0x3e: second longest relative lifetime.
+ * 0x3f: longest relative lifetime.
+ */
+static u8 sd_group_number(struct scsi_cmnd *cmd)
+{
+ const struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+
+ if (!sdkp->rscs)
+ return 0;
+
+ return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
+ 0x3fu);
+}
+
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags, unsigned int dld)
{
cmd->cmd_len = SD_EXT_CDB_SIZE;
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
cmd->cmnd[10] = flags;
@@ -1104,7 +1132,7 @@ static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 16;
cmd->cmnd[0] = write ? WRITE_16 : READ_16;
cmd->cmnd[1] = flags | ((dld >> 2) & 0x01);
- cmd->cmnd[14] = (dld & 0x03) << 6;
+ cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
cmd->cmnd[15] = 0;
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
@@ -1119,7 +1147,7 @@ static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 10;
cmd->cmnd[0] = write ? WRITE_10 : READ_10;
cmd->cmnd[1] = flags;
- cmd->cmnd[6] = 0;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[9] = 0;
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
@@ -1256,7 +1284,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect) {
+ sdp->use_10_for_rw || protect || rq->write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
@@ -1645,36 +1673,35 @@ out:
static int sd_sync_cache(struct scsi_disk *sdkp)
{
- int retries, res;
+ int res;
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
+ /* Leave the rest of the command zero to indicate flush everything. */
+ const unsigned char cmd[16] = { sdp->use_16_for_sync ?
+ SYNCHRONIZE_CACHE_16 : SYNCHRONIZE_CACHE };
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .allowed = 3,
+ .result = SCMD_FAILURE_RESULT_ANY,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.req_flags = BLK_MQ_REQ_PM,
.sshdr = &sshdr,
+ .failures = &failures,
};
if (!scsi_device_online(sdp))
return -ENODEV;
- for (retries = 3; retries > 0; --retries) {
- unsigned char cmd[16] = { 0 };
-
- if (sdp->use_16_for_sync)
- cmd[0] = SYNCHRONIZE_CACHE_16;
- else
- cmd[0] = SYNCHRONIZE_CACHE;
- /*
- * Leave the rest of the command zero to indicate
- * flush everything.
- */
- res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
- timeout, sdkp->max_retries, &exec_args);
- if (res == 0)
- break;
- }
-
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
+ sdkp->max_retries, &exec_args);
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
@@ -1801,8 +1828,22 @@ static int sd_pr_in_command(struct block_device *bdev, u8 sa,
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 5,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int result;
@@ -1889,8 +1930,22 @@ static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = 5,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int result;
u8 cmd[16] = { 0, };
@@ -2235,55 +2290,68 @@ static int sd_done(struct scsi_cmnd *SCpnt)
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
- unsigned char cmd[10];
+ static const u8 cmd[10] = { TEST_UNIT_READY };
unsigned long spintime_expire = 0;
- int retries, spintime;
+ int spintime, sense_valid = 0;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /* Do not retry Medium Not Present */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x3A,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x3A,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry up to 3 times when scsi_status_is_good() would return false */
+ {
+ .result = SCMD_FAILURE_STAT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
- int sense_valid = 0;
spintime = 0;
/* Spin up drives, as required. Only do this at boot time */
/* Spinup needs to be done for module loads too. */
do {
- retries = 0;
+ bool media_was_present = sdkp->media_present;
- do {
- bool media_was_present = sdkp->media_present;
+ scsi_failures_reset_retries(&failures);
- cmd[0] = TEST_UNIT_READY;
- memset((void *) &cmd[1], 0, 9);
+ the_result = scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN,
+ NULL, 0, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
- the_result = scsi_execute_cmd(sdkp->device, cmd,
- REQ_OP_DRV_IN, NULL, 0,
- SD_TIMEOUT,
- sdkp->max_retries,
- &exec_args);
- if (the_result > 0) {
- /*
- * If the drive has indicated to us that it
- * doesn't have any media in it, don't bother
- * with any more polling.
- */
- if (media_not_present(sdkp, &sshdr)) {
- if (media_was_present)
- sd_printk(KERN_NOTICE, sdkp,
- "Media removed, stopped polling\n");
- return;
- }
-
- sense_valid = scsi_sense_valid(&sshdr);
+ if (the_result > 0) {
+ /*
+ * If the drive has indicated to us that it doesn't
+ * have any media in it, don't bother with any more
+ * polling.
+ */
+ if (media_not_present(sdkp, &sshdr)) {
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Media removed, stopped polling\n");
+ return;
}
- retries++;
- } while (retries < 3 &&
- (!scsi_status_is_good(the_result) ||
- (scsi_status_is_check_condition(the_result) &&
- sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
+ sense_valid = scsi_sense_valid(&sshdr);
+ }
if (!scsi_status_is_check_condition(the_result)) {
/* no sense, TUR either succeeded or failed
@@ -2318,14 +2386,16 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* Issue command to spin up drive when not ready
*/
if (!spintime) {
+ /* Return immediately and start spin cycle */
+ const u8 start_cmd[10] = {
+ [0] = START_STOP,
+ [1] = 1,
+ [4] = sdkp->device->start_stop_pwr_cond ?
+ 0x11 : 1,
+ };
+
sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
- cmd[0] = START_STOP;
- cmd[1] = 1; /* Return immediately */
- memset((void *) &cmd[2], 0, 8);
- cmd[4] = 1; /* Start spin cycle */
- if (sdkp->device->start_stop_pwr_cond)
- cmd[4] |= 1 << 4;
- scsi_execute_cmd(sdkp->device, cmd,
+ scsi_execute_cmd(sdkp->device, start_cmd,
REQ_OP_DRV_IN, NULL, 0,
SD_TIMEOUT, sdkp->max_retries,
&exec_args);
@@ -2546,42 +2616,58 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
- unsigned char cmd[16];
+ static const u8 cmd[10] = { READ_CAPACITY };
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ /* Do not retry Medium Not Present */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x3A,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = 0x3A,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Device reset might occur several times so retry a lot */
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .allowed = READ_CAPACITY_RETRIES_ON_RESET,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ /* Retry any other error not listed above up to 3 times */
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
+ .failures = &failures,
};
int sense_valid = 0;
int the_result;
- int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
unsigned sector_size;
- do {
- cmd[0] = READ_CAPACITY;
- memset(&cmd[1], 0, 9);
- memset(buffer, 0, 8);
+ memset(buffer, 0, 8);
- the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
- 8, SD_TIMEOUT, sdkp->max_retries,
- &exec_args);
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
+ 8, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
+
+ if (the_result > 0) {
+ sense_valid = scsi_sense_valid(&sshdr);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
-
- if (the_result > 0) {
- sense_valid = scsi_sense_valid(&sshdr);
- if (sense_valid &&
- sshdr.sense_key == UNIT_ATTENTION &&
- sshdr.asc == 0x29 && sshdr.ascq == 0x00)
- /* Device reset might occur several times,
- * give it one more chance */
- if (--reset_retries > 0)
- continue;
- }
- retries--;
-
- } while (the_result && retries);
+ }
if (the_result) {
sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
@@ -3001,6 +3087,75 @@ defaults:
sdkp->DPOFUA = 0;
}
+static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
+{
+ u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
+ struct {
+ struct scsi_stream_status_header h;
+ struct scsi_stream_status s;
+ } buf;
+ struct scsi_device *sdev = sdkp->device;
+ struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
+ int res;
+
+ put_unaligned_be16(stream_id, &cdb[4]);
+ put_unaligned_be32(sizeof(buf), &cdb[10]);
+
+ res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
+ SD_TIMEOUT, sdkp->max_retries, &exec_args);
+ if (res < 0)
+ return false;
+ if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+ if (res)
+ return false;
+ if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
+ return false;
+ return buf.h.stream_status[0].perm;
+}
+
+static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const struct scsi_io_group_descriptor *desc, *start, *end;
+ u16 permanent_stream_count_old;
+ struct scsi_sense_hdr sshdr;
+ struct scsi_mode_data data;
+ int res;
+
+ if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+ return;
+
+ res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
+ /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr);
+ if (res < 0)
+ return;
+ start = (void *)buffer + data.header_length + 16;
+ end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
+ sizeof(*end));
+ /*
+ * From "SBC-5 Constrained Streams with Data Lifetimes": Device severs
+ * should assign the lowest numbered stream identifiers to permanent
+ * streams.
+ */
+ for (desc = start; desc < end; desc++)
+ if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
+ break;
+ permanent_stream_count_old = sdkp->permanent_stream_count;
+ sdkp->permanent_stream_count = desc - start;
+ if (sdkp->rscs && sdkp->permanent_stream_count < 2)
+ sd_printk(KERN_INFO, sdkp,
+ "Unexpected: RSCS has been set and the permanent stream count is %u\n",
+ sdkp->permanent_stream_count);
+ else if (sdkp->permanent_stream_count != permanent_stream_count_old)
+ sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
+ sdkp->permanent_stream_count);
+}
+
/*
* The ATO bit indicates whether the DIF application tag is available
* for use by the operating system.
@@ -3108,6 +3263,18 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
rcu_read_unlock();
}
+/* Parse the Block Limits Extension VPD page (0xb7) */
+static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
+{
+ struct scsi_vpd *vpd;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb7);
+ if (vpd && vpd->len >= 2)
+ sdkp->rscs = vpd->data[5] & 1;
+ rcu_read_unlock();
+}
+
/**
* sd_read_block_characteristics - Query block dev. characteristics
* @sdkp: disk to query
@@ -3409,16 +3576,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
static void sd_read_block_zero(struct scsi_disk *sdkp)
{
- unsigned int buf_len = sdkp->device->sector_size;
- char *buffer, cmd[10] = { };
+ struct scsi_device *sdev = sdkp->device;
+ unsigned int buf_len = sdev->sector_size;
+ u8 *buffer, cmd[16] = { };
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer)
return;
- cmd[0] = READ_10;
- put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
- put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ if (sdev->use_16_for_rw) {
+ cmd[0] = READ_16;
+ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+ } else {
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ }
scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
SD_TIMEOUT, sdkp->max_retries, NULL);
@@ -3483,6 +3657,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
+ sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
sd_read_cpr(sdkp);
@@ -3492,6 +3667,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_io_hints(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
@@ -3542,8 +3718,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->first_scan ||
q->limits.max_sectors > q->limits.max_dev_sectors ||
- q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors > q->limits.max_hw_sectors) {
q->limits.max_sectors = rw_max;
+ q->limits.max_user_sectors = rw_max;
+ }
sdkp->first_scan = 0;
@@ -3752,7 +3930,7 @@ static int sd_probe(struct device *dev)
blk_pm_runtime_init(sdp->request_queue, dev);
if (sdp->rpm_autosuspend) {
pm_runtime_set_autosuspend_delay(dev,
- sdp->host->hostt->rpm_autosuspend_delay);
+ sdp->host->rpm_autosuspend_delay);
}
error = device_add_disk(dev, gd, NULL);
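sd_group_number() only produces a non-zero GROUP NUMBER when the device reports RSCS, and the value it sends is the block layer write hint carried in rq->write_hint, clamped to the permanent stream count. From userspace that hint is normally set per open file with the F_SET_RW_HINT fcntl. A minimal userspace sketch; the target path is illustrative, the constants are guarded in case the libc headers do not expose them, and whether a filesystem forwards the hint to the device is filesystem dependent:

/*
 * Sketch only: tag writes done through this fd with a "short" lifetime
 * hint. The block layer carries it as rq->write_hint and sd maps it to
 * the GROUP NUMBER field when the disk reports RSCS.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		1036	/* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("/dev/sda", O_WRONLY);	/* illustrative target */

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0) {
		close(fd);
		return 1;
	}
	/* Writes submitted through this fd now carry the lifetime hint. */
	close(fd);
	return 0;
}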
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 409dda5350..5c4285a582 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -125,6 +125,8 @@ struct scsi_disk {
unsigned int physical_block_size;
unsigned int max_medium_access_timeouts;
unsigned int medium_access_timed_out;
+ /* number of permanent streams */
+ u16 permanent_stream_count;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
@@ -151,6 +153,7 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
+ unsigned rscs : 1; /* reduced stream control support */
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index d7d0c35c58..0f2c87cc95 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -87,19 +87,32 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0
};
unsigned char recv_page_code;
- unsigned int retries = SES_RETRIES;
- struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
- do {
- ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
- SES_TIMEOUT, 1, &exec_args);
- } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
- (sshdr.sense_key == NOT_READY ||
- (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
-
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
if (unlikely(ret))
return ret;
@@ -131,19 +144,32 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
- struct scsi_sense_hdr sshdr;
- unsigned int retries = SES_RETRIES;
+ struct scsi_failure failure_defs[] = {
+ {
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ .sense = NOT_READY,
+ .asc = SCMD_FAILURE_ASC_ANY,
+ .ascq = SCMD_FAILURE_ASCQ_ANY,
+ .allowed = SES_RETRIES,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
+ .failures = &failures,
};
- do {
- result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf,
- bufflen, SES_TIMEOUT, 1, &exec_args);
- } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
- (sshdr.sense_key == NOT_READY ||
- (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
-
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b2d02dacae..baf870a03e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1427,7 +1427,9 @@ static const struct file_operations sg_fops = {
.llseek = no_llseek,
};
-static struct class *sg_sysfs_class;
+static const struct class sg_sysfs_class = {
+ .name = "scsi_generic"
+};
static int sg_sysfs_valid = 0;
@@ -1529,7 +1531,7 @@ sg_add_device(struct device *cl_dev)
if (sg_sysfs_valid) {
struct device *sg_class_member;
- sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", sdp->name);
@@ -1619,7 +1621,7 @@ sg_remove_device(struct device *cl_dev)
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
- device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+ device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
@@ -1690,11 +1692,9 @@ init_sg(void)
SG_MAX_DEVS, "sg");
if (rc)
return rc;
- sg_sysfs_class = class_create("scsi_generic");
- if ( IS_ERR(sg_sysfs_class) ) {
- rc = PTR_ERR(sg_sysfs_class);
+ rc = class_register(&sg_sysfs_class);
+ if (rc)
goto err_out;
- }
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
@@ -1703,7 +1703,7 @@ init_sg(void)
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
@@ -1718,7 +1718,7 @@ exit_sg(void)
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
- class_destroy(sg_sysfs_class);
+ class_unregister(&sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d093dd187b..268b3a4089 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -717,27 +717,29 @@ fail:
static void get_sectorsize(struct scsi_cd *cd)
{
- unsigned char cmd[10];
- unsigned char buffer[8];
- int the_result, retries = 3;
+ static const u8 cmd[10] = { READ_CAPACITY };
+ unsigned char buffer[8] = { };
+ int the_result;
int sector_size;
struct request_queue *queue;
+ struct scsi_failure failure_defs[] = {
+ {
+ .result = SCMD_FAILURE_RESULT_ANY,
+ .allowed = 3,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .failure_definitions = failure_defs,
+ };
+ const struct scsi_exec_args exec_args = {
+ .failures = &failures,
+ };
- do {
- cmd[0] = READ_CAPACITY;
- memset((void *) &cmd[1], 0, 9);
- memset(buffer, 0, sizeof(buffer));
-
- /* Do the command and wait.. */
- the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN,
- buffer, sizeof(buffer),
- SR_TIMEOUT, MAX_RETRIES, NULL);
-
- retries--;
-
- } while (the_result && retries);
-
-
+ /* Do the command and wait. */
+ the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
+ sizeof(buffer), SR_TIMEOUT, MAX_RETRIES,
+ &exec_args);
if (the_result) {
cd->capacity = 0x1fffff;
sector_size = 2048; /* A guess, just in case */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 338aa8c429..5a9bcf8e07 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -87,7 +87,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static int debug_flag;
-static struct class st_sysfs_class;
+static const struct class st_sysfs_class;
static const struct attribute_group *st_dev_groups[];
static const struct attribute_group *st_drv_groups[];
@@ -4438,7 +4438,7 @@ static void scsi_tape_release(struct kref *kref)
return;
}
-static struct class st_sysfs_class = {
+static const struct class st_sysfs_class = {
.name = "scsi_tape",
.dev_groups = st_dev_groups,
};
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 09219c362a..e20f314cf3 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -273,7 +273,7 @@ static struct platform_driver esp_sun3x_driver = {
module_platform_driver(esp_sun3x_driver);
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
-MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 64a7c2c6c5..5ce6c9d19d 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -608,6 +608,6 @@ static struct platform_driver esp_sbus_driver = {
module_platform_driver(esp_sbus_driver);
MODULE_DESCRIPTION("Sun ESP SCSI driver");
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);