path: root/drivers/scsi
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit    dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree      c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/scsi
parent    Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 17
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 38
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 127
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 6
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 14
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 94
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 79
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 21
-rw-r--r--  drivers/scsi/elx/efct/efct_lio.c | 5
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 16
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 20
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 7
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 252
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 370
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 50
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3
-rw-r--r--  drivers/scsi/imm.c | 70
-rw-r--r--  drivers/scsi/imm.h | 4
-rw-r--r--  drivers/scsi/ipr.c | 12
-rw-r--r--  drivers/scsi/ips.c | 18
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 18
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vmid.c | 1
-rw-r--r--  drivers/scsi/megaraid.c | 53
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 3
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 3
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 70
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 89
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 285
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 11
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 59
-rw-r--r--  drivers/scsi/pmcraid.c | 69
-rw-r--r--  drivers/scsi/qedf/qedf.h | 5
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 75
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 19
-rw-r--r--  drivers/scsi/qla1280.c | 42
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 59
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 6
-rw-r--r--  drivers/scsi/scsi.c | 24
-rw-r--r--  drivers/scsi/scsi_debug.c | 568
-rw-r--r--  drivers/scsi/scsi_error.c | 9
-rw-r--r--  drivers/scsi/scsi_lib.c | 41
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_sysctl.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 4
-rw-r--r--  drivers/scsi/sd.c | 83
-rw-r--r--  drivers/scsi/sg.c | 1
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 1
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 88
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 14
-rw-r--r--  drivers/scsi/sr.c | 3
-rw-r--r--  drivers/scsi/storvsc_drv.c | 12
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 189
73 files changed, 2002 insertions, 1284 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 695a57d894..9ce2709272 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -834,21 +834,6 @@ config SCSI_IMM
To compile this driver as a module, choose M here: the
module will be called imm.
-config SCSI_IZIP_EPP16
- bool "ppa/imm option - Use slow (but safe) EPP-16"
- depends on SCSI_IMM
- help
- EPP (Enhanced Parallel Port) is a standard for parallel ports which
- allows them to act as expansion buses that can handle up to 64
- peripheral devices.
-
- Some parallel port chipsets are slower than their motherboard, and
- so we have to control the state of the chipset's FIFO queue every
- now and then to avoid data loss. This will be done if you say Y
- here.
-
- Generally, saying Y is the safe option and slows things down a bit.
-
config SCSI_IZIP_SLOW_CTR
bool "ppa/imm option - Assume slow parport control register"
depends on SCSI_PPA || SCSI_IMM
@@ -1285,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index f2f3405cde..4202059815 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -536,13 +536,18 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
struct scsi_cmnd *cmd;
cmd = scb->io_ctx;
- ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
- scsi_dma_unmap(cmd);
+ if (cmd) {
+ ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
+ scsi_dma_unmap(cmd);
+ }
}
/******************************** Macros **************************************/
-#define BUILD_SCSIID(ahd, cmd) \
- (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
+static inline unsigned int ahd_build_scsiid(struct ahd_softc *ahd,
+ struct scsi_device *sdev)
+{
+ return ((sdev_id(sdev) << TID_SHIFT) & TID) | (ahd)->our_id;
+}
/*
* Return a string describing the driver.
@@ -811,14 +816,14 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
cmd->device->id, &tstate);
- reset_scb->io_ctx = cmd;
+ reset_scb->io_ctx = NULL;
reset_scb->platform_data->dev = dev;
reset_scb->sg_count = 0;
ahd_set_residual(reset_scb, 0);
ahd_set_sense_residual(reset_scb, 0);
reset_scb->platform_data->xfer_len = 0;
reset_scb->hscb->control = 0;
- reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd);
+ reset_scb->hscb->scsiid = ahd_build_scsiid(ahd, cmd->device);
reset_scb->hscb->lun = cmd->device->lun;
reset_scb->hscb->cdb_len = 0;
reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET;
@@ -1577,7 +1582,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
* Fill out basics of the HSCB.
*/
hscb->control = 0;
- hscb->scsiid = BUILD_SCSIID(ahd, cmd);
+ hscb->scsiid = ahd_build_scsiid(ahd, cmd->device);
hscb->lun = cmd->device->lun;
scb->hscb->task_management = 0;
mask = SCB_GET_TARGET_MASK(ahd, scb);
@@ -1766,9 +1771,16 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
dev = scb->platform_data->dev;
dev->active--;
dev->openings++;
- if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
- cmd->result &= ~(CAM_DEV_QFRZN << 16);
- dev->qfrozen--;
+ if (cmd) {
+ if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+ cmd->result &= ~(CAM_DEV_QFRZN << 16);
+ dev->qfrozen--;
+ }
+ } else if (scb->flags & SCB_DEVICE_RESET) {
+ if (ahd->platform_data->eh_done)
+ complete(ahd->platform_data->eh_done);
+ ahd_free_scb(ahd, scb);
+ return;
}
ahd_linux_unmap_scb(ahd, scb);
@@ -1822,7 +1834,8 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
} else {
ahd_set_transaction_status(scb, CAM_REQ_CMP);
}
- } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
+ } else if (cmd &&
+ ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
}
@@ -1856,7 +1869,8 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
}
ahd_free_scb(ahd, scb);
- ahd_linux_queue_cmd_complete(ahd, cmd);
+ if (cmd)
+ ahd_linux_queue_cmd_complete(ahd, cmd);
}
static void
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d3b1082654..4ae0a1c4d3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -366,7 +366,8 @@ static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
struct scsi_cmnd *cmd);
static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
static void ahc_linux_release_simq(struct ahc_softc *ahc);
-static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
+static int ahc_linux_queue_recovery_cmd(struct scsi_device *sdev,
+ struct scsi_cmnd *cmd);
static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo);
@@ -728,7 +729,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
{
int error;
- error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
+ error = ahc_linux_queue_recovery_cmd(cmd->device, cmd);
if (error != SUCCESS)
printk("aic7xxx_abort returns 0x%x\n", error);
return (error);
@@ -742,7 +743,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
{
int error;
- error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
+ error = ahc_linux_queue_recovery_cmd(cmd->device, NULL);
if (error != SUCCESS)
printk("aic7xxx_dev_reset returns 0x%x\n", error);
return (error);
@@ -798,11 +799,18 @@ struct scsi_host_template aic7xxx_driver_template = {
/**************************** Tasklet Handler *********************************/
-/******************************** Macros **************************************/
-#define BUILD_SCSIID(ahc, cmd) \
- ((((cmd)->device->id << TID_SHIFT) & TID) \
- | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
- | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
+
+static inline unsigned int ahc_build_scsiid(struct ahc_softc *ahc,
+ struct scsi_device *sdev)
+{
+ unsigned int scsiid = (sdev->id << TID_SHIFT) & TID;
+
+ if (sdev->channel == 0)
+ scsiid |= ahc->our_id;
+ else
+ scsiid |= ahc->our_id_b | TWIN_CHNLB;
+ return scsiid;
+}
/******************************** Bus DMA *************************************/
int
@@ -1457,7 +1465,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
* Fill out basics of the HSCB.
*/
hscb->control = 0;
- hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+ hscb->scsiid = ahc_build_scsiid(ahc, cmd->device);
hscb->lun = cmd->device->lun;
mask = SCB_GET_TARGET_MASK(ahc, scb);
tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
@@ -2029,11 +2037,12 @@ ahc_linux_release_simq(struct ahc_softc *ahc)
}
static int
-ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
+ahc_linux_queue_recovery_cmd(struct scsi_device *sdev,
+ struct scsi_cmnd *cmd)
{
struct ahc_softc *ahc;
struct ahc_linux_device *dev;
- struct scb *pending_scb;
+ struct scb *pending_scb = NULL, *scb;
u_int saved_scbptr;
u_int active_scb_index;
u_int last_phase;
@@ -2046,18 +2055,19 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
int disconnected;
unsigned long flags;
- pending_scb = NULL;
paused = FALSE;
wait = FALSE;
- ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+ ahc = *(struct ahc_softc **)sdev->host->hostdata;
- scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
- flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
+ sdev_printk(KERN_INFO, sdev, "Attempting to queue a%s message\n",
+ cmd ? "n ABORT" : " TARGET RESET");
- printk("CDB:");
- for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
- printk(" 0x%x", cmd->cmnd[cdb_byte]);
- printk("\n");
+ if (cmd) {
+ printk("CDB:");
+ for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+ printk(" 0x%x", cmd->cmnd[cdb_byte]);
+ printk("\n");
+ }
ahc_lock(ahc, &flags);
@@ -2068,7 +2078,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* at all, and the system wanted us to just abort the
* command, return success.
*/
- dev = scsi_transport_device_data(cmd->device);
+ dev = scsi_transport_device_data(sdev);
if (dev == NULL) {
/*
@@ -2076,13 +2086,12 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* so we must not still own the command.
*/
printk("%s:%d:%d:%d: Is not an active device\n",
- ahc_name(ahc), cmd->device->channel, cmd->device->id,
- (u8)cmd->device->lun);
+ ahc_name(ahc), sdev->channel, sdev->id, (u8)sdev->lun);
retval = SUCCESS;
goto no_cmd;
}
- if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
+ if (cmd && (dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
&& ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
cmd->device->channel + 'A',
(u8)cmd->device->lun,
@@ -2097,25 +2106,28 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
/*
* See if we can find a matching cmd in the pending list.
*/
- LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
- if (pending_scb->io_ctx == cmd)
- break;
- }
-
- if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
-
+ if (cmd) {
+ LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+ if (scb->io_ctx == cmd) {
+ pending_scb = scb;
+ break;
+ }
+ }
+ } else {
/* Any SCB for this device will do for a target reset */
- LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
- if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
- scmd_channel(cmd) + 'A',
+ LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+ if (ahc_match_scb(ahc, scb, sdev->id,
+ sdev->channel + 'A',
CAM_LUN_WILDCARD,
- SCB_LIST_NULL, ROLE_INITIATOR))
+ SCB_LIST_NULL, ROLE_INITIATOR)) {
+ pending_scb = scb;
break;
+ }
}
}
if (pending_scb == NULL) {
- scmd_printk(KERN_INFO, cmd, "Command not found\n");
+ sdev_printk(KERN_INFO, sdev, "Command not found\n");
goto no_cmd;
}
@@ -2146,22 +2158,22 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
ahc_dump_card_state(ahc);
disconnected = TRUE;
- if (flag == SCB_ABORT) {
- if (ahc_search_qinfifo(ahc, cmd->device->id,
- cmd->device->channel + 'A',
- cmd->device->lun,
+ if (cmd) {
+ if (ahc_search_qinfifo(ahc, sdev->id,
+ sdev->channel + 'A',
+ sdev->lun,
pending_scb->hscb->tag,
ROLE_INITIATOR, CAM_REQ_ABORTED,
SEARCH_COMPLETE) > 0) {
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
- ahc_name(ahc), cmd->device->channel,
- cmd->device->id, (u8)cmd->device->lun);
+ ahc_name(ahc), sdev->channel,
+ sdev->id, (u8)sdev->lun);
retval = SUCCESS;
goto done;
}
- } else if (ahc_search_qinfifo(ahc, cmd->device->id,
- cmd->device->channel + 'A',
- cmd->device->lun,
+ } else if (ahc_search_qinfifo(ahc, sdev->id,
+ sdev->channel + 'A',
+ sdev->lun,
pending_scb->hscb->tag,
ROLE_INITIATOR, /*status*/0,
SEARCH_COUNT) > 0) {
@@ -2174,7 +2186,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
if (bus_scb == pending_scb)
disconnected = FALSE;
- else if (flag != SCB_ABORT
+ else if (!cmd
&& ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
&& ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
disconnected = FALSE;
@@ -2194,18 +2206,18 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
if (last_phase != P_BUSFREE
&& (pending_scb->hscb->tag == active_scb_index
- || (flag == SCB_DEVICE_RESET
- && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {
+ || (!cmd && SCSIID_TARGET(ahc, saved_scsiid) == sdev->id))) {
/*
* We're active on the bus, so assert ATN
* and hope that the target responds.
*/
pending_scb = ahc_lookup_scb(ahc, active_scb_index);
- pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+ pending_scb->flags |= SCB_RECOVERY_SCB;
+ pending_scb->flags |= cmd ? SCB_ABORT : SCB_DEVICE_RESET;
ahc_outb(ahc, MSG_OUT, HOST_MSG);
ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
- scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
+ sdev_printk(KERN_INFO, sdev, "Device is active, asserting ATN\n");
wait = TRUE;
} else if (disconnected) {
@@ -2226,7 +2238,8 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* an unsolicited reselection occurred.
*/
pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
- pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+ pending_scb->flags |= SCB_RECOVERY_SCB;
+ pending_scb->flags |= cmd ? SCB_ABORT : SCB_DEVICE_RESET;
/*
* Remove any cached copy of this SCB in the
@@ -2235,9 +2248,9 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* same element in the SCB, SCB_NEXT, for
* both the qinfifo and the disconnected list.
*/
- ahc_search_disc_list(ahc, cmd->device->id,
- cmd->device->channel + 'A',
- cmd->device->lun, pending_scb->hscb->tag,
+ ahc_search_disc_list(ahc, sdev->id,
+ sdev->channel + 'A',
+ sdev->lun, pending_scb->hscb->tag,
/*stop_on_first*/TRUE,
/*remove*/TRUE,
/*save_state*/FALSE);
@@ -2260,9 +2273,9 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* so we are the next SCB for this target
* to run.
*/
- ahc_search_qinfifo(ahc, cmd->device->id,
- cmd->device->channel + 'A',
- cmd->device->lun, SCB_LIST_NULL,
+ ahc_search_qinfifo(ahc, sdev->id,
+ sdev->channel + 'A',
+ (u8)sdev->lun, SCB_LIST_NULL,
ROLE_INITIATOR, CAM_REQUEUE_REQ,
SEARCH_COMPLETE);
ahc_qinfifo_requeue_tail(ahc, pending_scb);
@@ -2271,7 +2284,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
printk("Device is disconnected, re-queuing SCB\n");
wait = TRUE;
} else {
- scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
+ sdev_printk(KERN_INFO, sdev, "Unable to deliver message\n");
retval = FAILED;
goto done;
}
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ed8d931986..3819d559eb 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -78,9 +78,13 @@ struct device_attribute;
#ifndef PCI_DEVICE_ID_ARECA_1203
#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1883
+#define PCI_DEVICE_ID_ARECA_1883 0x1883
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1884
#define PCI_DEVICE_ID_ARECA_1884 0x1884
#endif
+#define PCI_DEVICE_ID_ARECA_1886_0 0x1886
#define PCI_DEVICE_ID_ARECA_1886 0x188A
#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
#define ARCMSR_MINUTES (1000 * 60 * 60)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index a66221c3b7..01fb1396e1 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -214,8 +214,12 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
+ .driver_data = ACB_ADAPTER_TYPE_C},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
.driver_data = ACB_ADAPTER_TYPE_E},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
+ .driver_data = ACB_ADAPTER_TYPE_F},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
.driver_data = ACB_ADAPTER_TYPE_F},
{0, 0}, /* Terminating entry */
@@ -4706,9 +4710,11 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1883:
case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
+ case PCI_DEVICE_ID_ARECA_1886_0:
case PCI_DEVICE_ID_ARECA_1886:
type = "NVMe/SAS/SATA";
break;
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 046247420c..7e74f77da1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -384,6 +384,7 @@ struct bnx2fc_rport {
};
struct bnx2fc_mp_req {
+ u64 tm_lun;
u8 tm_flags;
u32 req_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 7765443855..090d436bce 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1709,7 +1709,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct fcoe_cached_sge_ctx *cached_sge;
struct fcoe_ext_mul_sges_ctx *sgl;
int dev_type = tgt->dev_type;
- u64 *fcp_cmnd;
+ struct fcp_cmnd *fcp_cmnd;
+ u64 *raw_fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
int cnt, i;
@@ -1778,16 +1779,19 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
/* Fill FCP_CMND IU */
- fcp_cmnd = (u64 *)
+ fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd;
+ bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd);
+ int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+ raw_fcp_cmnd = (u64 *)
task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
- bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
/* swap fcp_cmnd */
cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
for (i = 0; i < cnt; i++) {
- *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
- fcp_cmnd++;
+ *raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ raw_fcp_cmnd++;
}
/* Rx Write Tx Read */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index b42a9accb8..33057908f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -656,10 +656,9 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return SUCCESS;
}
-static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+static int bnx2fc_initiate_tmf(struct fc_lport *lport, struct fc_rport *rport,
+ u64 tm_lun, u8 tm_flags)
{
- struct fc_lport *lport;
- struct fc_rport *rport;
struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
@@ -668,7 +667,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
struct bnx2fc_mp_req *tm_req;
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
- struct Scsi_Host *host = sc_cmd->device->host;
struct fc_frame_header *fc_hdr;
struct fcp_cmnd *fcp_cmnd;
int task_idx, index;
@@ -677,8 +675,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
u32 sid, did;
unsigned long start = jiffies;
- lport = shost_priv(host);
- rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@@ -689,7 +685,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
}
rp = rport->dd_data;
- rc = fc_block_scsi_eh(sc_cmd);
+ rc = fc_block_rport(rport);
if (rc)
return rc;
@@ -718,7 +714,7 @@ retry_tmf:
goto retry_tmf;
}
/* Initialize rest of io_req fields */
- io_req->sc_cmd = sc_cmd;
+ io_req->sc_cmd = NULL;
io_req->port = port;
io_req->tgt = tgt;
@@ -736,11 +732,13 @@ retry_tmf:
/* Set TM flags */
io_req->io_req_flags = 0;
tm_req->tm_flags = tm_flags;
+ tm_req->tm_lun = tm_lun;
/* Fill FCP_CMND */
bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
- memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ int_to_scsilun(tm_lun, &fcp_cmnd->fc_lun);
+ memset(fcp_cmnd->fc_cdb, 0, BNX2FC_MAX_CMD_LEN);
fcp_cmnd->fc_dl = 0;
/* Fill FC header */
@@ -763,8 +761,6 @@ retry_tmf:
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
- bnx2fc_priv(sc_cmd)->io_req = io_req;
-
/* Obtain free SQ entry */
spin_lock_bh(&tgt->tgt_lock);
bnx2fc_add_2_sq(tgt, xid);
@@ -1062,7 +1058,10 @@ cleanup_err:
*/
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
- return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_lport *lport = shost_priv(rport_to_shost(rport));
+
+ return bnx2fc_initiate_tmf(lport, rport, 0, FCP_TMF_TGT_RESET);
}
/**
@@ -1075,7 +1074,11 @@ int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
*/
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
- return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_lport *lport = shost_priv(rport_to_shost(rport));
+
+ return bnx2fc_initiate_tmf(lport, rport, sc_cmd->device->lun,
+ FCP_TMF_LUN_RESET);
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
@@ -1450,10 +1453,9 @@ io_compl:
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
struct bnx2fc_cmd *cmd, *tmp;
- u64 tm_lun = sc_cmd->device->lun;
+ struct bnx2fc_mp_req *tm_req = &io_req->mp_req;
u64 lun;
int rc = 0;
@@ -1465,8 +1467,10 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
*/
list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ if (!cmd->sc_cmd)
+ continue;
lun = cmd->sc_cmd->device->lun;
- if (lun == tm_lun) {
+ if (lun == tm_req->tm_lun) {
/* Initiate ABTS on this cmd */
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
&cmd->req_flags)) {
@@ -1570,31 +1574,36 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
fc_hdr->fh_r_ctl);
}
- if (!bnx2fc_priv(sc_cmd)->io_req) {
- printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
- return;
- }
- switch (io_req->fcp_status) {
- case FC_GOOD:
- if (io_req->cdb_status == 0) {
- /* Good IO completion */
- sc_cmd->result = DID_OK << 16;
- } else {
- /* Transport status is good, SCSI status not good */
- sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ if (sc_cmd) {
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
}
- if (io_req->fcp_resid)
- scsi_set_resid(sc_cmd, io_req->fcp_resid);
- break;
- default:
- BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
- io_req->fcp_status);
- break;
- }
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
- sc_cmd = io_req->sc_cmd;
- io_req->sc_cmd = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
+ scsi_done(sc_cmd);
+ }
/* check if the io_req exists in tgt's tmf_q */
if (io_req->on_tmf_queue) {
@@ -1607,9 +1616,6 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
return;
}
- bnx2fc_priv(sc_cmd)->io_req = NULL;
- scsi_done(sc_cmd);
-
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_abts_comp) {
BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
@@ -1738,15 +1744,9 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
struct fcp_cmnd *fcp_cmnd)
{
- struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
-
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
-
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
- memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
-
fcp_cmnd->fc_cmdref = 0;
fcp_cmnd->fc_pri_ta = 0;
fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index abde60a50c..bf75940f2b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1294,7 +1294,7 @@ static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
/*
* the ddp tag will be used for the itt in the outgoing pdu,
- * the itt genrated by libiscsi is saved in the ppm and can be
+ * the itt generated by libiscsi is saved in the ppm and can be
* retrieved via the ddp tag
*/
err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 5f2f943d92..944ea4e0cc 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -82,7 +82,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
- int ret = SCSI_DH_OK, res;
+ int ret, res;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
const struct scsi_exec_args exec_args = {
@@ -92,19 +92,18 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
- if (res) {
- if (scsi_sense_valid(&sshdr))
- ret = tur_done(sdev, h, &sshdr);
- else {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending tur failed with %x\n",
- HP_SW_NAME, res);
- ret = SCSI_DH_IO;
- }
- } else {
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
+ ret = tur_done(sdev, h, &sshdr);
+ } else if (res == 0) {
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
+ } else {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending tur failed with %x\n",
+ HP_SW_NAME, res);
+ ret = SCSI_DH_IO;
}
+
if (ret == SCSI_DH_IMM_RETRY)
goto retry;
@@ -122,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev = h->sdev;
- int res, rc = SCSI_DH_OK;
+ int res, rc;
int retry_cnt = HP_SW_RETRIES;
blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
@@ -133,35 +132,37 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
retry:
res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
HP_SW_RETRIES, &exec_args);
- if (res) {
- if (!scsi_sense_valid(&sshdr)) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, "
- "no sense available\n", HP_SW_NAME);
- return SCSI_DH_IO;
- }
- switch (sshdr.sense_key) {
- case NOT_READY:
- if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
- /*
- * LUN not ready - manual intervention required
- *
- * Switch-over in progress, retry.
- */
- if (--retry_cnt)
- goto retry;
- rc = SCSI_DH_RETRY;
- break;
- }
- fallthrough;
- default:
- sdev_printk(KERN_WARNING, sdev,
- "%s: sending start_stop_unit failed, "
- "sense %x/%x/%x\n", HP_SW_NAME,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- rc = SCSI_DH_IO;
+ if (!res) {
+ return SCSI_DH_OK;
+ } else if (res < 0 || !scsi_sense_valid(&sshdr)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "no sense available\n", HP_SW_NAME);
+ return SCSI_DH_IO;
+ }
+
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
+ /*
+ * LUN not ready - manual intervention required
+ *
+ * Switch-over in progress, retry.
+ */
+ if (--retry_cnt)
+ goto retry;
+ rc = SCSI_DH_RETRY;
+ break;
}
+ fallthrough;
+ default:
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sending start_stop_unit failed, "
+ "sense %x/%x/%x\n", HP_SW_NAME,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ rc = SCSI_DH_IO;
}
+
return rc;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index c553864505..1ac2ae17e8 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -530,7 +530,7 @@ static void send_mode_select(struct work_struct *work)
container_of(work, struct rdac_controller, ms_work);
struct scsi_device *sdev = ctlr->ms_sdev;
struct rdac_dh_data *h = sdev->handler_data;
- int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
+ int rc, err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
unsigned char cdb[MAX_COMMAND_SIZE];
@@ -558,20 +558,23 @@ static void send_mode_select(struct work_struct *work)
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
- RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) {
+ rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
+ RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
+ if (!rc) {
+ h->state = RDAC_STATE_ACTIVE;
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+ "MODE_SELECT completed",
+ (char *) h->ctlr->array_name, h->ctlr->index);
+ err = SCSI_DH_OK;
+ } else if (rc < 0) {
+ err = SCSI_DH_IO;
+ } else {
err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
if (err == SCSI_DH_IMM_RETRY)
goto retry;
}
- if (err == SCSI_DH_OK) {
- h->state = RDAC_STATE_ACTIVE;
- RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
- "MODE_SELECT completed",
- (char *) h->ctlr->array_name, h->ctlr->index);
- }
list_for_each_entry_safe(qdata, tmp, &list, entry) {
list_del(&qdata->entry);
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index a982b9cf98..6a6ec32c46 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -1611,6 +1611,8 @@ static const struct target_core_fabric_ops efct_lio_ops = {
.sess_get_initiator_sid = NULL,
.tfc_tpg_base_attrs = efct_lio_tpg_attrs,
.tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static const struct target_core_fabric_ops efct_lio_npiv_ops = {
@@ -1646,6 +1648,9 @@ static const struct target_core_fabric_ops efct_lio_npiv_ops = {
.sess_get_initiator_sid = NULL,
.tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
.tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
int efct_scsi_tgt_driver_init(void)
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 055d2e87a2..3f7c1d131e 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -41,6 +41,8 @@
* USA.
*/
+#include <linux/bitfield.h>
+
#include "esas2r.h"
/*
@@ -792,16 +794,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
&caps);
- gai->pci.link_speed_curr =
- (u8)(stat & PCI_EXP_LNKSTA_CLS);
- gai->pci.link_speed_max =
- (u8)(caps & PCI_EXP_LNKCAP_SLS);
- gai->pci.link_width_curr =
- (u8)((stat & PCI_EXP_LNKSTA_NLW)
- >> PCI_EXP_LNKSTA_NLW_SHIFT);
- gai->pci.link_width_max =
- (u8)((caps & PCI_EXP_LNKCAP_MLW)
- >> 4);
+ gai->pci.link_speed_curr = FIELD_GET(PCI_EXP_LNKSTA_CLS, stat);
+ gai->pci.link_speed_max = FIELD_GET(PCI_EXP_LNKCAP_SLS, caps);
+ gai->pci.link_width_curr = FIELD_GET(PCI_EXP_LNKSTA_NLW, stat);
+ gai->pci.link_width_max = FIELD_GET(PCI_EXP_LNKCAP_MLW, caps);
}
gai->pci.msi_vector_cnt = 1;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 19eee108db..5c8d1ba3f8 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
- unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
- unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
}
/**
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 79ddfaaf71..55632c67a8 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -145,16 +145,17 @@ void fnic_handle_link(struct work_struct *work)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
- strlen("Link Status: DOWN_UP_VLAN"));
+ fnic_fc_trace_set_data(fnic->lport->host->host_no,
+ FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
+ strlen("Link Status: DOWN_UP_VLAN"));
fnic_fcoe_send_vlan_req(fnic);
+
return;
}
+
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
+ "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> DOWN */
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 9e73e9cbbc..1e4550156b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -343,7 +343,7 @@ struct hisi_sas_hw {
u8 reg_index, u8 reg_count, u8 *write_data);
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
int delay_ms, int timeout_ms);
- void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
+ int (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
int complete_hdr_size;
const struct scsi_host_template *sht;
};
@@ -451,7 +451,6 @@ struct hisi_hba {
const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
- struct work_struct debugfs_work;
u32 phy_state;
u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
u32 intr_coal_count; /* Interrupt count to coalesce */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b155ac8009..bbb7b2d9ff 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1961,8 +1961,11 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_internal_abort_data *timeout = data;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
+ down(&hisi_hba->sem);
+ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+ up(&hisi_hba->sem);
+ }
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
pr_err("Internal abort: timeout %016llx\n",
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 520fffc142..b56fbc61a1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -558,8 +558,7 @@ static int experimental_iopoll_q_cnt;
module_param(experimental_iopoll_q_cnt, int, 0444);
MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0");
-static void debugfs_work_handler_v3_hw(struct work_struct *work);
-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
+static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -1606,6 +1605,11 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
+ spin_lock(&phy->lock);
+ /* Delete timer and set phy_attached atomically */
+ del_timer(&phy->timer);
+ phy->phy_attached = 1;
+ spin_unlock(&phy->lock);
/*
* Call pm_runtime_get_noresume() which pairs with
@@ -1619,11 +1623,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
res = IRQ_HANDLED;
- spin_lock(&phy->lock);
- /* Delete timer and set phy_attached atomically */
- del_timer(&phy->timer);
- phy->phy_attached = 1;
- spin_unlock(&phy->lock);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
@@ -3388,7 +3387,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
hisi_hba = shost_priv(shost);
INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
- INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
@@ -3860,37 +3858,6 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
&debugfs_ras_v3_hw_fops);
}
-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
-{
- int debugfs_dump_index = hisi_hba->debugfs_dump_index;
- struct device *dev = hisi_hba->dev;
- u64 timestamp = local_clock();
-
- if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
- dev_warn(dev, "dump count exceeded!\n");
- return;
- }
-
- do_div(timestamp, NSEC_PER_MSEC);
- hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
-
- debugfs_snapshot_prepare_v3_hw(hisi_hba);
-
- debugfs_snapshot_global_reg_v3_hw(hisi_hba);
- debugfs_snapshot_port_reg_v3_hw(hisi_hba);
- debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
- debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
- debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
- debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
- debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
- debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
-
- debugfs_create_files_v3_hw(hisi_hba);
-
- debugfs_snapshot_restore_v3_hw(hisi_hba);
- hisi_hba->debugfs_dump_index++;
-}
-
static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -3898,9 +3865,6 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
struct hisi_hba *hisi_hba = file->f_inode->i_private;
char buf[8];
- if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
- return -EFAULT;
-
if (count > 8)
return -EFAULT;
@@ -3910,7 +3874,12 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
if (buf[0] != '1')
return -EFAULT;
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+ down(&hisi_hba->sem);
+ if (debugfs_snapshot_regs_v3_hw(hisi_hba)) {
+ up(&hisi_hba->sem);
+ return -EFAULT;
+ }
+ up(&hisi_hba->sem);
return count;
}
@@ -3990,22 +3959,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
return count;
}
-
-static int debugfs_bist_linkrate_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_linkrate_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_linkrate_v3_hw_fops = {
- .open = debugfs_bist_linkrate_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_linkrate_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_linkrate_v3_hw);
static const struct {
int value;
@@ -4080,22 +4034,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
return count;
}
-
-static int debugfs_bist_code_mode_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_code_mode_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_code_mode_v3_hw_fops = {
- .open = debugfs_bist_code_mode_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_code_mode_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_code_mode_v3_hw);
static ssize_t debugfs_bist_phy_v3_hw_write(struct file *filp,
const char __user *buf,
@@ -4129,22 +4068,7 @@ static int debugfs_bist_phy_v3_hw_show(struct seq_file *s, void *p)
return 0;
}
-
-static int debugfs_bist_phy_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_phy_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
- .open = debugfs_bist_phy_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_phy_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_phy_v3_hw);
static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
const char __user *buf,
@@ -4177,22 +4101,7 @@ static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
return 0;
}
-
-static int debugfs_bist_cnt_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_cnt_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_cnt_v3_hw_ops = {
- .open = debugfs_bist_cnt_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_cnt_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_cnt_v3_hw);
static const struct {
int value;
@@ -4256,22 +4165,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
return count;
}
-
-static int debugfs_bist_mode_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_mode_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_mode_v3_hw_fops = {
- .open = debugfs_bist_mode_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_mode_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_mode_v3_hw);
static ssize_t debugfs_bist_enable_v3_hw_write(struct file *filp,
const char __user *buf,
@@ -4309,22 +4203,7 @@ static int debugfs_bist_enable_v3_hw_show(struct seq_file *s, void *p)
return 0;
}
-
-static int debugfs_bist_enable_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_bist_enable_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_bist_enable_v3_hw_fops = {
- .open = debugfs_bist_enable_v3_hw_open,
- .read = seq_read,
- .write = debugfs_bist_enable_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_bist_enable_v3_hw);
static const struct {
char *name;
@@ -4362,21 +4241,7 @@ static int debugfs_v3_hw_show(struct seq_file *s, void *p)
return 0;
}
-
-static int debugfs_v3_hw_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, debugfs_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_v3_hw_fops = {
- .open = debugfs_v3_hw_open,
- .read = seq_read,
- .write = debugfs_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_v3_hw);
static ssize_t debugfs_phy_down_cnt_v3_hw_write(struct file *filp,
const char __user *buf,
@@ -4407,22 +4272,7 @@ static int debugfs_phy_down_cnt_v3_hw_show(struct seq_file *s, void *p)
return 0;
}
-
-static int debugfs_phy_down_cnt_v3_hw_open(struct inode *inode,
- struct file *filp)
-{
- return single_open(filp, debugfs_phy_down_cnt_v3_hw_show,
- inode->i_private);
-}
-
-static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = {
- .open = debugfs_phy_down_cnt_v3_hw_open,
- .read = seq_read,
- .write = debugfs_phy_down_cnt_v3_hw_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_STORE_ATTRIBUTE(debugfs_phy_down_cnt_v3_hw);
enum fifo_dump_mode_v3_hw {
FIFO_DUMP_FORVER = (1U << 0),
@@ -4661,14 +4511,6 @@ static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
}
}
-static void debugfs_work_handler_v3_hw(struct work_struct *work)
-{
- struct hisi_hba *hisi_hba =
- container_of(work, struct hisi_hba, debugfs_work);
-
- debugfs_snapshot_regs_v3_hw(hisi_hba);
-}
-
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
struct device *dev = hisi_hba->dev;
@@ -4703,7 +4545,7 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
- int p, c, d, r, i;
+ int p, c, d, r;
size_t sz;
for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
@@ -4783,11 +4625,48 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
return 0;
fail:
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
- debugfs_release_v3_hw(hisi_hba, i);
+ debugfs_release_v3_hw(hisi_hba, dump_index);
return -ENOMEM;
}
+static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
+
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
+ return -EINVAL;
+ }
+
+ if (debugfs_alloc_v3_hw(hisi_hba, debugfs_dump_index)) {
+ dev_warn(dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+
+ debugfs_snapshot_prepare_v3_hw(hisi_hba);
+
+ debugfs_snapshot_global_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_port_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+ debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+
+ debugfs_create_files_v3_hw(hisi_hba);
+
+ debugfs_snapshot_restore_v3_hw(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
+
+ return 0;
+}
+
static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct dentry *dir = debugfs_create_dir("phy_down_cnt",
@@ -4832,7 +4711,7 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
hisi_hba, &debugfs_bist_phy_v3_hw_fops);
debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
- hisi_hba, &debugfs_bist_cnt_v3_hw_ops);
+ hisi_hba, &debugfs_bist_cnt_v3_hw_fops);
debugfs_create_file("loopback_mode", 0600,
hisi_hba->debugfs_bist_dentry,
@@ -4874,7 +4753,6 @@ static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
- int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
@@ -4891,14 +4769,6 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
debugfs_fifo_init_v3_hw(hisi_hba);
-
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
- if (debugfs_alloc_v3_hw(hisi_hba, i)) {
- debugfs_exit_v3_hw(hisi_hba);
- dev_dbg(dev, "failed to init debugfs!\n");
- break;
- }
- }
}
static int
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c98346e464..92c440f2e3 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
+MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
+ "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
"[Default=" __stringify(IBMVFC_MAX_LUN) "]");
@@ -159,8 +163,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
-static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static const char *unknown_error = "unknown error";
@@ -775,28 +779,26 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
* ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
* @vhost: ibmvfc host who owns the event pool
* @queue: ibmvfc queue struct
- * @size: pool size
*
* Returns zero on success.
**/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
- struct ibmvfc_queue *queue,
- unsigned int size)
+ struct ibmvfc_queue *queue)
{
int i;
struct ibmvfc_event_pool *pool = &queue->evt_pool;
ENTER;
- if (!size)
+ if (!queue->total_depth)
return 0;
- pool->size = size;
- pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
+ pool->size = queue->total_depth;
+ pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
if (!pool->events)
return -ENOMEM;
pool->iu_storage = dma_alloc_coherent(vhost->dev,
- size * sizeof(*pool->iu_storage),
+ pool->size * sizeof(*pool->iu_storage),
&pool->iu_token, 0);
if (!pool->iu_storage) {
@@ -806,9 +808,11 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
INIT_LIST_HEAD(&queue->sent);
INIT_LIST_HEAD(&queue->free);
+ queue->evt_free = queue->evt_depth;
+ queue->reserved_free = queue->reserved_depth;
spin_lock_init(&queue->l_lock);
- for (i = 0; i < size; ++i) {
+ for (i = 0; i < pool->size; ++i) {
struct ibmvfc_event *evt = &pool->events[i];
/*
@@ -921,7 +925,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
unsigned long flags;
- ibmvfc_dereg_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
/* Re-enable the CRQ */
do {
@@ -940,7 +944,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_reg_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
return rc;
}
@@ -959,7 +963,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_queue *crq = &vhost->crq;
- ibmvfc_dereg_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
/* Close the CRQ */
do {
@@ -992,7 +996,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_reg_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
return rc;
}
@@ -1032,6 +1036,12 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->free);
+ if (evt->reserved) {
+ evt->reserved = 0;
+ evt->queue->reserved_free++;
+ } else {
+ evt->queue->evt_free++;
+ }
if (evt->eh_comp)
complete(evt->eh_comp);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
@@ -1474,6 +1484,12 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct device_node *of_node = vhost->dev->of_node;
const char *location;
+ u16 max_cmds;
+
+ max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+ if (mq_enabled)
+ max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
+ vhost->scsi_scrqs.desired_queues;
memset(login_info, 0, sizeof(*login_info));
@@ -1488,7 +1504,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
if (vhost->client_migrated)
login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
- login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
+ login_info->max_cmds = cpu_to_be32(max_cmds);
login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
if (vhost->mq_enabled || vhost->using_channels)
@@ -1507,29 +1523,39 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
}
/**
- * ibmvfc_get_event - Gets the next free event in pool
+ * __ibmvfc_get_event - Gets the next free event in pool
* @queue: ibmvfc queue struct
+ * @reserved: event is for a reserved management command
*
* Returns a free event from the pool.
**/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
{
- struct ibmvfc_event *evt;
+ struct ibmvfc_event *evt = NULL;
unsigned long flags;
spin_lock_irqsave(&queue->l_lock, flags);
- if (list_empty(&queue->free)) {
- ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
- spin_unlock_irqrestore(&queue->l_lock, flags);
- return NULL;
+ if (reserved && queue->reserved_free) {
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ evt->reserved = 1;
+ queue->reserved_free--;
+ } else if (queue->evt_free) {
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ queue->evt_free--;
+ } else {
+ goto out;
}
- evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+
atomic_set(&evt->free, 0);
list_del(&evt->queue_list);
+out:
spin_unlock_irqrestore(&queue->l_lock, flags);
return evt;
}
+#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
+#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
+
/**
* ibmvfc_locked_done - Calls evt completion with host_lock held
* @evt: ibmvfc evt to complete
@@ -2046,7 +2072,7 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
}
vhost->aborting_passthru = 1;
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return -ENOMEM;
@@ -2109,7 +2135,7 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
goto unlock_out;
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
rc = -ENOMEM;
goto unlock_out;
@@ -2231,7 +2257,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
goto out;
}
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = -ENOMEM;
@@ -2532,7 +2558,7 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
- evt = ibmvfc_get_event(queue);
+ evt = ibmvfc_get_reserved_event(queue);
if (!evt)
return NULL;
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
@@ -2970,18 +2996,6 @@ static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
}
/**
- * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
- * @sdev: scsi device struct
- * @data: return code
- *
- **/
-static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
-{
- unsigned long *rc = data;
- *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
-}
-
-/**
* ibmvfc_eh_target_reset_handler - Reset the target
* @cmd: scsi command struct
*
@@ -2990,22 +3004,38 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
- struct ibmvfc_host *vhost = shost_priv(sdev->host);
- struct scsi_target *starget = scsi_target(sdev);
+ struct scsi_target *starget = scsi_target(cmd->device);
+ struct fc_rport *rport = starget_to_rport(starget);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct ibmvfc_host *vhost = shost_priv(shost);
int block_rc;
int reset_rc = 0;
int rc = FAILED;
unsigned long cancel_rc = 0;
+ bool tgt_reset = false;
ENTER;
- block_rc = fc_block_scsi_eh(cmd);
+ block_rc = fc_block_rport(rport);
ibmvfc_wait_while_resetting(vhost);
if (block_rc != FAST_IO_FAIL) {
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
- reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ if ((sdev->channel != starget->channel) ||
+ (sdev->id != starget->id))
+ continue;
+
+ cancel_rc |= ibmvfc_cancel_all(sdev,
+ IBMVFC_TMF_TGT_RESET);
+ if (!tgt_reset) {
+ reset_rc = ibmvfc_reset_device(sdev,
+ IBMVFC_TARGET_RESET, "target");
+ tgt_reset = true;
+ }
+ }
} else
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
+ starget_for_each_device(starget, &cancel_rc,
+ ibmvfc_dev_cancel_all_noreset);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
@@ -3552,11 +3582,12 @@ static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
unsigned long flags = 0;
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
@@ -3567,12 +3598,13 @@ static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
unsigned long flags = 0;
unsigned int channels;
spin_lock_irqsave(shost->host_lock, flags);
channels = simple_strtoul(buf, NULL, 10);
- vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
+ scsi->desired_queues = min(channels, shost->nr_hw_queues);
ibmvfc_hard_reset_host(vhost);
spin_unlock_irqrestore(shost->host_lock, flags);
return strlen(buf);
@@ -3672,7 +3704,6 @@ static const struct scsi_host_template driver_template = {
.max_sectors = IBMVFC_MAX_SECTORS,
.shost_groups = ibmvfc_host_groups,
.track_queue_depth = 1,
- .host_tagset = 1,
};
/**
@@ -3908,7 +3939,7 @@ static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
}
}
-static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
+static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
{
struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
@@ -4070,7 +4101,7 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4183,7 +4214,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
tgt->logo_rcvd = 0;
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4265,7 +4296,7 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
struct ibmvfc_event *evt;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt)
return NULL;
ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
@@ -4440,7 +4471,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4612,7 +4643,7 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
vhost->abort_threads++;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
vhost->abort_threads--;
@@ -4670,7 +4701,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4779,7 +4810,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -4908,7 +4939,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
- rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
+ rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
return rc;
}
@@ -4957,7 +4988,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
struct ibmvfc_discover_targets *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
if (!evt) {
@@ -4972,9 +5003,9 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
mad->common.version = cpu_to_be32(1);
mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
mad->common.length = cpu_to_be16(sizeof(*mad));
- mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
- mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
- mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
+ mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
+ mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
@@ -4988,7 +5019,7 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
- struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
int flags, active_queues, i;
@@ -5038,10 +5069,10 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
{
struct ibmvfc_channel_setup_mad *mad;
struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
- struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+ struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
unsigned int num_channels =
- min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
+ min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
int i;
@@ -5111,7 +5142,7 @@ static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
{
struct ibmvfc_channel_enquiry *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
if (!evt) {
@@ -5239,7 +5270,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login_mad *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
@@ -5310,7 +5341,7 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
- evt = ibmvfc_get_event(&vhost->crq);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
if (!evt) {
ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
ibmvfc_hard_reset_host(vhost);
@@ -5764,7 +5795,6 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
{
struct device *dev = vhost->dev;
size_t fmt_size;
- unsigned int pool_size = 0;
ENTER;
spin_lock_init(&queue->_lock);
@@ -5773,7 +5803,9 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
switch (fmt) {
case IBMVFC_CRQ_FMT:
fmt_size = sizeof(*queue->msgs.crq);
- pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+ queue->evt_depth = scsi_qdepth;
+ queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
break;
case IBMVFC_ASYNC_FMT:
fmt_size = sizeof(*queue->msgs.async);
@@ -5781,14 +5813,17 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
case IBMVFC_SUB_CRQ_FMT:
fmt_size = sizeof(*queue->msgs.scrq);
/* We need one extra event for Cancel Commands */
- pool_size = max_requests + 1;
+ queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
+ queue->evt_depth = scsi_qdepth;
+ queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
break;
default:
dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
return -EINVAL;
}
- if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+ queue->fmt = fmt;
+ if (ibmvfc_init_event_pool(vhost, queue)) {
dev_err(dev, "Couldn't initialize event pool.\n");
return -ENOMEM;
}
@@ -5807,7 +5842,6 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
}
queue->cur = 0;
- queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
queue->vhost = vhost;
@@ -5876,12 +5910,13 @@ reg_crq_failed:
return retrc;
}
-static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
- int index)
+static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels,
+ int index)
{
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
- struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ struct ibmvfc_queue *scrq = &channels->scrqs[index];
int rc = -ENOMEM;
ENTER;
@@ -5905,9 +5940,24 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
goto irq_failed;
}
- snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
- vdev->unit_address, index);
- rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
+ switch (channels->protocol) {
+ case IBMVFC_PROTO_SCSI:
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
+ vdev->unit_address, index);
+ scrq->handler = ibmvfc_interrupt_mq;
+ break;
+ case IBMVFC_PROTO_NVME:
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
+ vdev->unit_address, index);
+ scrq->handler = ibmvfc_interrupt_mq;
+ break;
+ default:
+ dev_err(dev, "Unknown channel protocol (%d)\n",
+ channels->protocol);
+ goto irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
@@ -5929,11 +5979,13 @@ reg_failed:
return rc;
}
-static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
+static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels,
+ int index)
{
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
- struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ struct ibmvfc_queue *scrq = &channels->scrqs[index];
long rc;
ENTER;
@@ -5957,18 +6009,19 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
LEAVE;
}
-static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
{
int i, j;
ENTER;
- if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ if (!vhost->mq_enabled || !channels->scrqs)
return;
- for (i = 0; i < nr_scsi_hw_queues; i++) {
- if (ibmvfc_register_scsi_channel(vhost, i)) {
+ for (i = 0; i < channels->max_queues; i++) {
+ if (ibmvfc_register_channel(vhost, channels, i)) {
for (j = i; j > 0; j--)
- ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ ibmvfc_deregister_channel(vhost, channels, j - 1);
vhost->do_enquiry = 0;
return;
}
@@ -5977,80 +6030,105 @@ static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
LEAVE;
}
-static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
{
int i;
ENTER;
- if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ if (!vhost->mq_enabled || !channels->scrqs)
return;
- for (i = 0; i < nr_scsi_hw_queues; i++)
- ibmvfc_deregister_scsi_channel(vhost, i);
+ for (i = 0; i < channels->max_queues; i++)
+ ibmvfc_deregister_channel(vhost, channels, i);
LEAVE;
}
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
{
struct ibmvfc_queue *scrq;
int i, j;
+ int rc = 0;
+
+ channels->scrqs = kcalloc(channels->max_queues,
+ sizeof(*channels->scrqs),
+ GFP_KERNEL);
+ if (!channels->scrqs)
+ return -ENOMEM;
+ for (i = 0; i < channels->max_queues; i++) {
+ scrq = &channels->scrqs[i];
+ rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
+ if (rc) {
+ for (j = i; j > 0; j--) {
+ scrq = &channels->scrqs[j - 1];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+ kfree(channels->scrqs);
+ channels->scrqs = NULL;
+ channels->active_queues = 0;
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+{
ENTER;
if (!vhost->mq_enabled)
return;
- vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
- sizeof(*vhost->scsi_scrqs.scrqs),
- GFP_KERNEL);
- if (!vhost->scsi_scrqs.scrqs) {
+ if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
vhost->do_enquiry = 0;
+ vhost->mq_enabled = 0;
return;
}
- for (i = 0; i < nr_scsi_hw_queues; i++) {
- scrq = &vhost->scsi_scrqs.scrqs[i];
- if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
- for (j = i; j > 0; j--) {
- scrq = &vhost->scsi_scrqs.scrqs[j - 1];
- ibmvfc_free_queue(vhost, scrq);
- }
- kfree(vhost->scsi_scrqs.scrqs);
- vhost->scsi_scrqs.scrqs = NULL;
- vhost->scsi_scrqs.active_queues = 0;
- vhost->do_enquiry = 0;
- vhost->mq_enabled = 0;
- return;
- }
- }
-
- ibmvfc_reg_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
LEAVE;
}
-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
{
struct ibmvfc_queue *scrq;
int i;
+ if (channels->scrqs) {
+ for (i = 0; i < channels->max_queues; i++) {
+ scrq = &channels->scrqs[i];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+
+ kfree(channels->scrqs);
+ channels->scrqs = NULL;
+ channels->active_queues = 0;
+ }
+}
+
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+{
ENTER;
if (!vhost->scsi_scrqs.scrqs)
return;
- ibmvfc_dereg_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
- for (i = 0; i < nr_scsi_hw_queues; i++) {
- scrq = &vhost->scsi_scrqs.scrqs[i];
- ibmvfc_free_queue(vhost, scrq);
- }
-
- kfree(vhost->scsi_scrqs.scrqs);
- vhost->scsi_scrqs.scrqs = NULL;
- vhost->scsi_scrqs.active_queues = 0;
+ ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
LEAVE;
}
+static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+ dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
+ channels->disc_buf_dma);
+}
+
/**
* ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct
@@ -6065,8 +6143,7 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
ENTER;
mempool_destroy(vhost->tgt_pool);
kfree(vhost->trace);
- dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
- vhost->disc_buf_dma);
+ ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
@@ -6076,6 +6153,21 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
LEAVE;
}
+static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+ channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
+ channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
+ &channels->disc_buf_dma, GFP_KERNEL);
+
+ if (!channels->disc_buf) {
+ dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
+ (channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/**
* ibmvfc_alloc_mem - Allocate memory for vhost
* @vhost: ibmvfc host struct
@@ -6111,21 +6203,15 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_sg_pool;
}
- vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
- vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
- &vhost->disc_buf_dma, GFP_KERNEL);
-
- if (!vhost->disc_buf) {
- dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+ if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
goto free_login_buffer;
- }
vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
atomic_set(&vhost->trace_index, -1);
if (!vhost->trace)
- goto free_disc_buffer;
+ goto free_scsi_disc_buffer;
vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
sizeof(struct ibmvfc_target));
@@ -6151,9 +6237,8 @@ free_tgt_pool:
mempool_destroy(vhost->tgt_pool);
free_trace:
kfree(vhost->trace);
-free_disc_buffer:
- dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
- vhost->disc_buf_dma);
+free_scsi_disc_buffer:
+ ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
free_login_buffer:
dma_free_coherent(dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
@@ -6232,7 +6317,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct Scsi_Host *shost;
struct device *dev = &vdev->dev;
int rc = -ENOMEM;
- unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
+ unsigned int online_cpus = num_online_cpus();
+ unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
ENTER;
shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
@@ -6242,7 +6328,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
shost->transportt = ibmvfc_transport_template;
- shost->can_queue = max_requests;
+ shost->can_queue = scsi_qdepth;
shost->max_lun = max_lun;
shost->max_id = max_targets;
shost->max_sectors = IBMVFC_MAX_SECTORS;
@@ -6261,7 +6347,9 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
vhost->task_set = 1;
vhost->mq_enabled = mq_enabled;
- vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
+ vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
+ vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
+ vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
vhost->using_channels = 0;
vhost->do_enquiry = 1;
vhost->scan_timeout = 0;
@@ -6401,7 +6489,9 @@ static int ibmvfc_resume(struct device *dev)
*/
static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
{
- unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+ unsigned long pool_dma;
+
+ pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index c39a245f43..745ad5ac72 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -27,6 +27,7 @@
#define IBMVFC_ABORT_TIMEOUT 8
#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
+#define IBMVFC_SCSI_QDEPTH 128
#define IBMVFC_DEBUG 0
#define IBMVFC_MAX_TARGETS 1024
@@ -57,6 +58,8 @@
* 2 for each discovery thread
*/
#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + 2 + (disc_threads * 2))
+/* Reserved subset of events for cancelling channelized IO commands */
+#define IBMVFC_NUM_INTERNAL_SUBQ_REQ 4
#define IBMVFC_MAD_SUCCESS 0x00
#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -713,9 +716,15 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
};
+enum ibmvfc_protocol {
+ IBMVFC_PROTO_SCSI = 0,
+ IBMVFC_PROTO_NVME = 1,
+};
+
struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
+ enum ibmvfc_protocol protocol;
u64 scsi_id;
u64 wwpn;
u64 new_scsi_id;
@@ -758,6 +767,7 @@ struct ibmvfc_event {
struct completion *eh_comp;
struct timer_list timer;
u16 hwq;
+ u8 reserved;
};
/* a pool of event structs for use */
@@ -793,6 +803,11 @@ struct ibmvfc_queue {
struct ibmvfc_event_pool evt_pool;
struct list_head sent;
struct list_head free;
+ u16 total_depth;
+ u16 evt_depth;
+ u16 reserved_depth;
+ u16 evt_free;
+ u16 reserved_free;
spinlock_t l_lock;
union ibmvfc_iu cancel_rsp;
@@ -804,11 +819,18 @@ struct ibmvfc_queue {
unsigned long irq;
unsigned long hwq_id;
char name[32];
+ irq_handler_t handler;
};
-struct ibmvfc_scsi_channels {
+struct ibmvfc_channels {
struct ibmvfc_queue *scrqs;
+ enum ibmvfc_protocol protocol;
unsigned int active_queues;
+ unsigned int desired_queues;
+ unsigned int max_queues;
+ int disc_buf_sz;
+ struct ibmvfc_discover_targets_entry *disc_buf;
+ dma_addr_t disc_buf_dma;
};
enum ibmvfc_host_action {
@@ -857,37 +879,33 @@ struct ibmvfc_host {
mempool_t *tgt_pool;
struct ibmvfc_queue crq;
struct ibmvfc_queue async_crq;
- struct ibmvfc_scsi_channels scsi_scrqs;
+ struct ibmvfc_channels scsi_scrqs;
struct ibmvfc_npiv_login login_info;
union ibmvfc_npiv_login_data *login_buf;
dma_addr_t login_buf_dma;
struct ibmvfc_channel_setup *channel_setup_buf;
dma_addr_t channel_setup_dma;
- int disc_buf_sz;
int log_level;
- struct ibmvfc_discover_targets_entry *disc_buf;
struct mutex passthru_mutex;
- int max_vios_scsi_channels;
+ unsigned int max_vios_scsi_channels;
int task_set;
int init_retries;
int discovery_threads;
int abort_threads;
- int client_migrated;
- int reinit;
- int delay_init;
- int scan_complete;
+ unsigned int client_migrated:1;
+ unsigned int reinit:1;
+ unsigned int delay_init:1;
+ unsigned int logged_in:1;
+ unsigned int mq_enabled:1;
+ unsigned int using_channels:1;
+ unsigned int do_enquiry:1;
+ unsigned int aborting_passthru:1;
+ unsigned int scan_complete:1;
int scan_timeout;
- int logged_in;
- int mq_enabled;
- int using_channels;
- int do_enquiry;
- int client_scsi_channels;
- int aborting_passthru;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
#define IBMVFC_AE_RSCN 0x0004
- dma_addr_t disc_buf_dma;
unsigned int partition_number;
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 385f812b87..4dc411a581 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3975,6 +3975,9 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.fabric_drop_tpg = ibmvscsis_drop_tpg,
.tfc_wwn_attrs = ibmvscsis_wwn_attrs,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static void ibmvscsis_dev_release(struct device *dev) {};
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 07db98161a..180a5ddedb 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -51,10 +51,15 @@ typedef struct {
} imm_struct;
static void imm_reset_pulse(unsigned int base);
-static int device_check(imm_struct *dev);
+static int device_check(imm_struct *dev, bool autodetect);
#include "imm.h"
+static unsigned int mode = IMM_AUTODETECT;
+module_param(mode, uint, 0644);
+MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, "
+ "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit");
+
static inline imm_struct *imm_dev(struct Scsi_Host *host)
{
return *(imm_struct **)&host->hostdata;
@@ -366,13 +371,10 @@ static int imm_out(imm_struct *dev, char *buffer, int len)
case IMM_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x4);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- outsw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == IMM_EPP_32 && !(((long) buffer | len) & 0x03))
outsl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == IMM_EPP_16 && !(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
else
outsb(ppb + 4, buffer, len);
w_ctr(ppb, 0xc);
@@ -426,13 +428,10 @@ static int imm_in(imm_struct *dev, char *buffer, int len)
case IMM_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x24);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- insw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
- insl(ppb + 4, buffer, len >> 2);
-#endif
+ if (dev->mode == IMM_EPP_32 && !(((long) buffer | len) & 0x03))
+ insl(ppb + 4, buffer, len >> 2);
+ else if (dev->mode == IMM_EPP_16 && !(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
else
insb(ppb + 4, buffer, len);
w_ctr(ppb, 0x2c);
@@ -589,13 +588,28 @@ static int imm_select(imm_struct *dev, int target)
static int imm_init(imm_struct *dev)
{
+ bool autodetect = dev->mode == IMM_AUTODETECT;
+
+ if (autodetect) {
+ int modes = dev->dev->port->modes;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = IMM_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = IMM_PS2;
+ }
+
if (imm_connect(dev, 0) != 1)
return -EIO;
imm_reset_pulse(dev->base);
mdelay(1); /* Delay to allow devices to settle */
imm_disconnect(dev);
mdelay(1); /* Another delay to allow devices to settle */
- return device_check(dev);
+
+ return device_check(dev, autodetect);
}
static inline int imm_send_command(struct scsi_cmnd *cmd)
@@ -1000,7 +1014,7 @@ static int imm_reset(struct scsi_cmnd *cmd)
return SUCCESS;
}
-static int device_check(imm_struct *dev)
+static int device_check(imm_struct *dev, bool autodetect)
{
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
@@ -1012,8 +1026,8 @@ static int device_check(imm_struct *dev)
old_mode = dev->mode;
for (loop = 0; loop < 8; loop++) {
/* Attempt to use EPP for Test Unit Ready */
- if ((ppb & 0x0007) == 0x0000)
- dev->mode = IMM_EPP_32;
+ if (autodetect && (ppb & 0x0007) == 0x0000)
+ dev->mode = IMM_EPP_8;
second_pass:
imm_connect(dev, CONNECT_EPP_MAYBE);
@@ -1038,7 +1052,7 @@ static int device_check(imm_struct *dev)
udelay(1000);
imm_disconnect(dev);
udelay(1000);
- if (dev->mode == IMM_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -1063,7 +1077,7 @@ static int device_check(imm_struct *dev)
udelay(1000);
imm_disconnect(dev);
udelay(1000);
- if (dev->mode == IMM_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -1150,7 +1164,6 @@ static int __imm_attach(struct parport *pb)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
DEFINE_WAIT(wait);
int ports;
- int modes, ppb;
int err = -ENOMEM;
struct pardev_cb imm_cb;
@@ -1162,7 +1175,7 @@ static int __imm_attach(struct parport *pb)
dev->base = -1;
- dev->mode = IMM_AUTODETECT;
+ dev->mode = mode < IMM_UNKNOWN ? mode : IMM_AUTODETECT;
INIT_LIST_HEAD(&dev->list);
temp = find_parent();
@@ -1197,18 +1210,9 @@ static int __imm_attach(struct parport *pb)
}
dev->waiting = NULL;
finish_wait(&waiting, &wait);
- ppb = dev->base = dev->dev->port->base;
+ dev->base = dev->dev->port->base;
dev->base_hi = dev->dev->port->base_hi;
- w_ctr(ppb, 0x0c);
- modes = dev->dev->port->modes;
-
- /* Mode detection works up the chain of speed
- * This avoids a nasty if-then-else-if-... tree
- */
- dev->mode = IMM_NIBBLE;
-
- if (modes & PARPORT_MODE_TRISTATE)
- dev->mode = IMM_PS2;
+ w_ctr(dev->base, 0x0c);
/* Done configuration */
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index 411cf94af5..398fa5b151 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -100,11 +100,7 @@ static char *IMM_MODE_STRING[] =
[IMM_PS2] = "PS/2",
[IMM_EPP_8] = "EPP 8 bit",
[IMM_EPP_16] = "EPP 16 bit",
-#ifdef CONFIG_SCSI_IZIP_EPP16
- [IMM_EPP_32] = "EPP 16 bit",
-#else
[IMM_EPP_32] = "EPP 32 bit",
-#endif
[IMM_UNKNOWN] = "Unknown",
};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 4e13797b2a..81e3d464d1 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -761,12 +761,14 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+ int rc;
if (pcix_cmd_reg == 0)
return 0;
- if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
- &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+ rc = pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
+ &ioa_cfg->saved_pcix_cmd_reg);
+ if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
return -EIO;
}
@@ -785,10 +787,12 @@ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+ int rc;
if (pcix_cmd_reg) {
- if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
- ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+ rc = pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
+ ioa_cfg->saved_pcix_cmd_reg);
+ if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
return -EIO;
}
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb20650926..10cf5775a9 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -835,7 +835,6 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
int i;
ips_ha_t *ha;
ips_scb_t *scb;
- ips_copp_wait_item_t *item;
METHOD_TRACE("ips_eh_reset", 1);
@@ -860,23 +859,6 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
if (!ha->active)
return (FAILED);
- /* See if the command is on the copp queue */
- item = ha->copp_waitlist.head;
- while ((item) && (item->scsi_cmd != SC))
- item = item->next;
-
- if (item) {
- /* Found it */
- ips_removeq_copp(&ha->copp_waitlist, item);
- return (SUCCESS);
- }
-
- /* See if the command is on the wait queue */
- if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
- /* command not sent yet */
- return (SUCCESS);
- }
-
/* An explanation for the casual observer: */
/* Part of the function of a RAID controller is automatic error */
/* detection and recovery. As such, the only problem that physically */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index a7b3243b47..7162a5029b 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3390,7 +3390,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
return SCI_FAILURE;
}
- return SCI_SUCCESS;
+ return status;
}
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 945adca5e7..05be0810b5 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -265,6 +265,11 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
+ if (fsp->state & FC_SRB_ABORT_PENDING) {
+ FC_FCP_DBG(fsp, "abort already pending\n");
+ return -EBUSY;
+ }
+
this_cpu_inc(fsp->lp->stats->FcpPktAborts);
fsp->state |= FC_SRB_ABORT_PENDING;
@@ -1671,7 +1676,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_fcp_recovery(fsp, FC_ERROR);
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1690,11 +1695,12 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
- /*
- * if this fails then we let the scsi command timer fire and
- * scsi-ml escalate.
- */
- fc_fcp_send_abort(fsp);
+ if (!fsp->cmd)
+ /*
+ * Only abort non-scsi commands; otherwise let the
+ * scsi command timer fire and scsi-ml escalate.
+ */
+ fc_fcp_send_abort(fsp);
}
/**
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index ff7b63b10a..8fb7c41c09 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -275,7 +275,7 @@ static void sas_resume_devices(struct work_struct *work)
*
* See comment in sas_discover_sata().
*/
-int sas_discover_end_dev(struct domain_device *dev)
+static int sas_discover_end_dev(struct domain_device *dev)
{
return sas_notify_lldd_dev_found(dev);
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 8586dc79f2..9c8cc72317 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -315,8 +315,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
EXPORT_SYMBOL_GPL(sas_phy_reset);
-int sas_set_phy_speed(struct sas_phy *phy,
- struct sas_phy_linkrates *rates)
+static int sas_set_phy_speed(struct sas_phy *phy,
+ struct sas_phy_linkrates *rates)
{
int ret;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index a6dc7dc07f..3804aef165 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -39,6 +39,18 @@ struct sas_phy_data {
struct sas_work enable_work;
};
+void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
+
+int sas_discover_root_expander(struct domain_device *dev);
+
+int sas_ex_revalidate_domain(struct domain_device *dev);
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
+void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port);
+void sas_discover_event(struct asd_sas_port *port, enum discover_event ev);
+
+void sas_init_dev(struct domain_device *dev);
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev);
+
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_register_phys(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index af15f7a22d..04d608ea91 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -33,6 +33,7 @@
struct lpfc_sli2_slim;
#define ELX_MODEL_NAME_SIZE 80
+#define ELX_FW_NAME_SIZE 84
#define LPFC_PCI_DEV_LP 0x1
#define LPFC_PCI_DEV_OC 0x2
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 54e47f2682..385e1636f1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -131,6 +131,15 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 1;
}
+static bool lpfc_is_els_acc_rsp(struct lpfc_dmabuf *buf)
+{
+ struct fc_els_ls_acc *rsp = buf->virt;
+
+ if (rsp && rsp->la_cmd == ELS_LS_ACC)
+ return true;
+ return false;
+}
+
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
@@ -1107,6 +1116,8 @@ stop_rr_fcf_flogi:
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
goto out;
+ if (!lpfc_is_els_acc_rsp(prsp))
+ goto out;
sp = prsp->virt + sizeof(uint32_t);
/* FLOGI completes successfully */
@@ -1119,12 +1130,12 @@ stop_rr_fcf_flogi:
vport->port_state, vport->fc_flag,
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
- if (sp->cmn.priority_tagging)
- vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
- LPFC_VMID_TYPE_PRIO);
/* reinitialize the VMID datastructure before returning */
if (lpfc_is_vmid_enabled(phba))
lpfc_reinit_vmid(vport);
+ if (sp->cmn.priority_tagging)
+ vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+ LPFC_VMID_TYPE_PRIO);
/*
* Address a timing race with dev_loss. If dev_loss is active on
@@ -2117,8 +2128,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_DEVICE_RM);
} else {
/* Good status, call state machine */
- prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
- struct lpfc_dmabuf, list);
+ prsp = list_get_first(&cmdiocb->cmd_dmabuf->list,
+ struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ if (!lpfc_is_els_acc_rsp(prsp))
+ goto out;
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
sp = (struct serv_parm *)((u8 *)prsp->virt +
@@ -3445,6 +3460,8 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
if (!prdf)
goto out;
+ if (!lpfc_is_els_acc_rsp(prsp))
+ goto out;
for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
@@ -4043,6 +4060,9 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
edc_rsp->acc_hdr.la_cmd,
be32_to_cpu(edc_rsp->desc_list_len));
+ if (!lpfc_is_els_acc_rsp(prsp))
+ goto out;
+
/*
* Payload length in bytes is the response descriptor list
* length minus the 12 bytes of Link Service Request
@@ -11110,6 +11130,14 @@ mbox_err_exit:
lpfc_nlp_put(ndlp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* reinitialize the VMID datastructure before returning.
+ * this is specifically for vport
+ */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_reinit_vmid(vport);
+ vport->vmid_flag = vport->phba->pport->vmid_flag;
+
return;
}
@@ -11339,6 +11367,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
goto out;
+ if (!lpfc_is_els_acc_rsp(prsp))
+ goto out;
+
sp = prsp->virt + sizeof(uint32_t);
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
memcpy(&vport->fabric_portname, &sp->portName,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5154eeaee0..7ef9841f07 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5654,7 +5654,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t)ndlp->nlp_xri << 16) |
((uint32_t)ndlp->nlp_type << 8)
);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"0929 FIND node DID "
"Data: x%px x%x x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
@@ -5701,8 +5701,8 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
((uint32_t)ndlp->nlp_type << 8) |
((uint32_t)ndlp->nlp_rpi & 0xff));
spin_unlock_irqrestore(shost->host_lock, iflags);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "2025 FIND node DID "
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ "2025 FIND node DID MAPPED "
"Data: x%px x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
@@ -6468,7 +6468,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"3185 FIND node filter %ps DID "
"ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9e59c05010..70bcee64bc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12442,9 +12442,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
-#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo;
-#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hdwq_stat *c_stat;
#endif
@@ -12458,9 +12455,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
- cpuinfo = &cpu_data(cpu);
- cpup->phys_id = cpuinfo->phys_proc_id;
- cpup->core_id = cpuinfo->cpu_core_id;
+ cpup->phys_id = topology_physical_package_id(cpu);
+ cpup->core_id = topology_core_id(cpu);
if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
@@ -14725,7 +14721,7 @@ out:
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
- uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ char file_name[ELX_FW_NAME_SIZE] = {0};
int ret;
const struct firmware *fw;
@@ -14734,7 +14730,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
LPFC_SLI_INTF_IF_TYPE_2)
return -EPERM;
- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+ scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
if (fw_upgrade == INT_FW_UPGRADE) {
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index f896ec6104..59bd2bafc7 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -25,7 +25,7 @@
#define LOG_MBOX 0x00000004 /* Mailbox events */
#define LOG_INIT 0x00000008 /* Initialization events */
#define LOG_LINK_EVENT 0x00000010 /* Link events */
-#define LOG_IP 0x00000020 /* IP traffic history */
+#define LOG_NODE_VERBOSE 0x00000020 /* Node verbose events */
#define LOG_FCP 0x00000040 /* FCP traffic history */
#define LOG_NODE 0x00000080 /* Node table events */
#define LOG_TEMP 0x00000100 /* Temperature sensor events */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1eb7f7e60b..d9074929fb 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -934,25 +934,35 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
struct ls_rjt stat;
uint32_t *payload;
uint32_t cmd;
+ PRLI *npr;
payload = cmdiocb->cmd_dmabuf->virt;
cmd = *payload;
+ npr = (PRLI *)((uint8_t *)payload + sizeof(uint32_t));
+
if (vport->phba->nvmet_support) {
/* Must be a NVME PRLI */
- if (cmd == ELS_CMD_PRLI)
+ if (cmd == ELS_CMD_PRLI)
goto out;
} else {
/* Initiator mode. */
if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
goto out;
+
+ /* NPIV ports will RJT initiator only functions */
+ if (vport->port_type == LPFC_NPIV_PORT &&
+ npr->initiatorFunc && !npr->targetFunc)
+ goto out;
}
return 1;
out:
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
- "state x%x flags x%x\n",
+ "state x%x flags x%x port_type: x%x "
+ "npr->initfcn: x%x npr->tgtfcn: x%x\n",
cmd, ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, vport->port_type,
+ npr->initiatorFunc, npr->targetFunc);
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 96e11a26c2..128fc1bab5 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -950,7 +950,7 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
- int offline = 0;
+ bool offline = false;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@@ -1124,7 +1124,9 @@ out_err:
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
nCmd->status = NVME_SC_INTERNAL;
- offline = pci_channel_offline(vport->phba->pcidev);
+ if (pci_channel_offline(vport->phba->pcidev) ||
+ lpfc_ncmd->result == IOERR_SLI_DOWN)
+ offline = true;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d26941b131..bf879d8184 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1918,7 +1918,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datasegcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr;
- int i = 0, num_sge = 0, status;
- uint32_t reftag;
+ int i = 0, status;
+ uint32_t reftag, num_sge = 0;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2099,7 +2099,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datacnt, int protcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t rc;
#endif
uint32_t checking = 1;
- uint32_t dma_offset = 0;
- int num_sge = 0, j = 2;
+ uint32_t dma_offset = 0, num_sge = 0;
+ int j = 2;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4dfadf254a..9386e7b447 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8571,12 +8571,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* is not fatal as the driver will use generic values.
*/
rc = lpfc_parse_vpd(phba, vpd, vpd_size);
- if (unlikely(!rc)) {
+ if (unlikely(!rc))
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0377 Error %d parsing vpd. "
"Using defaults.\n", rc);
- rc = 0;
- }
kfree(vpd);
/* Save information as VPD data */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 13a547277f..88068834ca 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.14"
+#define LPFC_DRIVER_VERSION "14.2.0.15"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index cf8ba840d0..773e02ae20 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -321,5 +321,6 @@ lpfc_reinit_vmid(struct lpfc_vport *vport)
if (!hash_empty(vport->hash_table))
hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
hash_del(&cur->hnode);
+ vport->vmid_flag = 0;
write_unlock(&vport->vmid_lock);
}
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index e92f1a73cc..66a30a3e6c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1898,7 +1898,7 @@ megaraid_reset(struct scsi_cmnd *cmd)
spin_lock_irq(&adapter->lock);
- rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
+ rval = megaraid_abort_and_reset(adapter, NULL, SCB_RESET);
/*
* This is required here to complete any completed requests
@@ -1925,10 +1925,13 @@ megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
struct list_head *pos, *next;
scb_t *scb;
- dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
- (aor == SCB_ABORT)? "ABORTING":"RESET",
- cmd->cmnd[0], cmd->device->channel,
- cmd->device->id, (u32)cmd->device->lun);
+ if (aor == SCB_ABORT)
+ dev_warn(&adapter->dev->dev,
+ "ABORTING cmd=%x <c=%d t=%d l=%d>\n",
+ cmd->cmnd[0], cmd->device->channel,
+ cmd->device->id, (u32)cmd->device->lun);
+ else
+ dev_warn(&adapter->dev->dev, "RESETTING\n");
if(list_empty(&adapter->pending_list))
return FAILED;
@@ -1937,7 +1940,7 @@ megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
scb = list_entry(pos, scb_t, list);
- if (scb->cmd == cmd) { /* Found command */
+ if (!cmd || scb->cmd == cmd) { /* Found command */
scb->state |= aor;
@@ -1956,31 +1959,23 @@ megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
return FAILED;
}
- else {
-
- /*
- * Not yet issued! Remove from the pending
- * list
- */
- dev_warn(&adapter->dev->dev,
- "%s-[%x], driver owner\n",
- (aor==SCB_ABORT) ? "ABORTING":"RESET",
- scb->idx);
-
- mega_free_scb(adapter, scb);
-
- if( aor == SCB_ABORT ) {
- cmd->result = (DID_ABORT << 16);
- }
- else {
- cmd->result = (DID_RESET << 16);
- }
+ /*
+ * Not yet issued! Remove from the pending
+ * list
+ */
+ dev_warn(&adapter->dev->dev,
+ "%s-[%x], driver owner\n",
+ (cmd) ? "ABORTING":"RESET",
+ scb->idx);
+ mega_free_scb(adapter, scb);
+ if (cmd) {
+ cmd->result = (DID_ABORT << 16);
list_add_tail(SCSI_LIST(cmd),
- &adapter->completed_list);
-
- return SUCCESS;
+ &adapter->completed_list);
}
+
+ return SUCCESS;
}
}
@@ -4114,8 +4109,6 @@ static const struct scsi_host_template megaraid_template = {
.sg_tablesize = MAX_SGLIST,
.cmd_per_lun = DEF_CMD_PER_LUN,
.eh_abort_handler = megaraid_abort,
- .eh_device_reset_handler = megaraid_reset,
- .eh_bus_reset_handler = megaraid_reset,
.eh_host_reset_handler = megaraid_reset,
.no_write_same = 1,
.cmd_size = sizeof(struct megaraid_cmd_priv),
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 94abba5758..56624cbf7f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -23,8 +23,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.725.01.00-rc1"
-#define MEGASAS_RELDATE "Mar 2, 2023"
+#define MEGASAS_VERSION "07.727.03.00-rc1"
+#define MEGASAS_RELDATE "Oct 03, 2023"
#define MEGASAS_MSIX_NAME_LEN 32
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 8a83f3fc2b..c60014e07b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -4268,6 +4268,9 @@ megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
}
out:
+ if (!retval && reason == SCSIIO_TIMEOUT_OCR)
+ dev_info(&instance->pdev->dev, "IO is completed, no OCR is required\n");
+
return retval;
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index f039f1d986..0d148c39eb 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -1892,7 +1892,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
reply_qid = qidx + 1;
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if (!mrioc->pdev->revision)
+ if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
+ !mrioc->pdev->revision)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index c7c7525742..872d4b809d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4023,20 +4023,45 @@ static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
* mpi3mr_eh_host_reset - Host reset error handling callback
* @scmd: SCSI command reference
*
- * Issue controller reset if the scmd is for a Physical Device,
- * if the scmd is for RAID volume, then wait for
- * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any
- * pending I/Os prior to issuing reset to the controller.
+ * Issue controller reset
*
* Return: SUCCESS of successful reset else FAILED
*/
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ int retval = FAILED, ret;
+
+ ret = mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EH_HOS, 1);
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Host reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_bus_reset - Bus reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Checks whether pending I/Os are present for the RAID volume;
+ * if not, there's no need to reset the adapter.
+ *
+ * Return: SUCCESS if successful reset else FAILED
+ */
+static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
struct mpi3mr_stgt_priv_data *stgt_priv_data;
struct mpi3mr_sdev_priv_data *sdev_priv_data;
u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
- int retval = FAILED, ret;
+ int retval = FAILED;
sdev_priv_data = scmd->device->hostdata;
if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
@@ -4046,25 +4071,16 @@ static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
mpi3mr_wait_for_host_io(mrioc,
- MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
- if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+ MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
retval = SUCCESS;
- goto out;
- }
}
+ if (retval == FAILED)
+ mpi3mr_print_pending_host_io(mrioc);
- mpi3mr_print_pending_host_io(mrioc);
- ret = mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EH_HOS, 1);
- if (ret)
- goto out;
-
- retval = SUCCESS;
-out:
sdev_printk(KERN_INFO, scmd->device,
- "Host reset is %s for scmd(%p)\n",
- ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
-
+ "Bus reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
}
@@ -4911,6 +4927,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.change_queue_depth = mpi3mr_change_queue_depth,
.eh_device_reset_handler = mpi3mr_eh_dev_reset,
.eh_target_reset_handler = mpi3mr_eh_target_reset,
+ .eh_bus_reset_handler = mpi3mr_eh_bus_reset,
.eh_host_reset_handler = mpi3mr_eh_host_reset,
.bios_param = mpi3mr_bios_param,
.map_queues = mpi3mr_map_queues,
@@ -5095,7 +5112,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
- if (pdev->revision)
+ if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
+ !pdev->revision)
+ mrioc->enable_segqueue = false;
+ else
mrioc->enable_segqueue = true;
init_waitqueue_head(&mrioc->reset_waitq);
@@ -5424,6 +5444,14 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
},
+ {
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
+ },
+ {
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
+ },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 90069c7b16..dec1e2d380 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1180,65 +1180,6 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
}
}
-#ifndef PM8001_USE_MSIX
-/**
- * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
-{
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
-}
-
-/**
- * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
-{
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
-}
-
-#else
-
-/**
- * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- * @int_vec_idx: interrupt number to enable
- */
-static void
-pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
- u32 int_vec_idx)
-{
- u32 msi_index;
- u32 value;
- msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
- msi_index += MSIX_TABLE_BASE;
- pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
- value = (1 << int_vec_idx);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
-
-}
-
-/**
- * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- * @int_vec_idx: interrupt number to disable
- */
-static void
-pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
- u32 int_vec_idx)
-{
- u32 msi_index;
- msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
- msi_index += MSIX_TABLE_BASE;
- pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
-}
-#endif
-
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
@@ -1247,11 +1188,14 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
static void
pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
-#ifdef PM8001_USE_MSIX
- pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
-#else
- pm8001_chip_intx_interrupt_enable(pm8001_ha);
-#endif
+ if (pm8001_ha->use_msix) {
+ pm8001_cw32(pm8001_ha, 0, MSIX_TABLE_BASE,
+ MSIX_INTERRUPT_ENABLE);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, 1);
+ } else {
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+ }
}
/**
@@ -1262,11 +1206,11 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
static void
pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
-#ifdef PM8001_USE_MSIX
- pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
-#else
- pm8001_chip_intx_interrupt_disable(pm8001_ha);
-#endif
+ if (pm8001_ha->use_msix)
+ pm8001_cw32(pm8001_ha, 0, MSIX_TABLE_BASE,
+ MSIX_INTERRUPT_DISABLE);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
}
/**
@@ -4309,16 +4253,15 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
{
-#ifdef PM8001_USE_MSIX
- return 1;
-#else
u32 value;
+ if (pm8001_ha->use_msix)
+ return 1;
+
value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
if (value)
return 1;
return 0;
-#endif
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 443a3176c6..ed6b7d954d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,6 +56,18 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 4: Link rate 6.0G\n"
" 8: Link rate 12.0G\n");
+bool pm8001_use_msix = true;
+module_param_named(use_msix, pm8001_use_msix, bool, 0444);
+MODULE_PARM_DESC(use_msix, "Use MSIX interrupts. Default: true");
+
+static bool pm8001_use_tasklet = true;
+module_param_named(use_tasklet, pm8001_use_tasklet, bool, 0444);
+MODULE_PARM_DESC(use_tasklet, "Use tasklets for interrupt processing. Default: true");
+
+static bool pm8001_read_wwn = true;
+module_param_named(read_wwn, pm8001_read_wwn, bool, 0444);
+MODULE_PARM_DESC(read_wwn, "Get WWN from the controller. Default: true");
+
static struct scsi_transport_template *pm8001_stt;
static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
@@ -200,8 +212,6 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
kfree(pm8001_ha);
}
-#ifdef PM8001_USE_TASKLET
-
/**
* pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
* @opaque: the passed general host adapter struct
@@ -209,16 +219,67 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
*/
static void pm8001_tasklet(unsigned long opaque)
{
- struct pm8001_hba_info *pm8001_ha;
- struct isr_param *irq_vector;
+ struct isr_param *irq_vector = (struct isr_param *)opaque;
+ struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
+
+ if (WARN_ON_ONCE(!pm8001_ha))
+ return;
- irq_vector = (struct isr_param *)opaque;
- pm8001_ha = irq_vector->drv_inst;
- if (unlikely(!pm8001_ha))
- BUG_ON(1);
PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
}
-#endif
+
+static void pm8001_init_tasklet(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+
+ if (!pm8001_use_tasklet)
+ return;
+
+ /* Tasklet for non msi-x interrupt handler */
+ if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
+ (pm8001_ha->chip_id == chip_8001)) {
+ tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[0]));
+ return;
+ }
+ for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
+ tasklet_init(&pm8001_ha->tasklet[i], pm8001_tasklet,
+ (unsigned long)&(pm8001_ha->irq_vector[i]));
+}
+
+static void pm8001_kill_tasklet(struct pm8001_hba_info *pm8001_ha)
+{
+ int i;
+
+ if (!pm8001_use_tasklet)
+ return;
+
+ /* For non-msix and msix interrupts */
+ if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
+ (pm8001_ha->chip_id == chip_8001)) {
+ tasklet_kill(&pm8001_ha->tasklet[0]);
+ return;
+ }
+
+ for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
+ tasklet_kill(&pm8001_ha->tasklet[i]);
+}
+
+static irqreturn_t pm8001_handle_irq(struct pm8001_hba_info *pm8001_ha,
+ int irq)
+{
+ if (unlikely(!pm8001_ha))
+ return IRQ_NONE;
+
+ if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
+ return IRQ_NONE;
+
+ if (!pm8001_use_tasklet)
+ return PM8001_CHIP_DISP->isr(pm8001_ha, irq);
+
+ tasklet_schedule(&pm8001_ha->tasklet[irq]);
+ return IRQ_HANDLED;
+}
/**
* pm8001_interrupt_handler_msix - main MSIX interrupt handler.
@@ -230,22 +291,10 @@ static void pm8001_tasklet(unsigned long opaque)
*/
static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
{
- struct isr_param *irq_vector;
- struct pm8001_hba_info *pm8001_ha;
- irqreturn_t ret = IRQ_HANDLED;
- irq_vector = (struct isr_param *)opaque;
- pm8001_ha = irq_vector->drv_inst;
+ struct isr_param *irq_vector = (struct isr_param *)opaque;
+ struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
- if (unlikely(!pm8001_ha))
- return IRQ_NONE;
- if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
- return IRQ_NONE;
-#ifdef PM8001_USE_TASKLET
- tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
-#else
- ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
-#endif
- return ret;
+ return pm8001_handle_irq(pm8001_ha, irq_vector->irq_id);
}
/**
@@ -256,24 +305,14 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
{
- struct pm8001_hba_info *pm8001_ha;
- irqreturn_t ret = IRQ_HANDLED;
struct sas_ha_struct *sha = dev_id;
- pm8001_ha = sha->lldd_ha;
- if (unlikely(!pm8001_ha))
- return IRQ_NONE;
- if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
- return IRQ_NONE;
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-#ifdef PM8001_USE_TASKLET
- tasklet_schedule(&pm8001_ha->tasklet[0]);
-#else
- ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
-#endif
- return ret;
+ return pm8001_handle_irq(pm8001_ha, 0);
}
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha);
/**
* pm8001_alloc - initiate our hba structure and 6 DMAs area.
@@ -511,7 +550,6 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
- int j;
pm8001_ha = sha->lldd_ha;
if (!pm8001_ha)
@@ -542,17 +580,8 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
else
pm8001_ha->iomb_size = IOMB_SIZE_SPC;
-#ifdef PM8001_USE_TASKLET
- /* Tasklet for non msi-x interrupt handler */
- if ((!pdev->msix_cap || !pci_msi_enabled())
- || (pm8001_ha->chip_id == chip_8001))
- tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
- (unsigned long)&(pm8001_ha->irq_vector[0]));
- else
- for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
- tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
- (unsigned long)&(pm8001_ha->irq_vector[j]));
-#endif
+ pm8001_init_tasklet(pm8001_ha);
+
if (pm8001_ioremap(pm8001_ha))
goto failed_pci_alloc;
if (!pm8001_alloc(pm8001_ha, ent))
@@ -658,19 +687,30 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
*/
static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
- u8 i, j;
- u8 sas_add[8];
-#ifdef PM8001_READ_VPD
- /* For new SPC controllers WWN is stored in flash vpd
- * For SPC/SPCve controllers WWN is stored in EEPROM
- * For Older SPC WWN is stored in NVMD
- */
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
+ unsigned long time_remaining;
+ u8 sas_add[8];
u16 deviceid;
int rc;
- unsigned long time_remaining;
+ u8 i, j;
+
+ if (!pm8001_read_wwn) {
+ __be64 dev_sas_addr = cpu_to_be64(0x50010c600047f9d0ULL);
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+ memcpy(&pm8001_ha->phy[i].dev_sas_addr, &dev_sas_addr,
+ SAS_ADDR_SIZE);
+ memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
+ SAS_ADDR_SIZE);
+ return 0;
+ }
+
+ /*
+ * For new SPC controllers WWN is stored in flash vpd. For SPC/SPCve
+ * controllers WWN is stored in EEPROM. And for Older SPC WWN is stored
+ * in NVMD.
+ */
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
return -EIO;
@@ -744,16 +784,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->phy[i].dev_sas_addr);
}
kfree(payload.func_specific);
-#else
- for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
- pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
- pm8001_ha->phy[i].dev_sas_addr =
- cpu_to_be64((u64)
- (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
- }
- memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
- SAS_ADDR_SIZE);
-#endif
+
return 0;
}
@@ -763,13 +794,13 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
*/
static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
{
-
-#ifdef PM8001_READ_VPD
- /*OPTION ROM FLASH read for the SPC cards */
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
int rc;
+ if (!pm8001_read_wwn)
+ return 0;
+
pm8001_ha->nvmd_completion = &completion;
/* SAS ADDRESS read from flash / EEPROM */
payload.minor_function = 6;
@@ -788,7 +819,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
wait_for_completion(&completion);
pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
kfree(payload.func_specific);
-#endif
+
return 0;
}
@@ -939,7 +970,6 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
}
}
-#ifdef PM8001_USE_MSIX
/**
* pm8001_setup_msix - enable MSI-X interrupt
* @pm8001_ha: our ha struct.
@@ -1021,7 +1051,6 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
return rc;
}
-#endif
/**
* pm8001_request_irq - register interrupt
@@ -1030,10 +1059,9 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
struct pci_dev *pdev = pm8001_ha->pdev;
-#ifdef PM8001_USE_MSIX
int rc;
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ if (pm8001_use_msix && pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
rc = pm8001_setup_msix(pm8001_ha);
if (rc) {
pm8001_dbg(pm8001_ha, FAIL,
@@ -1041,14 +1069,22 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
return rc;
}
- if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_request_msix(pm8001_ha);
+ if (!pdev->msix_cap || !pci_msi_enabled())
+ goto use_intx;
+
+ rc = pm8001_request_msix(pm8001_ha);
+ if (rc)
+ return rc;
+
+ pm8001_ha->use_msix = true;
+
+ return 0;
}
+use_intx:
+ /* Initialize the INT-X interrupt */
pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-#endif
-
- /* initialize the INT-X interrupt */
+ pm8001_ha->use_msix = false;
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
@@ -1057,6 +1093,26 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
SHOST_TO_SAS_HA(pm8001_ha->shost));
}
+static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pci_dev *pdev = pm8001_ha->pdev;
+ int i;
+
+ if (pm8001_ha->use_msix) {
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ synchronize_irq(pci_irq_vector(pdev, i));
+
+ for (i = 0; i < pm8001_ha->number_of_intr; i++)
+ free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+
+ pci_free_irq_vectors(pdev);
+ return;
+ }
+
+ /* INT-X */
+ free_irq(pm8001_ha->irq, pm8001_ha->sas);
+}
+
/**
* pm8001_pci_probe - probe supported device
* @pdev: pci device which kernel has been prepared for.
@@ -1252,33 +1308,17 @@ err_out:
static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
- struct pm8001_hba_info *pm8001_ha;
- int i, j;
- pm8001_ha = sha->lldd_ha;
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int i;
+
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
-#ifdef PM8001_USE_MSIX
- for (i = 0; i < pm8001_ha->number_of_intr; i++)
- synchronize_irq(pci_irq_vector(pdev, i));
- for (i = 0; i < pm8001_ha->number_of_intr; i++)
- free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
- pci_free_irq_vectors(pdev);
-#else
- free_irq(pm8001_ha->irq, sha);
-#endif
-#ifdef PM8001_USE_TASKLET
- /* For non-msix and msix interrupts */
- if ((!pdev->msix_cap || !pci_msi_enabled()) ||
- (pm8001_ha->chip_id == chip_8001))
- tasklet_kill(&pm8001_ha->tasklet[0]);
- else
- for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
- tasklet_kill(&pm8001_ha->tasklet[j]);
-#endif
+ pm8001_free_irq(pm8001_ha);
+ pm8001_kill_tasklet(pm8001_ha);
scsi_host_put(pm8001_ha->shost);
for (i = 0; i < pm8001_ha->ccb_count; i++) {
@@ -1309,7 +1349,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- int i, j;
+
sas_suspend_ha(sha);
flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
@@ -1319,24 +1359,10 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
}
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
-#ifdef PM8001_USE_MSIX
- for (i = 0; i < pm8001_ha->number_of_intr; i++)
- synchronize_irq(pci_irq_vector(pdev, i));
- for (i = 0; i < pm8001_ha->number_of_intr; i++)
- free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
- pci_free_irq_vectors(pdev);
-#else
- free_irq(pm8001_ha->irq, sha);
-#endif
-#ifdef PM8001_USE_TASKLET
- /* For non-msix and msix interrupts */
- if ((!pdev->msix_cap || !pci_msi_enabled()) ||
- (pm8001_ha->chip_id == chip_8001))
- tasklet_kill(&pm8001_ha->tasklet[0]);
- else
- for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
- tasklet_kill(&pm8001_ha->tasklet[j]);
-#endif
+
+ pm8001_free_irq(pm8001_ha);
+ pm8001_kill_tasklet(pm8001_ha);
+
pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering "
"suspended state\n", pdev,
pm8001_ha->name);
@@ -1355,7 +1381,7 @@ static int __maybe_unused pm8001_pci_resume(struct device *dev)
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
- u8 i = 0, j;
+ u8 i = 0;
DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
@@ -1383,17 +1409,9 @@ static int __maybe_unused pm8001_pci_resume(struct device *dev)
rc = pm8001_request_irq(pm8001_ha);
if (rc)
goto err_out_disable;
-#ifdef PM8001_USE_TASKLET
- /* Tasklet for non msi-x interrupt handler */
- if ((!pdev->msix_cap || !pci_msi_enabled()) ||
- (pm8001_ha->chip_id == chip_8001))
- tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
- (unsigned long)&(pm8001_ha->irq_vector[0]));
- else
- for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
- tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
- (unsigned long)&(pm8001_ha->irq_vector[j]));
-#endif
+
+ pm8001_init_tasklet(pm8001_ha);
+
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
for (i = 1; i < pm8001_ha->number_of_intr; i++)
@@ -1525,6 +1543,9 @@ static int __init pm8001_init(void)
{
int rc = -ENOMEM;
+ if (pm8001_use_tasklet && !pm8001_use_msix)
+ pm8001_use_tasklet = false;
+
pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
if (!pm8001_wq)
goto err;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 2fadd353f1..3ccb737190 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -83,10 +83,7 @@ do { \
pm8001_info(HBA, fmt, ##__VA_ARGS__); \
} while (0)
-#define PM8001_USE_TASKLET
-#define PM8001_USE_MSIX
-#define PM8001_READ_VPD
-
+extern bool pm8001_use_msix;
#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
|| (dev->device == 0X8076) \
@@ -520,14 +517,12 @@ struct pm8001_hba_info {
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
u32 ccb_count;
-#ifdef PM8001_USE_MSIX
+
+ bool use_msix;
int number_of_intr;/*will be used in remove()*/
char intr_drvname[PM8001_MAX_MSIX_VEC]
[PM8001_NAME_LENGTH+1+3+1];
-#endif
-#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
-#endif
u32 logging_level;
u32 link_rate;
u32 fw_status;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 3afd9443c4..a52ae68419 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1715,27 +1715,6 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
}
/**
- * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
-{
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
-}
-
-/**
- * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
-{
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
-}
-
-/**
* pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
* @vec: interrupt number to enable
@@ -1743,16 +1722,16 @@ pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
-#ifdef PM8001_USE_MSIX
+ if (!pm8001_ha->use_msix) {
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+ return;
+ }
+
if (vec < 32)
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
else
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
- 1U << (vec - 32));
- return;
-#endif
- pm80xx_chip_intx_interrupt_enable(pm8001_ha);
-
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, 1U << (vec - 32));
}
/**
@@ -1763,19 +1742,20 @@ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
-#ifdef PM8001_USE_MSIX
+ if (!pm8001_ha->use_msix) {
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+ return;
+ }
+
if (vec == 0xFF) {
/* disable all vectors 0-31, 32-63 */
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
- } else if (vec < 32)
+ } else if (vec < 32) {
pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
- else
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
- 1U << (vec - 32));
- return;
-#endif
- pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+ } else {
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 1U << (vec - 32));
+ }
}
/**
@@ -4802,16 +4782,15 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
{
-#ifdef PM8001_USE_MSIX
- return 1;
-#else
u32 value;
+ if (pm8001_ha->use_msix)
+ return 1;
+
value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
if (value)
return 1;
return 0;
-#endif
}
/**
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 50dc30051f..e8bcc3a887 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2679,7 +2679,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
/**
* pmcraid_reset_device - device reset handler functions
*
- * @scsi_cmd: scsi command struct
+ * @scsi_dev: scsi device struct
* @timeout: command timeout
* @modifier: reset modifier indicating the reset sequence to be performed
*
@@ -2691,7 +2691,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
* SUCCESS / FAILED
*/
static int pmcraid_reset_device(
- struct scsi_cmnd *scsi_cmd,
+ struct scsi_device *scsi_dev,
unsigned long timeout,
u8 modifier)
{
@@ -2703,11 +2703,11 @@ static int pmcraid_reset_device(
u32 ioasc;
pinstance =
- (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
- res = scsi_cmd->device->hostdata;
+ (struct pmcraid_instance *)scsi_dev->host->hostdata;
+ res = scsi_dev->hostdata;
if (!res) {
- sdev_printk(KERN_ERR, scsi_cmd->device,
+ sdev_printk(KERN_ERR, scsi_dev,
"reset_device: NULL resource pointer\n");
return FAILED;
}
@@ -3018,27 +3018,72 @@ static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"resetting device due to an I/O command timeout.\n");
- return pmcraid_reset_device(scmd,
+ return pmcraid_reset_device(scmd->device,
PMCRAID_INTERNAL_TIMEOUT,
RESET_DEVICE_LUN);
}
static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
{
- scmd_printk(KERN_INFO, scmd,
+ struct Scsi_Host *host = scmd->device->host;
+ struct pmcraid_instance *pinstance =
+ (struct pmcraid_instance *)host->hostdata;
+ struct pmcraid_resource_entry *res = NULL;
+ struct pmcraid_resource_entry *temp;
+ struct scsi_device *sdev = NULL;
+ unsigned long lock_flags;
+
+ /*
+ * The reset device code insists on us passing down
+ * a device, so grab the first device on the bus.
+ */
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+ list_for_each_entry(temp, &pinstance->used_res_q, queue) {
+ if (scmd->device->channel == PMCRAID_VSET_BUS_ID &&
+ RES_IS_VSET(temp->cfg_entry)) {
+ res = temp;
+ break;
+ } else if (scmd->device->channel == PMCRAID_PHYS_BUS_ID &&
+ RES_IS_GSCSI(temp->cfg_entry)) {
+ res = temp;
+ break;
+ }
+ }
+ if (res)
+ sdev = res->scsi_dev;
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+ if (!sdev)
+ return FAILED;
+
+ sdev_printk(KERN_INFO, sdev,
"Doing bus reset due to an I/O command timeout.\n");
- return pmcraid_reset_device(scmd,
+ return pmcraid_reset_device(sdev,
PMCRAID_RESET_BUS_TIMEOUT,
RESET_DEVICE_BUS);
}
static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
{
- scmd_printk(KERN_INFO, scmd,
+ struct Scsi_Host *shost = scmd->device->host;
+ struct scsi_device *scsi_dev = NULL, *tmp;
+ int ret;
+
+ shost_for_each_device(tmp, shost) {
+ if ((tmp->channel == scmd->device->channel) &&
+ (tmp->id == scmd->device->id)) {
+ scsi_dev = tmp;
+ break;
+ }
+ }
+ if (!scsi_dev)
+ return FAILED;
+ sdev_printk(KERN_INFO, scsi_dev,
"Doing target reset due to an I/O command timeout.\n");
- return pmcraid_reset_device(scmd,
- PMCRAID_INTERNAL_TIMEOUT,
- RESET_DEVICE_TARGET);
+ ret = pmcraid_reset_device(scsi_dev,
+ PMCRAID_INTERNAL_TIMEOUT,
+ RESET_DEVICE_TARGET);
+ scsi_device_put(scsi_dev);
+ return ret;
}
/**
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 1619cc3303..5058e01b65 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -112,6 +112,7 @@ struct qedf_ioreq {
#define QEDF_CMD_ERR_SCSI_DONE 0x5
u8 io_req_flags;
uint8_t tm_flags;
+ u64 tm_lun;
struct qedf_rport *fcport;
#define QEDF_CMD_ST_INACTIVE 0
#define QEDFC_CMD_ST_IO_ACTIVE 1
@@ -497,7 +498,7 @@ extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
-extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
+extern void qedf_flush_active_ios(struct qedf_rport *fcport, u64 lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
@@ -522,7 +523,7 @@ extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
-extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
+extern int qedf_initiate_tmf(struct fc_rport *rport, u64 lun, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 10fe338385..bf921caaf6 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -546,7 +546,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
}
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
- struct fcp_cmnd *fcp_cmnd)
+ struct fcp_cmnd *fcp_cmnd)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
@@ -554,8 +554,12 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
memset(fcp_cmnd, 0, FCP_CMND_LEN);
/* 8 bytes: SCSI LUN info */
- int_to_scsilun(sc_cmd->device->lun,
- (struct scsi_lun *)&fcp_cmnd->fc_lun);
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD)
+ int_to_scsilun(io_req->tm_lun,
+ (struct scsi_lun *)&fcp_cmnd->fc_lun);
+ else
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *)&fcp_cmnd->fc_lun);
/* 4 bytes: flag info */
fcp_cmnd->fc_pri_ta = 0;
@@ -1095,7 +1099,7 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
}
/* The sense buffer can be NULL for TMF commands */
- if (sc_cmd->sense_buffer) {
+ if (sc_cmd && sc_cmd->sense_buffer) {
memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, sense_data,
@@ -1580,7 +1584,7 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
/* A value of -1 for lun is a wild card that means flush all
* active SCSI I/Os for the target.
*/
-void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
+void qedf_flush_active_ios(struct qedf_rport *fcport, u64 lun)
{
struct qedf_ioreq *io_req;
struct qedf_ctx *qedf;
@@ -1768,10 +1772,6 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
kref_put(&io_req->refcount, qedf_release_cmd);
continue;
}
- if (lun > -1) {
- if (io_req->lun != lun)
- continue;
- }
/*
* Use kref_get_unless_zero in the unlikely case the command
@@ -2287,7 +2287,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
complete(&io_req->cleanup_done);
}
-static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
@@ -2297,17 +2297,10 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
int rc = 0;
uint16_t xid;
int tmo = 0;
- int lun = 0;
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
- if (!sc_cmd) {
- QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
- return FAILED;
- }
-
- lun = (int)sc_cmd->device->lun;
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
rc = FAILED;
@@ -2327,7 +2320,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
qedf->target_resets++;
/* Initialize rest of io_req fields */
- io_req->sc_cmd = sc_cmd;
+ io_req->sc_cmd = NULL;
io_req->fcport = fcport;
io_req->cmd_type = QEDF_TASK_MGMT_CMD;
@@ -2341,6 +2334,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Default is to return a SCSI command when an error occurs */
io_req->return_scsi_cmd_on_abts = false;
+ io_req->tm_lun = tm_lun;
/* Obtain exchange id */
xid = io_req->xid;
@@ -2395,7 +2389,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
if (tm_flags == FCP_TMF_LUN_RESET)
- qedf_flush_active_ios(fcport, lun);
+ qedf_flush_active_ios(fcport, tm_lun);
else
qedf_flush_active_ios(fcport, -1);
@@ -2410,23 +2404,18 @@ no_flush:
return rc;
}
-int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+int qedf_initiate_tmf(struct fc_rport *rport, u64 lun, u8 tm_flags)
{
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
- struct qedf_ctx *qedf;
- struct fc_lport *lport = shost_priv(sc_cmd->device->host);
+ struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_lport *lport = rp->local_port;
int rc = SUCCESS;
- int rval;
- struct qedf_ioreq *io_req = NULL;
- int ref_cnt = 0;
struct fc_rport_priv *rdata = fcport->rdata;
QEDF_ERR(NULL,
- "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
- tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
- rport->scsi_target_id, (int)sc_cmd->device->lun);
+ "tm_flags 0x%x target_id = 0x%x lun=%llu\n",
+ tm_flags, rport->scsi_target_id, lun);
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
QEDF_ERR(NULL, "stale rport\n");
@@ -2437,33 +2426,10 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
(tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
"LUN RESET");
- if (qedf_priv(sc_cmd)->io_req) {
- io_req = qedf_priv(sc_cmd)->io_req;
- ref_cnt = kref_read(&io_req->refcount);
- QEDF_ERR(NULL,
- "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
- io_req, io_req->xid, ref_cnt);
- }
-
- rval = fc_remote_port_chkready(rport);
- if (rval) {
- QEDF_ERR(NULL, "device_reset rport not ready\n");
- rc = FAILED;
- goto tmf_err;
- }
-
- rc = fc_block_scsi_eh(sc_cmd);
+ rc = fc_block_rport(rport);
if (rc)
goto tmf_err;
- if (!fcport) {
- QEDF_ERR(NULL, "device_reset: rport is NULL\n");
- rc = FAILED;
- goto tmf_err;
- }
-
- qedf = fcport->qedf;
-
if (!qedf) {
QEDF_ERR(NULL, "qedf is NULL.\n");
rc = FAILED;
@@ -2500,7 +2466,7 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
goto tmf_err;
}
- rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
+ rc = qedf_execute_tmf(fcport, lun, tm_flags);
tmf_err:
kref_put(&rdata->kref, fc_rport_destroy);
@@ -2517,7 +2483,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);
- io_req->sc_cmd = NULL;
complete(&io_req->tm_done);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 91f3f1d709..a58353b7b4 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -774,7 +774,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
goto drop_rdata_kref;
}
- rc = fc_block_scsi_eh(sc_cmd);
+ rc = fc_block_rport(rport);
if (rc)
goto drop_rdata_kref;
@@ -858,18 +858,19 @@ out:
static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
- QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
- sc_cmd->device->host->host_no, sc_cmd->device->id,
- sc_cmd->device->lun);
- return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+ struct scsi_target *starget = scsi_target(sc_cmd->device);
+ struct fc_rport *rport = starget_to_rport(starget);
+
+ QEDF_ERR(NULL, "TARGET RESET Issued...\n");
+ return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
}
static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
- QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
- sc_cmd->device->host->host_no, sc_cmd->device->id,
- sc_cmd->device->lun);
- return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+
+ QEDF_ERR(NULL, "LUN RESET Issued...\n");
+ return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
}
bool qedf_wait_for_upload(struct qedf_ctx *qedf)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6e5e89aaa2..27bce80262 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -716,7 +716,6 @@ enum action {
ABORT_COMMAND,
DEVICE_RESET,
BUS_RESET,
- ADAPTER_RESET,
};
@@ -898,22 +897,9 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
}
break;
- case ADAPTER_RESET:
default:
- if (qla1280_verbose) {
- printk(KERN_INFO
- "scsi(%ld): Issued ADAPTER RESET\n",
- ha->host_no);
- printk(KERN_INFO "scsi(%ld): I/O processing will "
- "continue automatically\n", ha->host_no);
- }
- ha->flags.reset_active = 1;
-
- if (qla1280_abort_isp(ha) != 0) { /* it's dead */
- result = FAILED;
- }
-
- ha->flags.reset_active = 0;
+ dprintk(1, "RESET invalid action %d\n", action);
+ return FAILED;
}
/*
@@ -1011,11 +997,27 @@ qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
- int rc;
+ int rc = SUCCESS;
+ struct Scsi_Host *shost = cmd->device->host;
+ struct scsi_qla_host *ha = (struct scsi_qla_host *)shost->hostdata;
- spin_lock_irq(cmd->device->host->host_lock);
- rc = qla1280_error_action(cmd, ADAPTER_RESET);
- spin_unlock_irq(cmd->device->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ if (qla1280_verbose) {
+ printk(KERN_INFO
+ "scsi(%ld): Issued ADAPTER RESET\n",
+ ha->host_no);
+ printk(KERN_INFO "scsi(%ld): I/O processing will "
+ "continue automatically\n", ha->host_no);
+ }
+ ha->flags.reset_active = 1;
+
+ if (qla1280_abort_isp(ha) != 0) { /* it's dead */
+ rc = FAILED;
+ }
+
+ ha->flags.reset_active = 0;
+
+ spin_unlock_irq(shost->host_lock);
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a7a364760b..55ff3d7482 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -528,51 +528,22 @@ qla_dfs_naqp_show(struct seq_file *s, void *unused)
*
* Example for creating "TEST" sysfs file:
* 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
- * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
+ * 2. QLA_DFS_SETUP_RD(TEST);
* 3. In qla2x00_dfs_setup():
* QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
* 4. In qla2x00_dfs_remove():
* QLA_DFS_REMOVE_FILE(ha, TEST);
*/
-#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
-static int \
-qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
-{ \
- _ctx_struct *__ctx = inode->i_private; \
- \
- return single_open(file, qla_dfs_##_name##_show, __ctx); \
-} \
- \
-static const struct file_operations qla_dfs_##_name##_ops = { \
- .open = qla_dfs_##_name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
+#define QLA_DFS_SETUP_RD(_name) DEFINE_SHOW_ATTRIBUTE(qla_dfs_##_name)
-#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
-static int \
-qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
-{ \
- _ctx_struct *__ctx = inode->i_private; \
- \
- return single_open(file, qla_dfs_##_name##_show, __ctx); \
-} \
- \
-static const struct file_operations qla_dfs_##_name##_ops = { \
- .open = qla_dfs_##_name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- .write = qla_dfs_##_name##_write, \
-};
+#define QLA_DFS_SETUP_RW(_name) DEFINE_SHOW_STORE_ATTRIBUTE(qla_dfs_##_name)
#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
do { \
if (!qla_dfs_##_name) \
qla_dfs_##_name = debugfs_create_file(#_name, \
_perm, qla2x00_dfs_root, _ctx, \
- &qla_dfs_##_name##_ops); \
+ &qla_dfs_##_name##_fops); \
} while (0)
#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
@@ -587,7 +558,7 @@ static const struct file_operations qla_dfs_##_name##_ops = { \
do { \
(_struct)->dfs_##_name = debugfs_create_file(#_name, \
_perm, _parent, _ctx, \
- &qla_dfs_##_name##_ops) \
+ &qla_dfs_##_name##_fops) \
} while (0)
#define QLA_DFS_REMOVE_FILE(_struct, _name) \
@@ -598,14 +569,6 @@ static const struct file_operations qla_dfs_##_name##_ops = { \
} \
} while (0)
-static int
-qla_dfs_naqp_open(struct inode *inode, struct file *file)
-{
- struct scsi_qla_host *vha = inode->i_private;
-
- return single_open(file, qla_dfs_naqp_show, vha);
-}
-
static ssize_t
qla_dfs_naqp_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
@@ -653,15 +616,7 @@ out_free:
kfree(buf);
return rc;
}
-
-static const struct file_operations dfs_naqp_ops = {
- .open = qla_dfs_naqp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = qla_dfs_naqp_write,
-};
-
+QLA_DFS_SETUP_RW(naqp);
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
@@ -707,7 +662,7 @@ create_nodes:
if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
- 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ 0400, ha->dfs_dir, vha, &qla_dfs_naqp_fops);
if (IS_ERR(ha->tgt.dfs_naqp)) {
ql_log(ql_log_warn, vha, 0xd011,
"Unable to create debugFS naqp node.\n");
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c45eef743c..03348f605c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5,6 +5,7 @@
*/
#include "qla_def.h"
+#include <linux/bitfield.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
@@ -633,8 +634,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
const char *speed_str;
pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
- lspeed = lstat & PCI_EXP_LNKCAP_SLS;
- lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
+ lspeed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat);
+ lwidth = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat);
switch (lspeed) {
case 1:
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 68a0e6a2fb..7e7460a747 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1822,6 +1822,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
.tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
.tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
@@ -1859,6 +1862,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl,
.tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static int tcm_qla2xxx_register_configfs(void)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 89367c4bf0..8cad9792a5 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return result + 4;
}
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+ SCSI_VPD_LIST_SIZE = 36,
+};
+
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
int result;
if (sdev->no_vpd_size)
return SCSI_DEFAULT_VPD_LEN;
/*
+ * Fetch the supported pages VPD and validate that the requested page
+ * number is present.
+ */
+ if (page != 0) {
+ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+ if (result < SCSI_VPD_HEADER_SIZE)
+ return 0;
+
+ result -= SCSI_VPD_HEADER_SIZE;
+ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ return 0;
+ }
+ /*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
if (result < 0)
return 0;
@@ -703,7 +721,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
&data, &sshdr);
if (ret) {
- if (scsi_sense_valid(&sshdr))
+ if (ret > 0 && scsi_sense_valid(&sshdr))
scsi_print_sense_hdr(sdev,
dev_name(&sdev->sdev_gendev), &sshdr);
return ret;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9c0af50501..6d8218a441 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -41,6 +41,8 @@
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/async.h>
#include <net/checksum.h>
@@ -285,6 +287,46 @@ struct sdeb_zone_state { /* ZBC: per zone state */
sector_t z_wp;
};
+enum sdebug_err_type {
+ ERR_TMOUT_CMD = 0, /* make specific scsi command timeout */
+ ERR_FAIL_QUEUE_CMD = 1, /* make queuecommand for a specific */
+ /* scsi command return failure */
+ ERR_FAIL_CMD = 2, /* make queuecommand for a specific */
+ /* scsi command succeed, but set */
+ /* error status in the scsi_cmnd */
+ ERR_ABORT_CMD_FAILED = 3, /* make scsi_debug_abort() return */
+ /* FAILED */
+ ERR_LUN_RESET_FAILED = 4, /* make scsi_debug_device_reset() */
+ /* return FAILED */
+};
+
+struct sdebug_err_inject {
+ int type;
+ struct list_head list;
+ int cnt;
+ unsigned char cmd;
+ struct rcu_head rcu;
+
+ union {
+ /*
+ * For ERR_FAIL_QUEUE_CMD
+ */
+ int queuecmd_ret;
+
+ /*
+ * For ERR_FAIL_CMD
+ */
+ struct {
+ unsigned char host_byte;
+ unsigned char driver_byte;
+ unsigned char status_byte;
+ unsigned char sense_key;
+ unsigned char asc;
+ unsigned char asq;
+ };
+ };
+};
+
struct sdebug_dev_info {
struct list_head dev_list;
unsigned int channel;
@@ -310,6 +352,15 @@ struct sdebug_dev_info {
unsigned int max_open;
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+
+ struct dentry *debugfs_entry;
+ struct spinlock list_lock;
+ struct list_head inject_err_list;
+};
+
+struct sdebug_target_info {
+ bool reset_fail;
+ struct dentry *debugfs_entry;
};
struct sdebug_host_info {
@@ -792,6 +843,7 @@ static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
+static bool sdebug_allow_restart;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
@@ -862,6 +914,258 @@ static const int device_qfull_result =
static const int condition_met_result = SAM_STAT_CONDITION_MET;
+static struct dentry *sdebug_debugfs_root;
+
+static void sdebug_err_free(struct rcu_head *head)
+{
+ struct sdebug_err_inject *inject =
+ container_of(head, typeof(*inject), rcu);
+
+ kfree(inject);
+}
+
+static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
+{
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
+ struct sdebug_err_inject *err;
+
+ spin_lock(&devip->list_lock);
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == new->type && err->cmd == new->cmd) {
+ list_del_rcu(&err->list);
+ call_rcu(&err->rcu, sdebug_err_free);
+ }
+ }
+
+ list_add_tail_rcu(&new->list, &devip->inject_err_list);
+ spin_unlock(&devip->list_lock);
+}
+
+static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
+{
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
+ struct sdebug_err_inject *err;
+ int type;
+ unsigned char cmd;
+
+ if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ spin_lock(&devip->list_lock);
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == type && err->cmd == cmd) {
+ list_del_rcu(&err->list);
+ call_rcu(&err->rcu, sdebug_err_free);
+ spin_unlock(&devip->list_lock);
+ kfree(buf);
+ return count;
+ }
+ }
+ spin_unlock(&devip->list_lock);
+
+ kfree(buf);
+ return -EINVAL;
+}
+
+static int sdebug_error_show(struct seq_file *m, void *p)
+{
+ struct scsi_device *sdev = (struct scsi_device *)m->private;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
+ struct sdebug_err_inject *err;
+
+ seq_puts(m, "Type\tCount\tCommand\n");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ switch (err->type) {
+ case ERR_TMOUT_CMD:
+ case ERR_ABORT_CMD_FAILED:
+ case ERR_LUN_RESET_FAILED:
+ seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
+ err->cmd);
+ break;
+
+ case ERR_FAIL_QUEUE_CMD:
+ seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
+ err->cnt, err->cmd, err->queuecmd_ret);
+ break;
+
+ case ERR_FAIL_CMD:
+ seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ err->type, err->cnt, err->cmd,
+ err->host_byte, err->driver_byte,
+ err->status_byte, err->sense_key,
+ err->asc, err->asq);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int sdebug_error_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sdebug_error_show, inode->i_private);
+}
+
+static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ unsigned int inject_type;
+ struct sdebug_err_inject *inject;
+ struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
+
+ buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, ubuf, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ if (buf[0] == '-')
+ return sdebug_err_remove(sdev, buf, count);
+
+ if (sscanf(buf, "%d", &inject_type) != 1) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
+ if (!inject) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ switch (inject_type) {
+ case ERR_TMOUT_CMD:
+ case ERR_ABORT_CMD_FAILED:
+ case ERR_LUN_RESET_FAILED:
+ if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
+ &inject->cmd) != 3)
+ goto out_error;
+ break;
+
+ case ERR_FAIL_QUEUE_CMD:
+ if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
+ &inject->cmd, &inject->queuecmd_ret) != 4)
+ goto out_error;
+ break;
+
+ case ERR_FAIL_CMD:
+ if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
+ &inject->type, &inject->cnt, &inject->cmd,
+ &inject->host_byte, &inject->driver_byte,
+ &inject->status_byte, &inject->sense_key,
+ &inject->asc, &inject->asq) != 9)
+ goto out_error;
+ break;
+
+ default:
+ goto out_error;
+ break;
+ }
+
+ kfree(buf);
+ sdebug_err_add(sdev, inject);
+
+ return count;
+
+out_error:
+ kfree(buf);
+ kfree(inject);
+ return -EINVAL;
+}
+
+static const struct file_operations sdebug_error_fops = {
+ .open = sdebug_error_open,
+ .read = seq_read,
+ .write = sdebug_error_write,
+ .release = single_release,
+};
+
+static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
+{
+ struct scsi_target *starget = (struct scsi_target *)m->private;
+ struct sdebug_target_info *targetip =
+ (struct sdebug_target_info *)starget->hostdata;
+
+ if (targetip)
+ seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
+
+ return 0;
+}
+
+static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
+}
+
+static ssize_t sdebug_target_reset_fail_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int ret;
+ struct scsi_target *starget =
+ (struct scsi_target *)file->f_inode->i_private;
+ struct sdebug_target_info *targetip =
+ (struct sdebug_target_info *)starget->hostdata;
+
+ if (targetip) {
+ ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
+ return ret < 0 ? ret : count;
+ }
+ return -ENODEV;
+}
+
+static const struct file_operations sdebug_target_reset_fail_fops = {
+ .open = sdebug_target_reset_fail_open,
+ .read = seq_read,
+ .write = sdebug_target_reset_fail_write,
+ .release = single_release,
+};
+
+static int sdebug_target_alloc(struct scsi_target *starget)
+{
+ struct sdebug_target_info *targetip;
+
+ targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
+ if (!targetip)
+ return -ENOMEM;
+
+ targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
+ sdebug_debugfs_root);
+
+ debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
+ &sdebug_target_reset_fail_fops);
+
+ starget->hostdata = targetip;
+
+ return 0;
+}
+
+static void sdebug_target_cleanup_async(void *data, async_cookie_t cookie)
+{
+ struct sdebug_target_info *targetip = data;
+
+ debugfs_remove(targetip->debugfs_entry);
+ kfree(targetip);
+}
+
+static void sdebug_target_destroy(struct scsi_target *starget)
+{
+ struct sdebug_target_info *targetip;
+
+ targetip = (struct sdebug_target_info *)starget->hostdata;
+ if (targetip) {
+ starget->hostdata = NULL;
+ async_schedule(sdebug_target_cleanup_async, targetip);
+ }
+}
/* Only do the extra work involved in logical block provisioning if one or
* more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
@@ -5096,6 +5400,8 @@ static struct sdebug_dev_info *sdebug_device_create(
}
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
+ spin_lock_init(&devip->list_lock);
+ INIT_LIST_HEAD(&devip->inject_err_list);
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
@@ -5141,6 +5447,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
if (sdebug_verbose)
pr_info("slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
+
return 0;
}
@@ -5148,6 +5455,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
+ struct dentry *dentry;
if (sdebug_verbose)
pr_info("slave_configure <%u %u %u %llu>\n",
@@ -5163,6 +5471,22 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
config_cdb_len(sdp);
+
+ if (sdebug_allow_restart)
+ sdp->allow_restart = 1;
+
+ devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
+ sdebug_debugfs_root);
+ if (IS_ERR_OR_NULL(devip->debugfs_entry))
+ pr_info("%s: failed to create debugfs directory for device %s\n",
+ __func__, dev_name(&sdp->sdev_gendev));
+
+ dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
+ &sdebug_error_fops);
+ if (IS_ERR_OR_NULL(dentry))
+ pr_info("%s: failed to create error file for device %s\n",
+ __func__, dev_name(&sdp->sdev_gendev));
+
return 0;
}
@@ -5170,15 +5494,27 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
if (sdebug_verbose)
pr_info("slave_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
- if (devip) {
- /* make this slot available for re-use */
- devip->used = false;
- sdp->hostdata = NULL;
+
+ if (!devip)
+ return;
+
+ spin_lock(&devip->list_lock);
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ list_del_rcu(&err->list);
+ call_rcu(&err->rcu, sdebug_err_free);
}
+ spin_unlock(&devip->list_lock);
+
+ debugfs_remove(devip->debugfs_entry);
+
+ /* make this slot available for re-use */
+ devip->used = false;
+ sdp->hostdata = NULL;
}
/* Returns true if we require the queued memory to be freed by the caller. */
@@ -5272,9 +5608,39 @@ static void stop_all_queued(void)
mutex_unlock(&sdebug_host_list_mutex);
}
+static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdp = cmnd->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
+ unsigned char *cmd = cmnd->cmnd;
+ int ret = 0;
+
+ if (devip == NULL)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == ERR_ABORT_CMD_FAILED &&
+ (err->cmd == cmd[0] || err->cmd == 0xff)) {
+ ret = !!err->cnt;
+ if (err->cnt < 0)
+ err->cnt++;
+
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
bool ok = scsi_debug_abort_cmnd(SCpnt);
+ u8 *cmd = SCpnt->cmnd;
+ u8 opcode = cmd[0];
++num_aborts;
@@ -5283,6 +5649,12 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
"%s: command%s found\n", __func__,
ok ? "" : " not");
+ if (sdebug_fail_abort(SCpnt)) {
+ scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
+ opcode);
+ return FAILED;
+ }
+
return SUCCESS;
}
@@ -5306,10 +5678,40 @@ static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
scsi_debug_stop_all_queued_iter, sdp);
}
+static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdp = cmnd->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
+ unsigned char *cmd = cmnd->cmnd;
+ int ret = 0;
+
+ if (devip == NULL)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == ERR_LUN_RESET_FAILED &&
+ (err->cmd == cmd[0] || err->cmd == 0xff)) {
+ ret = !!err->cnt;
+ if (err->cnt < 0)
+ err->cnt++;
+
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
struct sdebug_dev_info *devip = sdp->hostdata;
+ u8 *cmd = SCpnt->cmnd;
+ u8 opcode = cmd[0];
++num_dev_resets;
@@ -5320,14 +5722,33 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
if (devip)
set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ if (sdebug_fail_lun_reset(SCpnt)) {
+ scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
+ return FAILED;
+ }
+
return SUCCESS;
}
+static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
+{
+ struct scsi_target *starget = scsi_target(cmnd->device);
+ struct sdebug_target_info *targetip =
+ (struct sdebug_target_info *)starget->hostdata;
+
+ if (targetip)
+ return targetip->reset_fail;
+
+ return 0;
+}
+
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
struct sdebug_dev_info *devip;
+ u8 *cmd = SCpnt->cmnd;
+ u8 opcode = cmd[0];
int k = 0;
++num_target_resets;
@@ -5345,6 +5766,12 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in target\n", __func__, k);
+ if (sdebug_fail_target_reset(SCpnt)) {
+ scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
+ opcode);
+ return FAILED;
+ }
+
return SUCCESS;
}
@@ -5772,6 +6199,7 @@ module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
+module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -5844,6 +6272,7 @@ MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
+MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag (def=0)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
@@ -7011,6 +7440,10 @@ static int __init scsi_debug_init(void)
goto driver_unreg;
}
+ sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
+ if (IS_ERR_OR_NULL(sdebug_debugfs_root))
+ pr_info("%s: failed to create initial debugfs directory\n", __func__);
+
for (k = 0; k < hosts_to_add; k++) {
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
@@ -7057,6 +7490,7 @@ static void __exit scsi_debug_exit(void)
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
+ debugfs_remove(sdebug_debugfs_root);
}
device_initcall(scsi_debug_init);
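sdebug_debugfs_root is created once at module init and removed with a single debugfs_remove() call in scsi_debug_exit() above; since debugfs_remove() removes children recursively and tolerates NULL or error-pointer arguments, files created under this directory later do not need individual cleanup on the module's error paths.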
@@ -7496,6 +7930,104 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
return num_entries;
}
+static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdp = cmnd->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
+ unsigned char *cmd = cmnd->cmnd;
+ int ret = 0;
+
+ if (devip == NULL)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == ERR_TMOUT_CMD &&
+ (err->cmd == cmd[0] || err->cmd == 0xff)) {
+ ret = !!err->cnt;
+ if (err->cnt < 0)
+ err->cnt++;
+
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdp = cmnd->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
+ unsigned char *cmd = cmnd->cmnd;
+ int ret = 0;
+
+ if (devip == NULL)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == ERR_FAIL_QUEUE_CMD &&
+ (err->cmd == cmd[0] || err->cmd == 0xff)) {
+ ret = err->cnt ? err->queuecmd_ret : 0;
+ if (err->cnt < 0)
+ err->cnt++;
+
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
+ struct sdebug_err_inject *info)
+{
+ struct scsi_device *sdp = cmnd->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdebug_err_inject *err;
+ unsigned char *cmd = cmnd->cmnd;
+ int ret = 0;
+ int result;
+
+ if (devip == NULL)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
+ if (err->type == ERR_FAIL_CMD &&
+ (err->cmd == cmd[0] || err->cmd == 0xff)) {
+ if (!err->cnt) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ ret = !!err->cnt;
+ rcu_read_unlock();
+ goto out_handle;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+out_handle:
+ if (err->cnt < 0)
+ err->cnt++;
+ mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
+ result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
+ *info = *err;
+ *retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);
+
+ return ret;
+}
+
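sdebug_fail_cmd() builds the injected result word by hand; the packing it relies on is the midlayer's layout with the SCSI status byte in bits 0-7, the host byte in bits 16-23 and the legacy driver byte in bits 24-31. A small runnable restatement with a worked value (the helper name is invented):

#include <stdio.h>

/* Same packing as "err->status_byte | err->host_byte << 16 | err->driver_byte << 24". */
static unsigned int pack_scsi_result(unsigned char status, unsigned char host,
                                     unsigned char driver)
{
        return status | (unsigned int)host << 16 | (unsigned int)driver << 24;
}

int main(void)
{
        /* CHECK CONDITION (0x02) with a DID_OK host byte, no driver byte. */
        printf("0x%08x\n", pack_scsi_result(0x02, 0x00, 0x00));        /* 0x00000002 */
        return 0;
}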
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -7515,6 +8047,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
u8 opcode = cmd[0];
bool has_wlun_rl;
bool inject_now;
+ int ret = 0;
+ struct sdebug_err_inject err;
scsi_set_resid(scp, 0);
if (sdebug_statistics) {
@@ -7554,6 +8088,29 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (NULL == devip)
goto err_out;
}
+
+ if (sdebug_timeout_cmd(scp)) {
+ scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
+ return 0;
+ }
+
+ ret = sdebug_fail_queue_cmd(scp);
+ if (ret) {
+ scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
+ opcode, ret);
+ return ret;
+ }
+
+ if (sdebug_fail_cmd(scp, &ret, &err)) {
+ scmd_printk(KERN_INFO, scp,
+ "fail command 0x%x with hostbyte=0x%x, "
+ "driverbyte=0x%x, statusbyte=0x%x, "
+ "sense_key=0x%x, asc=0x%x, asq=0x%x\n",
+ opcode, err.host_byte, err.driver_byte,
+ err.status_byte, err.sense_key, err.asc, err.asq);
+ return ret;
+ }
+
if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
atomic_set(&sdeb_inject_pending, 1);
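The three hooks are checked in order of increasing intrusiveness: sdebug_timeout_cmd() makes queuecommand return 0 without ever completing the command, so the block-layer timeout and the abort/reset escalation get exercised; sdebug_fail_queue_cmd() returns the configured queuecommand value (for example a busy indication) so the command is retried by the midlayer; and sdebug_fail_cmd() completes the command with the injected sense data and result word described above.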
@@ -7672,7 +8229,6 @@ static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
return 0;
}
-
static struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
@@ -7702,6 +8258,8 @@ static struct scsi_host_template sdebug_driver_template = {
.track_queue_depth = 1,
.cmd_size = sizeof(struct sdebug_scsi_cmd),
.init_cmd_priv = sdebug_init_cmd_priv,
+ .target_alloc = sdebug_target_alloc,
+ .target_destroy = sdebug_target_destroy,
};
static int sdebug_driver_probe(struct device *dev)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d983f4a0e9..43eff11070 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
struct scsi_cmnd *);
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
{
lockdep_assert_held(shost->host_lock);
- if (scsi_host_busy(shost) == shost->host_failed) {
+ if (busy == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
shost->host_eh_scheduled++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, scsi_host_busy(shost));
}
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
struct Scsi_Host *shost = scmd->device->host;
+ unsigned int busy = scsi_host_busy(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_failed++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
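Both callers of scsi_eh_wakeup() changed here, and the one in scsi_lib.c below, now sample scsi_host_busy() before taking the host lock and pass the value in. The wake-up condition itself (busy == host_failed) is unchanged; the point is that the comparatively heavyweight tag-set walk behind scsi_host_busy() no longer runs inside the host-lock critical section with interrupts disabled.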
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c2f647a7c1..df5ac03d5d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rcu_read_lock();
__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
+ unsigned int busy = scsi_host_busy(shost);
+
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
rcu_read_unlock();
@@ -774,6 +776,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
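ASC 0x04 with ASCQ 0x24 and the newly added 0x25 are the T10 "depopulation in progress" and "depopulation restoration in progress" additional sense codes. As with the other in-progress conditions in this list, the logical unit will become ready by itself, so a delayed retry is the right disposition; sd_spinup_disk() below learns the same two codes so it stops trying to start a unit that is busy depopulating or restoring elements.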
@@ -1250,28 +1253,26 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
int token;
token = sbitmap_get(&sdev->budget_map);
- if (atomic_read(&sdev->device_blocked)) {
- if (token < 0)
- goto out;
+ if (token < 0)
+ return -1;
- if (scsi_device_busy(sdev) > 1)
- goto out_dec;
+ if (!atomic_read(&sdev->device_blocked))
+ return token;
- /*
- * unblock after device_blocked iterates to zero
- */
- if (atomic_dec_return(&sdev->device_blocked) > 0)
- goto out_dec;
- SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
- "unblocking device at zero depth\n"));
+ /*
+ * Only unblock if no other commands are pending and
+ * if device_blocked has decreased to zero
+ */
+ if (scsi_device_busy(sdev) > 1 ||
+ atomic_dec_return(&sdev->device_blocked) > 0) {
+ sbitmap_put(&sdev->budget_map, token);
+ return -1;
}
+ SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+ "unblocking device at zero depth\n"));
+
return token;
-out_dec:
- if (token >= 0)
- sbitmap_put(&sdev->budget_map, token);
-out:
- return -1;
}
/*
@@ -2299,10 +2300,10 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
do {
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
timeout, 1, &exec_args);
- if (sdev->removable && scsi_sense_valid(sshdr) &&
+ if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
- } while (scsi_sense_valid(sshdr) &&
+ } while (result > 0 && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION && --retries);
return result;
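The same "result > 0" guard recurs below in scsi_transport_spi.c, sd.c and sr.c. It encodes the scsi_execute_cmd() return convention: a negative value means the command was never issued, so the sense header cannot contain anything useful; zero means clean completion; only a positive SCSI result may be accompanied by valid sense data. A minimal restatement of the guard (the helper name is invented):

#include <scsi/scsi_common.h>

/* Sense data is only worth decoding when the device actually responded,
 * i.e. scsi_execute_cmd() returned a positive SCSI result. */
static bool sense_worth_checking(int result, const struct scsi_sense_hdr *sshdr)
{
        return result > 0 && scsi_sense_valid(sshdr);
}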
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f0dfb97db..1fbfe1b52c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *work_q,
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 7f0914ea16..093774d775 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -18,7 +18,6 @@ static struct ctl_table scsi_table[] = {
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
.proc_handler = proc_dointvec },
- { }
};
static struct ctl_table_header *scsi_table_header;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 2442d4d2e3..f668c1c0a9 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -676,10 +676,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
for (r = 0; r < retries; r++) {
result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
buffer, len, &sshdr);
- if(result || !scsi_device_online(sdev)) {
+ if (result || !scsi_device_online(sdev)) {
scsi_device_set_state(sdev, SDEV_QUIESCE);
- if (scsi_sense_valid(&sshdr)
+ if (result > 0 && scsi_sense_valid(&sshdr)
&& sshdr.sense_key == ILLEGAL_REQUEST
/* INVALID FIELD IN CDB */
&& sshdr.asc == 0x24 && sshdr.ascq == 0x00)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c2e8d9e277..a12ff43ac8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -143,7 +143,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
static const char temp[] = "temporary ";
- int len;
+ int len, ret;
if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
/* no cache control on RBC devices; theoretically they
@@ -190,9 +190,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
*/
data.device_specific = 0;
- if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
- sdkp->max_retries, &data, &sshdr)) {
- if (scsi_sense_valid(&sshdr))
+ ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr);
+ if (ret) {
+ if (ret > 0 && scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
@@ -2263,19 +2264,21 @@ sd_spinup_disk(struct scsi_disk *sdkp)
sdkp->max_retries,
&exec_args);
- /*
- * If the drive has indicated to us that it
- * doesn't have any media in it, don't bother
- * with any more polling.
- */
- if (media_not_present(sdkp, &sshdr)) {
- if (media_was_present)
- sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
- return;
- }
+ if (the_result > 0) {
+ /*
+ * If the drive has indicated to us that it
+ * doesn't have any media in it, don't bother
+ * with any more polling.
+ */
+ if (media_not_present(sdkp, &sshdr)) {
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Media removed, stopped polling\n");
+ return;
+ }
- if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
+ }
retries++;
} while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
@@ -2307,6 +2310,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* unavailable */
if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
break; /* sanitize in progress */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x24)
+ break; /* depopulation in progress */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x25)
+ break; /* depopulation restoration in progress */
/*
* Issue command to spin up drive when not ready
*/
@@ -2471,11 +2478,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
buffer, RC16_LEN, SD_TIMEOUT,
sdkp->max_retries, &exec_args);
-
- if (media_not_present(sdkp, &sshdr))
- return -ENODEV;
-
if (the_result > 0) {
+ if (media_not_present(sdkp, &sshdr))
+ return -ENODEV;
+
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == ILLEGAL_REQUEST &&
@@ -2972,7 +2978,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
bad_sense:
- if (scsi_sense_valid(&sshdr) &&
+ if (res == -EIO && scsi_sense_valid(&sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
/* Invalid field in CDB */
@@ -3020,7 +3026,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
- if (scsi_sense_valid(&sshdr))
+ if (res == -EIO && scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return;
@@ -3404,6 +3410,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return true;
}
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+ unsigned int buf_len = sdkp->device->sector_size;
+ char *buffer, cmd[10] = { };
+
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ SD_TIMEOUT, sdkp->max_retries, NULL);
+ kfree(buffer);
+}
+
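READ(10) carries the 32-bit logical block address in CDB bytes 2-5 and the 16-bit transfer length in bytes 7-8, which is exactly what the two put_unaligned_be*() calls fill in; the remaining bytes (flags, group number, control) stay zero, so the CDB issued here for one block at LBA 0 is 28 00 00 00 00 00 00 00 01 00.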
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3443,7 +3467,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
-
+ /*
+ * Some USB/UAS devices return generic values for mode pages
+ * until the media has been accessed. Trigger a READ operation
+ * to force the device to populate mode pages.
+ */
+ if (sdp->read_before_ms)
+ sd_read_block_zero(sdkp);
/*
* set the default to rotational. All non-rotational devices
* support the block characteristics VPD page, which will
@@ -3921,7 +3951,7 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- int ret = 0;
+ int ret;
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
@@ -3931,11 +3961,8 @@ static int sd_resume(struct device *dev, bool runtime)
return 0;
}
- if (!sdkp->device->no_start_on_resume) {
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
- ret = sd_start_stop_device(sdkp, 1);
- }
-
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+ ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
opal_unlock_from_suspend(sdkp->opal_dev);
sdkp->suspended = false;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0d8afffd16..86210e4dd0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1650,7 +1650,6 @@ static struct ctl_table sg_sysctls[] = {
.mode = 0444,
.proc_handler = proc_dointvec,
},
- {}
};
static struct ctl_table_header *hdr;
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 0419401835..cdedc27185 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
bool controller_online;
bool block_requests;
bool scan_blocked;
- u8 logical_volume_rescan_needed : 1;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 9a58df9312..868453b18c 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
if (existing_device->devtype == TYPE_DISK) {
existing_device->raid_level = new_device->raid_level;
existing_device->volume_status = new_device->volume_status;
- if (ctrl_info->logical_volume_rescan_needed)
- existing_device->rescan = true;
memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
kfree(existing_device->raid_map);
@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
}
+static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
+{
+ if (pqi_device_in_remove(device))
+ return false;
+
+ if (device->sdev == NULL)
+ return false;
+
+ if (!scsi_device_online(device->sdev))
+ return false;
+
+ return device->rescan;
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
- if (device->rescan) {
- scsi_rescan_device(device->sdev);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_volume_rescan_needed(device)) {
device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
}
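The controller-wide logical_volume_rescan_needed flag is replaced by the per-device rescan bit set in pqi_mark_volumes_for_rescan() below; the check-and-clear here runs under scsi_device_list_lock, and pqi_volume_rescan_needed() re-verifies that the device still has an sdev, is online and is not mid-removal, so scsi_rescan_device() cannot be called for a volume that disappeared between the event and the rescan worker.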
@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
- ctrl_info->logical_volume_rescan_needed = false;
-
}
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
@@ -3702,6 +3716,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
+ device->rescan = true;
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
{
unsigned long flags;
@@ -3742,7 +3771,7 @@ static void pqi_event_worker(struct work_struct *work)
ack_event = true;
rescan_needed = true;
if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
- ctrl_info->logical_volume_rescan_needed = true;
+ pqi_mark_volumes_for_rescan(ctrl_info);
else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
pqi_disable_raid_bypass(ctrl_info);
}
@@ -6504,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ if (!ctrl_info->disable_managed_interrupts)
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
+ else
+ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -10144,6 +10176,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f8)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f9)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fa)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10200,6 +10244,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1010)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1011)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1043)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index c50ede326c..84973f0f77 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1850,7 +1850,7 @@ snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
{
struct scsi_device *lr_sdev = lr_sc->device;
u32 tag = 0;
- int ret = FAILED;
+ int ret;
for (tag = 0; tag < snic->max_tag_id; tag++) {
if (tag == snic_cmd_tag(lr_sc))
@@ -1859,7 +1859,6 @@ snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
if (ret) {
SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
-
goto clean_err;
}
}
@@ -1867,24 +1866,19 @@ snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
schedule_timeout(msecs_to_jiffies(100));
/* Walk through all the cmds and check abts status. */
- if (snic_is_abts_pending(snic, lr_sc)) {
- ret = FAILED;
-
+ if (snic_is_abts_pending(snic, lr_sc))
goto clean_err;
- }
- ret = 0;
SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
- return ret;
+ return 0;
clean_err:
- ret = FAILED;
SNIC_HOST_ERR(snic->shost,
"Failed to Clean Pending IOs on %s device.\n",
dev_name(&lr_sdev->sdev_gendev));
- return ret;
+ return FAILED;
} /* end of snic_dr_clean_pending_req */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 07ef3db3d1..d093dd187b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -177,7 +177,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf),
SR_TIMEOUT, MAX_RETRIES, &exec_args);
- if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+ if (result > 0 && scsi_sense_valid(&sshdr) &&
+ sshdr.sense_key == UNIT_ATTENTION)
return DISK_EVENT_MEDIA_CHANGE;
if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a95936b18f..7ceb982040 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -330,6 +330,7 @@ enum storvsc_request_type {
*/
static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
new_sc->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(new_sc,
- storvsc_ringbuffer_size,
- storvsc_ringbuffer_size,
+ aligned_ringbuffer_size,
+ aligned_ringbuffer_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+ ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
if (ret)
goto err_out1;
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
{
int ret;
- ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+ ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
hv_dev_is_fc(hv_dev));
return ret;
}
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
+ aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
max_outstanding_req_per_channel =
- ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ((aligned_ringbuffer_size - PAGE_SIZE) /
ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
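VMBUS_RING_SIZE() rounds the requested byte count up to what the ring actually occupies once the ring-buffer header and page alignment are accounted for; computing that once in storvsc_drv_init() and reusing aligned_ringbuffer_size for vmbus_open(), storvsc_connect_to_vsp() and the max_outstanding_req_per_channel calculation keeps all of them consistent with the size actually requested from the host.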
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 17491ba104..a2560cc807 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -559,14 +559,15 @@ static void sym53c8xx_timer(struct timer_list *t)
*/
#define SYM_EH_ABORT 0
#define SYM_EH_DEVICE_RESET 1
-#define SYM_EH_BUS_RESET 2
-#define SYM_EH_HOST_RESET 3
/*
* Generic method for our eh processing.
* The 'op' argument tells what we have to do.
*/
-static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
+/*
+ * Error handlers called from the eh thread (one thread per HBA).
+ */
+static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
struct Scsi_Host *shost = cmd->device->host;
@@ -578,37 +579,13 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
int sts = -1;
struct completion eh_done;
- scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);
+ scmd_printk(KERN_WARNING, cmd, "ABORT operation started\n");
- /* We may be in an error condition because the PCI bus
- * went down. In this case, we need to wait until the
- * PCI bus is reset, the card is reset, and only then
- * proceed with the scsi error recovery. There's no
- * point in hurrying; take a leisurely wait.
+ /*
+ * Escalate to host reset if the PCI bus went down
*/
-#define WAIT_FOR_PCI_RECOVERY 35
- if (pci_channel_offline(pdev)) {
- int finished_reset = 0;
- init_completion(&eh_done);
- spin_lock_irq(shost->host_lock);
- /* Make sure we didn't race */
- if (pci_channel_offline(pdev)) {
- BUG_ON(sym_data->io_reset);
- sym_data->io_reset = &eh_done;
- } else {
- finished_reset = 1;
- }
- spin_unlock_irq(shost->host_lock);
- if (!finished_reset)
- finished_reset = wait_for_completion_timeout
- (sym_data->io_reset,
- WAIT_FOR_PCI_RECOVERY*HZ);
- spin_lock_irq(shost->host_lock);
- sym_data->io_reset = NULL;
- spin_unlock_irq(shost->host_lock);
- if (!finished_reset)
- return SCSI_FAILED;
- }
+ if (pci_channel_offline(pdev))
+ return SCSI_FAILED;
spin_lock_irq(shost->host_lock);
/* This one is queued in some place -> to wait for completion */
@@ -620,28 +597,7 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
}
}
- /* Try to proceed the operation we have been asked for */
- sts = -1;
- switch(op) {
- case SYM_EH_ABORT:
- sts = sym_abort_scsiio(np, cmd, 1);
- break;
- case SYM_EH_DEVICE_RESET:
- sts = sym_reset_scsi_target(np, cmd->device->id);
- break;
- case SYM_EH_BUS_RESET:
- sym_reset_scsi_bus(np, 1);
- sts = 0;
- break;
- case SYM_EH_HOST_RESET:
- sym_reset_scsi_bus(np, 0);
- sym_start_up(shost, 1);
- sts = 0;
- break;
- default:
- break;
- }
-
+ sts = sym_abort_scsiio(np, cmd, 1);
/* On error, restore everything and cross fingers :) */
if (sts)
cmd_queued = 0;
@@ -658,33 +614,130 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
spin_unlock_irq(shost->host_lock);
}
- dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
+ dev_warn(&cmd->device->sdev_gendev, "ABORT operation %s.\n",
sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
return sts ? SCSI_FAILED : SCSI_SUCCESS;
}
-
-/*
- * Error handlers called from the eh thread (one thread per HBA).
- */
-static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
+static int sym53c8xx_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
- return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
-}
+ struct scsi_target *starget = scsi_target(cmd->device);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+ SYM_QUEHEAD *qp;
+ int sts;
+ struct completion eh_done;
-static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
-{
- return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
+ starget_printk(KERN_WARNING, starget,
+ "TARGET RESET operation started\n");
+
+ /*
+ * Escalate to host reset if the PCI bus went down
+ */
+ if (pci_channel_offline(pdev))
+ return SCSI_FAILED;
+
+ spin_lock_irq(shost->host_lock);
+ sts = sym_reset_scsi_target(np, starget->id);
+ if (!sts) {
+ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+ struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb,
+ link_ccbq);
+ struct scsi_cmnd *cmd = cp->cmd;
+ struct sym_ucmd *ucmd;
+
+ if (!cmd || cmd->device->channel != starget->channel ||
+ cmd->device->id != starget->id)
+ continue;
+
+ ucmd = SYM_UCMD_PTR(cmd);
+ init_completion(&eh_done);
+ ucmd->eh_done = &eh_done;
+ spin_unlock_irq(shost->host_lock);
+ if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
+ ucmd->eh_done = NULL;
+ sts = -2;
+ }
+ spin_lock_irq(shost->host_lock);
+ }
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ starget_printk(KERN_WARNING, starget, "TARGET RESET operation %s.\n",
+ sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
+ return SCSI_SUCCESS;
}
static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
- return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
+ struct Scsi_Host *shost = cmd->device->host;
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+
+ scmd_printk(KERN_WARNING, cmd, "BUS RESET operation started\n");
+
+ /*
+ * Escalate to host reset if the PCI bus went down
+ */
+ if (pci_channel_offline(pdev))
+ return SCSI_FAILED;
+
+ spin_lock_irq(shost->host_lock);
+ sym_reset_scsi_bus(np, 1);
+ spin_unlock_irq(shost->host_lock);
+
+ dev_warn(&cmd->device->sdev_gendev, "BUS RESET operation complete.\n");
+ return SCSI_SUCCESS;
}
static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
+ struct Scsi_Host *shost = cmd->device->host;
+ struct sym_data *sym_data = shost_priv(shost);
+ struct pci_dev *pdev = sym_data->pdev;
+ struct sym_hcb *np = sym_data->ncb;
+ struct completion eh_done;
+ int finished_reset = 1;
+
+ shost_printk(KERN_WARNING, shost, "HOST RESET operation started\n");
+
+ /* We may be in an error condition because the PCI bus
+ * went down. In this case, we need to wait until the
+ * PCI bus is reset, the card is reset, and only then
+ * proceed with the scsi error recovery. There's no
+ * point in hurrying; take a leisurely wait.
+ */
+#define WAIT_FOR_PCI_RECOVERY 35
+ if (pci_channel_offline(pdev)) {
+ init_completion(&eh_done);
+ spin_lock_irq(shost->host_lock);
+ /* Make sure we didn't race */
+ if (pci_channel_offline(pdev)) {
+ BUG_ON(sym_data->io_reset);
+ sym_data->io_reset = &eh_done;
+ finished_reset = 0;
+ }
+ spin_unlock_irq(shost->host_lock);
+ if (!finished_reset)
+ finished_reset = wait_for_completion_timeout
+ (sym_data->io_reset,
+ WAIT_FOR_PCI_RECOVERY*HZ);
+ spin_lock_irq(shost->host_lock);
+ sym_data->io_reset = NULL;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ if (finished_reset) {
+ sym_reset_scsi_bus(np, 0);
+ sym_start_up(shost, 1);
+ }
+
+ shost_printk(KERN_WARNING, shost, "HOST RESET operation %s.\n",
+ finished_reset==1 ? "complete" : "failed");
+ return finished_reset ? SCSI_SUCCESS : SCSI_FAILED;
}
/*
@@ -1635,7 +1688,7 @@ static const struct scsi_host_template sym2_template = {
.slave_configure = sym53c8xx_slave_configure,
.slave_destroy = sym53c8xx_slave_destroy,
.eh_abort_handler = sym53c8xx_eh_abort_handler,
- .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
+ .eh_target_reset_handler = sym53c8xx_eh_target_reset_handler,
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
.eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
.this_id = 7,
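With the catch-all sym_eh_handler() gone, each handler holds the host lock only for the work it actually performs, and only the host-reset path keeps the WAIT_FOR_PCI_RECOVERY wait for a downed PCI channel; abort, target reset and bus reset simply return SCSI_FAILED in that case and let the midlayer escalate. The eh_device_reset_handler entry is dropped in favour of eh_target_reset_handler, matching the new per-target handler above.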