Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig        |  28
-rw-r--r--  drivers/ata/ahci.h         |   2
-rw-r--r--  drivers/ata/libata-core.c  | 114
-rw-r--r--  drivers/ata/libata-sata.c  | 171
-rw-r--r--  drivers/ata/libata-scsi.c  | 195
-rw-r--r--  drivers/ata/libata-sff.c   |   4
-rw-r--r--  drivers/ata/libata.h       |  11
-rw-r--r--  drivers/ata/pata_cs5520.c  |   6
-rw-r--r--  drivers/ata/pata_macio.c   |  20
-rw-r--r--  drivers/ata/sata_mv.c      |   2
-rw-r--r--  drivers/ata/sata_nv.c      |  24
-rw-r--r--  drivers/ata/sata_sil24.c   |   2
12 files changed, 322 insertions, 257 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 928ec93c6b..b595494ab9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -556,7 +556,7 @@ comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the ALi ATA interfaces
@@ -566,7 +566,7 @@ config PATA_ALI
config PATA_AMD
tristate "AMD/NVidia PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the AMD and NVidia PATA
@@ -584,7 +584,7 @@ config PATA_ARASAN_CF
config PATA_ARTOP
tristate "ARTOP 6210/6260 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for ARTOP PATA controllers.
@@ -611,7 +611,7 @@ config PATA_ATP867X
config PATA_CMD64X
tristate "CMD64x PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the CMD64x series chips
@@ -658,7 +658,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the Cypress/Contaq CY82C693
@@ -706,7 +706,7 @@ config PATA_HPT366
config PATA_HPT37X
tristate "HPT 370/370A/371/372/374/302 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the majority of the later HPT
PATA controllers via the new ATA layer.
@@ -715,7 +715,7 @@ config PATA_HPT37X
config PATA_HPT3X2N
tristate "HPT 371N/372N/302N PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the N variant HPT PATA
controllers via the new ATA layer.
@@ -818,7 +818,7 @@ config PATA_MPC52xx
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Netcell Revolution RAID
PATA controller.
@@ -854,7 +854,7 @@ config PATA_OLDPIIX
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
@@ -864,7 +864,7 @@ config PATA_OPTIDMA
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
@@ -872,7 +872,7 @@ config PATA_PDC2027X
config PATA_PDC_OLD
tristate "Older Promise PATA controller support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@@ -900,7 +900,7 @@ config PATA_RDC
config PATA_SC1200
tristate "SC1200 PATA support"
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST) && HAS_IOPORT
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@@ -918,7 +918,7 @@ config PATA_SCH
config PATA_SERVERWORKS
tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Serverworks OSB4/CSB5/CSB6 and
HT1000 PATA controllers, via the new ATA layer.
@@ -1182,7 +1182,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI)
+ depends on (ISA || PCI) && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for ISA/VLB/PCI bus legacy PATA
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 344c87210d..8f40f75ba0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
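For illustration, a minimal sketch of what this conversion looks like from a driver's side: the host template wires up .device_configure, and the callback fills in the passed-in queue_limits instead of calling blk_queue_*() helpers on the request queue. The driver name and the dma_alignment value below are hypothetical; the callback signature and the queue_limits usage are the ones introduced by this patch.

#include <linux/libata.h>
#include <scsi/scsi_host.h>

static int foo_device_configure(struct scsi_device *sdev,
				struct queue_limits *lim)
{
	int rc;

	/* Let libata apply its generic per-device settings first. */
	rc = ata_scsi_device_configure(sdev, lim);
	if (rc)
		return rc;

	/* Driver-specific limits go into @lim rather than the request queue. */
	lim->dma_alignment = 511;	/* hypothetical value, for illustration */
	return 0;
}

/* Wired up in the driver's scsi_host_template: */
static const struct scsi_host_template foo_sht = {
	__ATA_BASE_SHT("foo"),
	.sdev_groups		= ata_common_sdev_groups,
	.device_configure	= foo_device_configure,
};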
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d937e6e5cc..74b59b78d2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1480,19 +1480,19 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
}
/**
- * ata_exec_internal_sg - execute libata internal command
+ * ata_exec_internal - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
- * @sgl: sg list for the data buffer of the command
- * @n_elem: Number of sg entries
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
*
- * Executes libata internal command with timeout. @tf contains
- * command on entry and result on return. Timeout and error
- * conditions are reported via return value. No recovery action
- * is taken after a command times out. It's caller's duty to
+ * Executes libata internal command with timeout. @tf contains
+ * the command on entry and the result on return. Timeout and error
+ * conditions are reported via the return value. No recovery action
+ * is taken after a command times out. It is the caller's duty to
* clean up after timeout.
*
* LOCKING:
@@ -1501,34 +1501,38 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-static unsigned ata_exec_internal_sg(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sgl,
- unsigned int n_elem, unsigned int timeout)
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
u8 command = tf->command;
- int auto_timeout = 0;
struct ata_queued_cmd *qc;
+ struct scatterlist sgl;
unsigned int preempted_tag;
u32 preempted_sactive;
u64 preempted_qc_active;
int preempted_nr_active_links;
+ bool auto_timeout = false;
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
unsigned int err_mask;
int rc;
+ if (WARN_ON(dma_dir != DMA_NONE && !buf))
+ return AC_ERR_INVALID;
+
spin_lock_irqsave(ap->lock, flags);
- /* no internal command while frozen */
+ /* No internal command while frozen */
if (ata_port_is_frozen(ap)) {
spin_unlock_irqrestore(ap->lock, flags);
return AC_ERR_SYSTEM;
}
- /* initialize internal qc */
+ /* Initialize internal qc */
qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
qc->tag = ATA_TAG_INTERNAL;
@@ -1547,12 +1551,12 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = 0;
ap->nr_active_links = 0;
- /* prepare & issue qc */
+ /* Prepare and issue qc */
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
- /* some SATA bridges need us to indicate data xfer direction */
+ /* Some SATA bridges need us to indicate data xfer direction */
if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
dma_dir == DMA_FROM_DEVICE)
qc->tf.feature |= ATAPI_DMADIR;
@@ -1560,13 +1564,8 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
- unsigned int i, buflen = 0;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, n_elem, i)
- buflen += sg->length;
-
- ata_sg_init(qc, sgl, n_elem);
+ sg_init_one(&sgl, buf, buflen);
+ ata_sg_init(qc, &sgl, 1);
qc->nbytes = buflen;
}
@@ -1578,11 +1577,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
if (!timeout) {
- if (ata_probe_timeout)
+ if (ata_probe_timeout) {
timeout = ata_probe_timeout * 1000;
- else {
+ } else {
timeout = ata_internal_cmd_timeout(dev, command);
- auto_timeout = 1;
+ auto_timeout = true;
}
}
@@ -1595,30 +1594,25 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ata_sff_flush_pio_task(ap);
if (!rc) {
- spin_lock_irqsave(ap->lock, flags);
-
- /* We're racing with irq here. If we lose, the
- * following test prevents us from completing the qc
- * twice. If we win, the port is frozen and will be
- * cleaned up by ->post_internal_cmd().
+ /*
+ * We are racing with irq here. If we lose, the following test
+ * prevents us from completing the qc twice. If we win, the port
+ * is frozen and will be cleaned up by ->post_internal_cmd().
*/
+ spin_lock_irqsave(ap->lock, flags);
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
-
ata_port_freeze(ap);
-
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
}
-
spin_unlock_irqrestore(ap->lock, flags);
}
- /* do post_internal_cmd */
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
- /* perform minimal error analysis */
+ /* Perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
@@ -1632,7 +1626,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->result_tf.status |= ATA_SENSE;
}
- /* finish up */
+ /* Finish up */
spin_lock_irqsave(ap->lock, flags);
*tf = qc->result_tf;
@@ -1653,44 +1647,6 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
}
/**
- * ata_exec_internal - execute libata internal command
- * @dev: Device to which the command is sent
- * @tf: Taskfile registers for the command and the result
- * @cdb: CDB for packet command
- * @dma_dir: Data transfer direction of the command
- * @buf: Data buffer of the command
- * @buflen: Length of data buffer
- * @timeout: Timeout in msecs (0 for default)
- *
- * Wrapper around ata_exec_internal_sg() which takes simple
- * buffer instead of sg list.
- *
- * LOCKING:
- * None. Should be called with kernel context, might sleep.
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
- */
-unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout)
-{
- struct scatterlist *psg = NULL, sg;
- unsigned int n_elem = 0;
-
- if (dma_dir != DMA_NONE) {
- WARN_ON(!buf);
- sg_init_one(&sg, buf, buflen);
- psg = &sg;
- n_elem++;
- }
-
- return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
- timeout);
-}
-
-/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
@@ -4199,12 +4155,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
- /* Apacer models with LPM issues */
- { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
-
/* AMD Radeon devices with broken LPM support */
{ "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 0fb1934875..9e047bf912 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-static ssize_t ata_ncq_prio_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_supported - Check if device supports NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @supported: Address of a boolean to store the result
+ *
+ * Helper to check if device supports NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @supported
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_supported;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
- spin_unlock_irq(ap->lock);
+ *supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_supported);
+
+static ssize_t ata_ncq_prio_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool supported;
+ int rc;
+
+ rc = ata_ncq_prio_supported(ap, sdev, &supported);
+ if (rc)
+ return rc;
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
+ return sysfs_emit(buf, "%d\n", supported);
}
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_enabled - Check if NCQ Priority is enabled
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enabled: Address of a boolean to store the result
+ *
+ * Helper to check if NCQ Priority feature is enabled.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_enable;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
- spin_unlock_irq(ap->lock);
+ *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
+ return rc;
}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled);
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- long int input;
- int rc = 0;
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enabled;
+ int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = ata_ncq_prio_enabled(ap, sdev, &enabled);
if (rc)
return rc;
- if ((input < 0) || (input > 1))
- return -EINVAL;
- ap = ata_shost_to_port(sdev->host);
- dev = ata_scsi_find_dev(ap, sdev);
- if (unlikely(!dev))
- return -ENODEV;
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+
+/**
+ * ata_ncq_prio_enable - Enable/disable NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enable: true - enable NCQ Priority, false - disable NCQ Priority
+ *
+ * Helper to enable/disable NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ * * %-EINVAL - NCQ Priority is not supported or CDL is enabled
+ */
+int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(ap->lock, flags);
- spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
rc = -EINVAL;
goto unlock;
}
- if (input) {
+ if (enable) {
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
ata_dev_err(dev,
"CDL must be disabled to enable NCQ priority\n");
@@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
}
unlock:
- spin_unlock_irq(ap->lock);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enable);
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enable;
+ int rc;
+
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ rc = ata_ncq_prio_enable(ap, sdev, enable);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
@@ -1170,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * ata_sas_device_configure - Default device_configure routine for libata
+ * devices
* @sdev: SCSI device to configure
+ * @lim: queue limits
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
- return ata_scsi_dev_config(sdev, ap->link.device);
+ return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+EXPORT_SYMBOL_GPL(ata_sas_device_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
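The three exported helpers above let SAS HBA drivers implement the same NCQ priority sysfs attributes without reaching into libata internals. A minimal sketch of a store method in such a driver; the foo_sdev_to_ata_port() lookup is hypothetical, the helper call is the one added here:

static ssize_t foo_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap = foo_sdev_to_ata_port(sdev);	/* hypothetical lookup */
	bool enable;
	int rc;

	rc = kstrtobool(buf, &enable);
	if (rc)
		return rc;

	rc = ata_ncq_prio_enable(ap, sdev, enable);
	return rc ? rc : len;
}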
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9c3daa7d19..076fbeadce 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -230,6 +230,80 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
SCSI_SENSE_BUFFERSIZE, information);
}
+/**
+ * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
+ * @qc: ATA PASS-THROUGH command.
+ *
+ * Populates "ATA Status Return sense data descriptor" / "Fixed format
+ * sense data" with ATA taskfile fields.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+
+ if ((sb[0] & 0x7f) >= 0x72) {
+ unsigned char *desc;
+ u8 len;
+
+ /* descriptor format */
+ len = sb[7];
+ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+ if (!desc) {
+ if (SCSI_SENSE_BUFFERSIZE < len + 14)
+ return;
+ sb[7] = len + 14;
+ desc = sb + 8 + len;
+ }
+ desc[0] = 9;
+ desc[1] = 12;
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->error;
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->status;
+
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+ } else {
+ /* Fixed sense format */
+ sb[0] |= 0x80;
+ sb[3] = tf->error;
+ sb[4] = tf->status;
+ sb[5] = tf->device;
+ sb[6] = tf->nsect;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ sb[8] |= 0x80;
+ if (tf->hob_nsect)
+ sb[8] |= 0x40;
+ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+ sb[8] |= 0x20;
+ }
+ sb[9] = tf->lbal;
+ sb[10] = tf->lbam;
+ sb[11] = tf->lbah;
+ }
+}
+
static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
@@ -837,10 +911,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
- * This function is specific to the ATA descriptor format sense
- * block specified for the ATA pass through commands. Regardless
- * of whether the command errored or not, return a sense
- * block. Copy all controller registers into the sense
+ * This function is specific to the ATA pass through commands.
+ * Regardless of whether the command errored or not, return a sense
* block. If there was no error, we get the request from an ATA
* passthrough command, so we use the following sense data:
* sk = RECOVERED ERROR
@@ -855,7 +927,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
- unsigned char *desc = sb + 8;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
@@ -870,67 +941,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
- /*
- * ATA PASS-THROUGH INFORMATION AVAILABLE
- * Always in descriptor format sense.
- */
- scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
- }
-
- if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
- u8 len;
-
- /* descriptor format */
- len = sb[7];
- desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
- if (!desc) {
- if (SCSI_SENSE_BUFFERSIZE < len + 14)
- return;
- sb[7] = len + 14;
- desc = sb + 8 + len;
- }
- desc[0] = 9;
- desc[1] = 12;
- /*
- * Copy registers into sense buffer.
- */
- desc[2] = 0x00;
- desc[3] = tf->error;
- desc[5] = tf->nsect;
- desc[7] = tf->lbal;
- desc[9] = tf->lbam;
- desc[11] = tf->lbah;
- desc[12] = tf->device;
- desc[13] = tf->status;
-
- /*
- * Fill in Extend bit, and the high order bytes
- * if applicable.
- */
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[2] |= 0x01;
- desc[4] = tf->hob_nsect;
- desc[6] = tf->hob_lbal;
- desc[8] = tf->hob_lbam;
- desc[10] = tf->hob_lbah;
- }
- } else {
- /* Fixed sense format */
- desc[0] = tf->error;
- desc[1] = tf->status;
- desc[2] = tf->device;
- desc[3] = tf->nsect;
- desc[7] = 0;
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[8] |= 0x80;
- if (tf->hob_nsect)
- desc[8] |= 0x40;
- if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
- desc[8] |= 0x20;
- }
- desc[9] = tf->lbal;
- desc[10] = tf->lbam;
- desc[11] = tf->lbah;
+ /* ATA PASS-THROUGH INFORMATION AVAILABLE */
+ ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
}
}
@@ -1021,7 +1033,8 @@ bool ata_scsi_dma_need_drain(struct request *rq)
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
@@ -1031,7 +1044,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
- blk_queue_max_hw_sectors(q, dev->max_sectors);
+ lim->max_hw_sectors = dev->max_sectors;
if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
@@ -1040,7 +1053,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
/* make room for appending the drain */
- blk_queue_max_segments(q, queue_max_segments(q) - 1);
+ lim->max_segments--;
sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
@@ -1077,7 +1090,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
- blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+ lim->dma_alignment = sdev->sector_size - 1;
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1131,8 +1144,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
/**
- * ata_scsi_slave_config - Set SCSI device attributes
+ * ata_scsi_device_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
+ * @lim: queue limits
*
* This is called before we actually start reading
* and writing to the device, to configure certain
@@ -1142,17 +1156,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_config(struct scsi_device *sdev)
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
if (dev)
- return ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, lim, dev);
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -1629,26 +1644,32 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID);
+ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
+ bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
+ bool is_ck_cond_request = cdb[2] & 0x20;
+ bool is_error = qc->err_mask != 0;
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to [CK_COND =1], a check
+ * generate because the user forced us to [CK_COND=1], a check
* condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, we use the following sense data:
+ * was no error, and CK_COND=1, we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
- if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense))
- ata_gen_passthru_sense(qc);
- else if (need_sense)
+ if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
+ if (!have_sense)
+ ata_gen_passthru_sense(qc);
+ ata_scsi_set_passthru_sense_fields(qc);
+ if (is_ck_cond_request)
+ set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
+ } else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
- else
+ } else {
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
+ }
ata_qc_done(qc);
}
@@ -2587,14 +2608,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
/* handle completion from EH */
if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into a
- * sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
+ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
ata_gen_passthru_sense(qc);
- }
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
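The CK_COND path above is exercised from userspace through SG_IO. A self-contained sketch of an ATA PASS-THROUGH (16) IDENTIFY DEVICE with CK_COND=1, roughly what hdparm and smartctl issue; the CDB encoding follows SAT and is not taken from this patch:

#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int identify_with_ck_cond(int fd)
{
	unsigned char cdb[16] = { 0 };
	unsigned char data[512], sense[32];
	struct sg_io_hdr hdr;

	cdb[0] = 0x85;		/* ATA PASS-THROUGH (16) */
	cdb[1] = 4 << 1;	/* protocol: PIO Data-In */
	cdb[2] = 0x2e;		/* CK_COND=1, T_DIR=from device, BYT_BLOK=1, T_LENGTH=sector count */
	cdb[6] = 1;		/* one 512-byte sector */
	cdb[14] = 0xec;		/* IDENTIFY DEVICE */

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = data;
	hdr.dxfer_len = sizeof(data);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 10000;	/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return -1;

	/*
	 * With CK_COND=1 the completion carries sense data holding the ATA
	 * registers even on success (sk=RECOVERED ERROR, asc/ascq=0x00/0x1d).
	 */
	printf("scsi status 0x%02x, sense bytes %u\n", hdr.status, hdr.sb_len_wr);
	return 0;
}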
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 95a19c4ef2..250f7dae05 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3032,6 +3032,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
*/
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
+#ifdef CONFIG_HAS_IOPORT
unsigned long bmdma = pci_resource_start(pdev, 4);
u8 simplex;
@@ -3044,6 +3045,9 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
if (simplex & 0x80)
return -EOPNOTSUPP;
return 0;
+#else
+ return -ENOENT;
+#endif /* CONFIG_HAS_IOPORT */
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5c685bb193..38ce13b554 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -50,10 +50,10 @@ extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
unsigned int tf_flags, int dld, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
-extern unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout);
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout);
extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
@@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 38795508c2..027cf67101 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -151,12 +151,6 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!host)
return -ENOMEM;
- /* Perform set up for DMA */
- if (pci_enable_device_io(pdev)) {
- dev_err(&pdev->dev, "unable to configure BAR2.\n");
- return -ENODEV;
- }
-
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 88b2e9817f..3cb455a32d 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_slave_config(struct scsi_device *sdev)
+static int pata_macio_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
int rc;
/* First call original */
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (rc)
return rc;
@@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
- blk_queue_update_dma_alignment(sdev->request_queue, 31);
+ lim->dma_alignment = 31;
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
@@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
- blk_queue_update_dma_alignment(sdev->request_queue, 15);
+ lim->dma_alignment = 15;
blk_queue_update_dma_pad(sdev->request_queue, 15);
/* We enable MWI and hack cache line size directly here, this
@@ -914,11 +915,14 @@ static const struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
+ /*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels this must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests.
*/
- .max_segment_size = MAX_DBDMA_SEG,
- .slave_configure = pata_macio_slave_config,
+ .max_segment_size = SZ_64K,
+ .device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
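The larger max_segment_size only works because the DBDMA setup splits each segment, as the new comment notes. A rough sketch of that splitting pattern; the function name is hypothetical, the descriptor programming is elided, and MAX_DBDMA_SEG stands for the driver's per-descriptor limit:

static void foo_fill_dbdma(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		while (len) {
			u32 chunk = min_t(u32, len, MAX_DBDMA_SEG);

			/* ...program one DBDMA descriptor for [addr, addr + chunk)... */
			addr += chunk;
			len -= chunk;
		}
	}
}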
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9bec0aee92..05c905827d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0a0cee755b..36d99043ef 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .slave_configure = nv_adma_slave_config,
+ .device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = nv_swncq_slave_config,
+ .device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_slave_config(struct scsi_device *sdev)
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}
- blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
- blk_queue_max_segments(sdev->request_queue, sg_tablesize);
+ lim->seg_boundary_mask = segment_boundary;
+ lim->max_segments = sg_tablesize;
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
@@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_slave_config(struct scsi_device *sdev)
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 142e70bfc4..72c03cbdaf 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations sil24_ops = {