Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig                      |  33
-rw-r--r--  drivers/ata/ahci.c                       | 541
-rw-r--r--  drivers/ata/ahci.h                       |  12
-rw-r--r--  drivers/ata/ahci_st.c                    |   1
-rw-r--r--  drivers/ata/libahci.c                    |  21
-rw-r--r--  drivers/ata/libata-core.c                | 147
-rw-r--r--  drivers/ata/libata-sata.c                | 171
-rw-r--r--  drivers/ata/libata-scsi.c                | 206
-rw-r--r--  drivers/ata/libata-sff.c                 |   4
-rw-r--r--  drivers/ata/libata.h                     |  11
-rw-r--r--  drivers/ata/pata_cs5520.c                |   6
-rw-r--r--  drivers/ata/pata_legacy.c                |   8
-rw-r--r--  drivers/ata/pata_macio.c                 |  27
-rw-r--r--  drivers/ata/pata_parport/pata_parport.c  |   2
-rw-r--r--  drivers/ata/sata_mv.c                    |   2
-rw-r--r--  drivers/ata/sata_nv.c                    |  24
-rw-r--r--  drivers/ata/sata_sil24.c                 |   2
17 files changed, 695 insertions, 523 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 42b51c9812..b595494ab9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -116,15 +116,14 @@ config SATA_AHCI
If unsure, say N.
config SATA_MOBILE_LPM_POLICY
- int "Default SATA Link Power Management policy for low power chipsets"
+ int "Default SATA Link Power Management policy"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
- chipsets are typically found on most laptops but desktops and
- servers now also widely use chipsets supporting low power modes.
+ chipsets are ubiquitous across laptops, desktops and servers.
The value set has the following meanings:
0 => Keep firmware settings
@@ -557,7 +556,7 @@ comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the ALi ATA interfaces
@@ -567,7 +566,7 @@ config PATA_ALI
config PATA_AMD
tristate "AMD/NVidia PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the AMD and NVidia PATA
@@ -585,7 +584,7 @@ config PATA_ARASAN_CF
config PATA_ARTOP
tristate "ARTOP 6210/6260 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for ARTOP PATA controllers.
@@ -612,7 +611,7 @@ config PATA_ATP867X
config PATA_CMD64X
tristate "CMD64x PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the CMD64x series chips
@@ -659,7 +658,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for the Cypress/Contaq CY82C693
@@ -707,7 +706,7 @@ config PATA_HPT366
config PATA_HPT37X
tristate "HPT 370/370A/371/372/374/302 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the majority of the later HPT
PATA controllers via the new ATA layer.
@@ -716,7 +715,7 @@ config PATA_HPT37X
config PATA_HPT3X2N
tristate "HPT 371N/372N/302N PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the N variant HPT PATA
controllers via the new ATA layer.
@@ -819,7 +818,7 @@ config PATA_MPC52xx
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Netcell Revolution RAID
PATA controller.
@@ -855,7 +854,7 @@ config PATA_OLDPIIX
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Very Experimental)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
@@ -865,7 +864,7 @@ config PATA_OPTIDMA
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
@@ -873,7 +872,7 @@ config PATA_PDC2027X
config PATA_PDC_OLD
tristate "Older Promise PATA controller support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@@ -901,7 +900,7 @@ config PATA_RDC
config PATA_SC1200
tristate "SC1200 PATA support"
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST) && HAS_IOPORT
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@@ -919,7 +918,7 @@ config PATA_SCH
config PATA_SERVERWORKS
tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This option enables support for the Serverworks OSB4/CSB5/CSB6 and
HT1000 PATA controllers, via the new ATA layer.
@@ -1183,7 +1182,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI)
+ depends on (ISA || PCI) && HAS_IOPORT
select PATA_TIMINGS
help
This option enables support for ISA/VLB/PCI bus legacy PATA
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index df3fd6474b..fc6fd583fa 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -50,11 +50,18 @@ enum board_ids {
board_ahci,
board_ahci_43bit_dma,
board_ahci_ign_iferr,
- board_ahci_low_power,
board_ahci_no_debounce_delay,
- board_ahci_nomsi,
- board_ahci_noncq,
- board_ahci_nosntf,
+ board_ahci_no_msi,
+ /*
+ * board_ahci_pcs_quirk is for legacy Intel platforms.
+ * Modern Intel platforms should use board_ahci instead.
+ * (Some modern Intel platforms might have been added with
+ * board_ahci_pcs_quirk, however, we cannot change them to board_ahci
+ * without testing that the platform actually works without the quirk.)
+ */
+ board_ahci_pcs_quirk,
+ board_ahci_pcs_quirk_no_devslp,
+ board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
@@ -68,12 +75,6 @@ enum board_ids {
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
- /*
- * board IDs for Intel chipsets that support more than 6 ports
- * *and* end up needing the PCS quirk.
- */
- board_ahci_pcs7,
-
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
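
The board_ahci_pcs_quirk comment above sets the convention for future device IDs. A minimal sketch (not part of this patch, device ID is made up) of how a hypothetical modern Intel controller entry would be added, using plain board_ahci rather than the quirk board:

/*
 * Sketch only: a hypothetical new Intel AHCI controller would be wired
 * to board_ahci; legacy parts keep board_ahci_pcs_quirk.
 */
static const struct pci_device_id example_new_intel_ids[] = {
	{ PCI_VDEVICE(INTEL, 0xffff), board_ahci },	/* made-up device ID */
	{ }						/* terminating entry */
};
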
@@ -143,36 +144,38 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_low_power] = {
- AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
+ [board_ahci_no_debounce_delay] = {
.flags = AHCI_FLAG_COMMON,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_no_debounce_delay] = {
+ [board_ahci_no_msi] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
- .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nomsi] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
+ [board_ahci_pcs_quirk] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_noncq] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ [board_ahci_pcs_quirk_no_devslp] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_DEVSLP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nosntf] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ [board_ahci_pcs_quirk_no_sntf] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -194,6 +197,7 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
[board_ahci_avn] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -252,119 +256,113 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
- [board_ahci_pcs7] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- },
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
- { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
- { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
- { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
- { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
- { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+ { PCI_VDEVICE(INTEL, 0x06d6), board_ahci_pcs_quirk }, /* Comet Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x2652), board_ahci_pcs_quirk }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x2653), board_ahci_pcs_quirk }, /* ICH6M */
+ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci_pcs_quirk }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci_pcs_quirk }, /* ICH7M */
+ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci_pcs_quirk }, /* ICH7R */
{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
- { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
- { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
- { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
- { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
- { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
- { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
- { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
- { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
- { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
- { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x2681), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2682), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2683), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci_pcs_quirk }, /* ICH7-M DH */
+ { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pcs_quirk_no_sntf }, /* ICH8/Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x502a), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x502b), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_pcs_quirk }, /* PCH M AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_pcs_quirk }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci_pcs_quirk }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_pcs_quirk }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_pcs_quirk }, /* CPT M RAID */
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci_pcs_quirk }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2323), board_ahci_pcs_quirk }, /* DH89xxCC AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci_pcs_quirk }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_pcs_quirk }, /* Panther M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_pcs_quirk }, /* Panther M RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci_pcs_quirk }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_pcs_quirk }, /* Lynx M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_pcs_quirk }, /* Cannon Lake PCH-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci_pcs_quirk }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
@@ -373,65 +371,64 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* *burg SATA0 'RAID' */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* *burg SATA1 'RAID' */
- { PCI_VDEVICE(INTEL, 0x282f), board_ahci }, /* *burg SATA2 'RAID' */
- { PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d7), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
- { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
- { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
- { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
- { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
- { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
- { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
- { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
- { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci_pcs_quirk }, /* Wellsburg/Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci_pcs_quirk }, /* *burg SATA0 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci_pcs_quirk }, /* *burg SATA1 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x282f), board_ahci_pcs_quirk }, /* *burg SATA2 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x43d4), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d5), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d6), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d7), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci_pcs_quirk }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_pcs_quirk }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci_pcs_quirk }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_pcs_quirk }, /* 9 Series M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_pcs_quirk }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci_pcs_quirk }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_pcs_quirk }, /* Sunrise M AHCI */
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_pcs_quirk }, /* Sunrise M RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci_pcs_quirk }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci_pcs_quirk }, /* Comet Lake-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa386), board_ahci_pcs_quirk }, /* Comet Lake PCH-V RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_pcs_quirk }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_pcs_quirk_no_devslp }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_pcs_quirk }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_pcs_quirk }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_pcs_quirk }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_pcs_quirk }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
- { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_low_power }, /* Alder Lake-P AHCI */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -459,14 +456,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
- { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
/* Dell S140/S150 */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
- PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci_pcs_quirk },
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -623,8 +620,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
* enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
- { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_no_msi },
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_no_msi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -668,6 +665,87 @@ static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+ "32-bits port map masks to ignore controllers ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv, char *mask_s)
+{
+ unsigned int mask;
+
+ if (kstrtouint(mask_s, 0, &mask)) {
+ dev_err(dev, "Invalid port map mask\n");
+ return;
+ }
+
+ hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv)
+{
+ char *param, *end, *str, *mask_s;
+ char *name;
+
+ if (!strlen(ahci_mask_port_map))
+ return;
+
+ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ if (!str)
+ return;
+
+ /* Handle single mask case */
+ if (!strchr(str, '=')) {
+ ahci_apply_port_map_mask(dev, hpriv, str);
+ goto free;
+ }
+
+ /*
+ * Mask list case: parse the parameter to apply the mask only if
+ * the device name matches.
+ */
+ param = str;
+ end = param + strlen(param);
+ while (param && param < end && *param) {
+ name = param;
+ param = strchr(name, '=');
+ if (!param)
+ break;
+
+ *param = '\0';
+ param++;
+ if (param >= end)
+ break;
+
+ if (strcmp(dev_name(dev), name) != 0) {
+ param = strchr(param, ',');
+ if (param)
+ param++;
+ continue;
+ }
+
+ mask_s = param;
+ param = strchr(mask_s, ',');
+ if (param) {
+ *param = '\0';
+ param++;
+ }
+
+ ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ }
+
+free:
+ kfree(str);
+}
+
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
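
The new mask_port_map parameter accepts either a single mask applied to every controller or a per-device list keyed by PCI address. Two illustrative value strings, shown only to make the MODULE_PARM_DESC syntax concrete (the PCI addresses and variable names are invented, not from this patch):

/*
 * Illustration only: example values for the new ahci.mask_port_map
 * module parameter.
 */
static const char *example_mask_all_controllers = "0x3";
static const char *example_mask_per_controller =
	"0000:00:17.0=0x3,0000:05:00.0=0x1";
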
@@ -690,6 +768,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
+ /* Handle port map masks passed as module parameter. */
+ if (ahci_mask_port_map)
+ ahci_get_port_map_mask(&pdev->dev, hpriv);
+
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1418,17 +1500,6 @@ static bool ahci_broken_online(struct pci_dev *pdev)
return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
}
-static bool ahci_broken_devslp(struct pci_dev *pdev)
-{
- /* device with broken DEVSLP but still showing SDS capability */
- static const struct pci_device_id ids[] = {
- { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
- {}
- };
-
- return pci_match_id(ids, pdev);
-}
-
#ifdef CONFIG_ATA_ACPI
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
@@ -1637,15 +1708,40 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
-static void ahci_update_initial_lpm_policy(struct ata_port *ap,
- struct ahci_host_priv *hpriv)
+static void ahci_mark_external_port(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* mark external ports (hotplug-capable, eSATA) */
+ tmp = readl(port_mmio + PORT_CMD);
+ if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
+ (tmp & PORT_CMD_HPCP))
+ ap->pflags |= ATA_PFLAG_EXTERNAL;
+}
+
+static void ahci_update_initial_lpm_policy(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+ /*
+ * AHCI contains a known incompatibility between LPM and hot-plug
+ * removal events, see 7.3.1 Hot Plug Removal Detection and Power
+ * Management Interaction in AHCI 1.3.1. Therefore, do not enable
+ * LPM if the port advertises itself as an external port.
+ */
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ return;
- /* Ignore processing for chipsets that don't use policy */
- if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
return;
+ }
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
@@ -1667,17 +1763,9 @@ update_policy:
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
- const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
u16 tmp16;
- /*
- * Only apply the 6-port PCS quirk for known legacy platforms.
- */
- if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
- return;
-
- /* Skip applying the quirk on Denverton and beyond */
- if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+ if (!(hpriv->flags & AHCI_HFLAG_INTEL_PCS_QUIRK))
return;
/*
@@ -1812,10 +1900,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&dev_attr_remapped_nvme.attr,
NULL);
- /* must set flag prior to save config in order to take effect */
- if (ahci_broken_devslp(pdev))
- hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
-
#ifdef CONFIG_ARM64
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
pdev->device == 0xa235 &&
@@ -1891,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ rc = -ENOMEM;
+ goto err_rm_sysfs_file;
+ }
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
@@ -1929,7 +2015,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
- ahci_update_initial_lpm_policy(ap, hpriv);
+ ahci_mark_external_port(ap);
+
+ ahci_update_initial_lpm_policy(ap);
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
@@ -1945,11 +2033,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@@ -1958,10 +2046,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
- return rc;
+ goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
+
+err_rm_sysfs_file:
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr, NULL);
+ return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index df8f8a1a3a..8f40f75ba0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -241,13 +241,11 @@ enum {
AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read
only registers */
- AHCI_HFLAG_USE_LPM_POLICY = BIT(25), /* chipset that should use
- SATA_MOBILE_LPM_POLICY
- as default lpm_policy */
- AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(25), /* handle PHYs during
suspend/resume */
- AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
- AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
+ AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
+ AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
/* ap->flags bits */
@@ -399,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index d4a626f879..79a8b0aa37 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -30,7 +30,6 @@
#define ST_AHCI_OOBR_CIMAX_SHIFT 0
struct st_ahci_drv_data {
- struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 1a63200ea4..83431aae74 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1280,10 +1280,8 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
- struct ahci_host_priv *hpriv = ap->host->private_data;
const char *emsg = NULL;
int rc;
- u32 tmp;
/* make sure port is not active */
rc = ahci_deinit_port(ap, &emsg);
@@ -1291,11 +1289,6 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
dev_warn(dev, "%s (%d)\n", emsg, rc);
ahci_port_clear_pending_irq(ap);
-
- /* mark esata ports */
- tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
- ap->pflags |= ATA_PFLAG_EXTERNAL;
}
void ahci_init_controller(struct ata_host *host)
@@ -2627,8 +2620,8 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
speed_s = "?";
dev_info(host->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ "AHCI vers %02x%02x.%02x%02x, "
+ "%u command slots, %s Gbps, %s mode\n"
,
(vers >> 24) & 0xff,
@@ -2637,12 +2630,18 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
vers & 0xff,
((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
speed_s,
- impl,
scc_s);
dev_info(host->dev,
+ "%u/%u ports implemented (port mask 0x%x)\n"
+ ,
+
+ hweight32(impl),
+ (cap & 0x1f) + 1,
+ impl);
+
+ dev_info(host->dev,
"flags: "
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c449d60d9b..74b59b78d2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1480,19 +1480,19 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
}
/**
- * ata_exec_internal_sg - execute libata internal command
+ * ata_exec_internal - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
- * @sgl: sg list for the data buffer of the command
- * @n_elem: Number of sg entries
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
*
- * Executes libata internal command with timeout. @tf contains
- * command on entry and result on return. Timeout and error
- * conditions are reported via return value. No recovery action
- * is taken after a command times out. It's caller's duty to
+ * Executes libata internal command with timeout. @tf contains
+ * the command on entry and the result on return. Timeout and error
+ * conditions are reported via the return value. No recovery action
+ * is taken after a command times out. It is the caller's duty to
* clean up after timeout.
*
* LOCKING:
@@ -1501,34 +1501,38 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-static unsigned ata_exec_internal_sg(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sgl,
- unsigned int n_elem, unsigned int timeout)
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
u8 command = tf->command;
- int auto_timeout = 0;
struct ata_queued_cmd *qc;
+ struct scatterlist sgl;
unsigned int preempted_tag;
u32 preempted_sactive;
u64 preempted_qc_active;
int preempted_nr_active_links;
+ bool auto_timeout = false;
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
unsigned int err_mask;
int rc;
+ if (WARN_ON(dma_dir != DMA_NONE && !buf))
+ return AC_ERR_INVALID;
+
spin_lock_irqsave(ap->lock, flags);
- /* no internal command while frozen */
+ /* No internal command while frozen */
if (ata_port_is_frozen(ap)) {
spin_unlock_irqrestore(ap->lock, flags);
return AC_ERR_SYSTEM;
}
- /* initialize internal qc */
+ /* Initialize internal qc */
qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
qc->tag = ATA_TAG_INTERNAL;
@@ -1547,12 +1551,12 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = 0;
ap->nr_active_links = 0;
- /* prepare & issue qc */
+ /* Prepare and issue qc */
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
- /* some SATA bridges need us to indicate data xfer direction */
+ /* Some SATA bridges need us to indicate data xfer direction */
if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
dma_dir == DMA_FROM_DEVICE)
qc->tf.feature |= ATAPI_DMADIR;
@@ -1560,13 +1564,8 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
- unsigned int i, buflen = 0;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, n_elem, i)
- buflen += sg->length;
-
- ata_sg_init(qc, sgl, n_elem);
+ sg_init_one(&sgl, buf, buflen);
+ ata_sg_init(qc, &sgl, 1);
qc->nbytes = buflen;
}
@@ -1578,11 +1577,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
if (!timeout) {
- if (ata_probe_timeout)
+ if (ata_probe_timeout) {
timeout = ata_probe_timeout * 1000;
- else {
+ } else {
timeout = ata_internal_cmd_timeout(dev, command);
- auto_timeout = 1;
+ auto_timeout = true;
}
}
@@ -1595,30 +1594,25 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ata_sff_flush_pio_task(ap);
if (!rc) {
- spin_lock_irqsave(ap->lock, flags);
-
- /* We're racing with irq here. If we lose, the
- * following test prevents us from completing the qc
- * twice. If we win, the port is frozen and will be
- * cleaned up by ->post_internal_cmd().
+ /*
+ * We are racing with irq here. If we lose, the following test
+ * prevents us from completing the qc twice. If we win, the port
+ * is frozen and will be cleaned up by ->post_internal_cmd().
*/
+ spin_lock_irqsave(ap->lock, flags);
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
-
ata_port_freeze(ap);
-
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
}
-
spin_unlock_irqrestore(ap->lock, flags);
}
- /* do post_internal_cmd */
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
- /* perform minimal error analysis */
+ /* Perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
@@ -1632,7 +1626,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->result_tf.status |= ATA_SENSE;
}
- /* finish up */
+ /* Finish up */
spin_lock_irqsave(ap->lock, flags);
*tf = qc->result_tf;
@@ -1653,44 +1647,6 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
}
/**
- * ata_exec_internal - execute libata internal command
- * @dev: Device to which the command is sent
- * @tf: Taskfile registers for the command and the result
- * @cdb: CDB for packet command
- * @dma_dir: Data transfer direction of the command
- * @buf: Data buffer of the command
- * @buflen: Length of data buffer
- * @timeout: Timeout in msecs (0 for default)
- *
- * Wrapper around ata_exec_internal_sg() which takes simple
- * buffer instead of sg list.
- *
- * LOCKING:
- * None. Should be called with kernel context, might sleep.
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
- */
-unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout)
-{
- struct scatterlist *psg = NULL, sg;
- unsigned int n_elem = 0;
-
- if (dma_dir != DMA_NONE) {
- WARN_ON(!buf);
- sg_init_one(&sg, buf, buflen);
- psg = &sg;
- n_elem++;
- }
-
- return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
- timeout);
-}
-
-/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
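
With the scatterlist wrapper removed, internal commands now take a flat buffer directly. A minimal, hypothetical caller sketch of the reworked ata_exec_internal() signature (the helper name and command choice are illustrative, not part of this patch):

/*
 * Sketch only: read one log page into a flat buffer using the new
 * buf/buflen arguments; no scatterlist setup is needed anymore.
 */
static unsigned int example_read_log_page(struct ata_device *dev, u8 log,
					  u16 page, void *buf)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = log;
	tf.lbam = page & 0xff;
	tf.lbah = page >> 8;
	tf.nsect = 1;
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 buf, ATA_SECT_SIZE, 0);
}
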
@@ -4180,8 +4136,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
- /* Crucial BX100 SSD 500GB has broken LPM support */
- { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+ /* Crucial devices with broken LPM support */
+ { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4199,6 +4155,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
+ /* AMD Radeon devices with broken LPM support */
+ { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
@@ -5527,6 +5489,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
return ap;
}
+void ata_port_free(struct ata_port *ap)
+{
+ if (!ap)
+ return;
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ kfree(ap->ncq_sense_buf);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_port_free);
+
static void ata_devres_release(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
@@ -5553,12 +5527,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- kfree(ap->pmp_link);
- kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
- kfree(ap);
+ ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@@ -5608,8 +5577,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
if (!host)
return NULL;
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- goto err_free;
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
@@ -5641,8 +5612,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
- err_free:
- kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@@ -5941,7 +5910,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
* allocation time.
*/
for (i = host->n_ports; host->ports[i]; i++)
- kfree(host->ports[i]);
+ ata_port_free(host->ports[i]);
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 0fb1934875..9e047bf912 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_store);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-static ssize_t ata_ncq_prio_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_supported - Check if device supports NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @supported: Address of a boolean to store the result
+ *
+ * Helper to check if device supports NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @supported
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_supported;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
- spin_unlock_irq(ap->lock);
+ *supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_supported);
+
+static ssize_t ata_ncq_prio_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool supported;
+ int rc;
+
+ rc = ata_ncq_prio_supported(ap, sdev, &supported);
+ if (rc)
+ return rc;
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported);
+ return sysfs_emit(buf, "%d\n", supported);
}
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
+/**
+ * ata_ncq_prio_enabled - Check if NCQ Priority is enabled
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enabled: Address of a boolean to store the result
+ *
+ * Helper to check if NCQ Priority feature is enabled.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ */
+int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled)
{
- struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
- bool ncq_prio_enable;
+ unsigned long flags;
int rc = 0;
- spin_lock_irq(ap->lock);
+ spin_lock_irqsave(ap->lock, flags);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev)
rc = -ENODEV;
else
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
- spin_unlock_irq(ap->lock);
+ *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
- return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
+ return rc;
}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled);
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
struct scsi_device *sdev = to_scsi_device(device);
- struct ata_port *ap;
- struct ata_device *dev;
- long int input;
- int rc = 0;
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enabled;
+ int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = ata_ncq_prio_enabled(ap, sdev, &enabled);
if (rc)
return rc;
- if ((input < 0) || (input > 1))
- return -EINVAL;
- ap = ata_shost_to_port(sdev->host);
- dev = ata_scsi_find_dev(ap, sdev);
- if (unlikely(!dev))
- return -ENODEV;
+ return sysfs_emit(buf, "%d\n", enabled);
+}
+
+/**
+ * ata_ncq_prio_enable - Enable/disable NCQ Priority
+ * @ap: ATA port of the target device
+ * @sdev: SCSI device
+ * @enable: true - enable NCQ Priority, false - disable NCQ Priority
+ *
+ * Helper to enable/disable NCQ Priority feature.
+ *
+ * Context: Any context. Takes and releases @ap->lock.
+ *
+ * Return:
+ * * %0 - OK. Status is stored into @enabled
+ * * %-ENODEV - Failed to find the ATA device
+ * * %-EINVAL - NCQ Priority is not supported or CDL is enabled
+ */
+int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(ap->lock, flags);
- spin_lock_irq(ap->lock);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
rc = -EINVAL;
goto unlock;
}
- if (input) {
+ if (enable) {
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
ata_dev_err(dev,
"CDL must be disabled to enable NCQ priority\n");
@@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
}
unlock:
- spin_unlock_irq(ap->lock);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_ncq_prio_enable);
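[Editor's note, not part of the patch] The three exported helpers above (ata_ncq_prio_supported(), ata_ncq_prio_enabled() and ata_ncq_prio_enable()) let code outside libata-sata.c, for example a transport driver exposing equivalent sysfs attributes, query and toggle NCQ priority without touching ap->lock or the device flags directly. A minimal, hypothetical sketch of such a caller, assuming it has already resolved the ata_port and scsi_device for the target:

#include <linux/libata.h>
#include <scsi/scsi_device.h>

/*
 * Illustrative only: check that NCQ priority is supported before turning
 * it on. Everything except the exported libata helpers is an assumption
 * about the calling driver.
 */
static int example_turn_on_ncq_prio(struct ata_port *ap,
				    struct scsi_device *sdev)
{
	bool supported;
	int rc;

	rc = ata_ncq_prio_supported(ap, sdev, &supported);
	if (rc)
		return rc;
	if (!supported)
		return -EOPNOTSUPP;

	/* Per the kernel-doc above, this fails with -EINVAL if CDL is enabled. */
	return ata_ncq_prio_enable(ap, sdev, true);
}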
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ bool enable;
+ int rc;
+
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
+
+ rc = ata_ncq_prio_enable(ap, sdev, enable);
+ if (rc)
+ return rc;
- return rc ? rc : len;
+ return len;
}
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
@@ -1170,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * ata_sas_device_configure - Default device_configure routine for libata
+ * devices
* @sdev: SCSI device to configure
+ * @lim: queue limits
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
- return ata_scsi_dev_config(sdev, ap->link.device);
+ return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+EXPORT_SYMBOL_GPL(ata_sas_device_configure);
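[Editor's note, not part of the patch] ata_sas_device_configure() now threads the queue_limits pointer through to ata_scsi_dev_config(), so a SAS LLD that used to forward its slave_configure callback forwards its device_configure callback in the same way. A hedged sketch, where the mapping from the SCSI device to its ata_port is a driver-specific placeholder:

#include <linux/libata.h>
#include <scsi/scsi_device.h>

/*
 * Sketch of a SAS LLD ->device_configure() forwarding ATA devices to
 * libata. example_sdev_to_ata_port() is hypothetical; a real driver has
 * its own way of finding the ata_port behind the SCSI device.
 */
static int example_sas_device_configure(struct scsi_device *sdev,
					struct queue_limits *lim)
{
	struct ata_port *ap = example_sdev_to_ata_port(sdev);

	if (!ap)	/* not an ATA device, nothing to configure here */
		return 0;

	return ata_sas_device_configure(sdev, lim, ap);
}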
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e954976891..4e08476011 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -230,6 +230,80 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
SCSI_SENSE_BUFFERSIZE, information);
}
+/**
+ * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
+ * @qc: ATA PASS-THROUGH command.
+ *
+ * Populates "ATA Status Return sense data descriptor" / "Fixed format
+ * sense data" with ATA taskfile fields.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+
+ if ((sb[0] & 0x7f) >= 0x72) {
+ unsigned char *desc;
+ u8 len;
+
+ /* descriptor format */
+ len = sb[7];
+ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+ if (!desc) {
+ if (SCSI_SENSE_BUFFERSIZE < len + 14)
+ return;
+ sb[7] = len + 14;
+ desc = sb + 8 + len;
+ }
+ desc[0] = 9;
+ desc[1] = 12;
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->error;
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->status;
+
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+ } else {
+ /* Fixed sense format */
+ sb[0] |= 0x80;
+ sb[3] = tf->error;
+ sb[4] = tf->status;
+ sb[5] = tf->device;
+ sb[6] = tf->nsect;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ sb[8] |= 0x80;
+ if (tf->hob_nsect)
+ sb[8] |= 0x40;
+ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+ sb[8] |= 0x20;
+ }
+ sb[9] = tf->lbal;
+ sb[10] = tf->lbam;
+ sb[11] = tf->lbah;
+ }
+}
+
static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
@@ -837,10 +911,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* ata_gen_passthru_sense - Generate check condition sense block.
* @qc: Command that completed.
*
- * This function is specific to the ATA descriptor format sense
- * block specified for the ATA pass through commands. Regardless
- * of whether the command errored or not, return a sense
- * block. Copy all controller registers into the sense
+ * This function is specific to the ATA pass through commands.
+ * Regardless of whether the command errored or not, return a sense
* block. If there was no error, we get the request from an ATA
* passthrough command, so we use the following sense data:
* sk = RECOVERED ERROR
@@ -855,7 +927,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
- unsigned char *desc = sb + 8;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
@@ -872,66 +943,18 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
} else {
/*
* ATA PASS-THROUGH INFORMATION AVAILABLE
- * Always in descriptor format sense.
+ *
+ * Note: we are supposed to call ata_scsi_set_sense(), which
+ * respects the D_SENSE bit, instead of unconditionally
+ * generating the sense data in descriptor format. However,
+ * because hdparm, hddtemp, and udisks incorrectly assume sense
+ * data in descriptor format, without even looking at the
+ * RESPONSE CODE field in the returned sense data (to see which
+ * format the returned sense data is in), we are stuck with
+ * being bug compatible with older kernels.
*/
scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
-
- if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
- u8 len;
-
- /* descriptor format */
- len = sb[7];
- desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
- if (!desc) {
- if (SCSI_SENSE_BUFFERSIZE < len + 14)
- return;
- sb[7] = len + 14;
- desc = sb + 8 + len;
- }
- desc[0] = 9;
- desc[1] = 12;
- /*
- * Copy registers into sense buffer.
- */
- desc[2] = 0x00;
- desc[3] = tf->error;
- desc[5] = tf->nsect;
- desc[7] = tf->lbal;
- desc[9] = tf->lbam;
- desc[11] = tf->lbah;
- desc[12] = tf->device;
- desc[13] = tf->status;
-
- /*
- * Fill in Extend bit, and the high order bytes
- * if applicable.
- */
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[2] |= 0x01;
- desc[4] = tf->hob_nsect;
- desc[6] = tf->hob_lbal;
- desc[8] = tf->hob_lbam;
- desc[10] = tf->hob_lbah;
- }
- } else {
- /* Fixed sense format */
- desc[0] = tf->error;
- desc[1] = tf->status;
- desc[2] = tf->device;
- desc[3] = tf->nsect;
- desc[7] = 0;
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[8] |= 0x80;
- if (tf->hob_nsect)
- desc[8] |= 0x40;
- if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
- desc[8] |= 0x20;
- }
- desc[9] = tf->lbal;
- desc[10] = tf->lbam;
- desc[11] = tf->lbah;
- }
}
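[Editor's note, not part of the patch] As the comment above explains, sense data for ATA PASS-THROUGH is kept in descriptor format for compatibility with existing tools, so from userspace the ATA registers come back in the ATA Status Return descriptor (type 0x09). A rough, hypothetical SG_IO example; the command choice, buffer sizes and error handling are illustrative only and it needs a raw disk node plus sufficient privileges:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Issue ATA CHECK POWER MODE via ATA PASS-THROUGH (16) with CK_COND=1 and
 * read the status and count registers back from the descriptor-format
 * sense data. */
int main(int argc, char **argv)
{
	unsigned char cdb[16] = { 0x85, 3 << 1, 0x20 };	/* non-data protocol, CK_COND=1 */
	unsigned char sense[32] = { 0 };
	struct sg_io_hdr io = { .interface_id = 'S' };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	cdb[14] = 0xe5;				/* ATA CHECK POWER MODE */
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.dxfer_direction = SG_DXFER_NONE;
	io.timeout = 10000;			/* milliseconds */

	if (fd < 0 || ioctl(fd, SG_IO, &io) < 0 || !io.sb_len_wr)
		return 1;

	if ((sense[0] & 0x7f) >= 0x72) {	/* descriptor-format sense */
		unsigned char *d = sense + 8;
		unsigned char *end = sense + 8 + sense[7];

		/* Walk the descriptors looking for ATA Status Return (0x09). */
		for (; d + 2 <= end; d += d[1] + 2) {
			if (d[0] == 0x09 && d + 14 <= end) {
				printf("status 0x%02x, count 0x%02x\n",
				       d[13], d[5]);
				break;
			}
		}
	}
	return 0;
}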
/**
@@ -1021,7 +1044,8 @@ bool ata_scsi_dma_need_drain(struct request *rq)
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
int depth = 1;
@@ -1031,7 +1055,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/* configure max sectors */
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
- blk_queue_max_hw_sectors(q, dev->max_sectors);
+ lim->max_hw_sectors = dev->max_sectors;
if (dev->class == ATA_DEV_ATAPI) {
sdev->sector_size = ATA_SECT_SIZE;
@@ -1040,7 +1064,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
/* make room for appending the drain */
- blk_queue_max_segments(q, queue_max_segments(q) - 1);
+ lim->max_segments--;
sdev->dma_drain_len = ATAPI_MAX_DRAIN;
sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
@@ -1077,7 +1101,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
- blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+ lim->dma_alignment = sdev->sector_size - 1;
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1131,8 +1155,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
/**
- * ata_scsi_slave_config - Set SCSI device attributes
+ * ata_scsi_device_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
+ * @lim: queue limits
*
* This is called before we actually start reading
* and writing to the device, to configure certain
@@ -1142,17 +1167,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_config(struct scsi_device *sdev)
+int ata_scsi_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
if (dev)
- return ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, lim, dev);
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
/**
* ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -1629,26 +1655,32 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID);
+ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
+ bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
+ bool is_ck_cond_request = cdb[2] & 0x20;
+ bool is_error = qc->err_mask != 0;
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to [CK_COND =1], a check
+ * generate because the user forced us to [CK_COND=1], a check
* condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, we use the following sense data:
+ * was no error, and CK_COND=1, we use the following sense data:
* sk = RECOVERED ERROR
* asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
- if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- ((cdb[2] & 0x20) || need_sense))
- ata_gen_passthru_sense(qc);
- else if (need_sense)
+ if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
+ if (!have_sense)
+ ata_gen_passthru_sense(qc);
+ ata_scsi_set_passthru_sense_fields(qc);
+ if (is_ck_cond_request)
+ set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
+ } else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
- else
+ } else {
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
+ }
ata_qc_done(qc);
}
@@ -1828,11 +1860,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
@@ -2587,14 +2619,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
/* handle completion from EH */
if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into a
- * sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
+ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
ata_gen_passthru_sense(qc);
- }
/* SCSI EH automatically locks door if sdev->locked is
* set. Sometimes door lock request continues to
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 95a19c4ef2..250f7dae05 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3032,6 +3032,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
*/
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
+#ifdef CONFIG_HAS_IOPORT
unsigned long bmdma = pci_resource_start(pdev, 4);
u8 simplex;
@@ -3044,6 +3045,9 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
if (simplex & 0x80)
return -EOPNOTSUPP;
return 0;
+#else
+ return -ENOENT;
+#endif /* CONFIG_HAS_IOPORT */
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5c685bb193..38ce13b554 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -50,10 +50,10 @@ extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
unsigned int tf_flags, int dld, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
-extern unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen,
- unsigned int timeout);
+unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
+ const u8 *cdb, enum dma_data_direction dma_dir,
+ void *buf, unsigned int buflen,
+ unsigned int timeout);
extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
@@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
-int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 38795508c2..027cf67101 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -151,12 +151,6 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!host)
return -ENOMEM;
- /* Perform set up for DMA */
- if (pci_enable_device_io(pdev)) {
- dev_err(&pdev->dev, "unable to configure BAR2.\n");
- return -ENODEV;
- }
-
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 448a511cbc..e7ac142c24 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static struct legacy_probe probe_list[NR_HOST];
static struct legacy_data legacy_data[NR_HOST];
static struct ata_host *legacy_host[NR_HOST];
-static int nr_legacy_host;
-
/**
* legacy_probe_add - Add interface to probe list
@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
{
int i;
- for (i = 0; i < nr_legacy_host; i++) {
+ for (i = 0; i < NR_HOST; i++) {
struct legacy_data *ld = &legacy_data[i];
- ata_host_detach(legacy_host[i]);
+
+ if (legacy_host[i])
+ ata_host_detach(legacy_host[i]);
platform_device_unregister(ld->platform_dev);
}
}
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 17f6ccee53..3cb455a32d 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -796,7 +796,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_slave_config(struct scsi_device *sdev)
+static int pata_macio_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -805,7 +806,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
int rc;
/* First call original */
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (rc)
return rc;
@@ -814,7 +815,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* OHare has issues with non cache aligned DMA on some chipsets */
if (priv->kind == controller_ohare) {
- blk_queue_update_dma_alignment(sdev->request_queue, 31);
+ lim->dma_alignment = 31;
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
@@ -829,7 +830,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
/* Shasta and K2 seem to have "issues" with reads ... */
if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
/* Allright these are bad, apply restrictions */
- blk_queue_update_dma_alignment(sdev->request_queue, 15);
+ lim->dma_alignment = 15;
blk_queue_update_dma_pad(sdev->request_queue, 15);
/* We enable MWI and hack cache line size directly here, this
@@ -914,11 +915,14 @@ static const struct scsi_host_template pata_macio_sht = {
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
- /* Not sure what the real max is but we know it's less than 64K, let's
- * use 64K minus 256
+ /*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels this must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests.
*/
- .max_segment_size = MAX_DBDMA_SEG,
- .slave_configure = pata_macio_slave_config,
+ .max_segment_size = SZ_64K,
+ .device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -1188,7 +1192,7 @@ static int pata_macio_attach(struct macio_dev *mdev,
return rc;
}
-static int pata_macio_detach(struct macio_dev *mdev)
+static void pata_macio_detach(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct pata_macio_priv *priv = host->private_data;
@@ -1203,8 +1207,6 @@ static int pata_macio_detach(struct macio_dev *mdev)
ata_host_detach(host);
unlock_media_bay(priv->mdev->media_bay);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1373,9 +1375,6 @@ static struct pci_driver pata_macio_pci_driver = {
.suspend = pata_macio_pci_suspend,
.resume = pata_macio_pci_resume,
#endif
- .driver = {
- .owner = THIS_MODULE,
- },
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index a7adfdcb5e..9a2cb9ca9d 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -464,7 +464,7 @@ static void pata_parport_bus_release(struct device *dev)
/* nothing to do here but required to avoid warning on device removal */
}
-static struct bus_type pata_parport_bus_type = {
+static const struct bus_type pata_parport_bus_type = {
.name = DRV_NAME,
};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9bec0aee92..05c905827d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = {
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0a0cee755b..36d99043ef 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,7 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -318,7 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -380,7 +382,7 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .slave_configure = nv_adma_slave_config,
+ .device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -391,7 +393,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = nv_swncq_slave_config,
+ .device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
@@ -661,7 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_slave_config(struct scsi_device *sdev)
+static int nv_adma_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -673,7 +676,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -740,8 +743,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
}
- blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
- blk_queue_max_segments(sdev->request_queue, sg_tablesize);
+ lim->seg_boundary_mask = segment_boundary;
+ lim->max_segments = sg_tablesize;
ata_port_info(ap,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,
@@ -1868,7 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_slave_config(struct scsi_device *sdev)
+static int nv_swncq_device_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1878,7 +1882,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_slave_config(sdev);
+ rc = ata_scsi_device_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 142e70bfc4..72c03cbdaf 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = {
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .slave_configure = ata_scsi_slave_config
+ .device_configure = ata_scsi_device_configure
};
static struct ata_port_operations sil24_ops = {