author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/scsi/pm8001
parent | Initial commit. (diff)
download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz, linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi/pm8001')
-rw-r--r-- | drivers/scsi/pm8001/Makefile | 17
-rw-r--r-- | drivers/scsi/pm8001/pm8001_chips.h | 89
-rw-r--r-- | drivers/scsi/pm8001/pm8001_ctl.c | 1041
-rw-r--r-- | drivers/scsi/pm8001/pm8001_ctl.h | 68
-rw-r--r-- | drivers/scsi/pm8001/pm8001_defs.h | 143
-rw-r--r-- | drivers/scsi/pm8001/pm8001_hwi.c | 4838
-rw-r--r-- | drivers/scsi/pm8001/pm8001_hwi.h | 1030
-rw-r--r-- | drivers/scsi/pm8001/pm8001_init.c | 1569
-rw-r--r-- | drivers/scsi/pm8001/pm8001_sas.c | 1195
-rw-r--r-- | drivers/scsi/pm8001/pm8001_sas.h | 794
-rw-r--r-- | drivers/scsi/pm8001/pm80xx_hwi.c | 4940
-rw-r--r-- | drivers/scsi/pm8001/pm80xx_hwi.h | 1665
-rw-r--r-- | drivers/scsi/pm8001/pm80xx_tracepoints.c | 10
-rw-r--r-- | drivers/scsi/pm8001/pm80xx_tracepoints.h | 113
14 files changed, 17512 insertions, 0 deletions
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile new file mode 100644 index 0000000000..bbb51b7312 --- /dev/null +++ b/drivers/scsi/pm8001/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver +# +# Copyright (C) 2008-2009 USI Co., Ltd. + + +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o + +CFLAGS_pm80xx_tracepoints.o := -I$(src) + +pm80xx-y += pm8001_init.o \ + pm8001_sas.o \ + pm8001_ctl.o \ + pm8001_hwi.o \ + pm80xx_hwi.o \ + pm80xx_tracepoints.o diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h new file mode 100644 index 0000000000..9241c78260 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_chips.h @@ -0,0 +1,89 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PM8001_CHIPS_H_ +#define _PM8001_CHIPS_H_ + +static inline u32 pm8001_read_32(void *virt_addr) +{ + return *((u32 *)virt_addr); +} + +static inline void pm8001_write_32(void *addr, u32 offset, __le32 val) +{ + *((__le32 *)(addr + offset)) = val; +} + +static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 offset) +{ + return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset); +} + +static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar, + u32 addr, u32 val) +{ + writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr); +} +static inline u32 pm8001_mr32(void __iomem *addr, u32 offset) +{ + return readl(addr + offset); +} +static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val) +{ + writel(val, addr + offset); +} +static inline u32 get_pci_bar_index(u32 pcibar) +{ + switch (pcibar) { + case 0x18: + case 0x1C: + return 1; + case 0x20: + return 2; + case 0x24: + return 3; + default: + return 0; + } +} + +#endif /* _PM8001_CHIPS_H_ */ + diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c new file mode 100644 index 0000000000..5c26a13ffb --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -0,0 +1,1041 @@ +/* + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
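For reference, the mapping implemented by get_pci_bar_index() in pm8001_chips.h above can be exercised as a small stand-alone user-space sketch (not part of the patch): the input is a PCI configuration-space BAR register offset (0x10-0x24) and the result is the logical io_mem[] slot later used by pm8001_cr32()/pm8001_cw32().

#include <assert.h>
#include <stdio.h>

/* Stand-alone copy of the get_pci_bar_index() mapping:
 * PCI config-space BAR register offset -> logical io_mem[] index. */
static unsigned int bar_index_from_cfg_offset(unsigned int pcibar)
{
	switch (pcibar) {
	case 0x18:
	case 0x1C:
		return 1;
	case 0x20:
		return 2;
	case 0x24:
		return 3;
	default:
		return 0;	/* 0x10/0x14 and anything unexpected */
	}
}

int main(void)
{
	assert(bar_index_from_cfg_offset(0x10) == 0);
	assert(bar_index_from_cfg_offset(0x1C) == 1);
	printf("offset 0x20 -> io_mem[%u]\n", bar_index_from_cfg_offset(0x20));
	return 0;
}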
+ * + */ +#include <linux/firmware.h> +#include <linux/slab.h> +#include "pm8001_sas.h" +#include "pm8001_ctl.h" +#include "pm8001_chips.h" + +/* scsi host attributes */ + +/** + * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } +} +static +DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); + +/** + * controller_fatal_error_show - check controller is under fatal err + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t controller_fatal_error_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%d\n", + pm8001_ha->controller_fatal_error); +} +static DEVICE_ATTR_RO(controller_fatal_error); + +/** + * pm8001_ctl_fw_version_show - firmware version + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } +} +static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); + +/** + * pm8001_ctl_ila_version_show - ila version + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
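The fw_version attribute above formats a single packed 32-bit revision word as four dotted hex bytes. A minimal sketch of the same unpacking, using an invented register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t firmware_rev = 0x01122a00;	/* hypothetical value */
	char ver[16];

	/* One hex byte per field, most-significant byte first. */
	snprintf(ver, sizeof(ver), "%02x.%02x.%02x.%02x",
		 (unsigned int)((firmware_rev >> 24) & 0xff),
		 (unsigned int)((firmware_rev >> 16) & 0xff),
		 (unsigned int)((firmware_rev >> 8) & 0xff),
		 (unsigned int)(firmware_rev & 0xff));
	printf("%s\n", ver);	/* prints 01.12.2a.00 */
	return 0;
}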
+ */ +static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id != chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version)); + } + return 0; +} +static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL); + +/** + * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id != chip_8001) { + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version)); + } + return 0; +} +static +DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL); + +/** + * pm8001_ctl_max_out_io_show - max outstanding io supported + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } +} +static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); +/** + * pm8001_ctl_max_devices_show - max devices support + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)); + } else { + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)); + } +} +static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); +/** + * pm8001_ctl_max_sg_list_show - max sg list supported iff not 0.0 for no + * hardware limitation + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
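The max_devices attribute above and the companion max_sg_list attribute decode the same packed max_sgl word: the upper 16 bits carry the device count and the lower 16 bits the scatter-gather list size. A minimal sketch with a made-up value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_sgl = (2048u << 16) | 528u;	/* hypothetical */

	printf("max_devices = %04d\n", (int)(max_sgl >> 16));	   /* 2048 */
	printf("max_sg_list = %04d\n", (int)(max_sgl & 0x0000FFFF)); /* 0528 */
	return 0;
}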
+ */ +static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + if (pm8001_ha->chip_id == chip_8001) { + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF); + } else { + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF); + } +} +static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); + +#define SAS_1_0 0x1 +#define SAS_1_1 0x2 +#define SAS_2_0 0x4 + +static ssize_t +show_sas_spec_support_status(unsigned int mode, char *buf) +{ + ssize_t len = 0; + + if (mode & SAS_1_1) + len = sprintf(buf, "%s", "SAS1.1"); + if (mode & SAS_2_0) + len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0"); + len += sprintf(buf + len, "\n"); + + return len; +} + +/** + * pm8001_ctl_sas_spec_support_show - sas spec supported + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + unsigned int mode; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + return show_sas_spec_support_status(mode, buf); +} +static DEVICE_ATTR(sas_spec_support, S_IRUGO, + pm8001_ctl_sas_spec_support_show, NULL); + +/** + * pm8001_ctl_host_sas_address_show - sas address + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * This is the controller sas address + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return sysfs_emit(buf, "0x%016llx\n", + be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); +} +static DEVICE_ATTR(host_sas_address, S_IRUGO, + pm8001_ctl_host_sas_address_show, NULL); + +/** + * pm8001_ctl_logging_level_show - logging level + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. 
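pm8001_ctl_sas_spec_support_show() above keeps only the top seven bits of ctrl_cap_flag (mask 0xfe000000) and shifts them down by 25 before testing the SAS_1_1/SAS_2_0 flags. A stand-alone sketch of that decode, with an invented register value:

#include <stdint.h>
#include <stdio.h>

#define SAS_1_0 0x1
#define SAS_1_1 0x2
#define SAS_2_0 0x4

int main(void)
{
	uint32_t ctrl_cap_flag = 0x0c000000;	/* hypothetical */
	unsigned int mode = (ctrl_cap_flag & 0xfe000000) >> 25;

	if (mode & SAS_1_1)
		printf("SAS1.1 ");
	if (mode & SAS_2_0)
		printf("SAS2.0 ");
	printf("\n");	/* prints "SAS1.1 SAS2.0" for this value */
	return 0;
}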
+ */ +static ssize_t pm8001_ctl_logging_level_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level); +} + +static ssize_t pm8001_ctl_logging_level_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int val = 0; + + if (sscanf(buf, "%x", &val) != 1) + return -EINVAL; + + pm8001_ha->logging_level = val; + return strlen(buf); +} + +static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, + pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store); +/** + * pm8001_ctl_aap_log_show - aap1 event log + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + u8 *ptr = (u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr; + int i; + + char *str = buf; + int max = 2; + for (i = 0; i < max; i++) { + str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" + "0x%08x 0x%08x\n", + pm8001_ctl_aap1_memmap(ptr, i, 0), + pm8001_ctl_aap1_memmap(ptr, i, 4), + pm8001_ctl_aap1_memmap(ptr, i, 8), + pm8001_ctl_aap1_memmap(ptr, i, 12), + pm8001_ctl_aap1_memmap(ptr, i, 16), + pm8001_ctl_aap1_memmap(ptr, i, 20), + pm8001_ctl_aap1_memmap(ptr, i, 24), + pm8001_ctl_aap1_memmap(ptr, i, 28)); + } + + return str - buf; +} +static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); +/** + * pm8001_ctl_ib_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; + u32 ib_offset = pm8001_ha->ib_offset; + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; +#define IB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[ib_offset].virt_ptr + \ + pm8001_ha->evtlog_ib_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0) + pm8001_ha->evtlog_ib_offset = 0; + + return str - buf; +} + +static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); +/** + * pm8001_ctl_ob_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ + +static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; + u32 ob_offset = pm8001_ha->ob_offset; + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; +#define OB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[ob_offset].virt_ptr + \ + pm8001_ha->evtlog_ob_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0) + pm8001_ha->evtlog_ob_offset = 0; + + return str - buf; +} +static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); +/** + * pm8001_ctl_bios_version_show - Bios version Display + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf:the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + int bios_index; + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + payload.minor_function = 7; + payload.offset = 0; + payload.rd_length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) { + kfree(payload.func_specific); + return -ENOMEM; + } + wait_for_completion(&completion); + for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; + bios_index++) + str += sprintf(str, "%c", + *(payload.func_specific+bios_index)); + kfree(payload.func_specific); + return str - buf; +} +static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); +/** + * event_log_size_show - event log size + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs read shost attribute. + */ +static ssize_t event_log_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); +} +static DEVICE_ATTR_RO(event_log_size); +/** + * pm8001_ctl_iop_log_show - IOP event log + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
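Both ib_log and ob_log above dump IB_OB_READ_TIMES dwords per read and then slide the saved offset forward by SYSFS_OFFSET bytes, wrapping once a whole queue area has been covered. A user-space model of that bookkeeping (the max_q_num value is chosen arbitrarily):

#include <stdio.h>

#define IB_OB_READ_TIMES 256
#define SYSFS_OFFSET 1024
#define PM8001_MPI_QUEUE 1024

int main(void)
{
	unsigned int max_q_num = 4;		/* hypothetical */
	unsigned int queue_size = max_q_num * PM8001_MPI_QUEUE * 128;
	unsigned int offset = 0;

	for (int read = 0; read < 3; read++) {
		/* each read covers IB_OB_READ_TIMES dwords = 1024 bytes */
		printf("read %d covers bytes [%u, %u)\n",
		       read, offset, offset + IB_OB_READ_TIMES * 4);
		offset += SYSFS_OFFSET;
		if ((offset % queue_size) == 0)
			offset = 0;	/* wrap to the start of the dump area */
	}
	return 0;
}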
+ */ +static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + u32 read_size = + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024; + static u32 start, end, count; + u32 max_read_times = 32; + u32 max_count = (read_size * 1024) / (max_read_times * 4); + u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr; + + if ((count % max_count) == 0) { + start = 0; + end = max_read_times; + count = 0; + } else { + start = end; + end = end + max_read_times; + } + + for (; start < end; start++) + str += sprintf(str, "%08x ", *(temp+start)); + count++; + return str - buf; +} +static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); + +/** + * pm8001_ctl_fatal_log_show - fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ + +static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm80xx_get_fatal_dump(cdev, attr, buf); + return count; +} + +static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); + +/** + * non_fatal_log_show - non fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t non_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u32 count; + + count = pm80xx_get_non_fatal_dump(cdev, attr, buf); + return count; +} +static DEVICE_ATTR_RO(non_fatal_log); + +static ssize_t non_fatal_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return sysfs_emit(buf, "%08x\n", + pm8001_ha->non_fatal_count); +} + +static ssize_t non_fatal_count_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int val = 0; + + if (kstrtoint(buf, 16, &val) != 0) + return -EINVAL; + + pm8001_ha->non_fatal_count = val; + return strlen(buf); +} +static DEVICE_ATTR_RW(non_fatal_count); + +/** + * pm8001_ctl_gsm_log_show - gsm dump collection + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); + return count; +} + +static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); + +#define FLASH_CMD_NONE 0x00 +#define FLASH_CMD_UPDATE 0x01 +#define FLASH_CMD_SET_NVMD 0x02 + +struct flash_command { + u8 command[8]; + int code; +}; + +static const struct flash_command flash_command_table[] = { + {"set_nvmd", FLASH_CMD_SET_NVMD}, + {"update", FLASH_CMD_UPDATE}, + {"", FLASH_CMD_NONE} /* Last entry should be NULL. 
*/ +}; + +struct error_fw { + char *reason; + int err_code; +}; + +static const struct error_fw flash_error_table[] = { + {"Failed to open fw image file", FAIL_OPEN_BIOS_FILE}, + {"image header mismatch", FLASH_UPDATE_HDR_ERR}, + {"image offset mismatch", FLASH_UPDATE_OFFSET_ERR}, + {"image CRC Error", FLASH_UPDATE_CRC_ERR}, + {"image length Error.", FLASH_UPDATE_LENGTH_ERR}, + {"Failed to program flash chip", FLASH_UPDATE_HW_ERR}, + {"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED}, + {"Flash update disabled.", FLASH_UPDATE_DISABLED}, + {"Flash in progress", FLASH_IN_PROGRESS}, + {"Image file size Error", FAIL_FILE_SIZE}, + {"Input parameter error", FAIL_PARAMETERS}, + {"Out of memory", FAIL_OUT_MEMORY}, + {"OK", 0} /* Last entry err_code = 0. */ +}; + +static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer; + u32 ret; + u32 length = 1024 * 5 + sizeof(*payload) - 1; + + if (pm8001_ha->fw_image->size > 4096) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } + + ioctlbuffer = kzalloc(length, GFP_KERNEL); + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + pm8001_ha->fw_image->size); + payload->wr_length = pm8001_ha->fw_image->size; + payload->id = 0; + payload->minor_function = 0x1; + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } + wait_for_completion(&completion); +out: + kfree(ioctlbuffer); + return ret; +} + +static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_ioctl_payload *payload; + DECLARE_COMPLETION_ONSTACK(completion); + u8 *ioctlbuffer; + struct fw_control_info *fwControl; + __be32 partitionSizeTmp; + u32 partitionSize; + u32 loopNumber, loopcount; + struct pm8001_fw_image_header *image_hdr; + u32 sizeRead = 0; + u32 ret = 0; + u32 length = 1024 * 16 + sizeof(*payload) - 1; + u32 fc_len; + u8 *read_buf; + + if (pm8001_ha->fw_image->size < 28) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } + ioctlbuffer = kzalloc(length, GFP_KERNEL); + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; + while (sizeRead < pm8001_ha->fw_image->size) { + partitionSizeTmp = + *(__be32 *)((u8 *)&image_hdr->image_length + sizeRead); + partitionSize = be32_to_cpu(partitionSizeTmp); + loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN, + IOCTL_BUF_SIZE); + for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { + payload = (struct pm8001_ioctl_payload *)ioctlbuffer; + payload->wr_length = 1024*16; + payload->id = 0; + fwControl = + (struct fw_control_info *)&payload->func_specific; + fwControl->len = IOCTL_BUF_SIZE; /* IN */ + fwControl->size = partitionSize + HEADER_LEN;/* IN */ + fwControl->retcode = 0;/* OUT */ + fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */ + + /* + * for the last chunk of data in case file size is + * not even with 4k, load only the rest + */ + + read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead; + fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE; + + if (loopcount - loopNumber == 1 && fc_len) { + fwControl->len = fc_len; + memcpy((u8 *)fwControl->buffer, read_buf, fc_len); + 
sizeRead += fc_len; + } else { + memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE); + sizeRead += IOCTL_BUF_SIZE; + } + + pm8001_ha->nvmd_completion = &completion; + ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } + wait_for_completion(&completion); + if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) { + pm8001_ha->fw_status = fwControl->retcode; + ret = -EFAULT; + goto out; + } + } + } +out: + kfree(ioctlbuffer); + return ret; +} +static ssize_t pm8001_store_update_fw(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *cmd_ptr, *filename_ptr; + int res, i; + int flash_command = FLASH_CMD_NONE; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* this test protects us from running two flash processes at once, + * so we should start with this test */ + if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) + return -EINPROGRESS; + pm8001_ha->fw_status = FLASH_IN_PROGRESS; + + cmd_ptr = kcalloc(count, 2, GFP_KERNEL); + if (!cmd_ptr) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; + } + + filename_ptr = cmd_ptr + count; + res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); + if (res != 2) { + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; + } + + for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { + if (!memcmp(flash_command_table[i].command, + cmd_ptr, strlen(cmd_ptr))) { + flash_command = flash_command_table[i].code; + break; + } + } + if (flash_command == FLASH_CMD_NONE) { + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; + } + + ret = request_firmware(&pm8001_ha->fw_image, + filename_ptr, + pm8001_ha->dev); + + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to load firmware image file %s, error %d\n", + filename_ptr, ret); + pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE; + goto out; + } + + if (FLASH_CMD_UPDATE == flash_command) + ret = pm8001_update_flash(pm8001_ha); + else + ret = pm8001_set_nvmd(pm8001_ha); + + release_firmware(pm8001_ha->fw_image); +out: + kfree(cmd_ptr); + + if (ret) + return ret; + + pm8001_ha->fw_status = FLASH_OK; + return count; +} + +static ssize_t pm8001_show_update_fw(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + int i; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + for (i = 0; flash_error_table[i].err_code != 0; i++) { + if (flash_error_table[i].err_code == pm8001_ha->fw_status) + break; + } + if (pm8001_ha->fw_status != FLASH_IN_PROGRESS) + pm8001_ha->fw_status = FLASH_OK; + + return snprintf(buf, PAGE_SIZE, "status=%x %s\n", + flash_error_table[i].err_code, + flash_error_table[i].reason); +} +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, + pm8001_show_update_fw, pm8001_store_update_fw); + +static const char *const mpiStateText[] = { + "MPI is not initialized", + "MPI is successfully initialized", + "MPI termination is in progress", + "MPI initialization failed with error in [31:16]" +}; + +/** + * ctl_mpi_state_show - controller MPI state check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
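pm8001_update_flash() above pushes each firmware partition (header plus payload) to the controller in IOCTL_BUF_SIZE pieces, and only the final piece may be short. The chunking arithmetic in isolation, for an arbitrary example partition size:

#include <stdio.h>

#define IOCTL_BUF_SIZE 4096
#define HEADER_LEN 28
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int partition_size = 10000;	/* hypothetical, from image header */
	unsigned int total = partition_size + HEADER_LEN;
	unsigned int loopcount = DIV_ROUND_UP(total, IOCTL_BUF_SIZE);
	unsigned int tail = total % IOCTL_BUF_SIZE;

	for (unsigned int chunk = 0; chunk < loopcount; chunk++) {
		/* last chunk carries the remainder when the total is not
		 * a multiple of IOCTL_BUF_SIZE */
		unsigned int len = (chunk == loopcount - 1 && tail) ?
					tail : IOCTL_BUF_SIZE;

		printf("chunk %u: offset %u, length %u\n",
		       chunk, chunk * IOCTL_BUF_SIZE, len);
	}
	return 0;
}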
+ */ +static ssize_t ctl_mpi_state_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int mpidw0; + + mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0); + return sysfs_emit(buf, "%s\n", mpiStateText[mpidw0 & 0x0003]); +} +static DEVICE_ATTR_RO(ctl_mpi_state); + +/** + * ctl_hmi_error_show - controller MPI initialization fails + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_hmi_error_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int mpidw0; + + mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0); + return sysfs_emit(buf, "0x%08x\n", (mpidw0 >> 16)); +} +static DEVICE_ATTR_RO(ctl_hmi_error); + +/** + * ctl_raae_count_show - controller raae count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_raae_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int raaecnt; + + raaecnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 12); + return sysfs_emit(buf, "0x%08x\n", raaecnt); +} +static DEVICE_ATTR_RO(ctl_raae_count); + +/** + * ctl_iop0_count_show - controller iop0 count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t ctl_iop0_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int iop0cnt; + + iop0cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 16); + return sysfs_emit(buf, "0x%08x\n", iop0cnt); +} +static DEVICE_ATTR_RO(ctl_iop0_count); + +/** + * ctl_iop1_count_show - controller iop1 count check + * @cdev: pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
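ctl_mpi_state_show() and ctl_hmi_error_show() above both decode dword 0 of the general status table: bits [1:0] index the MPI state strings and bits [31:16] hold the initialization error code. A minimal sketch with a fabricated register value:

#include <stdint.h>
#include <stdio.h>

static const char *const mpi_state_text[] = {
	"MPI is not initialized",
	"MPI is successfully initialized",
	"MPI termination is in progress",
	"MPI initialization failed with error in [31:16]",
};

int main(void)
{
	uint32_t mpidw0 = 0x00000001;	/* hypothetical GST dword 0 */

	printf("state: %s\n", mpi_state_text[mpidw0 & 0x0003]);
	printf("hmi error: 0x%08x\n", (unsigned int)(mpidw0 >> 16));
	return 0;
}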
+ */ +static ssize_t ctl_iop1_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + unsigned int iop1cnt; + + iop1cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 20); + return sysfs_emit(buf, "0x%08x\n", iop1cnt); + +} +static DEVICE_ATTR_RO(ctl_iop1_count); + +static struct attribute *pm8001_host_attrs[] = { + &dev_attr_interface_rev.attr, + &dev_attr_controller_fatal_error.attr, + &dev_attr_fw_version.attr, + &dev_attr_update_fw.attr, + &dev_attr_aap_log.attr, + &dev_attr_iop_log.attr, + &dev_attr_fatal_log.attr, + &dev_attr_non_fatal_log.attr, + &dev_attr_non_fatal_count.attr, + &dev_attr_gsm_log.attr, + &dev_attr_max_out_io.attr, + &dev_attr_max_devices.attr, + &dev_attr_max_sg_list.attr, + &dev_attr_sas_spec_support.attr, + &dev_attr_logging_level.attr, + &dev_attr_event_log_size.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_bios_version.attr, + &dev_attr_ib_log.attr, + &dev_attr_ob_log.attr, + &dev_attr_ila_version.attr, + &dev_attr_inc_fw_ver.attr, + &dev_attr_ctl_mpi_state.attr, + &dev_attr_ctl_hmi_error.attr, + &dev_attr_ctl_raae_count.attr, + &dev_attr_ctl_iop0_count.attr, + &dev_attr_ctl_iop1_count.attr, + NULL, +}; + +static const struct attribute_group pm8001_host_attr_group = { + .attrs = pm8001_host_attrs +}; + +const struct attribute_group *pm8001_host_groups[] = { + &pm8001_host_attr_group, + NULL +}; diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h new file mode 100644 index 0000000000..4743f0de22 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -0,0 +1,68 @@ + /* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef PM8001_CTL_H_INCLUDED +#define PM8001_CTL_H_INCLUDED + +#define IOCTL_BUF_SIZE 4096 +#define HEADER_LEN 28 +#define SIZE_OFFSET 16 + +#define BIOSOFFSET 56 +#define BIOS_OFFSET_LIMIT 61 + +#define FLASH_OK 0x000000 +#define FAIL_OPEN_BIOS_FILE 0x000100 +#define FAIL_FILE_SIZE 0x000a00 +#define FAIL_PARAMETERS 0x000b00 +#define FAIL_OUT_MEMORY 0x000c00 +#define FLASH_IN_PROGRESS 0x001000 + +#define IB_OB_READ_TIMES 256 +#define SYSFS_OFFSET 1024 +#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024) +#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024) + +static inline u32 pm8001_ctl_aap1_memmap(u8 *ptr, int idx, int off) +{ + return *(u32 *)(ptr + idx * 32 + off); +} +#endif /* PM8001_CTL_H_INCLUDED */ + diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h new file mode 100644 index 0000000000..501b574239 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -0,0 +1,143 @@ +/* + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
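pm8001_ctl_aap1_memmap() in pm8001_ctl.h treats the AAP1 event log as rows of 32 bytes, and the aap_log attribute prints each row as eight 32-bit words (offsets 0 through 28). A stand-alone model over a fabricated buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read the 32-bit word at row*32 + off, as the driver helper does. */
static uint32_t aap1_memmap(const uint8_t *ptr, int idx, int off)
{
	uint32_t v;

	memcpy(&v, ptr + idx * 32 + off, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t log[64];	/* fabricated test data: two 32-byte rows */

	for (unsigned int i = 0; i < sizeof(log); i++)
		log[i] = (uint8_t)i;

	for (int row = 0; row < 2; row++) {
		for (int off = 0; off < 32; off += 4)
			printf("0x%08x ", (unsigned int)aap1_memmap(log, row, off));
		printf("\n");
	}
	return 0;
}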
+ * + */ + +#ifndef _PM8001_DEFS_H_ +#define _PM8001_DEFS_H_ + +enum chip_flavors { + chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019, + chip_8074, + chip_8076, + chip_8077, + chip_8006, + chip_8070, + chip_8072 +}; + +enum phy_speed { + PHY_SPEED_15 = 0x01, + PHY_SPEED_30 = 0x02, + PHY_SPEED_60 = 0x04, + PHY_SPEED_120 = 0x08, +}; + +enum data_direction { + DATA_DIR_NONE = 0x0, /* NO TRANSFER */ + DATA_DIR_IN = 0x01, /* INBOUND */ + DATA_DIR_OUT = 0x02, /* OUTBOUND */ + DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */ +}; + +enum port_type { + PORT_TYPE_SAS = (1L << 1), + PORT_TYPE_SATA = (1L << 0), +}; + +/* driver compile-time configuration */ +#define PM8001_MAX_CCB 1024 /* max ccbs supported */ +#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ +#define PM8001_MAX_INB_NUM 64 +#define PM8001_MAX_OUTB_NUM 64 +#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ + +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + +/* unchangeable hardware details */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define PM8001_RESERVE_SLOT 8 + +#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 +#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG + +enum memory_region_num { + AAP1 = 0x0, /* application acceleration processor */ + IOP, /* IO processor */ + NVMD, /* NVM device */ + FW_FLASH, /* memory for fw flash update */ + FORENSIC_MEM, /* memory for fw forensic data */ + USI_MAX_MEMCNT_BASE +}; +#define PM8001_EVENT_LOG_SIZE (128 * 1024) + +/** + * maximum DMA memory regions(number of IBQ + number of IBQ CI + * + number of OBQ + number of OBQ PI) + */ +#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \ + + (2 * PM8001_MAX_OUTB_NUM))) +/*error code*/ +enum mpi_err { + MPI_IO_STATUS_SUCCESS = 0x0, + MPI_IO_STATUS_BUSY = 0x01, + MPI_IO_STATUS_FAIL = 0x02, +}; + +/** + * Phy Control constants + */ +enum phy_control_type { + PHY_LINK_RESET = 0x01, + PHY_HARD_RESET = 0x02, + PHY_NOTIFY_ENABLE_SPINUP = 0x10, +}; + +enum pm8001_hba_info_flags { + PM8001F_INIT_TIME = (1U << 0), + PM8001F_RUN_TIME = (1U << 1), +}; + +/** + * Phy Status + */ +#define PHY_LINK_DISABLE 0x00 +#define PHY_LINK_DOWN 0x01 +#define PHY_STATE_LINK_UP_SPCV 0x2 +#define PHY_STATE_LINK_UP_SPC 0x1 + +#endif diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c new file mode 100644 index 0000000000..90069c7b16 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -0,0 +1,4838 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
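The DMA region count defined in pm8001_defs.h above is the five fixed regions plus one ring region and one consumer/producer index region per possible inbound and outbound queue. Evaluating the macro with the header's own constants:

#include <stdio.h>

#define PM8001_MAX_INB_NUM 64
#define PM8001_MAX_OUTB_NUM 64

enum memory_region_num {
	AAP1 = 0x0, IOP, NVMD, FW_FLASH, FORENSIC_MEM, USI_MAX_MEMCNT_BASE
};

#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \
			+ (2 * PM8001_MAX_OUTB_NUM)))

int main(void)
{
	/* 5 fixed regions + 2*64 inbound + 2*64 outbound = 261 */
	printf("USI_MAX_MEMCNT = %d\n", USI_MAX_MEMCNT);
	return 0;
}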
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm8001_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + #include "pm80xx_tracepoints.h" + +/** + * read_main_config_table - read the configure table and save it. + * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = + pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + 0x18)); + } +} + +/** + * init_default_table_values - init the default table. 
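read_inbnd_queue_table() and read_outbnd_queue_table() above walk per-queue table entries that are 0x20 and 0x24 bytes wide respectively, picking up the PCI-BAR word at +0x14 and the ring-index register offset at +0x18 of each entry. The offset arithmetic for the first few queues:

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 3; i++) {
		unsigned int ib = i * 0x20;	/* inbound entry stride  */
		unsigned int ob = i * 0x24;	/* outbound entry stride */

		printf("IBQ%d: bar word @ 0x%02x, PI offset @ 0x%02x\n",
		       i, ib + 0x14, ib + 0x18);
		printf("OBQ%d: bar word @ 0x%02x, CI offset @ 0x%02x\n",
		       i, ob + 0x14, ob + 0x18);
	}
	return 0;
}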
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ib_offset + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0); + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; + 
pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ob_offset + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = + 0 | (10 << 16) | (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0); + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, 0x24, + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, 0x28, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); + pm8001_mw32(address, 0x2C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); + pm8001_mw32(address, 0x30, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); + pm8001_mw32(address, 0x34, + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); + pm8001_mw32(address, 0x38, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); + pm8001_mw32(address, 0x3C, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); + pm8001_mw32(address, 0x40, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); + pm8001_mw32(address, 0x44, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); + pm8001_mw32(address, 0x48, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); + pm8001_mw32(address, 0x4C, + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid4_7); + pm8001_mw32(address, 0x50, + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); + pm8001_mw32(address, 0x54, + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); + pm8001_mw32(address, 0x60, + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); + pm8001_mw32(address, 0x64, + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); + pm8001_mw32(address, 0x6C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); + pm8001_mw32(address, 0x70, + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. 
+ * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. + * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + 0x00, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + 0x04, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + 0x08, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + 0x0C, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + 0x10, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + 0x1C, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * pm8001_bar4_shift - function is called to shift BAR base address + * @pm8001_ha : our hba card information + * @shiftValue : shifting value in memory bar. + */ +int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) +{ + u32 regVal; + unsigned long start; + + /* program the inbound AXI translation Lower Address */ + pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue); + + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW); + } while ((regVal != shiftValue) && time_before(jiffies, start)); + + if (regVal != shiftValue) { + pm8001_dbg(pm8001_ha, INIT, + "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n", + regVal); + return -1; + } + return 0; +} + +/** + * mpi_set_phys_g3_with_ssc + * @pm8001_ha: our hba card information + * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc. 
+ */ +static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, + u32 SSCbit) +{ + u32 offset, i; + unsigned long flags; + +#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 +#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074 +#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074 +#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12 +#define PHY_G3_WITH_SSC_BIT_SHIFT 13 +#define SNW3_PHY_CAPABILITIES_PARITY 31 + + /* + * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3) + * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7) + */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, + SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + + for (i = 0; i < 4; i++) { + offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; + pm8001_cw32(pm8001_ha, 2, offset, 0x80001501); + } + /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ + if (-1 == pm8001_bar4_shift(pm8001_ha, + SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 4; i < 8; i++) { + offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); + pm8001_cw32(pm8001_ha, 2, offset, 0x80001501); + } + /************************************************************* + Change the SSC upspreading value to 0x0 so that upspreading is disabled. + Device MABC SMOD0 Controls + Address: (via MEMBASE-III): + Using shifted destination address 0x0_0000: with Offset 0xD8 + + 31:28 R/W Reserved Do not change + 27:24 R/W SAS_SMOD_SPRDUP 0000 + 23:20 R/W SAS_SMOD_SPRDDN 0000 + 19:0 R/W Reserved Do not change + Upon power-up this register will read as 0x8990c016, + and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000 + so that the written value will be 0x8090c016. + This will ensure only down-spreading SSC is enabled on the SPC. + *************************************************************/ + pm8001_cr32(pm8001_ha, 2, 0xd8); + pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016); + + /*set the shifted destination address to 0x0 to avoid error operation */ + pm8001_bar4_shift(pm8001_ha, 0x0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; +} + +/** + * mpi_set_open_retry_interval_reg + * @pm8001_ha: our hba card information + * @interval: interval time for each OPEN_REJECT (RETRY). The units are in 1us. 
+ */ +static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha, + u32 interval) +{ + u32 offset; + u32 value; + u32 i; + unsigned long flags; + +#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000 +#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000 +#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4 +#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4 +#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF + + value = interval & OPEN_RETRY_INTERVAL_REG_MASK; + spin_lock_irqsave(&pm8001_ha->lock, flags); + /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/ + if (-1 == pm8001_bar4_shift(pm8001_ha, + OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 0; i < 4; i++) { + offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i; + pm8001_cw32(pm8001_ha, 2, offset, value); + } + + if (-1 == pm8001_bar4_shift(pm8001_ha, + OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; + } + for (i = 4; i < 8; i++) { + offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4); + pm8001_cw32(pm8001_ha, 2, offset, value); + } + /*set the shifted destination address to 0x0 to avoid error operation */ + pm8001_bar4_shift(pm8001_ha, 0x0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return; +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000;/* 1 sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPC_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization */ + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK)) + return -1; + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value, value1; + u32 max_wait_count; + /* check error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* check AAP error */ + if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* check IOP error */ + if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) { + /* error state */ + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + return -1; + } + + /* bit 4-31 of scratch pad1 should be zeros if it is not + in error state*/ + if (value & SCRATCH_PAD1_STATE_MASK) { + /* error case */ + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + return -1; + } + + /* bit 2, 4-31 of scratch pad2 should be zeros if it is not + in error state */ + if (value1 & SCRATCH_PAD2_STATE_MASK) { + /* error case */ + return -1; + } + + max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */ + + /* wait until scratch pad 1 and 2 registers in ready state */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RDY; + value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_RDY; + if ((--max_wait_count) == 0) + return -1; + } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); + return 0; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, 0x44); + offset = value & 0x03FFFFFF; + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20); +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. 
+ * @pm8001_ha: our hba card information + */ +static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + u32 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE); + return -1; + } + } + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + /* update main config table, inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < pm8001_ha->max_q_num; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < pm8001_ha->max_q_num; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controllers do not require these operations */ + if (deviceid != 0x8081 && deviceid != 0x0042) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n"); + } else + return -EBUSY; + /* This register is a 16-bit timer with a resolution of 1us. This is the + timer used for interrupt delay/coalescing in the PCIe Application Layer. + Zero is not a valid value. A value of 1 in the register will cause the + interrupts to be normal. 
A value greater than 1 will cause coalescing + delays.*/ + pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1); + pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0); + return 0; +} + +static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha) +{ +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE); + return -1; + } + } + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000;/* 1 sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPC_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n", + value); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 1 * 1000 * 1000; /* 1 sec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK); + return -1; + } + return 0; +} + +/** + * soft_reset_ready_check - Function to check FW is ready for soft reset. 
+ * @pm8001_ha: our hba card information + */ +static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 regVal, regVal1, regVal2; + if (mpi_uninit_check(pm8001_ha) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n"); + return -1; + } + /* read the scratch pad 2 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) + & SCRATCH_PAD2_FWRDY_RST; + if (regVal == SCRATCH_PAD2_FWRDY_RST) { + pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n"); + } else { + unsigned long flags; + /* Trigger NMI twice via RB6 */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "Shift Bar4 to 0x%x failed\n", + RB6_ACCESS_REG); + return -1; + } + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, + RB6_MAGIC_NUMBER_RST); + pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST); + /* wait for 100 ms */ + mdelay(100); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) & + SCRATCH_PAD2_FWRDY_RST; + if (regVal != SCRATCH_PAD2_FWRDY_RST) { + regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n", + regVal1, regVal2); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + } + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that all the FW + * register status is cleared back to its original state. 
+ * @pm8001_ha: our hba card information + */ +static int +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regVal, toggleVal; + u32 max_wait_count; + u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ + unsigned long flags; + + /* step1: Check FW is ready for soft reset */ + if (soft_reset_ready_check(pm8001_ha) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n"); + return -1; + } + + /* step 2: clear NMI status register on AAP1 and IOP, write the same + value to clear */ + /* map 0x60000 to BAR4(0x20), BAR2(win) */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + MBIC_AAP1_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP); + pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0); + /* map 0x70000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + MBIC_IOP_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1); + pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE); + pm8001_dbg(pm8001_ha, INIT, "PCIE -Event Interrupt Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT); + pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE); + pm8001_dbg(pm8001_ha, INIT, "PCIE -Error Interrupt Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0); + + regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT); + pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal); + pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal); + + /* read the scratch pad 1 register bit 2 */ + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) + & SCRATCH_PAD1_RST; + toggleVal = regVal ^ SCRATCH_PAD1_RST; + + /* set signature in host scratch pad0 register to tell SPC that the + host performs the soft reset */ + pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature); + + /* read required registers for confirmming */ + /* map 0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE); + return -1; + } + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 3: host read GSM Configuration and Reset register */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to low */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal &= ~(0x00003b00); + /* host write GSM Configuration and Reset register */ + 
pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 4: */ + /* disable GSM - Read Address Parity Check */ + regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n", + regVal1); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)); + + /* disable GSM - Write Address Parity Check */ + regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n", + regVal2); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)); + + /* disable GSM - Write Data Parity Check */ + regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n", + regVal3); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)); + + /* step 5: delay 10 usec */ + udelay(10); + /* step 5-b: set GPIO-0 output control to tristate anyway */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n", + GPIO_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET); + pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n", + regVal); + /* set GPIO-0 output control to tri-state */ + regVal &= 0xFFFFFFFC; + pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal); + + /* Step 6: Reset the IOP and AAP1 */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n", + regVal); + regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 7: Reset the BDMA/OSSP */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n", + regVal); + regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 8: delay 10 usec */ + udelay(10); + + /* step 9: bring the BDMA and OSSP out of reset */ + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + pm8001_dbg(pm8001_ha, INIT, + "Top Register before bringing up BDMA/OSSP:= 0x%x\n", + regVal); + regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 10: delay 10 usec */ + udelay(10); + + /* step 11: reads and sets the GSM Configuration and Reset Register */ + /* map 
0x0700000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n", + GSM_ADDR_BASE); + return -1; + } + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET); + /* Put those bits to high */ + /* GSM XCBI offset = 0x70 0000 + 0x00 Bit 13 COM_SLV_SW_RSTB 1 + 0x00 Bit 12 QSSP_SW_RSTB 1 + 0x00 Bit 11 RAAE_SW_RSTB 1 + 0x00 Bit 9 RB_1_SW_RSTB 1 + 0x00 Bit 8 SM_SW_RSTB 1 + */ + regVal |= (GSM_CONFIG_RESET_VALUE); + pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal); + pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)); + + /* step 12: Restore GSM - Read Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK); + /* just for debugging */ + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n", + regVal); + pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1); + pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)); + /* Restore GSM - Write Address Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)); + /* Restore GSM - Write Data Parity Check */ + regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK); + pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3); + pm8001_dbg(pm8001_ha, INIT, + "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n", + pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)); + + /* step 13: bring the IOP and AAP1 out of reset */ + /* map 0x00000 to BAR4(0x20), BAR2(win) */ + if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n", + SPC_TOP_LEVEL_ADDR_BASE); + return -1; + } + regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET); + regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS); + pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal); + + /* step 14: delay 10 usec - Normal Mode */ + udelay(10); + /* check Soft Reset Normal mode or Soft Reset HDA mode */ + if (signature == SPC_SOFT_RESET_SIGNATURE) { + /* step 15 (Normal Mode): wait until scratch pad1 register + bit 2 toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + do { + udelay(1); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_RST; + } while ((regVal != toggleVal) && (--max_wait_count)); + + if (!max_wait_count) { + regVal = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_1); + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n", + toggleVal, regVal); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD2 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + 
MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + + /* step 16 (Normal) - Clear ODMR and ODCR */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + + /* step 17 (Normal Mode): wait for the FW and IOP to get + ready - 1 sec timeout */ + /* Wait for the SPC Configuration Table to be ready */ + if (check_fw_ready(pm8001_ha) == -1) { + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + /* return error if MPI Configuration Table not ready */ + pm8001_dbg(pm8001_ha, INIT, + "FW not ready SCRATCH_PAD1 = 0x%x\n", + regVal); + regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + /* return error if MPI Configuration Table not ready */ + pm8001_dbg(pm8001_ha, INIT, + "FW not ready SCRATCH_PAD2 = 0x%x\n", + regVal); + pm8001_dbg(pm8001_ha, INIT, + "SCRATCH_PAD0 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, INIT, + "SCRATCH_PAD3 value = 0x%x\n", + pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_3)); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -1; + } + } + pm8001_bar4_shift(pm8001_ha, 0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + + pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n"); + return 0; +} + +static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + u32 regVal; + pm8001_dbg(pm8001_ha, INIT, "chip reset start\n"); + + /* do SPC chip reset. */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal &= ~(SPC_REG_RESET_DEVICE); + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* bring chip reset out of reset */ + regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); + regVal |= SPC_REG_RESET_DEVICE; + pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); + + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n"); +} + +/** + * pm8001_chip_iounmap - unmap the I/O memory regions that were mapped during initialization. 
+ * @pm8001_ha: our hba card information + */ +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +{ + s8 bar, logical = 0; + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pm8001_ha->io_mem[logical].memvirtaddr) { + iounmap(pm8001_ha->io_mem[logical].memvirtaddr); + logical++; + } + } +} + +#ifndef PM8001_USE_MSIX +/** + * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL); +} + +#else + +/** + * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @int_vec_idx: interrupt number to enable + */ +static void +pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + u32 value; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE); + value = (1 << int_vec_idx); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value); + +} + +/** + * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @int_vec_idx: interrupt number to disable + */ +static void +pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, + u32 int_vec_idx) +{ + u32 msi_index; + msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; + msi_index += MSIX_TABLE_BASE; + pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE); +} +#endif + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: unused + */ +static void +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); +#else + pm8001_chip_intx_interrupt_enable(pm8001_ha); +#endif +} + +/** + * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + * @vec: unused + */ +static void +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); +#else + pm8001_chip_intx_interrupt_disable(pm8001_ha); +#endif +} + +/** + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. + * @circularQ: the inbound queue we want to transfer to HBA. + * @messageSize: the message size of this transfer, normally it is 64 bytes + * @messagePtr: the pointer to message. 
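+ * + * Return: 0 on success, or -1 if the requested message size exceeds the IOMB size or the inbound queue is full (in which case *messagePtr is set to NULL).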
+ */ +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr) +{ + u32 offset, consumer_index; + struct mpi_msg_hdr *msgHeader; + u8 bcCount = 1; /* only support single buffer */ + + /* Checks is the requested message size can be allocated in this queue*/ + if (messageSize > IOMB_SIZE_SPCV) { + *messagePtr = NULL; + return -1; + } + + /* Stores the new consumer index */ + consumer_index = pm8001_read_32(circularQ->ci_virt); + circularQ->consumer_index = cpu_to_le32(consumer_index); + if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) == + le32_to_cpu(circularQ->consumer_index)) { + *messagePtr = NULL; + return -1; + } + /* get memory IOMB buffer address */ + offset = circularQ->producer_idx * messageSize; + /* increment to next bcCount element */ + circularQ->producer_idx = (circularQ->producer_idx + bcCount) + % PM8001_MPI_QUEUE; + /* Adds that distance to the base of the region virtual address plus + the message header size*/ + msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); + *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr); + return 0; +} + +/** + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. + * @pm8001_ha: our hba card information + * @q_index: the index in the inbound queue we want to transfer to HBA. + * @opCode: the operation code represents commands which LLDD and fw recognized. + * @payload: the command payload of each operation command. + * @nb: size in bytes of the command payload + * @responseQueue: queue to interrupt on w/ command response (if any) + */ +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + u32 q_index, u32 opCode, void *payload, size_t nb, + u32 responseQueue) +{ + u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; + void *pMessage; + unsigned long flags; + struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; + int rv; + u32 htag = le32_to_cpu(*(__le32 *)payload); + + trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index, + circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index)); + + if (WARN_ON(q_index >= pm8001_ha->max_q_num)) + return -EINVAL; + + spin_lock_irqsave(&circularQ->iq_lock, flags); + rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage); + if (rv < 0) { + pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n"); + rv = -ENOMEM; + goto done; + } + + if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr))) + nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr); + memcpy(pMessage, payload, nb); + if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size) + memset(pMessage + nb, 0, pm8001_ha->iomb_size - + (nb + sizeof(struct mpi_msg_hdr))); + + /*Build the header*/ + Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) + | ((responseQueue & 0x3F) << 16) + | ((category & 0xF) << 12) | (opCode & 0xFFF)); + + pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header)); + /*Update the PI to the firmware*/ + pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, + circularQ->pi_offset, circularQ->producer_idx); + pm8001_dbg(pm8001_ha, DEVIO, + "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index); +done: + spin_unlock_irqrestore(&circularQ->iq_lock, flags); + return rv; +} + +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc) +{ + u32 producer_index; + struct 
mpi_msg_hdr *msgHeader; + struct mpi_msg_hdr *pOutBoundMsgHeader; + + msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); + pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + + circularQ->consumer_idx * pm8001_ha->iomb_size); + if (pOutBoundMsgHeader != msgHeader) { + pm8001_dbg(pm8001_ha, FAIL, + "consumer_idx = %d msgHeader = %p\n", + circularQ->consumer_idx, msgHeader); + + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + pm8001_dbg(pm8001_ha, FAIL, + "consumer_idx = %d producer_index = %dmsgHeader = %p\n", + circularQ->consumer_idx, + circularQ->producer_index, msgHeader); + return 0; + } + /* free the circular queue buffer elements associated with the message*/ + circularQ->consumer_idx = (circularQ->consumer_idx + bc) + % PM8001_MPI_QUEUE; + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset, + circularQ->consumer_idx); + /* Update the producer index from SPC*/ + producer_index = pm8001_read_32(circularQ->pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n", + circularQ->consumer_idx, circularQ->producer_index); + return 0; +} + +/** + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. + * @pm8001_ha: our hba card information + * @circularQ: the outbound queue table. + * @messagePtr1: the message contents of this outbound message. + * @pBC: the message size. + */ +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC) +{ + struct mpi_msg_hdr *msgHeader; + __le32 msgHeader_tmp; + u32 header_tmp; + do { + /* If there are not-yet-delivered messages ... 
*/ + if (le32_to_cpu(circularQ->producer_index) + != circularQ->consumer_idx) { + /*Get the pointer to the circular queue buffer element*/ + msgHeader = (struct mpi_msg_hdr *) + (circularQ->base_virt + + circularQ->consumer_idx * pm8001_ha->iomb_size); + /* read header */ + header_tmp = pm8001_read_32(msgHeader); + msgHeader_tmp = cpu_to_le32(header_tmp); + pm8001_dbg(pm8001_ha, DEVIO, + "outbound opcode msgheader:%x ci=%d pi=%d\n", + msgHeader_tmp, circularQ->consumer_idx, + circularQ->producer_index); + if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) { + if (OPC_OUB_SKIP_ENTRY != + (le32_to_cpu(msgHeader_tmp) & 0xfff)) { + *messagePtr1 = + ((u8 *)msgHeader) + + sizeof(struct mpi_msg_hdr); + *pBC = (u8)((le32_to_cpu(msgHeader_tmp) + >> 24) & 0x1f); + pm8001_dbg(pm8001_ha, IO, + ": CI=%d PI=%d msgHeader=%x\n", + circularQ->consumer_idx, + circularQ->producer_index, + msgHeader_tmp); + return MPI_IO_STATUS_SUCCESS; + } else { + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((le32_to_cpu(msgHeader_tmp) + >> 24) & 0x1f)) + % PM8001_MPI_QUEUE; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, + circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); + } + } else { + circularQ->consumer_idx = + (circularQ->consumer_idx + + ((le32_to_cpu(msgHeader_tmp) >> 24) & + 0x1f)) % PM8001_MPI_QUEUE; + msgHeader_tmp = 0; + pm8001_write_32(msgHeader, 0, 0); + /* update the CI of outbound queue */ + pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, + circularQ->ci_offset, + circularQ->consumer_idx); + return MPI_IO_STATUS_FAIL; + } + } else { + u32 producer_index; + void *pi_virt = circularQ->pi_virt; + /* spurious interrupt during setup if + * kexec-ing and driver doing a doorbell access + * with the pre-kexec oq interrupt setup + */ + if (!pi_virt) + break; + /* Update the producer index from SPC */ + producer_index = pm8001_read_32(pi_virt); + circularQ->producer_index = cpu_to_le32(producer_index); + } + } while (le32_to_cpu(circularQ->producer_index) != + circularQ->consumer_idx); + /* while we don't have any more not-yet-delivered message */ + /* report empty */ + return MPI_IO_STATUS_BUSY; +} + +void pm8001_work_fn(struct work_struct *work) +{ + struct pm8001_work *pw = container_of(work, struct pm8001_work, work); + struct pm8001_device *pm8001_dev; + struct domain_device *dev; + + /* + * So far, all users of this stash an associated structure here. + * If we get here, and this pointer is null, then the action + * was cancelled. This nullification happens when the device + * goes away. 
+ */ + if (pw->handler != IO_FATAL_ERROR) { + pm8001_dev = pw->data; /* Most stash device structure */ + if ((pm8001_dev == NULL) + || ((pw->handler != IO_XFER_ERROR_BREAK) + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { + kfree(pw); + return; + } + } + + switch (pw->handler) { + case IO_XFER_ERROR_BREAK: + { /* This one stashes the sas_task instead */ + struct sas_task *t = (struct sas_task *)pm8001_dev; + struct pm8001_ccb_info *ccb; + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + unsigned long flags, flags1; + struct task_status_struct *ts; + int i; + + if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC) + break; /* Task still on lu */ + spin_lock_irqsave(&pm8001_ha->lock, flags); + + spin_lock_irqsave(&t->task_state_lock, flags1); + if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + break; /* Task got completed by another */ + } + spin_unlock_irqrestore(&t->task_state_lock, flags1); + + /* Search for a possible ccb that matches the task */ + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if ((ccb->ccb_tag != PM8001_INVALID_TAG) && + (ccb->task == t)) + break; + } + if (!ccb) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + break; /* Task got freed by another */ + } + ts = &t->task_status; + ts->resp = SAS_TASK_COMPLETE; + /* Force the midlayer to retry */ + ts->stat = SAS_QUEUE_FULL; + pm8001_dev = ccb->device; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + spin_lock_irqsave(&t->task_state_lock, flags1); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, pw->handler, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + t->task_done(t); + } + } break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + { /* This one stashes the sas_task instead */ + struct sas_task *t = (struct sas_task *)pm8001_dev; + struct pm8001_ccb_info *ccb; + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + unsigned long flags, flags1; + int i, ret = 0; + + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + + ret = pm8001_query_task(t); + + if (ret == TMF_RESP_FUNC_SUCC) + pm8001_dbg(pm8001_ha, IO, "...Task on lu\n"); + else if (ret == TMF_RESP_FUNC_COMPLETE) + pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n"); + else + pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n"); + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + spin_lock_irqsave(&t->task_state_lock, flags1); + + if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) { + spin_unlock_irqrestore(&t->task_state_lock, flags1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */ + (void)pm8001_abort_task(t); + break; /* Task got completed by another */ + } + + spin_unlock_irqrestore(&t->task_state_lock, flags1); + + /* Search for a possible ccb that matches the task */ + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + if ((ccb->ccb_tag != 
PM8001_INVALID_TAG) && + (ccb->task == t)) + break; + } + if (!ccb) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */ + (void)pm8001_abort_task(t); + break; /* Task got freed by another */ + } + + pm8001_dev = ccb->device; + dev = pm8001_dev->sas_device; + + switch (ret) { + case TMF_RESP_FUNC_SUCC: /* task on lu */ + ccb->open_retry = 1; /* Snub completion */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + ret = pm8001_abort_task(t); + ccb->open_retry = 0; + switch (ret) { + case TMF_RESP_FUNC_SUCC: + case TMF_RESP_FUNC_COMPLETE: + break; + default: /* device misbehavior */ + ret = TMF_RESP_FUNC_FAILED; + pm8001_dbg(pm8001_ha, IO, "...Reset phy\n"); + pm8001_I_T_nexus_reset(dev); + break; + } + break; + + case TMF_RESP_FUNC_COMPLETE: /* task not on lu */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + /* Do we need to abort the task locally? */ + break; + + default: /* device misbehavior */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + ret = TMF_RESP_FUNC_FAILED; + pm8001_dbg(pm8001_ha, IO, "...Reset phy\n"); + pm8001_I_T_nexus_reset(dev); + } + + if (ret == TMF_RESP_FUNC_FAILED) + t = NULL; + pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev); + pm8001_dbg(pm8001_ha, IO, "...Complete\n"); + } break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_event_handler(dev); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_IN_ERROR: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_DS_NON_OPERATIONAL: + dev = pm8001_dev->sas_device; + pm8001_I_T_nexus_reset(dev); + break; + case IO_FATAL_ERROR: + { + struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha; + struct pm8001_ccb_info *ccb; + struct task_status_struct *ts; + struct sas_task *task; + int i; + u32 device_id; + + for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) { + ccb = &pm8001_ha->ccb_info[i]; + task = ccb->task; + ts = &task->task_status; + + if (task != NULL) { + dev = task->dev; + if (!dev) { + pm8001_dbg(pm8001_ha, FAIL, + "dev is NULL\n"); + continue; + } + /*complete sas task and update to top layer */ + pm8001_ccb_task_free(pm8001_ha, ccb); + ts->resp = SAS_TASK_COMPLETE; + task->task_done(task); + } else if (ccb->ccb_tag != PM8001_INVALID_TAG) { + /* complete the internal commands/non-sas task */ + pm8001_dev = ccb->device; + if (pm8001_dev->dcompletion) { + complete(pm8001_dev->dcompletion); + pm8001_dev->dcompletion = NULL; + } + complete(pm8001_ha->nvmd_completion); + pm8001_ccb_free(pm8001_ha, ccb); + } + } + /* Deregister all the device ids */ + for (i = 0; i < PM8001_MAX_DEVICES; i++) { + pm8001_dev = &pm8001_ha->devices[i]; + device_id = pm8001_dev->device_id; + if (device_id) { + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_free_dev(pm8001_dev); + } + } + } + break; + case IO_XFER_ERROR_ABORTED_NCQ_MODE: + { + dev = pm8001_dev->sas_device; + sas_ata_device_link_abort(dev, false); + } + break; + } + kfree(pw); +} + +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, + int handler) +{ + struct pm8001_work *pw; + int ret = 0; + + pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC); + if (pw) { + pw->pm8001_ha = pm8001_ha; + pw->data = data; + pw->handler = handler; + INIT_WORK(&pw->work, pm8001_work_fn); + queue_work(pm8001_wq, &pw->work); + } else + ret = -ENOMEM; + + return ret; +} + +/** + * mpi_ssp_completion- process the event that FW response to the 
SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. + */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + + if (status) + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t); + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n", + param); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n", + param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n", + psspPayload->ssp_resp_iu.status); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + u32 dev_id = le32_to_cpu(psspPayload->device_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n", + port_id, dev_id); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case 
IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + return; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low; + u8 sata_addr_hi[4]; + u32 temp_sata_addr_hi; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); + tag = le32_to_cpu(psataPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (pm8001_dev && unlikely(!t || !t->lldd_task || !t->dev)) { + pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + return; + } + + ts = &t->task_status; + + if (status) + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task::0x%p\n", + status, tag, t); + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (dev_is_expander(t->dev->parent->dev_type)))) { + for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%08x%08x\n", + temp_sata_addr_hi, + temp_sata_addr_low); + } else { + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + } + } + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + pm8001_dbg(pm8001_ha, IO, + 
"SAS_PROTO_RESPONSE len = %d\n", + param); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == DMA_FROM_DEVICE) { + len = sizeof(struct pio_setup_fis); + pm8001_dbg(pm8001_ha, IO, + "PIO read len = %d\n", len); + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { + len = sizeof(struct set_dev_bits_fis); + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", + len); + } else { + len = sizeof(struct dev_to_host_fis); + pm8001_dbg(pm8001_ha, IO, "other len = %d\n", + len); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + pm8001_dbg(pm8001_ha, IO, + "response too large\n"); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + 
pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, " IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_IN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if 
(!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, ccb); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + + if (event) + pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + if (pm8001_dev) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event); + + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + pm8001_dbg(pm8001_ha, DEVIO, + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case 
IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + break; + case IO_XFER_PIO_SETUP_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) { + pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); + pm8001_dbg(pm8001_ha, IOERR, + "status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t); + } + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + 
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. 
Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_dev_state_resp *pPayload = + (struct set_dev_state_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + struct pm8001_device *pm8001_dev = ccb->device; + u32 status = le32_to_cpu(pPayload->status); + u32 device_id = le32_to_cpu(pPayload->device_id); + u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; + u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; + + pm8001_dbg(pm8001_ha, MSG, + "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n", + device_id, pds, nds, status); + complete(pm8001_dev->setds_completion); + pm8001_ccb_free(pm8001_ha, ccb); +} + +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + + complete(pm8001_ha->nvmd_completion); + pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n"); + if ((dlen_status & NVMD_STAT) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n", + dlen_status); + } + pm8001_ccb_free(pm8001_ha, ccb); +} + +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct fw_control_ex *fw_control_context; + struct get_nvm_data_resp *pPayload = + (struct get_nvm_data_resp *)(piomb + 4); + u32 tag = le32_to_cpu(pPayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + u32 dlen_status = le32_to_cpu(pPayload->dlen_status); + u32 ir_tds_bn_dps_das_nvm = + le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); + void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + fw_control_context = ccb->fw_control_context; + + pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n"); + if ((dlen_status & NVMD_STAT) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n", + dlen_status); + complete(pm8001_ha->nvmd_completion); + /* We should free tag during failure also, the tag is not being + * freed by requesting path anywhere. 
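+ * Otherwise the ccb and its tag would simply leak each time a
+ * get-NVMD request fails.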
+ */ + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + if (ir_tds_bn_dps_das_nvm & IPMode) { + /* indirect mode - IR bit set */ + pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n"); + if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) { + if (ir_tds_bn_dps_das_nvm == 0x80a80200) { + memcpy(pm8001_ha->sas_addr, + ((u8 *)virt_addr + 4), + SAS_ADDR_SIZE); + pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n"); + } + } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM) + || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) || + ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) { + ; + } else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP) + || ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) { + ; + } else { + /* Should not happen */ + pm8001_dbg(pm8001_ha, MSG, + "(IR=1)Wrong Device type 0x%x\n", + ir_tds_bn_dps_das_nvm); + } + } else /* direct mode */{ + pm8001_dbg(pm8001_ha, MSG, + "Get NVMD success, IR=0, dataLen=%d\n", + (dlen_status & NVMD_LEN) >> 24); + } + /* Though fw_control_context is freed below, usrAddr still needs + * to be updated as this holds the response to the request function + */ + memcpy(fw_control_context->usrAddr, + pm8001_ha->memoryMap.region[NVMD].virt_ptr, + fw_control_context->len); + kfree(ccb->fw_control_context); + /* To avoid race condition, complete should be + * called after the message is copied to + * fw_control_context->usrAddr + */ + complete(pm8001_ha->nvmd_completion); + pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n"); + pm8001_ccb_free(pm8001_ha, ccb); +} +
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 tag; + struct local_phy_ctl_resp *pPayload = + (struct local_phy_ctl_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS; + u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; + tag = le32_to_cpu(pPayload->tag); + if (status != 0) { + pm8001_dbg(pm8001_ha, MSG, + "%x phy execute %x phy op failed!\n", + phy_id, phy_op); + } else { + pm8001_dbg(pm8001_ha, MSG, + "%x phy execute %x phy op success!\n", + phy_id, phy_op); + pm8001_ha->phy[phy_id].reset_success = true; + } + if (pm8001_ha->phy[phy_id].enable_completion) { + complete(pm8001_ha->phy[phy_id].enable_completion); + pm8001_ha->phy[phy_id].enable_completion = NULL; + } + pm8001_tag_free(pm8001_ha, tag); + return 0; +} +
+/** + * pm8001_bytes_dmaed - one of the interface functions for communicating with libsas + * @pm8001_ha: our hba card information + * @i: which phy received the event. + * + * When the HBA driver receives an identify done event, or an initial FIS + * received event (for SATA), it invokes this function to tell the sas layer + * that the sas topology has formed and the whole sas domain should be + * discovered. On a broadcast (change) primitive the sas layer is only told + * to discover the changed part of the domain rather than the whole domain.
+ */ +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +{ + struct pm8001_phy *phy = &pm8001_ha->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + if (!phy->phy_attached) + return; + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /*Nothing*/ + } + pm8001_dbg(pm8001_ha, MSG, "phy %d bytes dmaed.\n", i); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC); +} +
+/* Get the link rate speed */ +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +{ + struct sas_phy *sas_phy = phy->sas_phy.phy; + + switch (link_rate) { + case PHY_SPEED_120: + phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS; + break; + case PHY_SPEED_60: + phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + case PHY_SPEED_30: + phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_SPEED_15: + phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + } + sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; + sas_phy->maximum_linkrate_hw = phy->maximum_linkrate; + sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sas_phy->maximum_linkrate = phy->maximum_linkrate; + sas_phy->minimum_linkrate = phy->minimum_linkrate; +} +
+/** + * pm8001_get_attached_sas_addr - extract/generate attached SAS address + * @phy: pointer to the pm8001_phy + * @sas_addr: pointer to buffer where the SAS address is to be written + * + * This function extracts the SAS address from an IDENTIFY frame + * received. If OOB is SATA, then a SAS address is generated from the + * HA tables. + * + * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame + * buffer. + */ +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, + u8 *sas_addr) +{ + if (phy->sas_phy.frame_rcvd[0] == 0x34 + && phy->sas_phy.oob_mode == SATA_OOB_MODE) { + struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha; + /* FIS device-to-host */ + u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr); + addr += phy->sas_phy.id; + *(__be64 *)sas_addr = cpu_to_be64(addr); + } else { + struct sas_identify_frame *idframe = + (void *) phy->sas_phy.frame_rcvd; + memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); + } +} +
+/** + * pm8001_hw_event_ack_req - For PM8001, some events need to be acknowledged to the FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + memset((u8 *)&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0x0F) << 4) | (port_id & 0x0F)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + + pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, sizeof(payload), 0); +} + +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up - FW tells me a SAS phy up event.
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 link_rate = + (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); + u8 portstate = (u8)(npip_portstate & 0x0000000F); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", + port_id, phy_id); + + switch (deviceType) { + case SAS_PHY_UNUSED: + pm8001_dbg(pm8001_ha, MSG, "device type no device.\n"); + break; + case SAS_END_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "end device.\n"); + pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n", + deviceType); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 link_rate = + (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); + u8 portstate = (u8)(npip_portstate & 0x0000000F); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n", + port_id, phy_id); + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down -we should notify the libsas the phy is down. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); + u8 portstate = (u8)(npip_portstate & 0x0000000F); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n", + port_id); + pm8001_dbg(pm8001_ha, MSG, + " Last phy Down and port invalid\n"); + port->port_attached = 0; + pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n", + port_id); + break; + case PORT_NOT_ESTABLISHED: + pm8001_dbg(pm8001_ha, MSG, + " phy Down and PORT_NOT_ESTABLISHED\n"); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n"); + pm8001_dbg(pm8001_ha, MSG, + " Last phy Down and port invalid\n"); + port->port_attached = 0; + pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n", + portstate); + break; + + } +} +
+/** + * pm8001_mpi_reg_resp - process register device ID response. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + * + * When the sas layer finds a device it notifies the LLDD, and the driver then + * registers the domain device with the FW. This event returns the device ID + * that the FW has assigned; from now on, communication with the FW no longer + * uses the SAS address but the device ID the FW assigned.
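+ *
+ * Whatever the completion status, dcompletion is signalled so that the
+ * waiting registration path can continue; on DEVREG_SUCCESS the FW
+ * assigned device_id is stored in the pm8001_device first.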
+ */ +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + u32 htag; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + htag = le32_to_cpu(registerRespPayload->tag); + ccb = &pm8001_ha->ccb_info[htag]; + pm8001_dev = ccb->device; + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + pm8001_dbg(pm8001_ha, INIT, + "register device status %d phy_id 0x%x device_id %d\n", + status, pm8001_dev->attached_phy, device_id); + switch (status) { + case DEVREG_SUCCESS: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n"); + pm8001_dev->device_id = device_id; + break; + case DEVREG_FAILURE_OUT_OF_RESOURCE: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n"); + break; + case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"); + break; + case DEVREG_FAILURE_INVALID_PHY_ID: + pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n"); + break; + case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"); + break; + case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"); + break; + case DEVREG_FAILURE_PORT_NOT_VALID_STATE: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"); + break; + case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"); + break; + default: + pm8001_dbg(pm8001_ha, MSG, + "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"); + break; + } + complete(pm8001_dev->dcompletion); + pm8001_ccb_free(pm8001_ha, ccb); + return 0; +} + +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + u32 device_id; + struct dev_reg_resp *registerRespPayload = + (struct dev_reg_resp *)(piomb + 4); + + status = le32_to_cpu(registerRespPayload->status); + device_id = le32_to_cpu(registerRespPayload->device_id); + if (status != 0) + pm8001_dbg(pm8001_ha, MSG, + " deregister device failed ,status = %x, device_id = %x\n", + status, device_id); + return 0; +} + +/** + * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + u32 status; + struct fw_flash_Update_resp *ppayload = + (struct fw_flash_Update_resp *)(piomb + 4); + u32 tag = le32_to_cpu(ppayload->tag); + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; + + status = le32_to_cpu(ppayload->status); + switch (status) { + case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: + pm8001_dbg(pm8001_ha, MSG, + ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"); + break; + case FLASH_UPDATE_IN_PROGRESS: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n"); + break; + case FLASH_UPDATE_HDR_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n"); + break; + case FLASH_UPDATE_OFFSET_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n"); + break; + case FLASH_UPDATE_CRC_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n"); + break; + case FLASH_UPDATE_LENGTH_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n"); + break; + case FLASH_UPDATE_HW_ERR: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n"); + break; + case FLASH_UPDATE_DNLD_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, MSG, + ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"); + break; + case FLASH_UPDATE_DISABLED: + pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "No matched status = %d\n", + status); + break; + } + kfree(ccb->fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + complete(pm8001_ha->nvmd_completion); + return 0; +} + +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 status; + int i; + struct general_event_resp *pPayload = + (struct general_event_resp *)(piomb + 4); + status = le32_to_cpu(pPayload->status); + pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status); + for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) + pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n", + i, + pPayload->inb_IOMB_payload[i]); + return 0; +} + +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status ; + u32 tag, scp; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + struct task_abort_resp *pPayload = + (struct task_abort_resp *)(piomb + 4); + + status = le32_to_cpu(pPayload->status); + tag = le32_to_cpu(pPayload->tag); + + scp = le32_to_cpu(pPayload->scp); + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n"); + return -1; + } + + if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT) + atomic_dec(&pm8001_dev->running_req); + + ts = &t->task_status; + if (status != 0) + pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n", + status, tag, scp); + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + break; + case IO_NOT_VALID: + pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n"); + ts->resp = TMF_RESP_FUNC_FAILED; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb(); + + t->task_done(t); + + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_evt_status_phyid_portid = + le32_to_cpu(pPayload->lr_evt_status_phyid_portid); + u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); + u8 phy_id = + (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); + u16 eventType = + (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + pm8001_dbg(pm8001_ha, DEVIO, + "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n", + port_id, phy_id, eventType, status); + switch (eventType) { + case HW_EVENT_PHY_START_STATUS: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n", + status); + if (status == 0) + phy->phy_state = 1; + + if (pm8001_ha->flags == PM8001F_RUN_TIME && + phy->enable_completion != NULL) { + complete(phy->enable_completion); + phy->enable_completion = NULL; + } + break; + case HW_EVENT_SAS_PHY_UP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n"); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n"); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_PHY_STOP_STATUS: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n", + status); + if (status == 0) + phy->phy_state = 0; + break; + case HW_EVENT_SATA_SPINUP_HOLD: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n"); + sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_DOWN: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); + sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, + GFP_ATOMIC); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_ERROR: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n"); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); + break; + case HW_EVENT_BROADCAST_EXP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_INVALID_DWORD\n"); + 
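/*
+ * Invalid dword link errors are among the events the FW expects the
+ * driver to acknowledge (pm8001_hw_event_ack_req) before the phy is
+ * disconnected and libsas is notified of the link reset error.
+ */ +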
pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_CODE_VIOLATION\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_MALFUNCTION: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n"); + break; + case HW_EVENT_BROADCAST_SES: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n"); + sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"); + pm8001_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + pm8001_dbg(pm8001_ha, MSG, + "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RECOVER: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n"); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n"); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = 
%x\n", + eventType); + break; + } + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF); + + pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n"); + + switch (opc) { + case OPC_OUB_ECHO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n"); + break; + case OPC_OUB_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n"); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n"); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n"); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n"); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n"); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + pm8001_dbg(pm8001_ha, MSG, "unregister the device\n"); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n"); + break; + case OPC_OUB_SATA_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n"); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n"); + /*This is for target*/ + break; + case OPC_OUB_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n"); + break; + case OPC_OUB_FW_FLASH_UPDATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n"); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n"); + break; + case OPC_OUB_GPIO_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n"); + break; + case OPC_OUB_GENERAL_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n"); + pm8001_mpi_general_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SAS_DIAG_MODE_START_END\n"); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n"); + break; + case OPC_OUB_GET_TIME_STAMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n"); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n"); + break; + case OPC_OUB_PORT_CONTROL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n"); + break; + case OPC_OUB_SMP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case 
OPC_OUB_GET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n"); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n"); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n"); + break; + case OPC_OUB_SET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n"); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n"); + break; + case OPC_OUB_SET_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); + break; + case OPC_OUB_SAS_RE_INITIALIZE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown outbound Queue IOMB OPC = %x\n", + opc); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 bc; + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ + [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; +void +pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct pm8001_prd *buf_prd = prd; + + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + buf_prd->im_len.e = 0; + buf_prd++; + } +} + +static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); +} + +/** + * pm8001_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
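For reference, process_one_iomb() above keys its dispatch entirely off the low twelve bits of the first IOMB dword, which matches the mpi_msg_hdr layout declared later in pm8001_hwi.h (bits [11:0] opcode, [15:12] category, [21:16] outbound queue ID, [28:24] buffer count, bit 31 message valid). Below is a minimal standalone sketch of that header decode; le32_to_host() and the sample bytes are inventions of this example, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Local helper, not driver code: fold four little-endian bytes into a host dword. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Sample IOMB header: valid message, opcode 0x004, outbound queue 1. */
	uint8_t iomb[4] = { 0x04, 0x00, 0x01, 0x80 };
	uint32_t hdr = le32_to_host(iomb);

	printf("opcode    = 0x%03x\n", (unsigned)(hdr & 0xFFF));      /* bits [11:0]  */
	printf("category  = 0x%x\n", (unsigned)((hdr >> 12) & 0xF));  /* bits [15:12] */
	printf("obq id    = %u\n", (unsigned)((hdr >> 16) & 0x3F));   /* bits [21:16] */
	printf("buf count = %u\n", (unsigned)((hdr >> 24) & 0x1F));   /* bits [28:24] */
	printf("valid     = %u\n", (unsigned)(hdr >> 31));            /* bit 31       */
	return 0;
}

Compiled on its own, this prints opcode 0x004 (OPC_OUB_HW_EVENT) routed to outbound queue 1, the same decision process_one_iomb() makes with its 0xFFF mask.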
+ */ +static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, + &smp_cmd, sizeof(smp_cmd), 0); + if (rc) + goto err_out_2; + + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +/** + * pm8001_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
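A small standalone sketch of the length rules pm8001_chip_smp_req() enforces above: both DMA-mapped SMP buffers must be a whole number of dwords, and the sizes programmed into the long-format descriptor are the DMA lengths minus 4, presumably the trailing CRC dword the controller appends and strips itself. smp_frame_ok() and the sample lengths are invented for this example.

#include <stdint.h>
#include <stdio.h>

/* Invented helper: mirrors the dword-alignment check in pm8001_chip_smp_req(). */
static int smp_frame_ok(uint32_t req_len, uint32_t resp_len)
{
	return !((req_len & 0x3) || (resp_len & 0x3));	/* driver returns -EINVAL otherwise */
}

int main(void)
{
	uint32_t req_len = 12, resp_len = 1024;		/* example DMA lengths */

	if (!smp_frame_ok(req_len, resp_len)) {
		fprintf(stderr, "SMP buffers must be dword multiples\n");
		return 1;
	}
	/* Sizes programmed into the long-format descriptor (DMA length minus 4). */
	printf("long_req_size  = %u\n", (unsigned)(req_len - 4));
	printf("long_resp_size = %u\n", (unsigned)(resp_len - 4));
	return 0;
}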
+ */ +static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + u64 phys_addr; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + ssp_cmd.dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for + SAS 1.1 compatible TLR*/ + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &ssp_cmd, + sizeof(ssp_cmd), 0); +} + +static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir, retfis = 0; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { + ATAP = 0x04; /* no data*/ + pm8001_dbg(pm8001_ha, IO, "no data\n"); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + pm8001_dbg(pm8001_ha, IO, "DMA\n"); + } else { + ATAP = 0x05; /* PIO*/ + pm8001_dbg(pm8001_ha, IO, "PIO\n"); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + if (task->ata_task.return_fis_on_success) + retfis = 1; + sata_cmd.retfis_ncqtag_atap_dir_m = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | dir); + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + /* fill in PRD (scatter/gather) table, if any */ + if 
(task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, + sizeof(sata_cmd), 0); +} + +/** + * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both + ** [14] 0b disable spin up hold; 1b enable spin up hold + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/** + * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. + */ +static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/* + * see comments on pm8001_mpi_reg_resp. 
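The control dword assembled by pm8001_chip_phy_start_req() above follows the bit layout spelled out in its comment ([0:7] phy identifier, [8:11] link rate, [12:13] link mode, [14] spin-up hold). A standalone sketch of that packing, with the relevant constants copied from pm8001_hwi.h so it compiles on its own; the phy number is an example value.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from pm8001_hwi.h so the sketch stands alone. */
#define SPINHOLD_DISABLE	(0x00 << 14)
#define LINKMODE_AUTO		(0x03 << 12)
#define LINKRATE_15		(0x01 << 8)
#define LINKRATE_30		(0x02 << 8)
#define LINKRATE_60		(0x04 << 8)

int main(void)
{
	uint32_t phy_id = 3;	/* example phy */
	uint32_t ctl = SPINHOLD_DISABLE | LINKMODE_AUTO |
		       LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id;

	/* [0:7] phy id, [8:11] link rate bits, [12:13] link mode, [14] spin-up hold */
	printf("ase_sh_lm_slr_phyid = 0x%08x\n", (unsigned)ctl);	/* 0x00003703 */
	return 0;
}

For phy 3 this yields 0x3703: automatic link mode with the 1.5/3/6 Gbit rates advertised and spin-up hold disabled, exactly the combination the driver requests.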
+ */ +static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + u32 linkrate, phy_id; + int rc; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + struct pm8001_port *port = dev->port->lldd_port; + + memset(&payload, 0, sizeof(payload)); + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + if (flag == 1) + stp_sspsmp_sata = 0x02; /*direct attached sata */ + else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + dev_is_expander(pm8001_dev->dev_type)) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + opc = OPC_INB_REG_DEV; + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + payload.phyid_portid = + cpu_to_le32(((port->port_id) & 0x0F) | + ((phy_id & 0x0F) << 4)); + payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) * 0x1000000) | + ((stp_sspsmp_sata & 0x03) * 0x10000000)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +/* + * see comments on pm8001_mpi_reg_resp. + */ +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + struct dereg_dev_req payload; + u32 opc = OPC_INB_DEREG_DEV_HANDLE; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.device_id = cpu_to_le32(device_id); + pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n", + device_id); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +/** + * pm8001_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @phyId: the phy id which we wanted to operate + * @phy_op: the phy operation to request + */ +static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + return 1; +#else + u32 value; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; +#endif +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. 
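pm8001_chip_reg_dev_req() above packs its registration payload using multiplications by 0x1000000 and 0x10000000, which are simply shifts by 24 and 28 bits. A hedged sketch of the same packing follows; every field value here (port 0, phy 5, link-rate code 0x9, device type 0x01 for SSP/SMP, one retry, I_T nexus timeout 2000) is made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up example values; see the note above. */
	uint32_t port_id = 0, phy_id = 5, linkrate = 0x9;
	uint32_t devtype = 0x01, retry = 1;		/* 0x01 = SSP or SMP device */
	uint16_t itnt = 2000, first_burst = 0;

	uint32_t phyid_portid = (port_id & 0x0F) | ((phy_id & 0x0F) << 4);
	uint32_t dtype_dlr_retry = (retry & 0x01) |
				   ((linkrate & 0x0F) << 24) |	/* same as * 0x1000000 */
				   ((devtype & 0x03) << 28);	/* same as * 0x10000000 */
	uint32_t fbs_itnt = itnt | ((uint32_t)first_burst << 16);

	printf("phyid_portid                  = 0x%08x\n", (unsigned)phyid_portid);    /* 0x00000050 */
	printf("dtype_dlr_retry               = 0x%08x\n", (unsigned)dtype_dlr_retry); /* 0x19000001 */
	printf("firstburstsize_ITNexustimeout = 0x%08x\n", (unsigned)fbs_itnt);        /* 0x000007d0 */
	return 0;
}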
+ * @vec: IRQ number + */ +static irqreturn_t +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm8001_chip_interrupt_disable(pm8001_ha, vec); + pm8001_dbg(pm8001_ha, DEVIO, + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30)); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, + u32 dev_id, enum sas_internal_abort type, u32 task_tag, u32 cmd_tag) +{ + struct task_abort_req task_abort; + + memset(&task_abort, 0, sizeof(task_abort)); + if (type == SAS_INTERNAL_ABORT_SINGLE) { + task_abort.abort_all = 0; + task_abort.device_id = cpu_to_le32(dev_id); + task_abort.tag_to_abort = cpu_to_le32(task_tag); + } else if (type == SAS_INTERNAL_ABORT_DEV) { + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(dev_id); + } else { + pm8001_dbg(pm8001_ha, EH, "unknown type (%d)\n", type); + return -EIO; + } + + task_abort.tag = cpu_to_le32(cmd_tag); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, + sizeof(task_abort), 0); +} + +/* + * pm8001_chip_abort_task - SAS abort task when error or exception happened. + */ +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct sas_internal_abort_task *abort = &task->abort_task; + struct pm8001_device *pm8001_dev = ccb->device; + int rc = TMF_RESP_FUNC_FAILED; + u32 opc, device_id; + + pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n", + ccb->ccb_tag, abort->tag); + if (pm8001_dev->dev_type == SAS_END_DEVICE) + opc = OPC_INB_SSP_ABORT; + else if (pm8001_dev->dev_type == SAS_SATA_DEV) + opc = OPC_INB_SATA_ABORT; + else + opc = OPC_INB_SMP_ABORT;/* SMP */ + device_id = pm8001_dev->device_id; + rc = send_task_abort(pm8001_ha, opc, device_id, abort->type, + abort->tag, ccb->ccb_tag); + if (rc != TMF_RESP_FUNC_COMPLETE) + pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc); + return rc; +} + +/** + * pm8001_chip_ssp_tm_req - built the task management command. + * @pm8001_ha: our hba card information. + * @ccb: the ccb information. + * @tmf: task management function. 
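send_task_abort() above supports two scopes: name a single outstanding tag, or set abort_all so that only the device handle matters. A small sketch of that choice; struct abort_desc and fill_abort() are names invented here, not the driver's task_abort_req layout.

#include <stdint.h>
#include <stdio.h>

/* Invented descriptor mirroring the three fields send_task_abort() fills. */
struct abort_desc {
	uint32_t device_id;
	uint32_t tag_to_abort;
	uint32_t abort_all;
};

static void fill_abort(struct abort_desc *d, uint32_t dev_id,
		       int whole_device, uint32_t tag)
{
	d->device_id = dev_id;
	if (whole_device) {
		d->abort_all = 1;
		d->tag_to_abort = 0;	/* left at zero, matching the driver's memset */
	} else {
		d->abort_all = 0;
		d->tag_to_abort = tag;
	}
}

int main(void)
{
	struct abort_desc one, all;

	fill_abort(&one, 0x200, 0, 0x1c);	/* abort one ccb tag */
	fill_abort(&all, 0x200, 1, 0);		/* abort everything on the device */
	printf("single: dev=0x%x tag=0x%x all=%u\n",
	       (unsigned)one.device_id, (unsigned)one.tag_to_abort, (unsigned)one.abort_all);
	printf("device: dev=0x%x tag=0x%x all=%u\n",
	       (unsigned)all.device_id, (unsigned)all.tag_to_abort, (unsigned)all.abort_all);
	return 0;
}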
+ */ +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + u32 opc = OPC_INB_SSPINITMSTART; + struct ssp_ini_tm_start_req sspTMCmd; + + memset(&sspTMCmd, 0, sizeof(sspTMCmd)); + sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); + sspTMCmd.relate_tag = cpu_to_le32((u32)tmf->tag_of_task_to_be_managed); + sspTMCmd.tmf = cpu_to_le32(tmf->tmf); + memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); + sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); + if (pm8001_ha->chip_id != chip_8001) + sspTMCmd.ds_ads_m = cpu_to_le32(0x08); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sspTMCmd, + sizeof(sspTMCmd), 0); +} + +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_GET_NVMD_DATA; + u32 nvmd_type; + int rc; + struct pm8001_ccb_info *ccb; + struct get_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; + fw_control_context->len = ioctl_payload->rd_length; + memset(&nvmd_req, 0, sizeof(nvmd_req)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + nvmd_req.tag = cpu_to_le32(ccb->ccb_tag); + + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case VPD_FLASH: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case EXPAN_ROM: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case IOP_RDUMP: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length); + nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + default: + break; + } + + rc 
= pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req, + sizeof(nvmd_req), 0); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + return rc; +} + +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + u32 opc = OPC_INB_SET_NVMD_DATA; + u32 nvmd_type; + int rc; + struct pm8001_ccb_info *ccb; + struct set_nvm_data_req nvmd_req; + struct fw_control_ex *fw_control_context; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + nvmd_type = ioctl_payload->minor_function; + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + + memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, + &ioctl_payload->func_specific, + ioctl_payload->wr_length); + memset(&nvmd_req, 0, sizeof(nvmd_req)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + nvmd_req.tag = cpu_to_le32(ccb->ccb_tag); + switch (nvmd_type) { + case TWI_DEVICE: { + u32 twi_addr, twi_page_size; + twi_addr = 0xa8; + twi_page_size = 2; + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | + twi_page_size << 8 | TWI_DEVICE); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } + case C_SEEPROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case VPD_FLASH: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + case EXPAN_ROM: + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length); + nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + default: + break; + } + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req, + sizeof(nvmd_req), 0); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + return rc; +} + +/** + * pm8001_chip_fw_flash_update_build - support the firmware update operation + * @pm8001_ha: our hba card information. 
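The TWI (SEEPROM) case in pm8001_chip_get_nvmd_req() and pm8001_chip_set_nvmd_req() above ORs the device address into bits [23:16], the page-size code into bits [15:8] and the NVMD type into the low nibble, under the IPMode flag in bit 31. A standalone sketch of that dword, with the two constants copied from pm8001_hwi.h; the address and page size are the same fixed values the driver uses.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from pm8001_hwi.h so the sketch stands alone. */
#define IPMode		0x80000000u
#define TWI_DEVICE	0x0

int main(void)
{
	uint32_t twi_addr = 0xa8, twi_page_size = 2;
	uint32_t len_ir_vpdd = IPMode | (twi_addr << 16) |
			       (twi_page_size << 8) | TWI_DEVICE;

	printf("len_ir_vpdd = 0x%08x\n", (unsigned)len_ir_vpdd);	/* 0x80a80200 */
	return 0;
}

The printed value, 0x80a80200, matches what the TWI_DEVICE case above computes before handing the request to pm8001_mpi_build_cmd().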
+ * @fw_flash_updata_info: firmware flash update param + * @tag: Tag to apply to the payload + */ +int +pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag) +{ + struct fw_flash_Update_req payload; + struct fw_flash_updata_info *info; + u32 opc = OPC_INB_FW_FLASH_UPDATE; + + memset(&payload, 0, sizeof(struct fw_flash_Update_req)); + info = fw_flash_updata_info; + payload.tag = cpu_to_le32(tag); + payload.cur_image_len = cpu_to_le32(info->cur_image_len); + payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); + payload.total_image_len = cpu_to_le32(info->total_image_len); + payload.len = info->sgl.im_len.len ; + payload.sgl_addr_lo = + cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); + payload.sgl_addr_hi = + cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); +} + +int +pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload) +{ + struct fw_flash_updata_info flash_update_info; + struct fw_control_info *fw_control; + struct fw_control_ex *fw_control_context; + int rc; + struct pm8001_ccb_info *ccb; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; + struct pm8001_ioctl_payload *ioctl_payload = payload; + + fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; + pm8001_dbg(pm8001_ha, DEVIO, + "dma fw_control context input length :%x\n", + fw_control->len); + memcpy(buffer, fw_control->buffer, fw_control->len); + flash_update_info.sgl.addr = cpu_to_le64(phys_addr); + flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); + flash_update_info.sgl.im_len.e = 0; + flash_update_info.cur_image_offset = fw_control->offset; + flash_update_info.cur_image_len = fw_control->len; + flash_update_info.total_image_len = fw_control->size; + fw_control_context->fw_control = fw_control; + fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; + fw_control_context->len = fw_control->len; + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) { + kfree(fw_control_context); + return -SAS_QUEUE_FULL; + } + ccb->fw_control_context = fw_control_context; + + rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, + ccb->ccb_tag); + if (rc) { + kfree(fw_control_context); + pm8001_ccb_free(pm8001_ha, ccb); + } + + return rc; +} + +ssize_t +pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) +{ + u32 value, rem, offset = 0, bar = 0; + u32 index, work_offset, dw_length; + u32 shift_value, gsm_base, gsm_dump_offset; + char *direct_data; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + direct_data = buf; + gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; + + /* check max is 1 Mbytes */ + if ((length > 0x100000) || (gsm_dump_offset & 3) || + ((gsm_dump_offset + length) > 0x1000000)) + return -EINVAL; + + if (pm8001_ha->chip_id == chip_8001) + bar = 2; + else + bar = 1; + + work_offset = gsm_dump_offset & 0xFFFF0000; + offset = gsm_dump_offset & 0x0000FFFF; + gsm_dump_offset = work_offset; + /* adjust length to dword boundary */ + rem = length & 3; + dw_length = length >> 2; + + for (index = 0; index < dw_length; index++) { + if 
((work_offset + offset) & 0xFFFF0000) { + if (pm8001_ha->chip_id == chip_8001) + shift_value = ((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK); + else + shift_value = (((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK) >> + SHIFT_REG_BIT_SHIFT); + + if (pm8001_ha->chip_id == chip_8001) { + gsm_base = GSM_BASE; + if (-1 == pm8001_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } else { + gsm_base = 0; + if (-1 == pm80xx_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } + gsm_dump_offset = (gsm_dump_offset + offset) & + 0xFFFF0000; + work_offset = 0; + offset = offset & 0x0000FFFF; + } + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + direct_data += sprintf(direct_data, "%08x ", value); + offset += 4; + } + if (rem != 0) { + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + /* xfr for non_dw */ + direct_data += sprintf(direct_data, "%08x ", value); + } + /* Shift back to BAR4 original address */ + if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) + return -EIO; + pm8001_ha->fatal_forensic_shift_offset += 1024; + + if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) + pm8001_ha->fatal_forensic_shift_offset = 0; + return direct_data - buf; +} + +int +pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state) +{ + struct set_dev_state_req payload; + struct pm8001_ccb_info *ccb; + int rc; + u32 opc = OPC_INB_SET_DEVICE_STATE; + + memset(&payload, 0, sizeof(payload)); + + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + payload.device_id = cpu_to_le32(pm8001_dev->device_id); + payload.nds = cpu_to_le32(state); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +static int +pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) +{ + struct sas_re_initialization_req payload; + struct pm8001_ccb_info *ccb; + int rc; + u32 opc = OPC_INB_SAS_RE_INITIALIZE; + + memset(&payload, 0, sizeof(payload)); + + ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + payload.SSAHOLT = cpu_to_le32(0xd << 25); + payload.sata_hol_tmo = cpu_to_le32(80); + payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +const struct pm8001_dispatch pm8001_8001_dispatch = { + .name = "pmc8001", + .chip_init = pm8001_chip_init, + .chip_post_init = pm8001_chip_post_init, + .chip_soft_rst = pm8001_chip_soft_rst, + .chip_rst = pm8001_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm8001_chip_isr, + .is_our_interrupt = pm8001_chip_is_our_interrupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm8001_chip_interrupt_enable, + .interrupt_disable = pm8001_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm8001_chip_smp_req, + .ssp_io_req = pm8001_chip_ssp_io_req, + .sata_req = pm8001_chip_sata_req, + .phy_start_req = pm8001_chip_phy_start_req, + .phy_stop_req = pm8001_chip_phy_stop_req, + .reg_dev_req = pm8001_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm8001_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = 
pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, + .sas_re_init_req = pm8001_chip_sas_re_initialization, + .fatal_errors = pm80xx_fatal_errors, +}; diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h new file mode 100644 index 0000000000..fc2127dcb5 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -0,0 +1,1030 @@ +/* + * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +#define OPC_INB_SSPINIEXTIOSTART 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +#define OPC_INB_SSPINIEDCIOSTART 12 /* 0x00C */ +#define OPC_INB_SSPINIEXTEDCIOSTART 13 /* 0x00D */ +#define OPC_INB_SSPTGTEDCIOSTART 14 /* 0x00E */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* SMP_RESPONSE is removed */ +#define OPC_INB_SMP_RESPONSE 19 /* 0x013 */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +#define OPC_INB_REG_DEV 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +#define OPC_INB_GET_DEV_INFO 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +#define OPC_INB_SAS_HW_EVENT_ACK 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +#define OPC_INB_SAS_RE_INITIALIZE 45 /* 0x02D */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_HW_EVENT 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_DEV_REGIST 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_DEV_HANDLE_ARRIV 16 /* 0x010 */ +/* SMP_RECEIVED Notification is removed */ +#define OPC_OUB_SMP_RECV_EVENT 17 /* 0x011 */ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_DEV_INFO 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_SAS_HW_EVENT_ACK 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_SAS_RE_INITIALIZE 41 /* 0x029 */ + +/* for phy start*/ +#define 
SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x04 << 8) + +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 +struct mpi_msg_hdr{ + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (64 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; + u32 reserved[5]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (64 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +struct hw_event_resp { + __le32 lr_evt_status_phyid_portid; + __le32 evt_param; + __le32 npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ + +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ +struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ + +struct hw_event_ack_req { + __le32 tag; + __le32 sea_phyid_portid; + __le32 param0; + __le32 param1; + u32 reserved1[11]; +} __attribute__((packed, aligned(4))); + + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET esponse + * use to indicate a SATA Completion (64 bytes) + */ + +struct sata_event_resp { + __le32 tag; 
+ __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ + +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 _r_a[12]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ +struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req{ + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of sas_re_initialization + */ +struct sas_re_initialization_req { + + __le32 tag; + __le32 SSAHOLT;/* bit29-set max port; + ** bit28-set open reject cmd retries. + ** bit27-set open reject data retries. + ** bit26-set open reject option, remap:1 or not:0. + ** bit25-set sata head of line time out. 
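The len_ip_ir comment in struct smp_req above documents bit 0 as indirect response, bit 1 as indirect payload and bits [23:16] as the direct payload length; build_smp_cmd() earlier in pm8001_hwi.c always writes 0x3 (both indirect). A sketch of the two layouts; the direct-payload value is purely illustrative, not something this SPC path emits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t indirect = 1u | (1u << 1);		/* what build_smp_cmd() writes */
	uint32_t direct_8_bytes = (8u << 16);		/* hypothetical direct payload */

	printf("indirect req/resp : 0x%08x\n", (unsigned)indirect);       /* 0x00000003 */
	printf("direct, 8-byte req: 0x%08x\n", (unsigned)direct_8_bytes); /* 0x00080000 */
	return 0;
}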
+ */ + __le32 reserved_maxPorts; + __le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16; + * data retries: bit15-bit0. + */ + __le32 sata_hol_tmo; + u32 reserved1[10]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 retfis_ncqtag_atap_dir_m; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; + u32 addr_low; + u32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[8]; +} __attribute__((packed, aligned(4))); + + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; +} __attribute__((packed, aligned(4))); + + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ +struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + __le32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1; +} __attribute__((packed, aligned(4))); + + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the 
data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 
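NDS_BITS and PDS_BITS above suggest that set_dev_state_resp carries the new device state in the low nibble of pds_nds and the previous state in the high nibble. A standalone sketch of that split, with the masks copied so the example compiles alone; the sample value is made up.

#include <stdint.h>
#include <stdio.h>

/* Masks copied from pm8001_hwi.h so the sketch stands alone. */
#define NDS_BITS 0x0F
#define PDS_BITS 0xF0

int main(void)
{
	uint32_t pds_nds = 0x31;	/* example: previous state 3, new state 1 */

	printf("previous state = %u\n", (unsigned)((pds_nds & PDS_BITS) >> 4));
	printf("new state      = %u\n", (unsigned)(pds_nds & NDS_BITS));
	return 0;
}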
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 + +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 +#define IO_FATAL_ERROR 0x51 + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x43 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPC_MSGU_CFG_TABLE_UPDATE 0x01/* Inbound doorbell bit0 */ +#define SPC_MSGU_CFG_TABLE_RESET 0x02/* Inbound doorbell bit1 */ +#define SPC_MSGU_CFG_TABLE_FREEZE 0x04/* Inbound doorbell bit2 */ +#define SPC_MSGU_CFG_TABLE_UNFREEZE 0x08/* Inbound doorbell bit4 */ +#define MSGU_IBDB_SET 0x04 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20/* RevB - Host not use */ +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x3C/* RevB */ +#define MSGU_ODCR 0x40/* RevB */ +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 +#define MSGU_ODMR 0x74/* RevB */ + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0/* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD1_POR 0x00 /* power on reset state */ +#define SCRATCH_PAD1_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD1_ERR 0x02 /* error state */ +#define SCRATCH_PAD1_RDY 0x03 /* ready state */ +#define SCRATCH_PAD1_RST 0x04 /* soft reset toggle flag */ +#define SCRATCH_PAD1_AAP1RDY_RST 0x08 /* AAP1 ready for soft reset */ +#define SCRATCH_PAD1_STATE_MASK 0xFFFFFFF0 /* ScratchPad1 + Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */ +#define SCRATCH_PAD1_RESERVED 0x000003F8 /* Scratch Pad1 + Reserved bit 3 to 9 */ + + /* state 
definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW ready for soft reset flag*/ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00/* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04/* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08/* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C/* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10/* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14/* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18/* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C/* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20/* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24/* DWORD 0x09 */ +#define MAIN_OB_HW_EVENT_PID03_OFFSET 0x28/* DWORD 0x0A */ +#define MAIN_OB_HW_EVENT_PID47_OFFSET 0x2C/* DWORD 0x0B */ +#define MAIN_OB_NCQ_EVENT_PID03_OFFSET 0x30/* DWORD 0x0C */ +#define MAIN_OB_NCQ_EVENT_PID47_OFFSET 0x34/* DWORD 0x0D */ +#define MAIN_TITNX_EVENT_PID03_OFFSET 0x38/* DWORD 0x0E */ +#define MAIN_TITNX_EVENT_PID47_OFFSET 0x3C/* DWORD 0x0F */ +#define MAIN_OB_SSP_EVENT_PID03_OFFSET 0x40/* DWORD 0x10 */ +#define MAIN_OB_SSP_EVENT_PID47_OFFSET 0x44/* DWORD 0x11 */ +#define MAIN_OB_SMP_EVENT_PID03_OFFSET 0x48/* DWORD 0x12 */ +#define MAIN_OB_SMP_EVENT_PID47_OFFSET 0x4C/* DWORD 0x13 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50/* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54/* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58/* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C/* DWORD 0x17 */ +#define MAIN_IOP_EVENT_LOG_ADDR_HI 0x60/* DWORD 0x18 */ +#define MAIN_IOP_EVENT_LOG_ADDR_LO 0x64/* DWORD 0x19 */ +#define MAIN_IOP_EVENT_LOG_BUFF_SIZE 0x68/* DWORD 0x1A */ +#define MAIN_IOP_EVENT_LOG_OPTION 0x6C/* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70/* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74/* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78/* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C/* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80/* DWORD 0x20 */ +#define MAIN_HDA_FLAGS_OFFSET 0x84/* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88/* DWORD 0x22 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +#define GST_PHYSTATE_OFFSET 0x18 +#define GST_PHYSTATE0_OFFSET 0x18 +#define GST_PHYSTATE1_OFFSET 0x1C +#define GST_PHYSTATE2_OFFSET 0x20 +#define GST_PHYSTATE3_OFFSET 0x24 +#define GST_PHYSTATE4_OFFSET 0x28 +#define GST_PHYSTATE5_OFFSET 0x2C +#define GST_PHYSTATE6_OFFSET 0x30 +#define GST_PHYSTATE7_OFFSET 0x34 +#define GST_RERRINFO_OFFSET 0x44 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define 
MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit difination for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPC_IBW_AXI_TRANSLATION_LOW 0x003258 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#define GSM_BASE 0x4F0000 +#define SHIFT_REG_64K_MASK 0xffff0000 +#define SHIFT_REG_BIT_SHIFT 8 +#endif + diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c new file mode 100644 index 0000000000..443a3176c6 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -0,0 +1,1569 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#include <linux/slab.h> +#include "pm8001_sas.h" +#include "pm8001_chips.h" +#include "pm80xx_hwi.h" + +static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING | + PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING; +module_param(logging_level, ulong, 0644); +MODULE_PARM_DESC(logging_level, " bits for enabling logging info."); + +static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120; +module_param(link_rate, ulong, 0644); +MODULE_PARM_DESC(link_rate, "Enable link rate.\n" + " 1: Link rate 1.5G\n" + " 2: Link rate 3.0G\n" + " 4: Link rate 6.0G\n" + " 8: Link rate 12.0G\n"); + +static struct scsi_transport_template *pm8001_stt; +static int pm8001_init_ccb_tag(struct pm8001_hba_info *); + +/* + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ +static const struct pm8001_chip_info pm8001_chips[] = { + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, + [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8077] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8006] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8070] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8072] = {0, 16, &pm8001_80xx_dispatch,}, +}; +static int pm8001_id; + +LIST_HEAD(hba_list); + +struct workqueue_struct *pm8001_wq; + +static void pm8001_map_queues(struct Scsi_Host *shost) +{ + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + + if (pm8001_ha->number_of_intr > 1) + blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1); + + return blk_mq_map_queues(qmap); +} + +/* + * The main structure which LLDD must register for scsi core. + */ +static const struct scsi_host_template pm8001_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .scan_finished = pm8001_scan_finished, + .scan_start = pm8001_scan_start, + .change_queue_depth = sas_change_queue_depth, + .bios_param = sas_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = PM8001_MAX_DMA_SG, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_target_reset_handler = sas_eh_target_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif + .shost_groups = pm8001_host_groups, + .track_queue_depth = 1, + .cmd_per_lun = 32, + .map_queues = pm8001_map_queues, +}; + +/* + * Sas layer call this function to execute specific task. 
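+ * The function table below wires the libsas callbacks (device found/gone,
+ * task execution, phy control and the error-handling hooks) to their
+ * pm8001 implementations.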
+ */ +static struct sas_domain_function_template pm8001_transport_ops = { + .lldd_dev_found = pm8001_dev_found, + .lldd_dev_gone = pm8001_dev_gone, + + .lldd_execute_task = pm8001_queue_command, + .lldd_control_phy = pm8001_phy_control, + + .lldd_abort_task = pm8001_abort_task, + .lldd_abort_task_set = sas_abort_task_set, + .lldd_clear_task_set = pm8001_clear_task_set, + .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset, + .lldd_lu_reset = pm8001_lu_reset, + .lldd_query_task = pm8001_query_task, + .lldd_port_formed = pm8001_port_formed, + .lldd_tmf_exec_complete = pm8001_setds_completion, + .lldd_tmf_aborted = pm8001_tmf_aborted, +}; + +/** + * pm8001_phy_init - initiate our adapter phys + * @pm8001_ha: our hba structure. + * @phy_id: phy id. + */ +static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id) +{ + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + phy->phy_state = PHY_LINK_DISABLE; + phy->pm8001_ha = pm8001_ha; + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; + sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + sas_phy->id = phy_id; + sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata; + sas_phy->lldd_phy = phy; +} + +/** + * pm8001_free - free hba + * @pm8001_ha: our hba structure. + */ +static void pm8001_free(struct pm8001_hba_info *pm8001_ha) +{ + int i; + + if (!pm8001_ha) + return; + + for (i = 0; i < USI_MAX_MEMCNT; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + dma_free_coherent(&pm8001_ha->pdev->dev, + (pm8001_ha->memoryMap.region[i].total_len + + pm8001_ha->memoryMap.region[i].alignment), + pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } + PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); + flush_workqueue(pm8001_wq); + bitmap_free(pm8001_ha->rsvd_tags); + kfree(pm8001_ha); +} + +#ifdef PM8001_USE_TASKLET + +/** + * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ +static void pm8001_tasklet(unsigned long opaque) +{ + struct pm8001_hba_info *pm8001_ha; + struct isr_param *irq_vector; + + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + if (unlikely(!pm8001_ha)) + BUG_ON(1); + PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); +} +#endif + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @irq: interrupt number + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. 
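+ *
+ * With PM8001_USE_TASKLET defined the vector's tasklet is scheduled;
+ * otherwise the chip ISR is called directly from hard-irq context.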
+ */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct isr_param *irq_vector; + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha)) + return IRQ_NONE; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); +#endif + return ret; +} + +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @irq: interrupt number + * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure. + */ + +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) +{ + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + struct sas_ha_struct *sha = dev_id; + pm8001_ha = sha->lldd_ha; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha)) + return IRQ_NONE; + +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet[0]); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); +#endif + return ret; +} + +static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha); + +/** + * pm8001_alloc - initiate our hba structure and 6 DMAs area. + * @pm8001_ha: our hba structure. + * @ent: PCI device ID structure to match on + */ +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) +{ + int i, count = 0, rc = 0; + u32 ci_offset, ib_offset, ob_offset, pi_offset; + struct inbound_queue_table *ibq; + struct outbound_queue_table *obq; + + spin_lock_init(&pm8001_ha->lock); + spin_lock_init(&pm8001_ha->bitmap_lock); + pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy); + + /* Request Interrupt */ + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out; + + count = pm8001_ha->max_q_num; + /* Queues are chosen based on the number of cores/msix availability */ + ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE; + ci_offset = pm8001_ha->ci_offset = ib_offset + count; + ob_offset = pm8001_ha->ob_offset = ci_offset + count; + pi_offset = pm8001_ha->pi_offset = ob_offset + count; + pm8001_ha->max_memcnt = pi_offset + count; + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_phy_init(pm8001_ha, i); + pm8001_ha->port[i].wide_port_phymap = 0; + pm8001_ha->port[i].port_attached = 0; + pm8001_ha->port[i].port_state = 0; + INIT_LIST_HEAD(&pm8001_ha->port[i].list); + } + + /* MPI Memory region 1 for AAP Event Log for fw */ + pm8001_ha->memoryMap.region[AAP1].num_elements = 1; + pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[AAP1].alignment = 32; + + /* MPI Memory region 2 for IOP Event Log for fw */ + pm8001_ha->memoryMap.region[IOP].num_elements = 1; + pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; + pm8001_ha->memoryMap.region[IOP].alignment = 32; + + for (i = 0; i < count; i++) { + ibq = &pm8001_ha->inbnd_q_tbl[i]; + spin_lock_init(&ibq->iq_lock); + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4; + 
pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[ib_offset+i].alignment + = 128; + } else { + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64; + } + } + + for (i = 0; i < count; i++) { + obq = &pm8001_ha->outbnd_q_tbl[i]; + spin_lock_init(&obq->oq_lock); + /* MPI Memory region 4 for producer Index of outbound queues */ + pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4; + pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[ob_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[ob_offset+i].alignment + = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[ob_offset+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64; + } + + } + /* Memory region write DMA*/ + pm8001_ha->memoryMap.region[NVMD].num_elements = 1; + pm8001_ha->memoryMap.region[NVMD].element_size = 4096; + pm8001_ha->memoryMap.region[NVMD].total_len = 4096; + + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + + pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; + for (i = 0; i < pm8001_ha->max_memcnt; i++) { + struct mpi_mem *region = &pm8001_ha->memoryMap.region[i]; + + if (pm8001_mem_alloc(pm8001_ha->pdev, + ®ion->virt_ptr, + ®ion->phys_addr, + ®ion->phys_addr_hi, + ®ion->phys_addr_lo, + region->total_len, + region->alignment) != 0) { + pm8001_dbg(pm8001_ha, FAIL, "Mem%d alloc failed\n", i); + goto err_out; + } + } + + /* Memory region for devices*/ + pm8001_ha->devices = kzalloc(PM8001_MAX_DEVICES + * sizeof(struct pm8001_device), GFP_KERNEL); + if (!pm8001_ha->devices) { + rc = -ENOMEM; + goto err_out_nodev; + } + for (i = 0; i < PM8001_MAX_DEVICES; i++) { + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; + pm8001_ha->devices[i].id = i; + pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; + atomic_set(&pm8001_ha->devices[i].running_req, 0); + } + pm8001_ha->flags = PM8001F_INIT_TIME; + return 0; + +err_out_nodev: + for (i = 0; i < pm8001_ha->max_memcnt; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + dma_free_coherent(&pm8001_ha->pdev->dev, + (pm8001_ha->memoryMap.region[i].total_len + + pm8001_ha->memoryMap.region[i].alignment), + 
pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } +err_out: + return 1; +} + +/** + * pm8001_ioremap - remap the pci high physical address to kernel virtual + * address so that we can access them. + * @pm8001_ha: our hba structure. + */ +static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) +{ + u32 bar; + u32 logicalBar = 0; + struct pci_dev *pdev; + + pdev = pm8001_ha->pdev; + /* map pci mem (PMC pci base 0-3)*/ + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { + /* + ** logical BARs for SPC: + ** bar 0 and 1 - logical BAR0 + ** bar 2 and 3 - logical BAR1 + ** bar4 - logical BAR2 + ** bar5 - logical BAR3 + ** Skip the appropriate assignments: + */ + if ((bar == 1) || (bar == 3)) + continue; + if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { + pm8001_ha->io_mem[logicalBar].membase = + pci_resource_start(pdev, bar); + pm8001_ha->io_mem[logicalBar].memsize = + pci_resource_len(pdev, bar); + pm8001_ha->io_mem[logicalBar].memvirtaddr = + ioremap(pm8001_ha->io_mem[logicalBar].membase, + pm8001_ha->io_mem[logicalBar].memsize); + if (!pm8001_ha->io_mem[logicalBar].memvirtaddr) { + pm8001_dbg(pm8001_ha, INIT, + "Failed to ioremap bar %d, logicalBar %d", + bar, logicalBar); + return -ENOMEM; + } + pm8001_dbg(pm8001_ha, INIT, + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)(unsigned long) + pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_ha->io_mem[logicalBar].memsize); + } else { + pm8001_ha->io_mem[logicalBar].membase = 0; + pm8001_ha->io_mem[logicalBar].memsize = 0; + pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL; + } + logicalBar++; + } + return 0; +} + +/** + * pm8001_pci_alloc - initialize our ha card structure + * @pdev: pci device. + * @ent: ent + * @shost: scsi host struct which has been initialized before. 
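+ *
+ * Return: the initialized pm8001_hba_info on success, or NULL when BAR
+ * remapping or the DMA region setup in pm8001_alloc() fails.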
+ */ +static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct Scsi_Host *shost) + +{ + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + int j; + + pm8001_ha = sha->lldd_ha; + if (!pm8001_ha) + return NULL; + + pm8001_ha->pdev = pdev; + pm8001_ha->dev = &pdev->dev; + pm8001_ha->chip_id = ent->driver_data; + pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; + pm8001_ha->irq = pdev->irq; + pm8001_ha->sas = sha; + pm8001_ha->shost = shost; + pm8001_ha->id = pm8001_id++; + pm8001_ha->logging_level = logging_level; + pm8001_ha->non_fatal_count = 0; + if (link_rate >= 1 && link_rate <= 15) + pm8001_ha->link_rate = (link_rate << 8); + else { + pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 | + LINKRATE_60 | LINKRATE_120; + pm8001_dbg(pm8001_ha, FAIL, + "Setting link rate to default value\n"); + } + sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + +#ifdef PM8001_USE_TASKLET + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap || !pci_msi_enabled()) + || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); +#endif + if (pm8001_ioremap(pm8001_ha)) + goto failed_pci_alloc; + if (!pm8001_alloc(pm8001_ha, ent)) + return pm8001_ha; +failed_pci_alloc: + pm8001_free(pm8001_ha); + return NULL; +} + +/** + * pci_go_44 - pm8001 specified, its DMA is 44 bit rather than 64 bit + * @pdev: pci device. + */ +static int pci_go_44(struct pci_dev *pdev) +{ + int rc; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + } + return rc; +} + +/** + * pm8001_prep_sas_ha_init - allocate memory in general hba struct && init them. + * @shost: scsi host which has been allocated outside. + * @chip_info: our ha struct. + */ +static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost, + const struct pm8001_chip_info *chip_info) +{ + int phy_nr, port_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + phy_nr = chip_info->n_phy; + port_nr = phy_nr; + memset(sha, 0x00, sizeof(*sha)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy) + goto exit; + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_port) + goto exit_free2; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free1; + + shost->transportt = pm8001_stt; + shost->max_id = PM8001_MAX_DEVICES; + shost->unique_id = pm8001_id; + shost->max_cmd_len = 16; + return 0; +exit_free1: + kfree(arr_port); +exit_free2: + kfree(arr_phy); +exit: + return -1; +} + +/** + * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas + * @shost: scsi host which has been allocated outside + * @chip_info: our ha struct. 
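+ *
+ * Points the libsas phy/port arrays and the HBA SAS address at the
+ * driver's own per-phy and per-port structures.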
+ */ +static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, + const struct pm8001_chip_info *chip_info) +{ + int i = 0; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + pm8001_ha = sha->lldd_ha; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; + sha->sas_port[i] = &pm8001_ha->port[i].sas_port; + sha->sas_phy[i]->sas_addr = + (u8 *)&pm8001_ha->phy[i].dev_sas_addr; + } + sha->sas_ha_name = DRV_NAME; + sha->dev = pm8001_ha->dev; + sha->strict_wide_ports = 1; + sha->sas_addr = &pm8001_ha->sas_addr[0]; + sha->num_phys = chip_info->n_phy; + sha->shost = shost; +} + +/** + * pm8001_init_sas_add - initialize sas address + * @pm8001_ha: our ha struct. + * + * Currently we just set the fixed SAS address to our HBA, for manufacture, + * it should read from the EEPROM + */ +static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) +{ + u8 i, j; + u8 sas_add[8]; +#ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + u16 deviceid; + int rc; + unsigned long time_remaining; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n"); + return -EIO; + } + + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + pm8001_ha->nvmd_completion = &completion; + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081 || deviceid == 0x0042) { + payload.minor_function = 4; + payload.rd_length = 4096; + } else { + payload.minor_function = 0; + payload.rd_length = 128; + } + } else if ((pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) && + pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) { + payload.minor_function = 4; + payload.rd_length = 4096; + } else { + payload.minor_function = 1; + payload.rd_length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL); + if (!payload.func_specific) { + pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n"); + return -ENOMEM; + } + rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + if (rc) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n"); + return -EIO; + } + time_remaining = wait_for_completion_timeout(&completion, + msecs_to_jiffies(60*1000)); // 1 min + if (!time_remaining) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); + return -EIO; + } + + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + else if (deviceid == 0x0042) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x010 + i]; + } else if ((pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) && + pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) { + pm8001_ha->sas_addr[j] = + payload.func_specific[0x010 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (i && ((i % 4) == 0)) + sas_add[7] = sas_add[7] + 4; + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + sas_add, SAS_ADDR_SIZE); + pm8001_dbg(pm8001_ha, INIT, "phy %d sas_addr = %016llx\n", i, + pm8001_ha->phy[i].dev_sas_addr); + } + 
kfree(payload.func_specific); +#else + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL; + pm8001_ha->phy[i].dev_sas_addr = + cpu_to_be64((u64) + (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); + } + memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, + SAS_ADDR_SIZE); +#endif + return 0; +} + +/* + * pm8001_get_phy_settings_info : Read phy setting values. + * @pm8001_ha : our hba. + */ +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +{ + +#ifdef PM8001_READ_VPD + /*OPTION ROM FLASH read for the SPC cards */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + int rc; + + pm8001_ha->nvmd_completion = &completion; + /* SAS ADDRESS read from flash / EEPROM */ + payload.minor_function = 6; + payload.offset = 0; + payload.rd_length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + /* Read phy setting values from flash */ + rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + if (rc) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n"); + return -ENOMEM; + } + wait_for_completion(&completion); + pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); +#endif + return 0; +} + +struct pm8001_mpi3_phy_pg_trx_config { + u32 LaneLosCfg; + u32 LanePgaCfg1; + u32 LanePisoCfg1; + u32 LanePisoCfg2; + u32 LanePisoCfg3; + u32 LanePisoCfg4; + u32 LanePisoCfg5; + u32 LanePisoCfg6; + u32 LaneBctCtrl; +}; + +/** + * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings + * @pm8001_ha : our adapter + * @phycfg : PHY config page to populate + */ +static +void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha, + struct pm8001_mpi3_phy_pg_trx_config *phycfg) +{ + phycfg->LaneLosCfg = 0x00000132; + phycfg->LanePgaCfg1 = 0x00203949; + phycfg->LanePisoCfg1 = 0x000000FF; + phycfg->LanePisoCfg2 = 0xFF000001; + phycfg->LanePisoCfg3 = 0xE7011300; + phycfg->LanePisoCfg4 = 0x631C40C0; + phycfg->LanePisoCfg5 = 0xF8102036; + phycfg->LanePisoCfg6 = 0xF74A1000; + phycfg->LaneBctCtrl = 0x00FB33F8; +} + +/** + * pm8001_get_external_phy_settings - Retrieves the external PHY settings + * @pm8001_ha : our adapter + * @phycfg : PHY config page to populate + */ +static +void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha, + struct pm8001_mpi3_phy_pg_trx_config *phycfg) +{ + phycfg->LaneLosCfg = 0x00000132; + phycfg->LanePgaCfg1 = 0x00203949; + phycfg->LanePisoCfg1 = 0x000000FF; + phycfg->LanePisoCfg2 = 0xFF000001; + phycfg->LanePisoCfg3 = 0xE7011300; + phycfg->LanePisoCfg4 = 0x63349140; + phycfg->LanePisoCfg5 = 0xF8102036; + phycfg->LanePisoCfg6 = 0xF80D9300; + phycfg->LaneBctCtrl = 0x00FB33F8; +} + +/** + * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext + * @pm8001_ha : our adapter + * @phymask : The PHY mask + */ +static +void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask) +{ + switch (pm8001_ha->pdev->subsystem_device) { + case 0x0070: /* H1280 - 8 external 0 internal */ + case 0x0072: /* H12F0 - 16 external 0 internal */ + *phymask = 0x0000; + break; + + case 0x0071: /* H1208 - 0 external 8 internal */ + case 0x0073: /* H120F - 0 external 16 internal */ + *phymask = 0xFFFF; + break; + + case 0x0080: /* H1244 - 4 external 4 internal */ + *phymask = 0x00F0; + break; + + case 0x0081: /* H1248 - 4 external 8 internal */ + *phymask = 0x0FF0; + break; + + case 0x0082: /* H1288 - 
8 external 8 internal */ + *phymask = 0xFF00; + break; + + default: + pm8001_dbg(pm8001_ha, INIT, + "Unknown subsystem device=0x%.04x\n", + pm8001_ha->pdev->subsystem_device); + } +} + +/** + * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings + * @pm8001_ha : our adapter + */ +static +int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha) +{ + struct pm8001_mpi3_phy_pg_trx_config phycfg_int; + struct pm8001_mpi3_phy_pg_trx_config phycfg_ext; + int phymask = 0; + int i = 0; + + memset(&phycfg_int, 0, sizeof(phycfg_int)); + memset(&phycfg_ext, 0, sizeof(phycfg_ext)); + + pm8001_get_internal_phy_settings(pm8001_ha, &phycfg_int); + pm8001_get_external_phy_settings(pm8001_ha, &phycfg_ext); + pm8001_get_phy_mask(pm8001_ha, &phymask); + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (phymask & (1 << i)) {/* Internal PHY */ + pm8001_set_phy_profile_single(pm8001_ha, i, + sizeof(phycfg_int) / sizeof(u32), + (u32 *)&phycfg_int); + + } else { /* External PHY */ + pm8001_set_phy_profile_single(pm8001_ha, i, + sizeof(phycfg_ext) / sizeof(u32), + (u32 *)&phycfg_ext); + } + } + + return 0; +} + +/** + * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID. + * @pm8001_ha : our hba. + */ +static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) +{ + switch (pm8001_ha->pdev->subsystem_vendor) { + case PCI_VENDOR_ID_ATTO: + if (pm8001_ha->pdev->device == 0x0042) /* 6Gb */ + return 0; + else + return pm8001_set_phy_settings_ven_117c_12G(pm8001_ha); + + case PCI_VENDOR_ID_ADAPTEC2: + case 0: + return 0; + + default: + return pm8001_get_phy_settings_info(pm8001_ha); + } +} + +#ifdef PM8001_USE_MSIX +/** + * pm8001_setup_msix - enable MSI-X interrupt + * @pm8001_ha: our ha struct. + */ +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) +{ + unsigned int allocated_irq_vectors; + int rc; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1, + PCI_IRQ_MSIX); + } else { + /* + * Queue index #0 is used always for housekeeping, so don't + * include in the affinity spreading. 
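+ * (Hence pre_vectors = 1 in the irq_affinity descriptor below.)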
+ */ + struct irq_affinity desc = { + .pre_vectors = 1, + }; + rc = pci_alloc_irq_vectors_affinity( + pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); + } + + allocated_irq_vectors = rc; + if (rc < 0) + return rc; + + /* Assigns the number of interrupts */ + pm8001_ha->number_of_intr = allocated_irq_vectors; + + /* Maximum queue number updating in HBA structure */ + pm8001_ha->max_q_num = allocated_irq_vectors; + + pm8001_dbg(pm8001_ha, INIT, + "pci_alloc_irq_vectors request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr); + return 0; +} + +static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha) +{ + u32 i = 0, j = 0; + int flag = 0, rc = 0; + int nr_irqs = pm8001_ha->number_of_intr; + + if (pm8001_ha->chip_id != chip_8001) + flag &= ~IRQF_SHARED; + + pm8001_dbg(pm8001_ha, INIT, + "pci_enable_msix request number of intr %d\n", + pm8001_ha->number_of_intr); + + if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname)) + nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname); + + for (i = 0; i < nr_irqs; i++) { + snprintf(pm8001_ha->intr_drvname[i], + sizeof(pm8001_ha->intr_drvname[0]), + "%s-%d", pm8001_ha->name, i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + + rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i), + pm8001_interrupt_handler_msix, flag, + pm8001_ha->intr_drvname[i], + &(pm8001_ha->irq_vector[i])); + if (rc) { + for (j = 0; j < i; j++) { + free_irq(pci_irq_vector(pm8001_ha->pdev, i), + &(pm8001_ha->irq_vector[i])); + } + pci_free_irq_vectors(pm8001_ha->pdev); + break; + } + } + + return rc; +} +#endif + +/** + * pm8001_request_irq - register interrupt + * @pm8001_ha: our ha struct. + */ +static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) +{ + struct pci_dev *pdev = pm8001_ha->pdev; +#ifdef PM8001_USE_MSIX + int rc; + + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + rc = pm8001_setup_msix(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "pm8001_setup_irq failed [ret: %d]\n", rc); + return rc; + } + + if (pdev->msix_cap && pci_msi_enabled()) + return pm8001_request_msix(pm8001_ha); + } + + pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); +#endif + + /* initialize the INT-X interrupt */ + pm8001_ha->irq_vector[0].irq_id = 0; + pm8001_ha->irq_vector[0].drv_inst = pm8001_ha; + + return request_irq(pdev->irq, pm8001_interrupt_handler_intx, + IRQF_SHARED, pm8001_ha->name, + SHOST_TO_SAS_HA(pm8001_ha->shost)); +} + +/** + * pm8001_pci_probe - probe supported device + * @pdev: pci device which kernel has been prepared for. + * @ent: pci device id + * + * This function is the main initialization function, when register a new + * pci driver it is invoked, all struct and hardware initialization should be + * done here, also, register interrupt. + */ +static int pm8001_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned int rc; + u32 pci_reg; + u8 i = 0; + struct pm8001_hba_info *pm8001_ha; + struct Scsi_Host *shost = NULL; + const struct pm8001_chip_info *chip; + struct sas_ha_struct *sha; + + dev_printk(KERN_INFO, &pdev->dev, + "pm80xx: driver version %s\n", DRV_VERSION); + rc = pci_enable_device(pdev); + if (rc) + goto err_out_enable; + pci_set_master(pdev); + /* + * Enable pci slot busmaster by setting pci command register. + * This is required by FW for Cyclone card. 
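+ * The command register is read back, OR-ed with the required bits and
+ * rewritten below.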
+ */ + + pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg); + pci_reg |= 0x157; + pci_write_config_dword(pdev, PCI_COMMAND, pci_reg); + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + rc = pci_go_44(pdev); + if (rc) + goto err_out_regions; + + shost = scsi_host_alloc(&pm8001_sht, sizeof(void *)); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + chip = &pm8001_chips[ent->driver_data]; + sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!sha) { + rc = -ENOMEM; + goto err_out_free_host; + } + SHOST_TO_SAS_HA(shost) = sha; + + rc = pm8001_prep_sas_ha_init(shost, chip); + if (rc) { + rc = -ENOMEM; + goto err_out_free; + } + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); + if (!pm8001_ha) { + rc = -ENOMEM; + goto err_out_free; + } + + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "chip_init failed [ret: %d]\n", rc); + goto err_out_ha_free; + } + + rc = pm8001_init_ccb_tag(pm8001_ha); + if (rc) + goto err_out_enable; + + + PM8001_CHIP_DISP->chip_post_init(pm8001_ha); + + if (pm8001_ha->number_of_intr > 1) { + shost->nr_hw_queues = pm8001_ha->number_of_intr - 1; + /* + * For now, ensure we're not sent too many commands by setting + * host_tagset. This is also required if we start using request + * tag. + */ + shost->host_tagset = 1; + } + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_ha_free; + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } + + rc = pm8001_init_sas_add(pm8001_ha); + if (rc) + goto err_out_shost; + /* phy setting support for motherboard controller */ + rc = pm8001_configure_phy_settings(pm8001_ha); + if (rc) + goto err_out_shost; + + pm8001_post_sas_ha_init(shost, chip); + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "sas_register_ha failed [ret: %d]\n", rc); + goto err_out_shost; + } + list_add_tail(&pm8001_ha->list, &hba_list); + pm8001_ha->flags = PM8001F_RUN_TIME; + scsi_scan_host(pm8001_ha->shost); + return 0; + +err_out_shost: + scsi_remove_host(pm8001_ha->shost); +err_out_ha_free: + pm8001_free(pm8001_ha); +err_out_free: + kfree(sha); +err_out_free_host: + scsi_host_put(shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +/** + * pm8001_init_ccb_tag - allocate memory to CCB and tag. + * @pm8001_ha: our hba card information. 
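+ *
+ * The CCB count is bounded by the firmware-reported max_out_io, and
+ * PM8001_RESERVE_SLOT tags are withheld from shost->can_queue and tracked
+ * in the rsvd_tags bitmap for the driver's internal commands.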
+ */ +static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) +{ + struct Scsi_Host *shost = pm8001_ha->shost; + struct device *dev = pm8001_ha->dev; + u32 max_out_io, ccb_count; + int i; + + max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io; + ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io); + + shost->can_queue = ccb_count - PM8001_RESERVE_SLOT; + + pm8001_ha->rsvd_tags = bitmap_zalloc(PM8001_RESERVE_SLOT, GFP_KERNEL); + if (!pm8001_ha->rsvd_tags) + goto err_out; + + /* Memory region for ccb_info*/ + pm8001_ha->ccb_count = ccb_count; + pm8001_ha->ccb_info = + kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL); + if (!pm8001_ha->ccb_info) { + pm8001_dbg(pm8001_ha, FAIL, + "Unable to allocate memory for ccb\n"); + goto err_out_noccb; + } + for (i = 0; i < ccb_count; i++) { + pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev, + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, + &pm8001_ha->ccb_info[i].ccb_dma_handle, + GFP_KERNEL); + if (!pm8001_ha->ccb_info[i].buf_prd) { + pm8001_dbg(pm8001_ha, FAIL, + "ccb prd memory allocation error\n"); + goto err_out; + } + pm8001_ha->ccb_info[i].task = NULL; + pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG; + pm8001_ha->ccb_info[i].device = NULL; + } + + return 0; + +err_out_noccb: + kfree(pm8001_ha->devices); +err_out: + return -ENOMEM; +} + +static void pm8001_pci_remove(struct pci_dev *pdev) +{ + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int i, j; + pm8001_ha = sha->lldd_ha; + sas_unregister_ha(sha); + sas_remove_host(pm8001_ha->shost); + list_del(&pm8001_ha->list); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pci_irq_vector(pdev, i)); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); +#endif + scsi_host_put(pm8001_ha->shost); + + for (i = 0; i < pm8001_ha->ccb_count; i++) { + dma_free_coherent(&pm8001_ha->pdev->dev, + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, + pm8001_ha->ccb_info[i].buf_prd, + pm8001_ha->ccb_info[i].ccb_dma_handle); + } + kfree(pm8001_ha->ccb_info); + kfree(pm8001_ha->devices); + + pm8001_free(pm8001_ha); + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +/** + * pm8001_pci_suspend - power management suspend main entry point + * @dev: Device struct + * + * Return: 0 on success, anything else on error. 
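+ *
+ * Interrupts, tasklets and the chip are torn down here and are set up
+ * again by pm8001_pci_resume().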
+ */ +static int __maybe_unused pm8001_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int i, j; + sas_suspend_ha(sha); + flush_workqueue(pm8001_wq); + scsi_block_requests(pm8001_ha->shost); + if (!pdev->pm_cap) { + dev_err(dev, " PCI PM not supported\n"); + return -ENODEV; + } + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); +#ifdef PM8001_USE_MSIX + for (i = 0; i < pm8001_ha->number_of_intr; i++) + synchronize_irq(pci_irq_vector(pdev, i)); + for (i = 0; i < pm8001_ha->number_of_intr; i++) + free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]); + pci_free_irq_vectors(pdev); +#else + free_irq(pm8001_ha->irq, sha); +#endif +#ifdef PM8001_USE_TASKLET + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); +#endif + pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering " + "suspended state\n", pdev, + pm8001_ha->name); + return 0; +} + +/** + * pm8001_pci_resume - power management resume main entry point + * @dev: Device struct + * + * Return: 0 on success, anything else on error. + */ +static int __maybe_unused pm8001_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct pm8001_hba_info *pm8001_ha; + int rc; + u8 i = 0, j; + DECLARE_COMPLETION_ONSTACK(completion); + + pm8001_ha = sha->lldd_ha; + + pm8001_info(pm8001_ha, + "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n", + pdev, pm8001_ha->name, pdev->current_state); + + rc = pci_go_44(pdev); + if (rc) + goto err_out_disable; + sas_prep_resume_ha(sha); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + pm8001_dbg(pm8001_ha, INIT, "chip soft reset successful\n"); + } + rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); + if (rc) + goto err_out_disable; + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out_disable; +#ifdef PM8001_USE_TASKLET + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap || !pci_msi_enabled()) || + (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } + + /* Chip documentation for the 8070 and 8072 SPCv */ + /* states that a 500ms minimum delay is required */ + /* before issuing commands. Otherwise, the firmware */ + /* will enter an unrecoverable state. 
*/ + + if (pm8001_ha->chip_id == chip_8070 || + pm8001_ha->chip_id == chip_8072) { + mdelay(500); + } + + /* Spin up the PHYs */ + + pm8001_ha->flags = PM8001F_RUN_TIME; + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + pm8001_ha->phy[i].enable_completion = &completion; + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); + wait_for_completion(&completion); + } + sas_resume_ha(sha); + return 0; + +err_out_disable: + scsi_remove_host(pm8001_ha->shost); + + return rc; +} + +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ +static struct pci_device_id pm8001_pci_table[] = { + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 }, + { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 }, + { PCI_VDEVICE(ATTO, 0x0042), chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, + { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, + { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, + { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, + { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, + { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0070, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0071, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0072, 0, 0, chip_8072 }, + { 
PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0073, 0, 0, chip_8072 }, + { PCI_VENDOR_ID_ATTO, 0x8070, + PCI_VENDOR_ID_ATTO, 0x0080, 0, 0, chip_8070 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0081, 0, 0, chip_8072 }, + { PCI_VENDOR_ID_ATTO, 0x8072, + PCI_VENDOR_ID_ATTO, 0x0082, 0, 0, chip_8072 }, + {} /* terminate list */ +}; + +static SIMPLE_DEV_PM_OPS(pm8001_pci_pm_ops, + pm8001_pci_suspend, + pm8001_pci_resume); + +static struct pci_driver pm8001_pci_driver = { + .name = DRV_NAME, + .id_table = pm8001_pci_table, + .probe = pm8001_pci_probe, + .remove = pm8001_pci_remove, + .driver.pm = &pm8001_pci_pm_ops, +}; + +/** + * pm8001_init - initialize scsi transport template + */ +static int __init pm8001_init(void) +{ + int rc = -ENOMEM; + + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); + if (!pm8001_wq) + goto err; + + pm8001_id = 0; + pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); + if (!pm8001_stt) + goto err_wq; + rc = pci_register_driver(&pm8001_pci_driver); + if (rc) + goto err_tp; + return 0; + +err_tp: + sas_release_transport(pm8001_stt); +err_wq: + destroy_workqueue(pm8001_wq); +err: + return rc; +} + +static void __exit pm8001_exit(void) +{ + pci_unregister_driver(&pm8001_pci_driver); + sas_release_transport(pm8001_stt); + destroy_workqueue(pm8001_wq); +} + +module_init(pm8001_init); +module_exit(pm8001_exit); + +MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); +MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); +MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); +MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077/8070/8072 " + "SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pm8001_pci_table); + diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c new file mode 100644 index 0000000000..a5a31dfa45 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -0,0 +1,1195 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include <linux/slab.h> +#include "pm8001_sas.h" +#include "pm80xx_tracepoints.h" + +/** + * pm8001_find_tag - from sas task to find out tag that belongs to this task + * @task: the task sent to the LLDD + * @tag: the found tag associated with the task + */ +static int pm8001_find_tag(struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct pm8001_ccb_info *ccb; + ccb = task->lldd_task; + *tag = ccb->ccb_tag; + return 1; + } + return 0; +} + +/** + * pm8001_tag_free - free the no more needed tag + * @pm8001_ha: our hba struct + * @tag: the found tag associated with the task + */ +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +{ + void *bitmap = pm8001_ha->rsvd_tags; + unsigned long flags; + + if (tag >= PM8001_RESERVE_SLOT) + return; + + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); + __clear_bit(tag, bitmap); + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); +} + +/** + * pm8001_tag_alloc - allocate a empty tag for task used. + * @pm8001_ha: our hba struct + * @tag_out: the found empty tag . + */ +int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) +{ + void *bitmap = pm8001_ha->rsvd_tags; + unsigned long flags; + unsigned int tag; + + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); + tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT); + if (tag >= PM8001_RESERVE_SLOT) { + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + return -SAS_QUEUE_FULL; + } + __set_bit(tag, bitmap); + spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + + /* reserved tags are in the lower region of the tagset */ + *tag_out = tag; + return 0; +} + +/** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr: DMA address for this device + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + * @align: requested byte alignment + */ +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, + u32 *pphys_addr_lo, u32 mem_size, u32 align) +{ + caddr_t mem_virt_alloc; + dma_addr_t mem_dma_handle; + u64 phys_align; + u64 align_offset = 0; + if (align) + align_offset = (dma_addr_t)align - 1; + mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, + &mem_dma_handle, GFP_KERNEL); + if (!mem_virt_alloc) + return -ENOMEM; + *pphys_addr = mem_dma_handle; + phys_align = (*pphys_addr + align_offset) & ~align_offset; + *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; + *pphys_addr_hi = upper_32_bits(phys_align); + *pphys_addr_lo = lower_32_bits(phys_align); + return 0; +} + +/** + * pm8001_find_ha_by_dev - from domain device which come from sas layer to + * find out our hba struct. + * @dev: the domain device which from sas layer. 
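+ *
+ * Follows dev->port->ha back to the lldd_ha pointer stored when the
+ * host was registered with libsas.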
+ */ +static +struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) +{ + struct sas_ha_struct *sha = dev->port->ha; + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + return pm8001_ha; +} + +/** + * pm8001_phy_control - this function should be registered to + * sas_domain_function_template to provide libsas used, note: this is just + * control the HBA phy rather than other expander phy if you want control + * other phy, you should use SMP command. + * @sas_phy: which phy in HBA phys. + * @func: the operation. + * @funcdata: always NULL. + */ +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + struct pm8001_hba_info *pm8001_ha = NULL; + struct sas_phy_linkrates *rates; + struct pm8001_phy *phy; + DECLARE_COMPLETION_ONSTACK(completion); + unsigned long flags; + pm8001_ha = sas_phy->ha->lldd_ha; + phy = &pm8001_ha->phy[phy_id]; + pm8001_ha->phy[phy_id].enable_completion = &completion; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "Phy control failed due to fatal errors\n"); + return -EFAULT; + } + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + rates = funcdata; + if (rates->minimum_linkrate) { + pm8001_ha->phy[phy_id].minimum_linkrate = + rates->minimum_linkrate; + } + if (rates->maximum_linkrate) { + pm8001_ha->phy[phy_id].maximum_linkrate = + rates->maximum_linkrate; + } + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_HARD_RESET: + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_HARD_RESET); + break; + case PHY_FUNC_LINK_RESET: + if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) { + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); + wait_for_completion(&completion); + } + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_LINK_RESET); + break; + case PHY_FUNC_DISABLE: + if (pm8001_ha->chip_id != chip_8001) { + if (pm8001_ha->phy[phy_id].phy_state == + PHY_STATE_LINK_UP_SPCV) { + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_KERNEL); + phy->phy_attached = 0; + } + } else { + if (pm8001_ha->phy[phy_id].phy_state == + PHY_STATE_LINK_UP_SPC) { + sas_phy_disconnected(&phy->sas_phy); + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_KERNEL); + phy->phy_attached = 0; + } + } + PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); + break; + case PHY_FUNC_GET_EVENTS: + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, + (phy_id < 4) ? 
0x30000 : 0x40000)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } + } + { + struct sas_phy *phy = sas_phy->phy; + u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr + + 0x1034 + (0x4000 * (phy_id & 3)); + + phy->invalid_dword_count = readl(qp); + phy->running_disparity_error_count = readl(&qp[1]); + phy->loss_of_dword_sync_count = readl(&qp[3]); + phy->phy_reset_problem_count = readl(&qp[4]); + } + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return 0; + default: + pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func); + rc = -EOPNOTSUPP; + } + msleep(300); + return rc; +} + +/** + * pm8001_scan_start - we should enable all HBA phys by sending the phy_start + * command to HBA. + * @shost: the scsi host data. + */ +void pm8001_scan_start(struct Scsi_Host *shost) +{ + int i; + struct pm8001_hba_info *pm8001_ha; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + DECLARE_COMPLETION_ONSTACK(completion); + pm8001_ha = sha->lldd_ha; + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + for (i = 0; i < pm8001_ha->chip->n_phy; ++i) { + pm8001_ha->phy[i].enable_completion = &completion; + PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); + wait_for_completion(&completion); + msleep(300); + } +} + +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + sas_drain_work(ha); + return 1; +} + +/** + * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to smp task + */ +static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); +} + +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc && ata_is_ncq(qc->tf.protocol)) { + *tag = qc->tag; + return 1; + } + + return 0; +} + +/** + * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to sata task + */ +static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); +} + +/** + * pm8001_task_prep_internal_abort - the dispatcher function, prepare data + * for internal abort task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to sata task + */ +static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb); +} + +/** + * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to TM + * @tmf: the task management IU + */ +static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf) +{ + return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); +} + +/** + * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached 
to ssp task + */ +static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); +} + + /* Find the local port id that's attached to this device */ +static int sas_find_local_port_id(struct domain_device *dev) +{ + struct domain_device *pdev = dev->parent; + + /* Directly attached device */ + if (!pdev) + return dev->port->id; + while (pdev) { + struct domain_device *pdev_p = pdev->parent; + if (!pdev_p) + return pdev->port->id; + pdev = pdev->parent; + } + return 0; +} + +#define DEV_IS_GONE(pm8001_dev) \ + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) + + +static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + enum sas_protocol task_proto = task->task_proto; + struct sas_tmf_task *tmf = task->tmf; + int is_tmf = !!tmf; + + switch (task_proto) { + case SAS_PROTOCOL_SMP: + return pm8001_task_prep_smp(pm8001_ha, ccb); + case SAS_PROTOCOL_SSP: + if (is_tmf) + return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf); + return pm8001_task_prep_ssp(pm8001_ha, ccb); + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + return pm8001_task_prep_ata(pm8001_ha, ccb); + case SAS_PROTOCOL_INTERNAL_ABORT: + return pm8001_task_prep_internal_abort(pm8001_ha, ccb); + default: + dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n", + task_proto); + } + + return -EINVAL; +} + +/** + * pm8001_queue_command - register for upper layer used, all IO commands sent + * to HBA are from this interface. + * @task: the task to be execute. + * @gfp_flags: gfp_flags + */ +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) +{ + struct task_status_struct *ts = &task->task_status; + enum sas_protocol task_proto = task->task_proto; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + bool internal_abort = sas_is_internal_abort(task); + struct pm8001_hba_info *pm8001_ha; + struct pm8001_port *port = NULL; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 n_elem = 0; + int rc = 0; + + if (!internal_abort && !dev->port) { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (dev->dev_type != SAS_SATA_DEV) + task->task_done(task); + return 0; + } + + pm8001_ha = pm8001_find_ha_by_dev(dev); + if (pm8001_ha->controller_fatal_error) { + ts->resp = SAS_TASK_UNDELIVERED; + task->task_done(task); + return 0; + } + + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n"); + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + pm8001_dev = dev->lldd_dev; + port = &pm8001_ha->port[sas_find_local_port_id(dev)]; + + if (!internal_abort && + (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) { + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (sas_protocol_ata(task_proto)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + task->task_done(task); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } else { + task->task_done(task); + } + rc = -ENODEV; + goto err_out; + } + + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task); + if (!ccb) { + rc = -SAS_QUEUE_FULL; + goto err_out; + } + + if (!sas_protocol_ata(task_proto)) { + if (task->num_scatter) { + n_elem = dma_map_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out_ccb; + } + } + } else { + n_elem = task->num_scatter; + } + + task->lldd_task = ccb; + ccb->n_elem = n_elem; + + atomic_inc(&pm8001_dev->running_req); + + rc = 
pm8001_deliver_command(pm8001_ha, ccb); + if (rc) { + atomic_dec(&pm8001_dev->running_req); + if (!sas_protocol_ata(task_proto) && n_elem) + dma_unmap_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); +err_out_ccb: + pm8001_ccb_free(pm8001_ha, ccb); + +err_out: + pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc); + } + + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + + return rc; +} + +/** + * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. + * @pm8001_ha: our hba card information + * @ccb: the ccb which attached to ssp task to free + */ +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct ata_queued_cmd *qc; + struct pm8001_device *pm8001_dev; + + if (!task) + return; + + if (!sas_protocol_ata(task->task_proto) && ccb->n_elem) + dma_unmap_sg(pm8001_ha->dev, task->scatter, + task->num_scatter, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); + dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + + if (sas_protocol_ata(task->task_proto)) { + /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */ + qc = task->uldd_task; + pm8001_dev = ccb->device; + trace_pm80xx_request_complete(pm8001_ha->id, + pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS, + ccb->ccb_tag, 0 /* ctlr_opcode not known */, + qc ? qc->tf.command : 0, // ata opcode + pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1); + } + + task->lldd_task = NULL; + pm8001_ccb_free(pm8001_ha, ccb); +} + +/** + * pm8001_alloc_dev - find a empty pm8001_device + * @pm8001_ha: our hba card information + */ +static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { + pm8001_ha->devices[dev].id = dev; + return &pm8001_ha->devices[dev]; + } + } + if (dev == PM8001_MAX_DEVICES) { + pm8001_dbg(pm8001_ha, FAIL, + "max support %d devices, ignore ..\n", + PM8001_MAX_DEVICES); + } + return NULL; +} +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + * @device_id: device ID to match against + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n"); + } + return NULL; +} + +void pm8001_free_dev(struct pm8001_device *pm8001_dev) +{ + u32 id = pm8001_dev->id; + memset(pm8001_dev, 0, sizeof(*pm8001_dev)); + pm8001_dev->id = id; + pm8001_dev->dev_type = SAS_PHY_UNUSED; + pm8001_dev->device_id = PM8001_MAX_DEVICES; + pm8001_dev->sas_device = NULL; +} + +/** + * pm8001_dev_found_notify - libsas notify a device is found. + * @dev: the device structure which sas layer used. 
+ * + * when libsas find a sas domain device, it should tell the LLDD that + * device is found, and then LLDD register this device to HBA firmware + * by the command "OPC_INB_REG_DEV", after that the HBA will assign a + * device ID(according to device's sas address) and returned it to LLDD. From + * now on, we communicate with HBA FW with the device ID which HBA assigned + * rather than sas address. it is the necessary step for our HBA but it is + * the optional for other HBA driver. + */ +static int pm8001_dev_found_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + int res = 0; + struct pm8001_hba_info *pm8001_ha = NULL; + struct domain_device *parent_dev = dev->parent; + struct pm8001_device *pm8001_device; + DECLARE_COMPLETION_ONSTACK(completion); + u32 flag = 0; + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + + pm8001_device = pm8001_alloc_dev(pm8001_ha); + if (!pm8001_device) { + res = -1; + goto found_out; + } + pm8001_device->sas_device = dev; + dev->lldd_dev = pm8001_device; + pm8001_device->dev_type = dev->dev_type; + pm8001_device->dcompletion = &completion; + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { + int phy_id; + + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { + pm8001_dbg(pm8001_ha, FAIL, + "Error: no attached dev:%016llx at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = phy_id; + } else { + pm8001_device->attached_phy = phy_id; + } + } else { + if (dev->dev_type == SAS_SATA_DEV) { + pm8001_device->attached_phy = + dev->rphy->identify.phy_identifier; + flag = 1; /* directly sata */ + } + } /*register this device to HBA*/ + pm8001_dbg(pm8001_ha, DISC, "Found device\n"); + PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + wait_for_completion(&completion); + if (dev->dev_type == SAS_END_DEVICE) + msleep(50); + pm8001_ha->flags = PM8001F_RUN_TIME; + return 0; +found_out: + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return res; +} + +int pm8001_dev_found(struct domain_device *dev) +{ + return pm8001_dev_found_notify(dev); +} + +#define PM8001_TASK_TIMEOUT 20 + +/** + * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" + * @dev: the device structure which sas layer used. 
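+ *
+ * Aborts any I/O still outstanding on the device, deregisters the
+ * device from the controller firmware and releases its pm8001_device
+ * slot.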
+ */ +static void pm8001_dev_gone_notify(struct domain_device *dev) +{ + unsigned long flags = 0; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + + pm8001_ha = pm8001_find_ha_by_dev(dev); + spin_lock_irqsave(&pm8001_ha->lock, flags); + if (pm8001_dev) { + u32 device_id = pm8001_dev->device_id; + + pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n", + pm8001_dev->device_id, pm8001_dev->dev_type); + if (atomic_read(&pm8001_dev->running_req)) { + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + sas_execute_internal_abort_dev(dev, 0, NULL); + while (atomic_read(&pm8001_dev->running_req)) + msleep(20); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_free_dev(pm8001_dev); + } else { + pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); + } + dev->lldd_dev = NULL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); +} + +void pm8001_dev_gone(struct domain_device *dev) +{ + pm8001_dev_gone_notify(dev); +} + +/* retry commands by ha, by task and/or by device */ +void pm8001_open_reject_retry( + struct pm8001_hba_info *pm8001_ha, + struct sas_task *task_to_close, + struct pm8001_device *device_to_close) +{ + int i; + unsigned long flags; + + if (pm8001_ha == NULL) + return; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + + for (i = 0; i < PM8001_MAX_CCB; i++) { + struct sas_task *task; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + unsigned long flags1; + struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; + + if (ccb->ccb_tag == PM8001_INVALID_TAG) + continue; + + pm8001_dev = ccb->device; + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) + continue; + if (!device_to_close) { + uintptr_t d = (uintptr_t)pm8001_dev + - (uintptr_t)&pm8001_ha->devices; + if (((d % sizeof(*pm8001_dev)) != 0) + || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES)) + continue; + } else if (pm8001_dev != device_to_close) + continue; + task = ccb->task; + if (!task || !task->task_done) + continue; + if (task_to_close && (task != task_to_close)) + continue; + ts = &task->task_status; + ts->resp = SAS_TASK_COMPLETE; + /* Force the midlayer to retry */ + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + spin_lock_irqsave(&task->task_state_lock, flags1); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags + & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&task->task_state_lock, + flags1); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + task->task_done(task); + spin_lock_irqsave(&pm8001_ha->lock, flags); + } + } + + spin_unlock_irqrestore(&pm8001_ha->lock, flags); +} + +/** + * pm8001_I_T_nexus_reset() - reset the initiator/target connection + * @dev: the device structure for the device to reset. 
+ * + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1), only for RECOVERY + */ +int pm8001_I_T_nexus_reset(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + + if (!dev || !dev->lldd_dev) + return -ENODEV; + + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + rc = sas_phy_reset(phy, 1); + if (rc) { + pm8001_dbg(pm8001_ha, EH, + "phy reset failed for device %x\n" + "with rc %d\n", pm8001_dev->device_id, rc); + rc = TMF_RESP_FUNC_FAILED; + goto out; + } + msleep(2000); + rc = sas_execute_internal_abort_dev(dev, 0, NULL); + if (rc) { + pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n" + "with rc %d\n", pm8001_dev->device_id, rc); + rc = TMF_RESP_FUNC_FAILED; + } + } else { + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); + out: + sas_put_local_phy(phy); + return rc; +} + +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n"); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + sas_execute_internal_abort_dev(dev, 0, NULL); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + sas_execute_internal_abort_dev(dev, 0, NULL); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); +out: + sas_put_local_phy(phy); + + return rc; +} +/* mandatory SAM-3, the task reset the specified LUN*/ +int pm8001_lu_reset(struct domain_device *dev, u8 *lun) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + DECLARE_COMPLETION_ONSTACK(completion_setstate); + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "LUN reset failed due to fatal errors\n"); + return rc; + } + + if (dev_is_sata(dev)) { + struct sas_phy *phy = sas_get_local_phy(dev); + sas_execute_internal_abort_dev(dev, 0, NULL); + rc = sas_phy_reset(phy, 1); + sas_put_local_phy(phy); + pm8001_dev->setds_completion = &completion_setstate; + rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); 
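+		/* wait for the firmware to ack the SET DEVICE STATE request */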
+ wait_for_completion(&completion_setstate); + } else { + rc = sas_lu_reset(dev, lun); + } + /* If failed, fall-through I_T_Nexus reset */ + pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc); + return rc; +} + +/* optional SAM-3 */ +int pm8001_query_task(struct sas_task *task) +{ + u32 tag = 0xdeadbeef; + int rc = TMF_RESP_FUNC_FAILED; + if (unlikely(!task || !task->lldd_task || !task->dev)) + return rc; + + if (task->task_proto & SAS_PROTOCOL_SSP) { + struct scsi_cmnd *cmnd = task->uldd_task; + struct domain_device *dev = task->dev; + struct pm8001_hba_info *pm8001_ha = + pm8001_find_ha_by_dev(dev); + + rc = pm8001_find_tag(task, &tag); + if (rc == 0) { + rc = TMF_RESP_FUNC_FAILED; + return rc; + } + pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd); + + rc = sas_query_task(task, tag); + switch (rc) { + /* The task is still in Lun, release it then */ + case TMF_RESP_FUNC_SUCC: + pm8001_dbg(pm8001_ha, EH, + "The task is still in Lun\n"); + break; + /* The task is not in Lun or failed, reset the phy */ + case TMF_RESP_FUNC_FAILED: + case TMF_RESP_FUNC_COMPLETE: + pm8001_dbg(pm8001_ha, EH, + "The task is not in Lun or failed, reset the phy\n"); + break; + } + } + pr_err("pm80xx: rc= %d\n", rc); + return rc; +} + +/* mandatory SAM-3, still need free task/ccb info, abort the specified task */ +int pm8001_abort_task(struct sas_task *task) +{ + struct pm8001_ccb_info *ccb = task->lldd_task; + unsigned long flags; + u32 tag; + struct domain_device *dev ; + struct pm8001_hba_info *pm8001_ha; + struct pm8001_device *pm8001_dev; + int rc = TMF_RESP_FUNC_FAILED, ret; + u32 phy_id, port_id; + struct sas_task_slow slow_task; + + if (!task->lldd_task || !task->dev) + return TMF_RESP_FUNC_FAILED; + + dev = task->dev; + pm8001_dev = dev->lldd_dev; + pm8001_ha = pm8001_find_ha_by_dev(dev); + phy_id = pm8001_dev->attached_phy; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + // If the controller is seeing fatal errors + // abort task will not get a response from the controller + return TMF_RESP_FUNC_FAILED; + } + + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { + pm8001_info(pm8001_ha, "no tag for task:%p\n", task); + return TMF_RESP_FUNC_FAILED; + } + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + return TMF_RESP_FUNC_COMPLETE; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + if (task->slow_task == NULL) { + init_completion(&slow_task.completion); + task->slow_task = &slow_task; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (task->task_proto & SAS_PROTOCOL_SSP) { + rc = sas_abort_task(task, tag); + sas_execute_internal_abort_single(dev, tag, 0, NULL); + } else if (task->task_proto & SAS_PROTOCOL_SATA || + task->task_proto & SAS_PROTOCOL_STP) { + if (pm8001_ha->chip_id == chip_8006) { + DECLARE_COMPLETION_ONSTACK(completion_reset); + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_phy *phy = pm8001_ha->phy + phy_id; + port_id = phy->port->port_id; + + /* 1. Set Device state as Recovery */ + pm8001_dev->setds_completion = &completion; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_IN_RECOVERY); + wait_for_completion(&completion); + + /* 2. 
Send Phy Control Hard Reset */ + reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; + phy->reset_success = false; + phy->enable_completion = &completion; + phy->reset_completion = &completion_reset; + ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, + PHY_HARD_RESET); + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + goto out; + } + + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. + */ + pm8001_dbg(pm8001_ha, MSG, + "Waiting for local phy ctl\n"); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + pm8001_dbg(pm8001_ha, MSG, + "Waiting for Port reset\n"); + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + PM8001_CHIP_DISP->hw_event_ack_req( + pm8001_ha, 0, + 0x07, /*HW_EVENT_PHY_DOWN ack*/ + port_id, phy_id, 0, 0); + goto out; + } + } + + /* + * 4. SATA Abort ALL + * we wait for the task to be aborted so that the task + * is removed from the ccb. on success the caller is + * going to free the task. + */ + ret = sas_execute_internal_abort_dev(dev, 0, NULL); + if (ret) + goto out; + ret = wait_for_completion_timeout( + &task->slow_task->completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + goto out; + + /* 5. Set Device State as Operational */ + reinit_completion(&completion); + pm8001_dev->setds_completion = &completion; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); + wait_for_completion(&completion); + } else { + /* + * Ensure that if we see a completion for the ccb + * associated with the task which we are trying to + * abort then we should not touch the sas_task as it + * may race with libsas freeing it when return here. 
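+			 * Clearing ccb->task below is what lets the completion
+			 * path skip the sas_task if such a race does occur.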
+ */ + ccb->task = NULL; + ret = sas_execute_internal_abort_single(dev, tag, 0, NULL); + } + rc = TMF_RESP_FUNC_COMPLETE; + } else if (task->task_proto & SAS_PROTOCOL_SMP) { + /* SMP */ + rc = sas_execute_internal_abort_single(dev, tag, 0, NULL); + + } +out: + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->slow_task == &slow_task) + task->slow_task = NULL; + spin_unlock_irqrestore(&task->task_state_lock, flags); + if (rc != TMF_RESP_FUNC_COMPLETE) + pm8001_info(pm8001_ha, "rc= %d\n", rc); + return rc; +} + +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) +{ + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + + pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n", + pm8001_dev->device_id); + return sas_clear_task_set(dev, lun); +} + +void pm8001_port_formed(struct asd_sas_phy *sas_phy) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha; + struct pm8001_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct pm8001_port *port = phy->port; + + if (!sas_port) { + pm8001_dbg(pm8001_ha, FAIL, "Received null port\n"); + return; + } + sas_port->lldd_port = port; +} + +void pm8001_setds_completion(struct domain_device *dev) +{ + struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + struct pm8001_device *pm8001_dev = dev->lldd_dev; + DECLARE_COMPLETION_ONSTACK(completion_setstate); + + if (pm8001_ha->chip_id != chip_8001) { + pm8001_dev->setds_completion = &completion_setstate; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, DS_OPERATIONAL); + wait_for_completion(&completion_setstate); + } +} + +void pm8001_tmf_aborted(struct sas_task *task) +{ + struct pm8001_ccb_info *ccb = task->lldd_task; + + if (ccb) + ccb->task = NULL; +} diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h new file mode 100644 index 0000000000..2fadd353f1 --- /dev/null +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -0,0 +1,794 @@ +/* + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PM8001_SAS_H_ +#define _PM8001_SAS_H_ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <scsi/libsas.h> +#include <scsi/scsi_tcq.h> +#include <scsi/sas_ata.h> +#include <linux/atomic.h> +#include <linux/blk-mq.h> +#include <linux/blk-mq-pci.h> +#include "pm8001_defs.h" + +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.40" +#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ +#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ +#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ +#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ +#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ +#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ +#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ +#define PM8001_DEV_LOGGING 0x80 /* development message logging */ +#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */ +#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */ +#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */ + +#define pm8001_info(HBA, fmt, ...) \ + pr_info("%s:: %s %d: " fmt, \ + (HBA)->name, __func__, __LINE__, ##__VA_ARGS__) + +#define pm8001_dbg(HBA, level, fmt, ...) 
\ +do { \ + if (unlikely((HBA)->logging_level & PM8001_##level##_LOGGING)) \ + pm8001_info(HBA, fmt, ##__VA_ARGS__); \ +} while (0) + +#define PM8001_USE_TASKLET +#define PM8001_USE_MSIX +#define PM8001_READ_VPD + + +#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \ + || (dev->device == 0X8076) \ + || (dev->device == 0X8077) \ + || (dev->device == 0X8070) \ + || (dev->device == 0X8072)) + +#define PM8001_NAME_LENGTH 32/* generic length of strings */ +extern struct list_head hba_list; +extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; + +struct pm8001_hba_info; +struct pm8001_ccb_info; +struct pm8001_device; + +struct pm8001_ioctl_payload { + u32 signature; + u16 major_function; + u16 minor_function; + u16 status; + u16 offset; + u16 id; + u32 wr_length; + u32 rd_length; + u8 *func_specific; +}; + +#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF +#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) +#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */ +#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */ +#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */ +#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ +#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ +#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */ +#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNITURE */ +#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 +#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3 +#define TYPE_GSM_SPACE 1 +#define TYPE_QUEUE 2 +#define TYPE_FATAL 3 +#define TYPE_NON_FATAL 4 +#define TYPE_INBOUND 1 +#define TYPE_OUTBOUND 2 +struct forensic_data { + u32 data_type; + union { + struct { + u32 direct_len; + u32 direct_offset; + void *direct_data; + } gsm_buf; + struct { + u16 queue_type; + u16 queue_index; + u32 direct_len; + void *direct_data; + } queue_buf; + struct { + u32 direct_len; + u32 direct_offset; + u32 read_len; + void *direct_data; + } data_buf; + }; +}; + +/* bit31-26 - mask bar */ +#define SCRATCH_PAD0_BAR_MASK 0xFC000000 +/* bit25-0 - offset mask */ +#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF +/* if AAP error state */ +#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF +/* Inbound doorbell bit7 */ +#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80 +/* Inbound doorbell bit7 SPCV */ +#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 +#define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */ + +struct pm8001_dispatch { + char *name; + int (*chip_init)(struct pm8001_hba_info *pm8001_ha); + void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); + void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); + int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); + void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); + u32 (*is_our_interrupt)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + int (*smp_req)(struct pm8001_hba_info 
*pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*sata_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id); + int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag); + int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id); + int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha, + u32 phy_id, u32 phy_op); + int (*task_abort)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); + int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf); + int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload); + int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha, + void *payload); + int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); + int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); + int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha, + u32 state); + int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha); + int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha); + void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, + u32 param1); +}; + +struct pm8001_chip_info { + u32 encrypt; + u32 n_phy; + const struct pm8001_dispatch *dispatch; +}; +#define PM8001_CHIP_DISP (pm8001_ha->chip->dispatch) + +struct pm8001_port { + struct asd_sas_port sas_port; + u8 port_attached; + u16 wide_port_phymap; + u8 port_state; + u8 port_id; + struct list_head list; +}; + +struct pm8001_phy { + struct pm8001_hba_info *pm8001_ha; + struct pm8001_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u32 phy_type; + struct completion *enable_completion; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 phy_state; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; + struct completion *reset_completion; + bool port_reset_status; + bool reset_success; +}; + +/* port reset status */ +#define PORT_RESET_SUCCESS 0x00 +#define PORT_RESET_TMO 0x01 + +struct pm8001_device { + enum sas_device_type dev_type; + struct domain_device *sas_device; + u32 attached_phy; + u32 id; + struct completion *dcompletion; + struct completion *setds_completion; + u32 device_id; + atomic_t running_req; +}; + +struct pm8001_prd_imt { + __le32 len; + __le32 e; +}; + +struct pm8001_prd { + __le64 addr; /* 64-bit buffer address */ + struct pm8001_prd_imt im_len; /* 64-bit length */ +} __attribute__ ((packed)); +/* + * CCB(Command Control Block) + */ +struct pm8001_ccb_info { + struct sas_task *task; + u32 n_elem; + u32 ccb_tag; + dma_addr_t ccb_dma_handle; + struct pm8001_device *device; + struct pm8001_prd *buf_prd; + struct fw_control_ex *fw_control_context; + u8 open_retry; +}; + +struct mpi_mem { + void *virt_ptr; + dma_addr_t phys_addr; + u32 phys_addr_hi; + u32 phys_addr_lo; + u32 total_len; + u32 num_elements; + u32 element_size; + u32 alignment; +}; + +struct mpi_mem_req { + /* The number of element in the mpiMemory array */ + u32 count; + /* The array of structures that define memroy regions*/ + struct mpi_mem 
region[USI_MAX_MEMCNT]; +}; + +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 outbound_hw_event_pid0_3; + u32 outbound_hw_event_pid4_7; + u32 outbound_ncq_event_pid0_3; + u32 outbound_ncq_event_pid4_7; + u32 outbound_tgt_ITNexus_event_pid0_3; + u32 outbound_tgt_ITNexus_event_pid4_7; + u32 outbound_tgt_ssp_event_pid0_3; + u32 outbound_tgt_ssp_event_pid4_7; + u32 outbound_tgt_smp_event_pid0_3; + u32 outbound_tgt_smp_event_pid4_7; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_option; + u32 upper_iop_event_log_addr; + u32 lower_iop_event_log_addr; + u32 iop_event_log_size; + u32 iop_event_log_option; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 hda_mode_flag; + u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + u32 fatal_n_non_fatal_dump; /* 0x28 */ + u32 ila_version; + u32 inc_fw_version; + } pm80xx_tbl; +}; + +union general_status_table { + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd; + u32 phy_state[8]; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm80xx_tbl; +}; +struct inbound_queue_table { + u32 element_pri_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + u32 ci_upper_base_addr; + u32 ci_lower_base_addr; + u32 pi_pci_bar; + u32 pi_offset; + u32 total_length; + void *base_virt; + void *ci_virt; + u32 reserved; + __le32 consumer_index; + u32 producer_idx; + spinlock_t iq_lock; +}; +struct outbound_queue_table { + u32 element_size_cnt; + u32 upper_base_addr; + u32 lower_base_addr; + void *base_virt; + u32 pi_upper_base_addr; + u32 pi_lower_base_addr; + u32 ci_pci_bar; + u32 ci_offset; + u32 total_length; + void *pi_virt; + u32 interrup_vec_cnt_delay; + u32 dinterrup_to_pci_offset; + __le32 producer_index; + u32 consumer_idx; + spinlock_t oq_lock; + unsigned long lock_flags; +}; +struct pm8001_hba_memspace { + void __iomem *memvirtaddr; + u64 membase; + u32 memsize; +}; +struct isr_param { + struct pm8001_hba_info 
*drv_inst; + u32 irq_id; +}; +struct pm8001_hba_info { + char name[PM8001_NAME_LENGTH]; + struct list_head list; + unsigned long flags; + spinlock_t lock;/* host-wide lock */ + spinlock_t bitmap_lock; + struct pci_dev *pdev;/* our device */ + struct device *dev; + struct pm8001_hba_memspace io_mem[6]; + struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ + struct forensic_data forensic_info; + u32 fatal_bar_loc; + u32 forensic_last_offset; + u32 fatal_forensic_shift_offset; + u32 forensic_fatal_step; + u32 forensic_preserved_accumulated_transfer; + u32 evtlog_ib_offset; + u32 evtlog_ob_offset; + void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ + void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ + void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ + void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ + void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ + u8 sas_addr[SAS_ADDR_SIZE]; + struct sas_ha_struct *sas;/* SCSI/SAS glue */ + struct Scsi_Host *shost; + u32 chip_id; + const struct pm8001_chip_info *chip; + struct completion *nvmd_completion; + unsigned long *rsvd_tags; + struct pm8001_phy phy[PM8001_MAX_PHYS]; + struct pm8001_port port[PM8001_MAX_PHYS]; + u32 id; + u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ + struct pm8001_device *devices; + struct pm8001_ccb_info *ccb_info; + u32 ccb_count; +#ifdef PM8001_USE_MSIX + int number_of_intr;/*will be used in remove()*/ + char intr_drvname[PM8001_MAX_MSIX_VEC] + [PM8001_NAME_LENGTH+1+3+1]; +#endif +#ifdef PM8001_USE_TASKLET + struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; +#endif + u32 logging_level; + u32 link_rate; + u32 fw_status; + u32 smp_exp_mode; + bool controller_fatal_error; + const struct firmware *fw_image; + struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; + u32 non_fatal_count; + u32 non_fatal_read_length; + u32 max_q_num; + u32 ib_offset; + u32 ob_offset; + u32 ci_offset; + u32 pi_offset; + u32 max_memcnt; +}; + +struct pm8001_work { + struct work_struct work; + struct pm8001_hba_info *pm8001_ha; + void *data; + int handler; +}; + +struct pm8001_fw_image_header { + u8 vender_id[8]; + u8 product_id; + u8 hardware_rev; + u8 dest_partition; + u8 reserved; + u8 fw_rev[4]; + __be32 image_length; + __be32 image_crc; + __be32 startup_entry; +} __attribute__((packed, aligned(4))); + + +/** + * FW Flash Update status values + */ +#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x00 +#define FLASH_UPDATE_IN_PROGRESS 0x01 +#define FLASH_UPDATE_HDR_ERR 0x02 +#define FLASH_UPDATE_OFFSET_ERR 0x03 +#define FLASH_UPDATE_CRC_ERR 0x04 +#define FLASH_UPDATE_LENGTH_ERR 0x05 +#define FLASH_UPDATE_HW_ERR 0x06 +#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 +#define FLASH_UPDATE_DISABLED 0x11 + +/* Device states */ +#define DS_OPERATIONAL 0x01 +#define DS_PORT_IN_RESET 0x02 +#define DS_IN_RECOVERY 0x03 +#define DS_IN_ERROR 0x04 +#define DS_NON_OPERATIONAL 0x07 + +/** + * brief param structure for firmware flash update. 
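+ * Describes the chunk of the firmware image currently being downloaded;
+ * the embedded pm8001_prd (sgl) carries the DMA address of that chunk.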
+ */ +struct fw_flash_updata_info { + u32 cur_image_offset; + u32 cur_image_len; + u32 total_image_len; + struct pm8001_prd sgl; +}; + +struct fw_control_info { + u32 retcode;/*ret code (status)*/ + u32 phase;/*ret code phase*/ + u32 phaseCmplt;/*percent complete for the current + update phase */ + u32 version;/*Hex encoded firmware version number*/ + u32 offset;/*Used for downloading firmware */ + u32 len; /*len of buffer*/ + u32 size;/* Used in OS VPD and Trace get size + operations.*/ + u32 reserved;/* padding required for 64 bit + alignment */ + u8 buffer[];/* Start of buffer */ +}; +struct fw_control_ex { + struct fw_control_info *fw_control; + void *buffer;/* keep buffer pointer to be + freed when the response comes*/ + void *virtAddr;/* keep virtual address of the data */ + void *usrAddr;/* keep virtual address of the + user data */ + dma_addr_t phys_addr; + u32 len; /* len of buffer */ + void *payload; /* pointer to IOCTL Payload */ + u8 inProgress;/*if 1 - the IOCTL request is in + progress */ + void *param1; + void *param2; + void *param3; +}; + +/* pm8001 workqueue */ +extern struct workqueue_struct *pm8001_wq; + +/******************** function prototype *********************/ +int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); +u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); +void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); +int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +void pm8001_scan_start(struct Scsi_Host *shost); +int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags); +int pm8001_abort_task(struct sas_task *task); +int pm8001_clear_task_set(struct domain_device *dev, u8 *lun); +int pm8001_dev_found(struct domain_device *dev); +void pm8001_dev_gone(struct domain_device *dev); +int pm8001_lu_reset(struct domain_device *dev, u8 *lun); +int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); +int pm8001_query_task(struct sas_task *task); +void pm8001_port_formed(struct asd_sas_phy *sas_phy); +void pm8001_open_reject_retry( + struct pm8001_hba_info *pm8001_ha, + struct sas_task *task_to_close, + struct pm8001_device *device_to_close); +int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, + dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, + u32 mem_size, u32 align); + +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + u32 q_index, u32 opCode, void *payload, size_t nb, + u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void 
*payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct sas_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + +int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf); +void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha, + u32 phy, u32 length, u32 *buf); +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t pm80xx_get_non_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); +int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha); +void pm8001_free_dev(struct pm8001_device *pm8001_dev); +/* ctl shared API */ +extern const struct attribute_group *pm8001_host_groups[]; + +#define PM8001_INVALID_TAG ((u32)-1) + +/* + * Allocate a new tag and return the corresponding ccb after initializing it. + */ +static inline struct pm8001_ccb_info * +pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *dev, struct sas_task *task) +{ + struct pm8001_ccb_info *ccb; + struct request *rq = NULL; + u32 tag; + + if (task) + rq = sas_task_find_rq(task); + + if (rq) { + tag = rq->tag + PM8001_RESERVE_SLOT; + } else if (pm8001_tag_alloc(pm8001_ha, &tag)) { + pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n"); + return NULL; + } + + ccb = &pm8001_ha->ccb_info[tag]; + ccb->task = task; + ccb->n_elem = 0; + ccb->ccb_tag = tag; + ccb->device = dev; + ccb->fw_control_context = NULL; + ccb->open_retry = 0; + + return ccb; +} + +/* + * Free the tag of an initialized ccb. 
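+ * Only driver-reserved tags (below PM8001_RESERVE_SLOT) go back to the
+ * rsvd_tags bitmap; tags derived from a block layer request are owned
+ * by the block layer and are simply dropped here.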
+ */ +static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + u32 tag = ccb->ccb_tag; + + /* + * Cleanup the ccb to make sure that a manual scan of the adapter + * ccb_info array can detect ccb's that are in use. + * C.f. pm8001_open_reject_retry() + */ + ccb->task = NULL; + ccb->ccb_tag = PM8001_INVALID_TAG; + ccb->device = NULL; + ccb->fw_control_context = NULL; + + pm8001_tag_free(pm8001_ha, tag); +} + +static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + + pm8001_ccb_task_free(pm8001_ha, ccb); + smp_mb(); /*in order to force CPU ordering*/ + task->task_done(task); +} +void pm8001_setds_completion(struct domain_device *dev); +void pm8001_tmf_aborted(struct sas_task *task); + +#endif + diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 0000000000..3afd9443c4 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4940 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" +#include "pm80xx_tracepoints.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 + + +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) +{ + u32 reg_val; + unsigned long start; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); + } while ((reg_val != shift_value) && time_before(jiffies, start)); + if (reg_val != shift_value) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MEMBASE_II_SHIFT_REGISTER = 0x%x\n", + reg_val); + return -1; + } + return 0; +} + +static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, + __le32 *destination, + u32 dw_count, u32 bus_base_number) +{ + u32 index, value, offset; + + for (index = 0; index < dw_count; index += 4, destination++) { + offset = (soffset + index); + if (offset < (64 * 1024)) { + value = pm8001_cr32(pm8001_ha, bus_base_number, offset); + *destination = cpu_to_le32(value); + } + } + return; +} + +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; + u32 accum_len, reg_val, index, *temp; + u32 status = 1; + unsigned long start; + u8 *direct_data; + char *fatal_error_data = buf; + u32 length_to_read; + u32 offset; + + pm8001_ha->forensic_info.data_buf.direct_data = buf; + if (pm8001_ha->chip_id == chip_8001) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "Not supported for SPC controller"); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + /* initialize variables for very first call from host application */ + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + pm8001_dbg(pm8001_ha, IO, + "forensic_info TYPE_NON_FATAL..............\n"); + direct_data = (u8 *)fatal_error_data; + pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; + pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + pm8001_ha->forensic_preserved_accumulated_transfer = 0; + + /* Write signature to fatal dump table */ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd); + + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: status1 %d\n", status); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len); + pm8001_dbg(pm8001_ha, IO, "ossaHwCB: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset); + } + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + /* start to get data */ + /* Program the MEMBASE II Shifting Register with 0x00.*/ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->forensic_last_offset = 0; + pm8001_ha->forensic_fatal_step = 0; + pm8001_ha->fatal_bar_loc = 0; + } + + /* Read until accum_len is retrieved */ + accum_len = 
pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + /* Determine length of data between previously stored transfer length + * and current accumulated transfer length + */ + length_to_read = + accum_len - pm8001_ha->forensic_preserved_accumulated_transfer; + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: accum_len 0x%x\n", + accum_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: length_to_read 0x%x\n", + length_to_read); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: last_offset 0x%x\n", + pm8001_ha->forensic_last_offset); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv:: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset); + + /* If accumulated length failed to read correctly fail the attempt.*/ + if (accum_len == 0xFFFFFFFF) { + pm8001_dbg(pm8001_ha, IO, + "Possible PCI issue 0x%x not expected\n", + accum_len); + return status; + } + /* If accumulated length is zero fail the attempt */ + if (accum_len == 0) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + /* Accumulated length is good so start capturing the first data */ + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; + if (pm8001_ha->forensic_fatal_step == 0) { +moreData: + /* If data to read is less than SYSFS_OFFSET then reduce the + * length of dataLen + */ + if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET + > length_to_read) { + pm8001_ha->forensic_info.data_buf.direct_len = + length_to_read - + pm8001_ha->forensic_last_offset; + } else { + pm8001_ha->forensic_info.data_buf.direct_len = + SYSFS_OFFSET; + } + if (pm8001_ha->forensic_info.data_buf.direct_data) { + /* Data is in bar, copy to host memory */ + pm80xx_pci_mem_copy(pm8001_ha, + pm8001_ha->fatal_bar_loc, + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, + pm8001_ha->forensic_info.data_buf.direct_len, 1); + } + pm8001_ha->fatal_bar_loc += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.direct_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_last_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.read_len = + pm8001_ha->forensic_info.data_buf.direct_len; + + if (pm8001_ha->forensic_last_offset >= length_to_read) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 3); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + + pm8001_ha->fatal_bar_loc = 0; + pm8001_ha->forensic_fatal_step = 1; + pm8001_ha->fatal_forensic_shift_offset = 0; + pm8001_ha->forensic_last_offset = 0; + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, + "get_fatal_spcv:return1 0x%x\n", offset); + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + 
forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data + += sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, + "get_fatal_spcv:return2 0x%x\n", offset); + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + + /* Increment the MEMBASE II Shifting Register value by 0x100.*/ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4) ; index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + pm8001_ha->fatal_forensic_shift_offset += 0x100; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->fatal_bar_loc = 0; + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return3 0x%x\n", + offset); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_fatal_step == 1) { + /* store previous accumulated length before triggering next + * accumulated length update + */ + pm8001_ha->forensic_preserved_accumulated_transfer = + pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + + /* continue capturing the fatal log until Dump status is 0x3 */ + if (pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS) < + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { + + /* reset fddstat bit by writing to zero*/ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS, 0x0); + + /* set dump control value to '1' so that new data will + * be transferred to shared memory + */ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, + MPI_FATAL_EDUMP_HANDSHAKE_RDY); + + /*Poll FDDHSHK until clear */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE); + } while ((reg_val) && time_before(jiffies, start)); + + if (reg_val != 0) { + pm8001_dbg(pm8001_ha, FAIL, + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n", + reg_val); + /* Fail the dump if a timeout occurs */ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return((char *) + pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + } + /* Poll status register until set to 2 or + * 3 for up to 2 seconds + */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS); + } while (((reg_val != 2) && (reg_val != 3)) && + time_before(jiffies, start)); + + if (reg_val < 2) { + pm8001_dbg(pm8001_ha, FAIL, + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n", + reg_val); + /* Fail the dump if a timeout occurs */ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return((char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf); + } + /* reset fatal_forensic_shift_offset back to zero and reset MEMBASE 2 register to 
zero */
+ pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */
+ pm8001_cw32(pm8001_ha, 0,
+ MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ }
+ /* Read the next block of the debug data. */
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
+ }
+ offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
+ return ((char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf);
+}
+
+/* pm80xx_get_non_fatal_dump - dump the non-fatal data from the DMA
+ * location written by the firmware.
+ */
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 accum_len = 0;
+ u32 total_len = 0;
+ u32 reg_val = 0;
+ u32 *temp = NULL;
+ u32 index = 0;
+ u32 output_length;
+ unsigned long start = 0;
+ char *buf_copy = buf;
+
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (++pm8001_ha->non_fatal_count == 1) {
+ if (pm8001_ha->chip_id == chip_8001) {
+ snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ PAGE_SIZE, "Not supported for SPC controller");
+ return 0;
+ }
+ pm8001_dbg(pm8001_ha, IO, "forensic_info TYPE_NON_FATAL...\n");
+ /*
+ * Step 1: Write the host buffer parameters in the MPI Fatal and
+ * Non-Fatal Error Dump Capture Table. This is the buffer
+ * where debug data will be DMAed to.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);
+
+ /* Optionally, set the DUMPCTRL bit to 1 if the host
+ * keeps sending active I/Os while capturing the non-fatal
+ * debug data. Otherwise, leave this bit set to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /*
+ * Step 2: Clear Accumulative Length of Debug Data Transferred
+ * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
+ * Capture Table to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);
+
+ /* initialize previous accumulated length to 0 */
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
+ pm8001_ha->non_fatal_read_length = 0;
+ }
+
+ total_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
+ /*
+ * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
+ * field and then request that the SPCv controller transfer the debug
+ * data by setting bit 7 of the Inbound Doorbell Set Register.
+ */ + pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0); + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, + SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP); + + /* + * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for + * 2 seconds) until register bit 7 is cleared. + * This step only indicates the request is accepted by the controller. + */ + start = jiffies + (2 * HZ); /* 2 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) & + SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP; + } while ((reg_val != 0) && time_before(jiffies, start)); + + /* Step 4.2: To check the completion of the transfer, poll the Fatal/Non + * Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in + * the MPI Fatal and Non-Fatal Error Dump Capture Table. + */ + start = jiffies + (2 * HZ); /* 2 sec */ + do { + reg_val = pm8001_mr32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS); + } while ((!reg_val) && time_before(jiffies, start)); + + if ((reg_val == 0x00) || + (reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) || + (reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) { + pm8001_ha->non_fatal_read_length = 0; + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF); + pm8001_ha->non_fatal_count = 0; + return (buf_copy - buf); + } else if (reg_val == + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) { + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2); + } else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) || + (pm8001_ha->non_fatal_read_length >= total_len)) { + pm8001_ha->non_fatal_read_length = 0; + buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4); + pm8001_ha->non_fatal_count = 0; + } + accum_len = pm8001_mr32(nonfatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + output_length = accum_len - + pm8001_ha->forensic_preserved_accumulated_transfer; + + for (index = 0; index < output_length/4; index++) + buf_copy += snprintf(buf_copy, PAGE_SIZE, + "%08x ", *(temp+index)); + + pm8001_ha->non_fatal_read_length += output_length; + + /* store current accumulated length to use in next iteration as + * the previous accumulated length + */ + pm8001_ha->forensic_preserved_accumulated_transfer = accum_len; + return (buf_copy - buf); +} + +/** + * read_main_config_table - read the configure table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); + /* read port recover and reset timeout */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer = + pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER); + /* read ILA and inactive firmware version */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version = + pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version = + pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION); + + pm8001_dbg(pm8001_ha, DEV, + "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev); + + pm8001_dbg(pm8001_ha, DEV, + "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset); + + pm8001_dbg(pm8001_ha, DEV, + "Main cfg table; ila rev:%x Inactive fw rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ib_offset + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0); + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + + pm8001_dbg(pm8001_ha, DEV, + "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i, + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar, + pm8001_ha->inbnd_q_tbl[i].pi_offset); + } + for (i = 0; i < pm8001_ha->max_q_num; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[ob_offset + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + 
pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; + pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0); + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + + pm8001_dbg(pm8001_ha, DEV, + "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i, + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar, + pm8001_ha->outbnd_q_tbl[i].ci_offset); + } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + /* Update Fatal error interrupt vector */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + ((pm8001_ha->max_q_num - 1) << 8); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_dbg(pm8001_ha, DEV, + "Updated Fatal error interrupt vector 0x%x\n", + pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)); + + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + pm8001_dbg(pm8001_ha, DEV, + "Programming DW 0x21 in main cfg table with 0x%x\n", + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |= + PORT_RECOVERY_TIMEOUT; + if (pm8001_ha->chip_id == chip_8006) { + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer 
&= + 0x0000ffff; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |= + CHIP_8006_PORT_RECOVERY_TIMEOUT; + } + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "IQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + + pm8001_dbg(pm8001_ha, DEV, + "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "CI upper base addr 0x%x CI lower base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. + * @pm8001_ha: our hba card information + * @number: entry in the queue + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); + + pm8001_dbg(pm8001_ha, DEV, + "OQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + + pm8001_dbg(pm8001_ha, DEV, + "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + + pm8001_dbg(pm8001_ha, DEV, + "PI upper base addr 0x%x PI lower base addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); +} + +/** + * mpi_init_check - check firmware initialization status. 
+ * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT; + } else { + max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + /* additional check */ + pm8001_dbg(pm8001_ha, FAIL, + "Inb doorbell clear not toggled[value:%x]\n", + value); + return -EBUSY; + } + /* check the MPI-State for initialization up to 100ms*/ + max_wait_count = 5;/* 100 msec */ + do { + msleep(FW_READY_INTERVAL); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -EBUSY; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -EBUSY; + + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. + */ + msleep(500); + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. + * This function sleeps hence it must not be used in atomic context. + * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + u32 expected_mask; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 5; /* 100 milli sec */ + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila, RAAE and iops status */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + max_wait_time = max_wait_count = 180; /* 3600 milli sec */ + expected_mask = SCRATCH_PAD_ILA_READY | + SCRATCH_PAD_RAAE_READY | + SCRATCH_PAD_IOP0_READY | + SCRATCH_PAD_IOP1_READY; + } else { + max_wait_time = max_wait_count = 170; /* 3400 milli sec */ + expected_mask = SCRATCH_PAD_ILA_READY | + SCRATCH_PAD_RAAE_READY | + SCRATCH_PAD_IOP0_READY; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & expected_mask) != + expected_mask) && (--max_wait_count)); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, INIT, + "At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n", + max_wait_time * FW_READY_INTERVAL, value); + ret = -1; + } else { + pm8001_dbg(pm8001_ha, MSG, + "All FW components ready by %d ms\n", + (max_wait_time - max_wait_count) * FW_READY_INTERVAL); + } + return ret; +} + +static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + + /* + * lower 26 bits of SCRATCHPAD0 register describes offset within the + * PCIe BAR where the MPI configuration table is present + */ + offset = value & 0x03FFFFFF; 
/* scratch pad 0 TBL address */ + + pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value); + /* + * Upper 6 bits describe the offset within PCI config space where BAR + * is located. + */ + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); + + /* + * Make sure the offset falls inside the ioremapped PCI BAR + */ + if (offset > pm8001_ha->io_mem[pcibar].memsize) { + pm8001_dbg(pm8001_ha, FAIL, + "Main cfg tbl offset outside %u > %u\n", + offset, pm8001_ha->io_mem[pcibar].memsize); + return -EBUSY; + } + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + + /* + * Validate main configuration table address: first DWord should read + * "PMCS" + */ + value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0); + if (memcmp(&value, "PMCS", 4) != 0) { + pm8001_dbg(pm8001_ha, FAIL, + "BAD main config signature 0x%x\n", + value); + return -EBUSY; + } + pm8001_dbg(pm8001_ha, INIT, + "VALID main config signature 0x%x\n", value); + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + pm8001_ha->fatal_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & + 0xFFFFFF); + + pm8001_dbg(pm8001_ha, INIT, "GST OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)); + pm8001_dbg(pm8001_ha, INIT, "INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)); + pm8001_dbg(pm8001_ha, INIT, "OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)); + pm8001_dbg(pm8001_ha, INIT, "IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)); + pm8001_dbg(pm8001_ha, INIT, "PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)); + pm8001_dbg(pm8001_ha, INIT, "addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr); + pm8001_dbg(pm8001_ha, INIT, "addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr); + pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr); + return 0; +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + u32 page_code; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + + if (IS_SPCV_12G(pm8001_ha->pdev)) + page_code = THERMAL_PAGE_CODE_7H; + else + page_code = THERMAL_PAGE_CODE_8H; + + payload.cfg_pg[0] = + cpu_to_le32((THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | page_code); + payload.cfg_pg[1] = + cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8)); + + pm8001_dbg(pm8001_ha, DEV, + "Setting up thermal config. 
cfg_pg 0 0x%x cfg_pg 1 0x%x\n", + payload.cfg_pg[0], payload.cfg_pg[1]); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. +*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE); + SASConfigPage.MST_MSI = cpu_to_le32(3 << 15); + SASConfigPage.STP_SSP_MCT_TMO = + cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO); + SASConfigPage.STP_FRM_TMO = + cpu_to_le32((SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER); + SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME); + + SASConfigPage.OPNRJT_RTRY_INTVL = + cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = + cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = + cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR); + SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP); + + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n", + le32_to_cpu(SASConfigPage.pageCode)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n", + le32_to_cpu(SASConfigPage.MST_MSI)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_FRM_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.STP_IDLE_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n", + le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n", + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n", + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n", + le32_to_cpu(SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret = -1; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X.Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + pm8001_dbg(pm8001_ha, INIT, + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X.Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, + pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status); + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption information + * @pm8001_ha: our hba card information. 
+ */ +static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) +{ + struct kek_mgmt_req payload; + int rc; + u32 tag; + u32 opc = OPC_INB_KEK_MANAGEMENT; + + memset(&payload, 0, sizeof(struct kek_mgmt_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + /* Currently only one key is used. New KEK index is 1. + * Current KEK index is 1. Store KEK to NVRAM is 1. + */ + payload.new_curidx_ksop = + cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE)); + + pm8001_dbg(pm8001_ha, DEV, + "Saving Encryption info to flash. payload 0x%x\n", + le32_to_cpu(payload.new_curidx_ksop)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +/** + * pm80xx_chip_init - the main init function that initializes whole PM8001 chip. + * @pm8001_ha: our hba card information + */ +static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + int ret; + u8 i = 0; + + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + return -EBUSY; + } + + /* Initialize the controller fatal error flag */ + pm8001_ha->controller_fatal_error = false; + + /* Initialize pci space address eg: mpi offset */ + ret = init_pci_device_addresses(pm8001_ha); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to init pci addresses"); + return ret; + } + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + read_phy_attr_table(pm8001_ha); + + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < pm8001_ha->max_q_num; i++) { + update_inbnd_queue_table(pm8001_ha, i); + update_outbnd_queue_table(pm8001_ha, i); + } + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n"); + } else + return -EBUSY; + + return 0; +} + +static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha) +{ + /* send SAS protocol timer configuration page to FW */ + pm80xx_set_sas_protocol_timer_config(pm8001_ha); + + /* Check for encryption */ + if (pm8001_ha->chip->encrypt) { + int ret; + + pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n"); + ret = pm80xx_get_encrypt_info(pm8001_ha); + if (ret == -1) { + pm8001_dbg(pm8001_ha, INIT, "Encryption error !!\n"); + if (pm8001_ha->encrypt_info.status == 0x81) { + pm8001_dbg(pm8001_ha, INIT, + "Encryption enabled with error.Saving encryption key to flash\n"); + pm80xx_encrypt_update(pm8001_ha); + } + } + } +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + int ret; + + ret = init_pci_device_addresses(pm8001_ha); + if (ret) { + pm8001_dbg(pm8001_ha, FAIL, + "Failed to init pci addresses"); + return ret; + } + + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT; + } else { + max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT; + } + do { + msleep(FW_READY_INTERVAL); + value = pm8001_cr32(pm8001_ha, 0, 
MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=%x\n", value); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 100; /* 2 sec for spcv/ve */ + do { + msleep(FW_READY_INTERVAL); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK); + return -1; + } + + return 0; +} + +/** + * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors + * @pm8001_ha: our hba card information + * + * Fatal errors are recoverable only after a host reboot. + */ +int +pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) +{ + int ret = 0; + u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_1); + u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if (pm8001_ha->chip_id != chip_8006 && + pm8001_ha->chip_id != chip_8074 && + pm8001_ha->chip_id != chip_8076) { + return 0; + } + + if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) { + pm8001_dbg(pm8001_ha, FAIL, + "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n", + scratch_pad1, scratch_pad2, scratch_pad3, + scratch_pad_rsvd0, scratch_pad_rsvd1); + ret = 1; + } + + return ret; +} + +/** + * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all + * FW register status are reset to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + u32 ibutton0, ibutton1; + + /* Process MPI table uninitialization only if FW is ready */ + if (!pm8001_ha->controller_fatal_error) { + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2); + u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + pm8001_dbg(pm8001_ha, FAIL, + "MPI state is not ready scratch: %x:%x:%x:%x\n", + r0, r1, r2, r3); + /* if things aren't ready but the bootloader is ok then + * try the reset anyway. 
+ */ + if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK) + return -1; + } + } + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + pm8001_dbg(pm8001_ha, INIT, "reset register before write : 0x%x\n", + regval); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + msleep(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + pm8001_dbg(pm8001_ha, INIT, "reset register after write 0x%x\n", + regval); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + pm8001_dbg(pm8001_ha, MSG, + " soft reset successful [regval: 0x%x]\n", + regval); + } else { + pm8001_dbg(pm8001_ha, MSG, + " soft reset failed [regval: 0x%x]\n", + regval); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode SEEPROM\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode Bootstrap Pin\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state - HDA mode soft reset\n"); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + pm8001_dbg(pm8001_ha, MSG, + "Bootloader state-HDA mode critical error\n"); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == check_fw_ready(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n"); + /* check iButton feature support for motherboard controller */ + if (pm8001_ha->pdev->subsystem_vendor != + PCI_VENDOR_ID_ADAPTEC2 && + pm8001_ha->pdev->subsystem_vendor != + PCI_VENDOR_ID_ATTO && + pm8001_ha->pdev->subsystem_vendor != 0) { + ibutton0 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + ibutton1 = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_1); + if (!ibutton0 && !ibutton1) { + pm8001_dbg(pm8001_ha, FAIL, + "iButton Feature is not Available!!!\n"); + return -EBUSY; + } + if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { + pm8001_dbg(pm8001_ha, FAIL, + "CRC Check for iButton Feature Failed!!!\n"); + return -EBUSY; + } + } + } + pm8001_dbg(pm8001_ha, INIT, "SPCv soft reset Complete\n"); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + pm8001_dbg(pm8001_ha, INIT, "chip reset start\n"); + + /* do SPCv chip reset. 
 */
+ pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+ pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
+
+ /* Check whether this delay is required or not */
+ /* delay 10 usec */
+ udelay(10);
+
+ /* wait for 20 msec until the firmware gets reloaded */
+ i = 20;
+ do {
+ mdelay(1);
+ } while ((--i) != 0);
+
+ pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt number to enable
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
+ return;
+#endif
+ pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt number to disable
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
+ return;
+#endif
+ pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+/**
+ * mpi_ssp_completion - process the event that the FW responds to the SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request, for example an IO request, after it
+ * has filled the SG data with the data, it will trigger this event to indicate
+ * that it has finished the job; please check the corresponding buffer.
+ * We then tell the caller, who may be waiting for the result, to notify the
+ * upper layer that the task has been finished.
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + + pm8001_dbg(pm8001_ha, DEV, + "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t); + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS ,param = 0x%x\n", + param); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW ,param = 0x%x\n", + param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case 
IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = 
SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_TM_TAG_NOT_FOUND: + pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ", + psspPayload->ssp_resp_iu.status); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + if (t->slow_task) + complete(&t->slow_task->completion); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + 
"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case 
IO_XFER_ERROR_INTERNAL_CRC_ERROR: + pm8001_dbg(pm8001_ha, IOERR, + "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + return; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low, temp_sata_addr_hi; + u8 sata_addr_hi[4]; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + param = le32_to_cpu(psataPayload->param); + tag = le32_to_cpu(psataPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + + if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + + if (status != IO_SUCCESS) { + pm8001_dbg(pm8001_ha, FAIL, + "IO failed device_id %u status 0x%x tag %d\n", + pm8001_dev->device_id, status, tag); + } + + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (dev_is_expander(t->dev->parent->dev_type)))) { + for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of IO Failure Drive:%08x%08x\n", + temp_sata_addr_hi, + temp_sata_addr_low); + + } else { + pm8001_dbg(pm8001_ha, FAIL, + "SAS Address of 
IO Failure Drive:%016llx\n", + SAS_ADDR(t->dev->sas_addr)); + } + } + switch (status) { + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + pm8001_dbg(pm8001_ha, IO, + "SAS_PROTO_RESPONSE len = %d\n", + param); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == DMA_FROM_DEVICE) { + len = sizeof(struct pio_setup_fis); + pm8001_dbg(pm8001_ha, IO, + "PIO read len = %d\n", len); + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { + len = sizeof(struct set_dev_bits_fis); + pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", + len); + } else { + len = sizeof(struct dev_to_host_fis); + pm8001_dbg(pm8001_ha, IO, "other len = %d\n", + len); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + pm8001_dbg(pm8001_ha, IO, + "response too large\n"); + } + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_DMA: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, 
"IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_DS_IN_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown status device_id %u status 0x%x tag %d\n", + pm8001_dev->device_id, status, tag); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + if (t->slow_task) + complete(&t->slow_task->completion); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + spin_unlock_irqrestore(&circularQ->oq_lock, + circularQ->lock_flags); + pm8001_ccb_task_free_done(pm8001_ha, ccb); + spin_lock_irqsave(&circularQ->oq_lock, + circularQ->lock_flags); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + + if (event) + pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension by aborting the link - libata does what we want */ + if (pm8001_dev) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); + return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + + ts = &t->task_status; + pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event); + switch (event) { + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = 
SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, FAIL, + "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + pm8001_dbg(pm8001_ha, IO, + "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n"); + break; + case IO_XFER_PIO_SETUP_ERROR: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + pm8001_dbg(pm8001_ha, FAIL, + "IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + pm8001_dbg(pm8001_ha, FAIL, "IO_XFR_DMA_ACTIVATE_TIMEOUT\n"); + /* TBC: used default set values */ + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + pm8001_dbg(pm8001_ha, IO, "Unknown status 0x%x\n", event); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + pm8001_dbg(pm8001_ha, DEV, "tag::0x%x status::0x%x\n", tag, status); + + switch (status) { + + case IO_SUCCESS: + pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_GOOD; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + struct scatterlist *sg_resp = &t->smp_task.smp_resp; + u8 *payload; + void *to; + + pm8001_dbg(pm8001_ha, IO, + "DIRECT RESPONSE Length:%d\n", + param); + to = kmap_atomic(sg_page(sg_resp)); + payload = to + sg_resp->offset; + for (i = 0; i < param; i++) { + *(payload + i) = psmpPayload->_r_a[i]; + pm8001_dbg(pm8001_ha, IO, + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(payload + i), + psmpPayload->_r_a[i]); + } + kunmap_atomic(to); + } + break; + case IO_ABORTED: + pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_OVERFLOW: + pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + atomic_dec(&pm8001_dev->running_req); + break; + case IO_NO_DEVICE: + pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + pm8001_dbg(pm8001_ha, IO, + "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_dbg(pm8001_ha, FAIL, + "task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, ccb); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, ccb); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. 
+ * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + memset((u8 *)&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + + pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, + sizeof(payload), 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 deviceType = pPayload->sas_identify.dev_type; + u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + struct pm8001_port *port = &pm8001_ha->port[port_id]; + + if (deviceType == SAS_END_DEVICE) { + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + } + + port->wide_port_phymap |= (1U << phy_id); + pm8001_get_lrate_mode(phy, link_rate); + phy->sas_phy.oob_mode = SAS_OOB_MODE; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + phy->phy_attached = 1; +} + +/** + * hw_event_sas_phy_up - FW tells me a SAS phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + phy->port = port; + port->port_id = port_id; + port->port_state = portstate; + port->wide_port_phymap |= (1U << phy_id); + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + pm8001_dbg(pm8001_ha, MSG, + "portid:%d; phyid:%d; linkrate:%d; portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType); + + switch (deviceType) { + case SAS_PHY_UNUSED: + pm8001_dbg(pm8001_ha, MSG, "device type no device.\n"); + break; + case SAS_END_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "end device.\n"); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n"); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n", + deviceType); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200); /* delay a moment to wait for disk to spin up */ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up - FW tells me a SATA phy up event. 
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u8 link_rate =
+		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	pm8001_dbg(pm8001_ha, EVENT,
+		   "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n",
+		   phy_id, port_id, link_rate, portstate);
+
+	phy->port = port;
+	port->port_id = port_id;
+	port->port_state = portstate;
+	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+	port->port_attached = 1;
+	pm8001_get_lrate_mode(phy, link_rate);
+	phy->phy_type |= PORT_TYPE_SATA;
+	phy->phy_attached = 1;
+	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+	       sizeof(struct dev_to_host_fis));
+	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+	phy->identify.device_type = SAS_SATA_DEV;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that the phy is down.
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + u32 port_sata = (phy->phy_type & PORT_TYPE_SATA); + port->port_state = portstate; + phy->identify.device_type = 0; + phy->phy_attached = 0; + switch (portstate) { + case PORT_VALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n", + phy_id, port_id); + break; + case PORT_INVALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n", + phy_id, port_id); + pm8001_dbg(pm8001_ha, MSG, + " Last phy Down and port invalid\n"); + if (port_sata) { + phy->phy_type = 0; + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(&phy->sas_phy); + break; + case PORT_IN_RESET: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n", + phy_id, port_id); + break; + case PORT_NOT_ESTABLISHED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n", + phy_id, port_id); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n", + phy_id, port_id); + pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n"); + if (port_sata) { + port->port_attached = 0; + phy->phy_type = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(&phy->sas_phy); + break; + default: + port->port_attached = 0; + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + break; + + } + if (port_sata && (portstate != PORT_IN_RESET)) + sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL, + GFP_ATOMIC); +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid) & 0xFF; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + pm8001_dbg(pm8001_ha, INIT, + "phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id); + if (status == 0) + phy->phy_state = PHY_LINK_DOWN; + + if (pm8001_ha->flags == PM8001F_RUN_TIME && + phy->enable_completion != NULL) { + complete(phy->enable_completion); + phy->enable_completion = NULL; + } + return 0; + +} + +/** + * mpi_thermal_hw_event - a thermal hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Local high temperature violated!\n"); + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8)); + } + if (thermal_event & 0x10) { + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Remote high temperature violated!\n"); + pm8001_dbg(pm8001_ha, IO, + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24)); + } + return 0; +} + +/** + * mpi_hw_event - The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags, i; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + pm8001_dbg(pm8001_ha, DEV, + "portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n", + phy_id, port_id); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_DOWN: + hw_event_phy_down(pm8001_ha, piomb); + phy->phy_state = PHY_LINK_DISABLE; + break; + case HW_EVENT_PORT_INVALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n"); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_PHY_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + 
sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); + break; + case HW_EVENT_BROADCAST_EXP: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_MALFUNCTION: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id); + break; + case HW_EVENT_BROADCAST_SES: + pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, + GFP_ATOMIC); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id); + sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + if (!pm8001_ha->phy[phy_id].reset_completion) { + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + } + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + port->port_state = portstate; + sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, + GFP_ATOMIC); + if (pm8001_ha->phy[phy_id].reset_completion) { + 
pm8001_ha->phy[phy_id].port_reset_status = + PORT_RESET_TMO; + complete(pm8001_ha->phy[phy_id].reset_completion); + pm8001_ha->phy[phy_id].reset_completion = NULL; + } + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n", + phy_id, port_id); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + if (port->wide_port_phymap & (1 << i)) { + phy = &pm8001_ha->phy[i]; + sas_notify_phy_event(&phy->sas_phy, + PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC); + port->wide_port_phymap &= ~(1 << i); + } + } + break; + case HW_EVENT_PORT_RECOVER: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n", + phy_id, port_id); + hw_event_port_recover(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); + if (pm8001_ha->phy[phy_id].reset_completion) { + pm8001_ha->phy[phy_id].port_reset_status = + PORT_RESET_SUCCESS; + complete(pm8001_ha->phy[phy_id].reset_completion); + pm8001_ha->phy[phy_id].reset_completion = NULL; + } + phy->phy_attached = 1; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + port->port_state = portstate; + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid) & 0xFF; + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n", + phyid, status); + if (status == PHY_STOP_SUCCESS || + status == PHY_STOP_ERR_DEVICE_ATTACHED) { + phy->phy_state = PHY_LINK_DISABLE; + phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED; + phy->sas_phy.linkrate = SAS_PHY_DISABLED; + } + + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + u32 tag = le32_to_cpu(pPayload->tag); + + pm8001_dbg(pm8001_ha, MSG, + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd); + pm8001_tag_free(pm8001_ha, tag); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + 
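+	/* Only log the response; the phy profile payload is not consumed here */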
pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + u32 tag; + u8 page_code; + int rc = 0; + struct set_phy_profile_resp *pPayload = + (struct set_phy_profile_resp *)(piomb + 4); + u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); + u32 status = le32_to_cpu(pPayload->status); + + tag = le32_to_cpu(pPayload->tag); + page_code = (u8)((ppc_phyid & 0xFF00) >> 8); + if (status) { + /* status is FAILED */ + pm8001_dbg(pm8001_ha, FAIL, + "PhyProfile command failed with status 0x%08X\n", + status); + rc = -1; + } else { + if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { + pm8001_dbg(pm8001_ha, FAIL, "Invalid page code 0x%X\n", + page_code); + rc = -1; + } + } + pm8001_tag_free(pm8001_ha, tag); + return rc; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + pm8001_dbg(pm8001_ha, MSG, + "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + pm8001_dbg(pm8001_ha, MSG, " pm80xx_addition_functionality\n"); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @circularQ: outbound circular queue + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n"); + break; + case OPC_OUB_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n"); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_THERMAL_EVENT\n"); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n"); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n"); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n"); 
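+		/* Completion of a LOCAL PHY CONTROL request issued via pm80xx_chip_phy_ctl_req() */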
+ pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n"); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + pm8001_dbg(pm8001_ha, MSG, "unregister the device\n"); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n"); + break; + case OPC_OUB_SATA_COMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); + mpi_sata_completion(pm8001_ha, circularQ, piomb); + break; + case OPC_OUB_SATA_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); + mpi_sata_event(pm8001_ha, circularQ, piomb); + break; + case OPC_OUB_SSP_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n"); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n"); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n"); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n"); + break; + case OPC_OUB_GPIO_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n"); + break; + case OPC_OUB_GENERAL_EVENT: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n"); + pm8001_mpi_general_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SAS_DIAG_MODE_START_END\n"); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n"); + break; + case OPC_OUB_GET_TIME_STAMP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n"); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n"); + break; + case OPC_OUB_PORT_CONTROL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n"); + break; + case OPC_OUB_SMP_ABORT_RSP: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n"); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n"); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n"); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n"); + break; + case OPC_OUB_SET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n"); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n"); + break; + case OPC_OUB_SET_DEV_INFO: + pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); + break; + /* spcv specific commands */ + case OPC_OUB_PHY_START_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc); + 
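+		/* mpi_phy_stop_resp() below marks the phy link disabled when the stop succeeds */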
mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc); + mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + pm8001_dbg(pm8001_ha, MSG, + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc); + break; + } +} + +static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_1:0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_2: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_3: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_4: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_0)); + pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n", + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_1)); +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 bc; + u32 ret = MPI_IO_STATUS_FAIL; + u32 regval; + + /* + * Fatal errors are programmed to be signalled in irq vector + * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl. 
+ * fatal_err_interrupt + */ + if (vec == (pm8001_ha->max_q_num - 1)) { + u32 mipsall_ready; + + if (pm8001_ha->chip_id == chip_8008 || + pm8001_ha->chip_id == chip_8009) + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT; + else + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT; + + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + if ((regval & mipsall_ready) != mipsall_ready) { + pm8001_ha->controller_fatal_error = true; + pm8001_dbg(pm8001_ha, FAIL, + "Firmware Fatal error! Regval:0x%x\n", + regval); + pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR); + print_scratchpad_registers(pm8001_ha); + return ret; + } else { + /*read scratchpad rsvd 0 register*/ + regval = pm8001_cr32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0); + switch (regval) { + case NON_FATAL_SPBC_LBUS_ECC_ERR: + case NON_FATAL_BDMA_ERR: + case NON_FATAL_THERM_OVERTEMP_ERR: + /*Clear the register*/ + pm8001_cw32(pm8001_ha, 0, + MSGU_SCRATCH_PAD_RSVD_0, + 0x00000000); + break; + default: + break; + } + } + } + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags); + do { + /* spurious interrupt during setup if kexec-ing and + * driver doing a doorbell access w/ the pre-kexec oq + * interrupt setup. + */ + if (!circularQ->pi_virt) + break; + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, circularQ, + (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags); + return ret; +} + +/* DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ + [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm80xx_chip_smp_req - send an SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp, *smp_req; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + u32 i, length; + u8 *payload; + u8 *to; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + pm8001_dbg(pm8001_ha, IO, "SMP Frame Length %d\n", sg_req->length); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + + smp_req = &task->smp_task.smp_req; + to = kmap_atomic(sg_page(smp_req)); + payload = to + smp_req->offset; + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + pm8001_dbg(pm8001_ha, IO, "SMP REQUEST INDIRECT MODE\n"); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. */ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(payload + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) + 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n"); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(payload + i); + pm8001_dbg(pm8001_ha, IO, + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(payload)); + } else { + smp_cmd.smp_req[i] = *(payload + i); + pm8001_dbg(pm8001_ha, IO, + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(payload)); + } + } + kunmap_atomic(to); + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &smp_cmd, + sizeof(smp_cmd), 0); + if (rc) + goto err_out_2; + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + DMA_FROM_DEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + DMA_TO_DEVICE); + return rc; +} + +static int 
check_enc_sas_cmd(struct sas_task *task) +{ + u8 cmd = task->ssp_task.cmd->cmnd[0]; + + if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +static u32 pm80xx_chip_get_q_index(struct sas_task *task) +{ + struct request *rq = sas_task_find_rq(task); + + if (!rq) + return 0; + + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq)); +} + +/** + * pm80xx_chip_ssp_io_req - send an SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. + */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + u64 phys_addr, end_addr; + u32 end_addr_high, end_addr_low; + u32 q_index; + u32 opc = OPC_INB_SSPINIIOSTART; + + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + q_index = pm80xx_chip_get_q_index(task); + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + pm8001_dbg(pm8001_ha, IO, + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cmd->cmnd[0]); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 
0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + + if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(ssp_cmd.enc_len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1U<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4); + + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | + (task->ssp_task.cmd->cmnd[3] << 16) | + (task->ssp_task.cmd->cmnd[4] << 8) | + (task->ssp_task.cmd->cmnd[5])); + } else { + pm8001_dbg(pm8001_ha, IO, + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cmd->cmnd[0], q_index); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(ssp_cmd.len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + + return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd, + sizeof(ssp_cmd), q_index); +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + struct ata_queued_cmd *qc = task->uldd_task; + u32 tag = ccb->ccb_tag, q_index; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr, end_addr; + u32 end_addr_high, end_addr_low; + u32 ATAP = 0x0; + u32 dir, retfis = 0; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + + q_index = pm80xx_chip_get_q_index(task); + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { + ATAP = 0x04; /* no data*/ + pm8001_dbg(pm8001_ha, IO, "no data\n"); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + pm8001_dbg(pm8001_ha, IO, "DMA\n"); + } else { + ATAP = 0x05; /* PIO*/ + pm8001_dbg(pm8001_ha, IO, "PIO\n"); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + if (task->ata_task.return_fis_on_success) + retfis = 1; + sata_cmd.sata_fis = task->ata_task.fis; + if 
(likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + pm8001_dbg(pm8001_ha, IO, + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command); + opc = OPC_INB_SATA_DIF_ENC_IO; + /* set encryption bit; dad (bits 0-1) is 0 */ + sata_cmd.retfis_ncqtag_atap_dir_m_dad = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(sata_cmd.enc_len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + sata_cmd.enc_esgl = + cpu_to_le32(1 << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4); + + /* set tweak values. 
Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + pm8001_dbg(pm8001_ha, IO, + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, q_index); + /* dad (bits 0-1) is 0 */ + sata_cmd.retfis_ncqtag_atap_dir_m_dad = + cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) | + ((ATAP & 0x3f) << 10) | dir); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + + /* Check 4G Boundary */ + end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != sata_cmd.addr_high) { + pm8001_dbg(pm8001_ha, FAIL, + "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", + dma_addr, + le32_to_cpu(sata_cmd.len), + end_addr_high, end_addr_low); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle; + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + trace_pm80xx_request_issue(pm8001_ha->id, + ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, + ccb->ccb_tag, opc, + qc ? qc->tf.command : 0, // ata opcode + ccb->device ? atomic_read(&ccb->device->running_req) : 0); + return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd, + sizeof(sata_cmd), q_index); +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. 
+ */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + pm8001_dbg(pm8001_ha, INIT, "PHY START REQ for phy_id %d\n", phy_id); + + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /* + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + */ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/** + * pm80xx_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @phy_id: the phy id which we wanted to start up. + */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + + return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, + sizeof(payload), 0); +} + +/* + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + u32 linkrate, phy_id; + int rc; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + struct pm8001_port *port = dev->port->lldd_port; + + memset(&payload, 0, sizeof(payload)); + ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL); + if (!ccb) + return -SAS_QUEUE_FULL; + + payload.tag = cpu_to_le32(ccb->ccb_tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + dev_is_expander(pm8001_dev->dev_type)) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && dev_is_expander(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? 
+ pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((port->port_id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + pm8001_dbg(pm8001_ha, INIT, + "register device req phy_id 0x%x port_id 0x%x\n", phy_id, + (port->port_id & 0xFF)); + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_ccb_free(pm8001_ha, ccb); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @phyId: the phy id which we wanted to operate + * @phy_op: phy operation to request + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + u32 tag; + int rc; + struct local_phy_ctl_req payload; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + + payload.tag = cpu_to_le32(tag); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; +} + +static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) +{ +#ifdef PM8001_USE_MSIX + return 1; +#else + u32 value; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; +#endif +} + +/** + * pm80xx_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @vec: irq number. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + pm8001_dbg(pm8001_ha, DEVIO, + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30)); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, + u32 operation, u32 phyid, + u32 length, u32 *buf) +{ + u32 tag, i, j = 0; + int rc; + struct set_phy_profile_req payload; + u32 opc = OPC_INB_SET_PHY_PROFILE; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n"); + return; + } + + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = + cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF)); + pm8001_dbg(pm8001_ha, DISC, + " phy profile command for phy %x ,length is %d\n", + le32_to_cpu(payload.ppc_phyid), length); + for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { + payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); + j++; + } + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); +} + +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf) +{ + u32 i; + + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + mpi_set_phy_profile_req(pm8001_ha, + SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); + length = length + PHY_DWORD_LENGTH; + } + pm8001_dbg(pm8001_ha, INIT, "phy settings completed\n"); +} + +void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha, + u32 phy, u32 length, u32 *buf) +{ + u32 tag, opc; + int rc, i; + struct set_phy_profile_req payload; + + memset(&payload, 0, sizeof(payload)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) { + pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n"); + return; + } + + opc = OPC_INB_SET_PHY_PROFILE; + + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = + cpu_to_le32(((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8) + | (phy & 0xFF)); + + for (i = 0; i < length; i++) + payload.reserved[i] = cpu_to_le32(*(buf + i)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + pm8001_dbg(pm8001_ha, INIT, "PHY %d settings applied\n", phy); +} +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_post_init = pm80xx_chip_post_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interrupt = pm80xx_chip_is_our_interrupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, + .fatal_errors = pm80xx_fatal_errors, + .hw_event_ack_req 
= pm80xx_hw_event_ack_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 0000000000..eb8fd37b20 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1665 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ 
+#define OPC_OUB_RSVD4 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x04 << 8) +#define LINKRATE_120 (0x08 << 8) + +/*phy_stop*/ +#define PHY_STOP_SUCCESS 0x00 +#define PHY_STOP_ERR_DEVICE_ATTACHED 0x1046 + +/* phy_profile */ +#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04 +#define PHY_DWORD_LENGTH 0xC + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_PAGE_CODE_7H 0x6 +#define THERMAL_PAGE_CODE_8H 0x7 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 50) /* 30 sec */ +#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 50) /* 15 sec */ + +/* + Making ORR 
bigger than IT NEXUS LOSS which is 2000000us = 2 second. + Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) +/* Port recovery timeout, 10000 ms for PM8006 controller */ +#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000 + +#ifdef __LITTLE_ENDIAN_BITFIELD +struct sas_identify_frame_local { + /* Byte 0 */ + u8 frame_type:4; + u8 dev_type:3; + u8 _un0:1; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un20:1; + u8 smp_iport:1; + u8 stp_iport:1; + u8 ssp_iport:1; + u8 _un247:4; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un30:1; + u8 smp_tport:1; + u8 stp_tport:1; + u8 ssp_tport:1; + u8 _un347:4; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + +} __packed; + +#elif defined(__BIG_ENDIAN_BITFIELD) +struct sas_identify_frame_local { + /* Byte 0 */ + u8 _un0:1; + u8 dev_type:3; + u8 frame_type:4; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un247:4; + u8 ssp_iport:1; + u8 stp_iport:1; + u8 smp_iport:1; + u8 _un20:1; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un347:4; + u8 ssp_tport:1; + u8 stp_tport:1; + u8 smp_tport:1; + u8 _un30:1; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; +} __packed; +#else +#error "Bitfield order not defined!" +#endif + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame_local sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate an SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVENT response + * use to indicate a SATA Completion (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVENT response + * use to indicate an SSP Completion (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response (64 bytes) + */ +struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed,
aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 retfis_ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct 
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error code 0x31 and 0x32 are not using (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +#define IO_FATAL_ERROR 0x51 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS 0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add an error code, update this value as well. + * It is used as an index. + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE */ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001 +#define SPCv_MSGU_CFG_TABLE_RESET 0x002 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_SCRATCH_PAD_RSVD_0 0x6C +#define MSGU_SCRATCH_PAD_RSVD_1 0x70 + +#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) ((((x >> 4) & 0x7) == 0x7) || \ + (((x >> 4) & 0x7) == 0x4)) +#define MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) (((x >> 10) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x) (((x >> 12) & 0x3) == 0x2) +#define MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(x) \ + (MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) || \ + MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x)) + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* clear all + interrupt vector */ +/* MSIX Interrupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 +#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \ + SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) +#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/*state definition for Scratchpad Rsvd 0, Offset 0x6C, Non-fatal*/ +#define NON_FATAL_SPBC_LBUS_ECC_ERR 0x70000001 +#define NON_FATAL_BDMA_ERR 0xE0000001 +#define NON_FATAL_THERM_OVERTEMP_ERR 0x80000001 + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ 
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ +#define MAIN_MPI_ILA_RELEASE_TYPE 0xA4 /* DWORD 0x29 */ +#define MAIN_MPI_INACTIVE_FW_VERSION 0XB0 /* DWORD 0x2C */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 
0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define 
DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + + +#define MEMBASE_II_SHIFT_REGISTER 0x1010 +#endif + +/** + * As we know sleep (1~20) ms may result in sleep longer than ~20 ms, hence we + * choose 20 ms interval. + */ +#define FW_READY_INTERVAL 20 diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c new file mode 100644 index 0000000000..344aface9c --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain <akshatzen@google.com> + */ + +#define CREATE_TRACE_POINTS +#include "pm80xx_tracepoints.h" diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h new file mode 100644 index 0000000000..5e669a8a93 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events in pm8001 driver. + * + * Copyright 2020 Google LLC + * Author: Akshat Jain <akshatzen@google.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pm80xx + +#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PM80XX_H + +#include <linux/tracepoint.h> +#include "pm8001_sas.h" + +TRACE_EVENT(pm80xx_request_issue, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_request_complete, + TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode, + u16 ata_opcode, int running_req), + + TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, phy_id) + __field(u32, htag) + __field(u32, ctlr_opcode) + __field(u16, ata_opcode) + __field(int, running_req) + ), + + TP_fast_assign( + __entry->id = id; + __entry->phy_id = phy_id; + __entry->htag = htag; + __entry->ctlr_opcode = ctlr_opcode; + __entry->ata_opcode = ata_opcode; + __entry->running_req = running_req; + ), + + TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d", + __entry->id, __entry->phy_id, __entry->htag, + __entry->ctlr_opcode, __entry->ata_opcode, + __entry->running_req) +); + +TRACE_EVENT(pm80xx_mpi_build_cmd, + TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci), + + TP_ARGS(id, opc, htag, qi, pi, ci), + + TP_STRUCT__entry( + __field(u32, id) + __field(u32, opc) + __field(u32, htag) + __field(u32, qi) + __field(u32, pi) + __field(u32, ci) + ), + + TP_fast_assign( + __entry->id = id; + __entry->opc = opc; + __entry->htag = htag; + __entry->qi = qi; + __entry->pi = pi; + __entry->ci = ci; + ), + + TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = 
%u PI = %u CI = %u", + __entry->id, __entry->opc, __entry->htag, __entry->qi, + __entry->pi, __entry->ci) +); + +#endif /* _TRACE_PM80XX_H_ */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE pm80xx_tracepoints + +#include <trace/define_trace.h>
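
The three TRACE_EVENT() definitions above expand into trace_pm80xx_request_issue(), trace_pm80xx_request_complete() and trace_pm80xx_mpi_build_cmd() helpers that the driver's I/O paths can call. Below is a minimal usage sketch only: the wrapper function example_trace_issue() and the way its arguments are gathered are illustrative assumptions, not the driver's actual call sites; only the trace_pm80xx_request_issue() prototype comes from the TRACE_EVENT() block above.

/* Hedged sketch: emit the "request issue" tracepoint for one command.
 * example_trace_issue() is a hypothetical helper added for illustration.
 */
#include "pm80xx_tracepoints.h"

static void example_trace_issue(u32 ctlr_id, u32 phy_id, u32 htag,
				u32 ctlr_opcode, u16 ata_opcode,
				int running_req)
{
	/* One event per issued request: controller id, attached phy,
	 * host tag, controller and ATA opcodes, and the number of
	 * requests currently outstanding on the device.
	 */
	trace_pm80xx_request_issue(ctlr_id, phy_id, htag,
				   ctlr_opcode, ata_opcode, running_req);
}

With the event group enabled (for example via /sys/kernel/tracing/events/pm80xx/enable), each call produces one line in the trace buffer formatted by the TP_printk() template defined above.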