25 files changed, 44843 insertions, 0 deletions
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig new file mode 100644 index 000000000..c299f7e07 --- /dev/null +++ b/drivers/scsi/mpt3sas/Kconfig @@ -0,0 +1,83 @@ +# +# Kernel configuration file for the MPT3SAS +# +# This code is based on drivers/scsi/mpt3sas/Kconfig +# Copyright (C) 2012-2014 LSI Corporation +# (mailto:DL-MPTFusionLinux@lsi.com) + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# NO WARRANTY +# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT +# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is +# solely responsible for determining the appropriateness of using and +# distributing the Program and assumes all risks associated with its +# exercise of rights under this Agreement, including but not limited to +# the risks and costs of program errors, damage to or loss of data, +# programs or equipment, and unavailability or interruption of operations. + +# DISCLAIMER OF LIABILITY +# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED +# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, +# USA. + +config SCSI_MPT3SAS + tristate "LSI MPT Fusion SAS 3.0 & SAS 2.0 Device Driver" + depends on PCI && SCSI + select SCSI_SAS_ATTRS + select RAID_ATTRS + select IRQ_POLL + help + This driver supports PCI-Express SAS 12Gb/s Host Adapters. + +config SCSI_MPT2SAS_MAX_SGE + int "LSI MPT Fusion SAS 2.0 Max number of SG Entries (16 - 256)" + depends on PCI && SCSI && SCSI_MPT3SAS + default "128" + range 16 256 + help + This option allows you to specify the maximum number of scatter- + gather entries per I/O. The driver default is 128, which matches + MAX_PHYS_SEGMENTS in most kernels; in SuSE kernels it can be 256. + It may be decreased down to 16. Decreasing this + parameter reduces memory requirements per controller instance. + +config SCSI_MPT3SAS_MAX_SGE + int "LSI MPT Fusion SAS 3.0 Max number of SG Entries (16 - 256)" + depends on PCI && SCSI && SCSI_MPT3SAS + default "128" + range 16 256 + help + This option allows you to specify the maximum number of scatter- + gather entries per I/O. The driver default is 128, which matches + MAX_PHYS_SEGMENTS in most kernels; in SuSE kernels it can be 256. + It may be decreased down to 16.
Decreasing this + parameter reduces memory requirements per controller instance. + +config SCSI_MPT2SAS + tristate "Legacy MPT2SAS config option" + default n + select SCSI_MPT3SAS + depends on PCI && SCSI + help + Dummy config option for backwards compatibility: configure the MPT3SAS + driver instead. diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile new file mode 100644 index 000000000..e76d994db --- /dev/null +++ b/drivers/scsi/mpt3sas/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# mpt3sas makefile +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o +mpt3sas-y += mpt3sas_base.o \ + mpt3sas_config.o \ + mpt3sas_scsih.o \ + mpt3sas_transport.o \ + mpt3sas_ctl.o \ + mpt3sas_trigger_diag.o \ + mpt3sas_warpdrive.o \ + mpt3sas_debugfs.o diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h new file mode 100644 index 000000000..ed3923f8d --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -0,0 +1,1300 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 + * + * mpi2.h Version: 02.00.54 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. + * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnostic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field.
+ * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-14 02.00.36 Updated copyright information. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Scratchpad registers to + * MPI2_SYSTEM_INTERFACE_REGS. + * Added MPI2_DIAG_SBR_RELOAD. + * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT + * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT + * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines + * to be unique within first 32 characters. + * Removed AHCI support. + * Removed SOP support. + * Bumped MPI2_HEADER_VERSION_UNIT. + * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. + * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. + * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-22-18 02.00.51 Added SECURE_BOOT define. + * Bumped MPI2_HEADER_VERSION_UNIT + * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * Added MPI2_IOCSTATUS_FAILURE + * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT + * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT + * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_H +#define MPI2_H + +/***************************************************************************** +* +* MPI Version Definitions +* +*****************************************************************************/ + +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) + +/*major version for all MPI v2.x */ +#define MPI2_VERSION_MAJOR (0x02) + +/*minor version for MPI v2.0 compatible products */ +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_VERSION_02_00 (0x0200) + +/*minor version for MPI v2.5 compatible products */ +#define MPI25_VERSION_MINOR (0x05) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) +#define MPI2_VERSION_02_05 (0x0205) + +/*minor version for MPI v2.6 compatible products */ +#define MPI26_VERSION_MINOR (0x06) +#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI26_VERSION_MINOR) +#define MPI2_VERSION_02_06 (0x0206) + + +/* Unit and Dev versioning for this MPI header set */ +#define MPI2_HEADER_VERSION_UNIT (0x39) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ + MPI2_HEADER_VERSION_DEV) + +/***************************************************************************** +* +* IOC State Definitions +* +*****************************************************************************/ + +#define MPI2_IOC_STATE_RESET (0x00000000) +#define MPI2_IOC_STATE_READY (0x10000000) +#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) +#define MPI2_IOC_STATE_FAULT (0x40000000) +#define MPI2_IOC_STATE_COREDUMP (0x50000000) + +#define MPI2_IOC_STATE_MASK (0xF0000000) +#define MPI2_IOC_STATE_SHIFT (28) + +/*Fault state range for product specific codes */ +#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) +#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + +/***************************************************************************** +* +* System Interface Register Definitions +* +*****************************************************************************/ + +typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { + U32 Doorbell; /*0x00 */ + U32 WriteSequence; /*0x04 */ + U32 HostDiagnostic; /*0x08 */ + U32 Reserved1; /*0x0C */ + U32 DiagRWData; /*0x10 */ + U32 DiagRWAddressLow; /*0x14 */ + U32 DiagRWAddressHigh; /*0x18 */ + U32 Reserved2[5]; /*0x1C */ + U32 HostInterruptStatus; /*0x30 */ + U32 HostInterruptMask; /*0x34 */ + U32 DCRData; /*0x38 */ + U32 DCRAddress; /*0x3C */ + U32 Reserved3[2]; /*0x40 */ + U32 ReplyFreeHostIndex; /*0x48 */ + U32 Reserved4[8]; /*0x4C */ + U32 ReplyPostHostIndex; /*0x6C */ + U32 Reserved5; /*0x70 */ + U32 HCBSize; /*0x74 */ + U32 HCBAddressLow; /*0x78 */ + U32 HCBAddressHigh; /*0x7C */ + U32 Reserved6[12]; /*0x80 */ + U32 Scratchpad[4]; /*0xB0 */ + U32 RequestDescriptorPostLow; /*0xC0 */ + U32
RequestDescriptorPostHigh; /*0xC4 */ + U32 AtomicRequestDescriptorPost;/*0xC8 */ + U32 Reserved7[13]; /*0xCC */ +} MPI2_SYSTEM_INTERFACE_REGS, + *PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, + *pMpi2SystemInterfaceRegs_t; + +/* + *Defines for working with the Doorbell register. + */ +#define MPI2_DOORBELL_OFFSET (0x00000000) + +/*IOC --> System values */ +#define MPI2_DOORBELL_USED (0x08000000) +#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) +#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) +#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) +#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) + +/*System --> IOC values */ +#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) +#define MPI2_DOORBELL_FUNCTION_SHIFT (24) +#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) +#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + +/* + *Defines for the WriteSequence register + */ +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +/* + *Defines for the HostDiagnostic register + */ +#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) + +#define MPI26_DIAG_SECURE_BOOT (0x80000000) + +#define MPI2_DIAG_SBR_RELOAD (0x00002000) + +#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) + +/* Defines for V7A/V7R HostDiagnostic Register */ +#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000) +#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800) + +#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) +#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) +#define MPI2_DIAG_HCB_MODE (0x00000100) +#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) +#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) +#define MPI2_DIAG_RESET_HISTORY (0x00000020) +#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) +#define MPI2_DIAG_RESET_ADAPTER (0x00000004) +#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) + +/* + *Offsets for DiagRWData and address + */ +#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) +#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) +#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) + +/* + *Defines for the HostInterruptStatus register + */ +#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) +#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) +#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) +#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS + +/* + *Defines for the HostInterruptMask register + */ +#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) +#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) +#define MPI2_HIM_REPLY_INT_MASK (0x00000008) +#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK +#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) +#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK + +/* + *Offsets for DCRData and address + */ +#define MPI2_DCR_DATA_OFFSET (0x00000038) +#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) + +/* + *Offset for the Reply Free Queue + */ +#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET 
(0x00000048) + +/* + *Defines for the Reply Descriptor Post Queue + */ +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) +#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) +#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) +#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/ + + +/* + *Defines for the HCBSize and address + */ +#define MPI2_HCB_SIZE_OFFSET (0x00000074) +#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) +#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) + +#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) +#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) + +/* + *Offsets for the Scratchpad registers + */ +#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0) +#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4) +#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8) +#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC) + +/* + *Offsets for the Request Descriptor Post Queue + */ +#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) +#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) +#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8) + +/*Hard Reset delay timings */ +#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) +#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) +#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) + +/***************************************************************************** +* +* Message Descriptors +* +*****************************************************************************/ + +/*Request Descriptors */ + +/*Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DescriptorTypeDependent; /*0x06 */ +} MPI2_DEFAULT_REQUEST_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, + *pMpi2DefaultRequestDescriptor_t; + +/*defines for the RequestFlags field */ +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E) +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) +#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) +#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10) + +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) + +/*High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + *pMpi2HighPriorityRequestDescriptor_t; + +/*SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DevHandle; /*0x06 */ +} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, + *pMpi2SCSIIORequestDescriptor_t; + +/*SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + 
*PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + *pMpi2SCSITargetRequestDescriptor_t; + +/*RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved; /*0x06 */ +} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + *pMpi2RAIDAcceleratorRequestDescriptor_t; + +/*Fast Path SCSI IO Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + *pMpi25FastPathSCSIIORequestDescriptor_t; + +/*PCIe Encapsulated Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR, + Mpi26PCIeEncapsulatedRequestDescriptor_t, + *pMpi26PCIeEncapsulatedRequestDescriptor_t; + +/*union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { + MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated; + U64 Words; +} MPI2_REQUEST_DESCRIPTOR_UNION, + *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, + *pMpi2RequestDescriptorUnion_t; + +/*Atomic Request Descriptors */ + +/* + * All Atomic Request Descriptors have the same format, so the following + * structure is used for all Atomic Request Descriptors: + * Atomic Default Request Descriptor + * Atomic High Priority Request Descriptor + * Atomic SCSI IO Request Descriptor + * Atomic SCSI Target Request Descriptor + * Atomic RAID Accelerator Request Descriptor + * Atomic Fast Path SCSI IO Request Descriptor + * Atomic PCIe Encapsulated Request Descriptor + */ + +/*Atomic Request Descriptor */ +typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR { + U8 RequestFlags; /* 0x00 */ + U8 MSIxIndex; /* 0x01 */ + U16 SMID; /* 0x02 */ +} MPI26_ATOMIC_REQUEST_DESCRIPTOR, + *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR, + Mpi26AtomicRequestDescriptor_t, + *pMpi26AtomicRequestDescriptor_t; + +/*for the RequestFlags field, use the same + *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR + */ + +/*Reply Descriptors */ + +/*Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 DescriptorTypeDependent1; /*0x02 */ + U32 DescriptorTypeDependent2; /*0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, + *pMpi2DefaultReplyDescriptor_t; + +/*defines for the ReplyFlags field */ +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) +#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) +#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +/*values for marking a reply descriptor as unused */ +#define 
MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) +#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) + +/*Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 ReplyFrameAddress; /*0x04 */ +} MPI2_ADDRESS_REPLY_DESCRIPTOR, + *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, + *pMpi2AddressReplyDescriptor_t; + +#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) + +/*SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 TaskTag; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + *pMpi2SCSIIOSuccessReplyDescriptor_t; + +/*TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U8 SequenceNumber; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + *pMpi2TargetAssistSuccessReplyDescriptor_t; + +/*Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U8 VP_ID; /*0x02 */ + U8 Flags; /*0x03 */ + U16 InitiatorDevHandle; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + *pMpi2TargetCommandBufferReplyDescriptor_t; + +/*defines for Flags field */ +#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) + +/*RAID Accelerator Success Reply Descriptor */ +typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 Reserved; /*0x04 */ +} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + +/*Fast Path SCSI IO Success Reply Descriptor */ +typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + +/*PCIe Encapsulated Success Reply Descriptor */ +typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR, + Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t, + *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t; + +/*union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION { + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR + PCIeEncapsulatedSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, + 
*PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, + *pMpi2ReplyDescriptorsUnion_t; + +/***************************************************************************** +* +* Message Functions +* +*****************************************************************************/ + +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_FUNCTION_IOC_INIT (0x02) +#define MPI2_FUNCTION_IOC_FACTS (0x03) +#define MPI2_FUNCTION_CONFIG (0x04) +#define MPI2_FUNCTION_PORT_FACTS (0x05) +#define MPI2_FUNCTION_PORT_ENABLE (0x06) +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) +#define MPI2_FUNCTION_EVENT_ACK (0x08) +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) +#define MPI2_FUNCTION_FW_UPLOAD (0x12) +#define MPI2_FUNCTION_RAID_ACTION (0x15) +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define MPI2_FUNCTION_TOOLBOX (0x17) +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) +#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33) +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*Doorbell functions */ +#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) +#define MPI2_FUNCTION_HANDSHAKE (0x42) + +/***************************************************************************** +* +* IOC Status Values +* +*****************************************************************************/ + +/*mask for IOCStatus status value */ +#define MPI2_IOCSTATUS_MASK (0x7FFF) + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + +#define MPI2_IOCSTATUS_SUCCESS (0x0000) +#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI2_IOCSTATUS_BUSY (0x0002) +#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) +#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +/*MPI v2.6 and later */ +#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) +#define MPI2_IOCSTATUS_FAILURE (0x000F) + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + +#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + 
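A minimal sketch (not part of this commit) of how a host driver might consume the descriptor definitions above when draining the reply descriptor post queue: the unused mark distinguishes slots the IOC has not written yet, and the ReplyFlags type field selects between a fast-path success and a full reply frame. The mpi2_handle_reply(), complete_smid(), and handle_reply_frame() names are hypothetical, and the U8/U16/U32/U64 fixed-width typedefs are assumed to come from the companion mpi2_type.h header.

extern void complete_smid(U16 smid);          /* hypothetical helpers */
extern void handle_reply_frame(U32 reply_dma);

static int mpi2_handle_reply(Mpi2ReplyDescriptorsUnion_t *rpf)
{
	U8 reply_type;

	/* Both words still holding the unused mark: nothing posted here yet. */
	if (rpf->Words == 0xFFFFFFFFFFFFFFFFULL)
		return 0;

	reply_type = rpf->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_type) {
	case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
		/* Fast-path success: no reply frame, the SMID alone identifies
		 * the finished I/O.
		 */
		complete_smid(rpf->SCSIIOSuccess.SMID);
		break;
	case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
		/* Full reply frame; its IOCStatus would be masked with
		 * MPI2_IOCSTATUS_MASK before comparing against the values above.
		 */
		handle_reply_frame(rpf->AddressReply.ReplyFrameAddress);
		break;
	default:
		break;
	}
	return 1;
}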
+/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + +#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + +#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + +#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + +#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + +#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) + +/**************************************************************************** +* RAID Accelerator values +****************************************************************************/ + +#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) + +/**************************************************************************** +* IOCStatus flag to indicate that log info is available +****************************************************************************/ + +#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +/**************************************************************************** +* IOCLogInfo Types +****************************************************************************/ + +#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) +#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) +#define MPI2_IOCLOGINFO_TYPE_FC (0x2) +#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) +#define MPI2_IOCLOGINFO_LOG_DATA_MASK 
(0x0FFFFFFF) + +/***************************************************************************** +* +* Standard Message Structures +* +*****************************************************************************/ + +/**************************************************************************** +*Request Message Header for all request messages +****************************************************************************/ + +typedef struct _MPI2_REQUEST_HEADER { + U16 FunctionDependent1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ +} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, *pMPI2RequestHeader_t; + +/**************************************************************************** +* Default Reply +****************************************************************************/ + +typedef struct _MPI2_DEFAULT_REPLY { + U16 FunctionDependent1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 FunctionDependent5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, *pMPI2DefaultReply_t; + +/*common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT { + U8 Dev; /*0x00 */ + U8 Unit; /*0x01 */ + U8 Minor; /*0x02 */ + U8 Major; /*0x03 */ +} MPI2_VERSION_STRUCT; + +typedef union _MPI2_VERSION_UNION { + MPI2_VERSION_STRUCT Struct; + U32 Word; +} MPI2_VERSION_UNION; + +/*LUN field defines, common to many structures */ +#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_LEVEL_1_WORD (0xFF00) +#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + +/***************************************************************************** +* +* Fusion-MPT MPI Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* MPI Simple Element structures +****************************************************************************/ + +typedef struct _MPI2_SGE_SIMPLE32 { + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, *pMpi2SGESimple32_t; + +typedef struct _MPI2_SGE_SIMPLE64 { + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, *pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, + *PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, + *pMpi2SGESimpleUnion_t; + +/**************************************************************************** +* MPI Chain Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_CHAIN32 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, *pMpi2SGEChain32_t; + +typedef struct _MPI2_SGE_CHAIN64 { + U16 
Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, *pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, + *PTR_MPI2_SGE_CHAIN_UNION, + Mpi2SGEChainUnion_t, + *pMpi2SGEChainUnion_t; + +/**************************************************************************** +* MPI Transaction Context Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANSACTION32 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, + *PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, + *pMpi2SGETransaction32_t; + +typedef struct _MPI2_SGE_TRANSACTION64 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, + *PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, + *pMpi2SGETransaction64_t; + +typedef struct _MPI2_SGE_TRANSACTION96 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t; + +typedef struct _MPI2_SGE_TRANSACTION128 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, + *PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, + *pMpi2SGETransactionUnion_t; + +/**************************************************************************** +* MPI SGE union for IO SGL's - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_IO_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t; + +/**************************************************************************** +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, + *PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, + *pMpi2SGETransSimpleUnion_t; + +/**************************************************************************** +* All MPI SGE types union +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t; + 
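A minimal sketch (not part of this commit) of how the simple-element structures above are filled using the FlagsLength packing defined in the section that follows: flags occupy bits 31:24 of FlagsLength and the byte count bits 23:0. The build_single_sge() name and its parameters are hypothetical, the U32/U64 typedefs are assumed from mpi2_type.h, and the little-endian conversion a real driver would perform is omitted for brevity.

static void build_single_sge(Mpi2SGESimple64_t *sge, U64 dma_addr, U32 length)
{
	/* One host-to-IOC buffer that is also the last element of the list. */
	U32 flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST;

	/* The macro shifts the flags into bits 31:24 and masks the length. */
	sge->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(flags, length);
	sge->Address = dma_addr;
}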
+/**************************************************************************** +* MPI SGE field definition and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) +#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) +#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) +#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) +#define MPI2_SGE_FLAGS_DIRECTION (0x04) +#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) +#define MPI2_SGE_FLAGS_END_OF_LIST (0x01) + +#define MPI2_SGE_FLAGS_SHIFT (24) + +#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) +#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) + +/*Element Type */ + +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) +#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) +#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) + +/*Address location */ + +#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) + +/*Direction */ + +#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) +#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) + +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) + +/*Address Size */ + +#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +/*Context Size */ + +#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) +#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) +#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) +#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) + +#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) +#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) + +/**************************************************************************** +* MPI SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \ + MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) + +#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_SGE_SET_FLAGS(f)) +#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \ + MPI2_SGE_CHAIN_OFFSET_SHIFT) + +/***************************************************************************** +* +* Fusion-MPT IEEE Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* IEEE Simple Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t; + +typedef struct _MPI2_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, + *PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, + *pMpi2IeeeSgeSimpleUnion_t; + +/**************************************************************************** +* IEEE Chain Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; + +/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; + +typedef union _MPI2_IEEE_SGE_CHAIN_UNION { + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, + *PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, + *pMpi2IeeeSgeChainUnion_t; + +/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */ +typedef struct _MPI25_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, + *PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, + *pMpi25IeeeSgeChain64_t; + +/**************************************************************************** +* All IEEE SGE types union +****************************************************************************/ + +/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION { + union { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t; + +/**************************************************************************** +* IEEE SGE union for IO SGL's +****************************************************************************/ + +typedef union _MPI25_SGE_IO_UNION { + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t; + +/**************************************************************************** +* IEEE SGE field definitions and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) +#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define 
MPI2_IEEE32_SGE_FLAGS_SHIFT (24) + +#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) + +/*Element Type */ + +#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) + +/*Next Segment Format */ + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + +/*Data Location Address Space */ + +#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ + (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) +#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02) + +/**************************************************************************** +* IEEE SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \ + >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) + +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ + MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \ + MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \ + MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_SET_FLAGS(f)) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_LENGTH(l)) + +/***************************************************************************** +* +* Fusion-MPT MPI/IEEE Scatter Gather Unions +* +*****************************************************************************/ + +typedef union _MPI2_SIMPLE_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t; + +typedef union _MPI2_SGE_IO_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t; + +/**************************************************************************** +* +* Values for SGLFlags field, used in many request messages with an SGL +* +****************************************************************************/ + +/*values for MPI SGL Data Location Address Space subfield */ +#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) +/*values for SGL Type subfield */ +#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) +#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h new file mode 100644 index 000000000..4d0be5ab9 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -0,0 +1,4093 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 + * + * mpi2_cnfg.h Version: 02.00.47 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). 
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. + * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. 
+ * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. + * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for + * MPI2_CONFIG_PAGE_MAN_7. + * Added EnclosureLevel and ConnectorName fields to + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added EnclosureLevel field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and + * more defines for the BiosOptions field. + * 11-18-14 02.00.30 Updated copyright information. + * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. + * Added AdapterOrderAux fields to BIOS Page 3. 
+ * 03-16-15 02.00.31 Updated for MPI v2.6.
+ * Added Flags field to IO Unit Page 7.
+ * Added new SAS Phy Event codes.
+ * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15 02.00.34 Bumped Header Version.
+ * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages.
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines.
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008.
+ * Added x16 PCIe width to IO Unit Page 7.
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
+ * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
+ * MPI26_MFGPAGE_DEVID_SAS3916 defines.
+ * Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
+ * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
+ * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
+ * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
+ * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
+ * Added NOIOB field to PCIe Device Page 2.
+ * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
+ * the Capabilities field of PCIe Device Page 2.
+ * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816.
+ * Added WriteCache defines to IO Unit Page 1.
+ * Added MaxEnclosureLevel to BIOS Page 1.
+ * Added OEMRD to SAS Enclosure Page 1.
+ * Added DMDReportPCIe to PCIe IO Unit Page 1.
+ * Added Flags field and flags for Retimers to
+ * PCIe Switch Page 1.
+ * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1.
+ * 08-28-18 02.00.46 Added NVMe Write Cache flag to IOUnitPage1.
+ * Added DMDReport Delay Time defines to PCIeIOUnitPage1.
+ * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID + * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT + */ + +#ifndef MPI2_CNFG_H +#define MPI2_CNFG_H + +/***************************************************************************** +* Configuration Page Header and defines +*****************************************************************************/ + +/*Config Page Header */ +typedef struct _MPI2_CONFIG_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 PageLength; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ +} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER, + Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t; + +typedef union _MPI2_CONFIG_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + U8 Bytes[4]; + U16 Word16[2]; + U32 Word32; +} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION, + Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion; + +/*Extended Config Page Header */ +typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 Reserved2; /*0x07 */ +} MPI2_CONFIG_EXTENDED_PAGE_HEADER, + *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, + Mpi2ConfigExtendedPageHeader_t, + *pMpi2ConfigExtendedPageHeader_t; + +typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; + U8 Bytes[8]; + U16 Word16[4]; + U32 Word32[2]; +} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + Mpi2ConfigPageExtendedHeaderUnion, + *pMpi2ConfigPageExtendedHeaderUnion; + + +/*PageType field values */ +#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) + +#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI2_CONFIG_PAGETYPE_IOC (0x01) +#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) +#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) + +#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) + + +/*ExtPageType field values */ +#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) +#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D) +#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E) + + +/***************************************************************************** +* PageAddress defines +*****************************************************************************/ + +/*RAID Volume PageAddress format */ +#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK 
(0x0000FFFF) + + +/*RAID Physical Disk PageAddress format */ +#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) +#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) +#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) + +#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) + + +/*SAS Expander PageAddress format */ +#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) + + +/*SAS Device PageAddress format */ +#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*SAS PHY PageAddress format */ +#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) + +#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) +#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) + + +/*SAS Port PageAddress format */ +#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) + +#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) + + +/*SAS Enclosure PageAddress format */ +#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/*Enclosure PageAddress format */ +#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + +/*RAID Configuration PageAddress format */ +#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) +#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) +#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) + +#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) + + +/*Driver Persistent Mapping PageAddress format */ +#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) +#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000) + +#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) +#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) +#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) + + +/*Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + +/*PCIe Switch PageAddress format */ +#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000) +#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000) +#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16) + + +/*PCIe Device PageAddress format */ +#define 
MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + +/*PCIe Link PageAddress format */ +#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000) +#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000) +#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000) + +#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF) + + + +/**************************************************************************** +* Configuration messages +****************************************************************************/ + +/*Configuration Request Message */ +typedef struct _MPI2_CONFIG_REQUEST { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 ProxyVF_ID; /*0x0D */ + U16 Reserved4; /*0x0E */ + U32 Reserved3; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ + U32 PageAddress; /*0x18 */ + MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */ +} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST, + Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t; + +/*values for the Action field */ +#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) +#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) +#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) +#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/*Config Reply Message */ +typedef struct _MPI2_CONFIG_REPLY { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 Reserved2; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ +} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY, + Mpi2ConfigReply_t, *pMpi2ConfigReply_t; + + + +/***************************************************************************** +* +* C o n f i g u r a t i o n P a g e s +* +*****************************************************************************/ + +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ + +#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) +#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C) + +/*MPI v2.0 SAS products */ +#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) +#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) +#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) +#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) +#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) +#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) +#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + +#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) + +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define 
MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) +#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP (0x02B0) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1 (0x02B1) + +/*MPI v2.5 SAS products */ +#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) +#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) +#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) +#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) +#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) +#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) + +/* MPI v2.6 SAS Products */ +#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9) +#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4) +#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5) +#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6) +#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7) +#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8) +#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0) +#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1) +#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2) +#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3) + +#define MPI26_MFGPAGE_DEVID_SAS3516 (0x00AA) +#define MPI26_MFGPAGE_DEVID_SAS3516_1 (0x00AB) +#define MPI26_MFGPAGE_DEVID_SAS3416 (0x00AC) +#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) +#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) +#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) +#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0) +#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) +#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2) +#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3) + +#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003) +#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4) +#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5) +#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6) +#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7) + + +/*Manufacturing Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 ChipName[16]; /*0x04 */ + U8 ChipRevision[8]; /*0x14 */ + U8 BoardName[16]; /*0x1C */ + U8 BoardAssembly[16]; /*0x2C */ + U8 BoardTracerNumber[16]; /*0x3C */ +} MPI2_CONFIG_PAGE_MAN_0, + *PTR_MPI2_CONFIG_PAGE_MAN_0, + Mpi2ManufacturingPage0_t, + *pMpi2ManufacturingPage0_t; + +#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) + + +/*Manufacturing Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 VPD[256]; /*0x04 */ +} MPI2_CONFIG_PAGE_MAN_1, + *PTR_MPI2_CONFIG_PAGE_MAN_1, + Mpi2ManufacturingPage1_t, + *pMpi2ManufacturingPage1_t; + +#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) + + +typedef struct _MPI2_CHIP_REVISION_ID { + U16 DeviceID; /*0x00 */ + U8 PCIRevisionID; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID, + Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t; + + +/*Manufacturing Page 2 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. 
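+ *
+ *For example, a host could size the variable-length HwSettings array from
+ *the returned header (an illustrative sketch only, not part of this header;
+ *buffer and process() are placeholders, and it assumes PageLength counts
+ *32-bit words, two of which are taken by the fields ahead of HwSettings):
+ *
+ *    pMpi2ManufacturingPage2_t page2 = buffer;
+ *    U32 i, num_hw_words = page2->Header.PageLength - 2;
+ *
+ *    for (i = 0; i < num_hw_words; i++)
+ *        process(page2->HwSettings[i]);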
+ */ +#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_2, + *PTR_MPI2_CONFIG_PAGE_MAN_2, + Mpi2ManufacturingPage2_t, + *pMpi2ManufacturingPage2_t; + +#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) + + +/*Manufacturing Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_3_INFO_WORDS +#define MPI2_MAN_PAGE_3_INFO_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_3, + *PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, + *pMpi2ManufacturingPage3_t; + +#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) + + +/*Manufacturing Page 4 */ + +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS { + U8 PowerSaveFlags; /*0x00 */ + U8 InternalOperationsSleepTime; /*0x01 */ + U8 InternalOperationsRunTime; /*0x02 */ + U8 HostIdleTime; /*0x03 */ +} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, + *pMpi2ManPage4PwrSaveSettings_t; + +/*defines for the PowerSaveFlags field */ +#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) +#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) +#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) +#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) + +typedef struct _MPI2_CONFIG_PAGE_MAN_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Flags; /*0x08 */ + U8 InquirySize; /*0x0C */ + U8 Reserved2; /*0x0D */ + U16 Reserved3; /*0x0E */ + U8 InquiryData[56]; /*0x10 */ + U32 RAID0VolumeSettings; /*0x48 */ + U32 RAID1EVolumeSettings; /*0x4C */ + U32 RAID1VolumeSettings; /*0x50 */ + U32 RAID10VolumeSettings; /*0x54 */ + U32 Reserved4; /*0x58 */ + U32 Reserved5; /*0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */ + U8 MaxOCEDisks; /*0x64 */ + U8 ResyncRate; /*0x65 */ + U16 DataScrubDuration; /*0x66 */ + U8 MaxHotSpares; /*0x68 */ + U8 MaxPhysDisksPerVol; /*0x69 */ + U8 MaxPhysDisks; /*0x6A */ + U8 MaxVolumes; /*0x6B */ +} MPI2_CONFIG_PAGE_MAN_4, + *PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, + *pMpi2ManufacturingPage4_t; + +#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) + +/*Manufacturing Page 4 Flags field */ +#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) +#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) + +#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) +#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) +#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) + +#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) +#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) +#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) +#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) +#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) + +#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) +#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) +#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) +#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) + +#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) +#define MPI2_MANPAGE4_RAID10_DISABLE 
(0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/*Manufacturing Page 5 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY {
+ U64 WWID; /*0x00 */
+ U64 DeviceName; /*0x08 */
+} MPI2_MANUFACTURING5_ENTRY,
+ *PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t,
+ *pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_MANUFACTURING5_ENTRY
+ Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ *PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t,
+ *pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
+
+
+/*Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ *PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t,
+ *pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
+ U32 Pinout; /*0x00 */
+ U8 Connector[16]; /*0x04 */
+ U8 Location; /*0x14 */
+ U8 ReceptacleID; /*0x15 */
+ U16 Slot; /*0x16 */
+ U16 Slotx2; /*0x18 */
+ U16 Slotx4; /*0x1A */
+} MPI2_MANPAGE7_CONNECTOR_INFO,
+ *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t,
+ *pMpi2ManPage7ConnectorInfo_t;
+
+/*defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13)
+
+/*defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*defines for the Slot field */
+#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Flags; /*0x0C */
+ U8 EnclosureName[16]; /*0x10 */
+ U8 NumPhys; /*0x20 */
+ U8 Reserved3; /*0x21 */
+ U16 Reserved4; /*0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO
+ ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ *PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t,
+ *pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008)
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020)
+#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010)
+
+/*
+ *Generic structure to use for product-specific manufacturing pages
+ *(currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t,
+ *pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/*IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64 UniqueValue; /*0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /*0x0C */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/*IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/* IO Unit Page 1 Flags defines */
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000)
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
+
+
+/*IO Unit Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define alone
+ *and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 GPIOCount; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16
+ GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/*defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/*IO Unit Page 5 */
+
+/*
+ *Upper layer code (drivers, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumDmaEngines at runtime.
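+ *
+ *An illustrative sketch of that runtime check (not part of this header;
+ *buffer and use_caps() are placeholders):
+ *
+ *    pMpi2IOUnitPage5_t page5 = buffer;
+ *    U8 i;
+ *
+ *    for (i = 0; i < page5->NumDmaEngines; i++)
+ *        use_caps(page5->DmaEngineCapabilities[i]);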
+ */ +#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES +#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 + RaidAcceleratorBufferBaseAddress; /*0x04 */ + U64 + RaidAcceleratorBufferSize; /*0x0C */ + U64 + RaidAcceleratorControlBaseAddress; /*0x14 */ + U8 RAControlSize; /*0x1C */ + U8 NumDmaEngines; /*0x1D */ + U8 RAMinControlSize; /*0x1E */ + U8 RAMaxControlSize; /*0x1F */ + U32 Reserved1; /*0x20 */ + U32 Reserved2; /*0x24 */ + U32 Reserved3; /*0x28 */ + U32 + DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t; + +#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) + +/*defines for IO Unit Page 5 DmaEngineCapabilities field */ +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) +#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) + +#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) +#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) +#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) +#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) + + +/*IO Unit Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 Flags; /*0x04 */ + U8 RAHostControlSize; /*0x06 */ + U8 Reserved0; /*0x07 */ + U64 + RaidAcceleratorHostControlBaseAddress; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ + U32 Reserved3; /*0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t; + +#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) + +/*defines for IO Unit Page 6 Flags field */ +#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) + + +/*IO Unit Page 7 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 CurrentPowerMode; /*0x04 */ + U8 PreviousPowerMode; /*0x05 */ + U8 PCIeWidth; /*0x06 */ + U8 PCIeSpeed; /*0x07 */ + U32 ProcessorState; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U16 IOCTemperature; /*0x10 */ + U8 + IOCTemperatureUnits; /*0x12 */ + U8 IOCSpeed; /*0x13 */ + U16 BoardTemperature; /*0x14 */ + U8 + BoardTemperatureUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 BoardPowerRequirement; /*0x18 */ + U32 PCISlotPowerAllocation; /*0x1C */ +/* reserved prior to MPI v2.6 */ + U8 Flags; /* 0x20 */ + U8 Reserved6; /* 0x21 */ + U16 Reserved7; /* 0x22 */ + U32 Reserved8; /* 0x24 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) + +/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) +#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) +#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) +#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) + +#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) +#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) +#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) +#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) +#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) + + +/*defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 
(0x08) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10) + +/*defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03) + +/*defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/*defines for IO Unit Page 7 PowerManagementCapabilities field */ +#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) +#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) +#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) +#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) + +/*obsolete names for the PowerManagementCapabilities bits (above) */ +#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */ + + +/*defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/*defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + +/*defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + +/* defines for IO Unit Page 7 Flags field */ +#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) + +/*IO Unit Page 8 */ + +#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) + +typedef struct _MPI2_IOUNIT8_SENSOR { + U16 Flags; /*0x00 */ + U16 Reserved1; /*0x02 */ + U16 + Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ +} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, 
*pMpi2IOUnit8Sensor_t; + +/*defines for IO Unit Page 8 Sensor Flags field */ +#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 PollingInterval; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT8_SENSOR + Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t; + +#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) + + +/*IO Unit Page 9 */ + +typedef struct _MPI2_IOUNIT9_SENSOR { + U16 CurrentTemperature; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 Flags; /*0x04 */ + U8 Reserved2; /*0x05 */ + U16 Reserved3; /*0x06 */ + U32 Reserved4; /*0x08 */ + U32 Reserved5; /*0x0C */ +} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t; + +/*defines for IO Unit Page 9 Sensor Flags field */ +#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 Reserved4; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT9_SENSOR + Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t; + +#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) + + +/*IO Unit Page 10 */ + +typedef struct _MPI2_IOUNIT10_FUNCTION { + U8 CreditPercent; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_IOUNIT10_FUNCTION, + *PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, + *pMpi2IOUnit10Function_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumFunctions at runtime. 
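+ *
+ *An illustrative sketch of that runtime check (not part of this header;
+ *buffer and assign_credit() are placeholders):
+ *
+ *    pMpi2IOUnitPage10_t page10 = buffer;
+ *    U8 i;
+ *
+ *    for (i = 0; i < page10->NumFunctions; i++)
+ *        assign_credit(page10->Function[i].CreditPercent);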
+ */ +#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES +#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumFunctions; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_IOUNIT10_FUNCTION + Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t; + +#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) + + +/* IO Unit Page 11 (for MPI v2.6 and later) */ + +typedef struct _MPI26_IOUNIT11_SPINUP_GROUP { + U8 MaxTargetSpinup; /* 0x00 */ + U8 SpinupDelay; /* 0x01 */ + U8 SpinupFlags; /* 0x02 */ + U8 Reserved1; /* 0x03 */ +} MPI26_IOUNIT11_SPINUP_GROUP, + *PTR_MPI26_IOUNIT11_SPINUP_GROUP, + Mpi26IOUnit11SpinupGroup_t, + *pMpi26IOUnit11SpinupGroup_t; + +/* defines for IO Unit Page 11 SpinupFlags */ +#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01) + + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * four and check the value returned for NumPhys at runtime. + */ +#ifndef MPI26_IOUNITPAGE11_PHY_MAX +#define MPI26_IOUNITPAGE11_PHY_MAX (4) +#endif + +typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */ + U32 Reserved2; /*0x18 */ + U32 Reserved3; /*0x1C */ + U32 Reserved4; /*0x20 */ + U8 BootDeviceWaitTime; /*0x24 */ + U8 Reserved5; /*0x25 */ + U16 Reserved6; /*0x26 */ + U8 NumPhys; /*0x28 */ + U8 PEInitialSpinupDelay; /*0x29 */ + U8 PEReplyDelay; /*0x2A */ + U8 Flags; /*0x2B */ + U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */ +} MPI26_CONFIG_PAGE_IO_UNIT_11, + *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11, + Mpi26IOUnitPage11_t, + *pMpi26IOUnitPage11_t; + +#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00) + +/* defines for Flags field */ +#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01) + +/* defines for PHY field */ +#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03) + + + + + + +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ + +/*IOC Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U16 VendorID; /*0x0C */ + U16 DeviceID; /*0x0E */ + U8 RevisionID; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + U32 ClassCode; /*0x14 */ + U16 SubsystemVendorID; /*0x18 */ + U16 SubsystemID; /*0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, + *PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, *pMpi2IOCPage0_t; + +#define MPI2_IOCPAGE0_PAGEVERSION (0x02) + + +/*IOC Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ + U32 CoalescingTimeout; /*0x08 */ + U8 CoalescingDepth; /*0x0C */ + U8 PCISlotNum; /*0x0D */ + U8 PCIBusNum; /*0x0E */ + U8 PCIDomainSegment; /*0x0F */ + U32 Reserved1; /*0x10 */ + U32 ProductSpecific; /* 0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, + *PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, *pMpi2IOCPage1_t; + +#define MPI2_IOCPAGE1_PAGEVERSION (0x05) + +/*defines for IOC Page 1 Flags field */ +#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) + +#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) +#define 
MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) + +/*IOC Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 + CapabilitiesFlags; /*0x04 */ + U8 MaxDrivesRAID0; /*0x08 */ + U8 MaxDrivesRAID1; /*0x09 */ + U8 + MaxDrivesRAID1E; /*0x0A */ + U8 + MaxDrivesRAID10; /*0x0B */ + U8 MinDrivesRAID0; /*0x0C */ + U8 MinDrivesRAID1; /*0x0D */ + U8 + MinDrivesRAID1E; /*0x0E */ + U8 + MinDrivesRAID10; /*0x0F */ + U32 Reserved1; /*0x10 */ + U8 + MaxGlobalHotSpares; /*0x14 */ + U8 MaxPhysDisks; /*0x15 */ + U8 MaxVolumes; /*0x16 */ + U8 MaxConfigs; /*0x17 */ + U8 MaxOCEDisks; /*0x18 */ + U8 Reserved2; /*0x19 */ + U16 Reserved3; /*0x1A */ + U32 + SupportedStripeSizeMapRAID0; /*0x1C */ + U32 + SupportedStripeSizeMapRAID1E; /*0x20 */ + U32 + SupportedStripeSizeMapRAID10; /*0x24 */ + U32 Reserved4; /*0x28 */ + U32 Reserved5; /*0x2C */ + U16 + DefaultMetadataSize; /*0x30 */ + U16 Reserved6; /*0x32 */ + U16 + MaxBadBlockTableEntries; /*0x34 */ + U16 Reserved7; /*0x36 */ + U32 + IRNvsramVersion; /*0x38 */ +} MPI2_CONFIG_PAGE_IOC_6, + *PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, *pMpi2IOCPage6_t; + +#define MPI2_IOCPAGE6_PAGEVERSION (0x05) + +/*defines for IOC Page 6 CapabilitiesFlags */ +#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) +#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) + + +/*IOC Page 7 */ + +#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) + +typedef struct _MPI2_CONFIG_PAGE_IOC_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 + EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */ + U16 SASBroadcastPrimitiveMasks; /*0x18 */ + U16 SASNotifyPrimitiveMasks; /*0x1A */ + U32 Reserved3; /*0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, + *PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, *pMpi2IOCPage7_t; + +#define MPI2_IOCPAGE7_PAGEVERSION (0x02) + + +/*IOC Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumDevsPerEnclosure; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 MaxPersistentEntries; /*0x08 */ + U16 MaxNumPhysicalMappedIDs; /*0x0A */ + U16 Flags; /*0x0C */ + U16 Reserved3; /*0x0E */ + U16 IRVolumeMappingFlags; /*0x10 */ + U16 Reserved4; /*0x12 */ + U32 Reserved5; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, + *PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, *pMpi2IOCPage8_t; + +#define MPI2_IOCPAGE8_PAGEVERSION (0x00) + +/*defines for IOC Page 8 Flags field */ +#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) +#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) + +#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) +#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) + +#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) +#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) + +/*defines for IOC Page 8 IRVolumeMappingFlags */ +#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) + + +/**************************************************************************** +* BIOS Config Pages 
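+*
+* An illustrative sketch (not part of this header): decoding the UEFIVersion
+* field of BIOS Page 1 with the MPI2_BIOSPAGE1_UEFI_VER_* masks and shifts
+* defined below; bios_page1 is a placeholder:
+*
+*     U16 ver = bios_page1.UEFIVersion;
+*     U8 uefi_major = (ver & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK) >>
+*         MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT;
+*     U8 uefi_minor = (ver & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK) >>
+*         MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT;
+*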
+****************************************************************************/ + +/*BIOS Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 BiosOptions; /*0x04 */ + U32 IOCSettings; /*0x08 */ + U8 SSUTimeout; /*0x0C */ + U8 MaxEnclosureLevel; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 DeviceSettings; /*0x10 */ + U16 NumberOfDevices; /*0x14 */ + U16 UEFIVersion; /*0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /*0x18 */ + U16 IOTimeoutSequential; /*0x1A */ + U16 IOTimeoutOther; /*0x1C */ + U16 IOTimeoutBlockDevicesRM; /*0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, + *PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, *pMpi2BiosPage1_t; + +#define MPI2_BIOSPAGE1_PAGEVERSION (0x07) + +/*values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000) +#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) + +#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) + +#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) +#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) + +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) + +/*values for BIOS Page 1 IOCSettings field */ +#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) +#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) +#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) +#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) +#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) +#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) +#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) + +#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) + +/*values for BIOS Page 1 DeviceSettings field */ +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) + +/*defines for BIOS Page 1 UEFIVersion field */ +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) +#define 
MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) + + + +/*BIOS Page 2 */ + +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; /*0x00 */ + U32 Reserved2; /*0x04 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_BOOT_DEVICE_ADAPTER_ORDER, + *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, + *pMpi2BootDeviceAdapterOrder_t; + +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, + *PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, + *pMpi2BootDeviceSasWwid_t; + +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 SlotNumber; /*0x10 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ +} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + Mpi2BootDeviceEnclosureSlot_t, + *pMpi2BootDeviceEnclosureSlot_t; + +typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_DEVICE_NAME, + *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME, + Mpi2BootDeviceDeviceName_t, + *pMpi2BootDeviceDeviceName_t; + +typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE { + MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + MPI2_BOOT_DEVICE_SAS_WWID SasWwid; + MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName; +} MPI2_BIOSPAGE2_BOOT_DEVICE, + *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE, + Mpi2BiosPage2BootDevice_t, + *pMpi2BiosPage2BootDevice_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ + U32 Reserved5; /*0x14 */ + U32 Reserved6; /*0x18 */ + U8 ReqBootDeviceForm; /*0x1C */ + U8 Reserved7; /*0x1D */ + U16 Reserved8; /*0x1E */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */ + U8 ReqAltBootDeviceForm; /*0x38 */ + U8 Reserved9; /*0x39 */ + U16 Reserved10; /*0x3A */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */ + U8 CurrentBootDeviceForm; /*0x58 */ + U8 Reserved11; /*0x59 */ + U16 Reserved12; /*0x5A */ + MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */ +} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2, + Mpi2BiosPage2_t, *pMpi2BiosPage2_t; + +#define MPI2_BIOSPAGE2_PAGEVERSION (0x04) + +/*values for BIOS Page 2 BootDeviceForm fields */ +#define MPI2_BIOSPAGE2_FORM_MASK (0x0F) +#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + + +/*BIOS Page 3 */ + +#define MPI2_BIOSPAGE3_NUM_ADAPTER (4) + +typedef struct _MPI2_ADAPTER_INFO { + U8 PciBusNumber; /*0x00 */ + U8 PciDeviceAndFunctionNumber; /*0x01 */ + U16 AdapterFlags; /*0x02 */ +} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO, + Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t; + +#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) +#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) + +typedef struct _MPI2_ADAPTER_ORDER_AUX { + U64 WWID; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX, + Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t; + + +typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 
+
+
+/*BIOS Page 3 */
+
+#define MPI2_BIOSPAGE3_NUM_ADAPTER (4)
+
+typedef struct _MPI2_ADAPTER_INFO {
+    U8 PciBusNumber; /*0x00 */
+    U8 PciDeviceAndFunctionNumber; /*0x01 */
+    U16 AdapterFlags; /*0x02 */
+} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO,
+    Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_ADAPTER_ORDER_AUX {
+    U64 WWID; /* 0x00 */
+    U32 Reserved1; /* 0x08 */
+    U32 Reserved2; /* 0x0C */
+} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX,
+    Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t;
+
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
+    MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+    U32 GlobalFlags; /*0x04 */
+    U32 BiosVersion; /*0x08 */
+    MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER];
+    U32 Reserved1; /*0x1C */
+    MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER];
+} MPI2_CONFIG_PAGE_BIOS_3,
+    *PTR_MPI2_CONFIG_PAGE_BIOS_3,
+    Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x01)
+
+/*values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*BIOS Page 4 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY {
+    U64 ReassignmentWWID; /*0x00 */
+    U64 ReassignmentDeviceName; /*0x08 */
+} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY,
+    Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
+    MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+    U8 NumPhys; /*0x04 */
+    U8 Reserved1; /*0x05 */
+    U16 Reserved2; /*0x06 */
+    MPI2_BIOS4_ENTRY
+        Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
+    Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/*RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK {
+    U8 RAIDSetNum; /*0x00 */
+    U8 PhysDiskMap; /*0x01 */
+    U8 PhysDiskNum; /*0x02 */
+    U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK,
+    Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS {
+    U16 Settings; /*0x00 */
+    U8 HotSparePool; /*0x02 */
+    U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS,
+    Mpi2RaidVol0Settings_t,
+    *pMpi2RaidVol0Settings_t;
+
+/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDisks at runtime.
+ */ +#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 VolumeState; /*0x06 */ + U8 VolumeType; /*0x07 */ + U32 VolumeStatusFlags; /*0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */ + U64 MaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U16 BlockSize; /*0x1C */ + U16 Reserved1; /*0x1E */ + U8 SupportedPhysDisks;/*0x20 */ + U8 ResyncRate; /*0x21 */ + U16 DataScrubDuration; /*0x22 */ + U8 NumPhysDisks; /*0x24 */ + U8 Reserved2; /*0x25 */ + U8 Reserved3; /*0x26 */ + U8 InactiveStatus; /*0x27 */ + MPI2_RAIDVOL0_PHYS_DISK + PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t; + +#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) + +/*values for RAID VolumeState */ +#define MPI2_RAID_VOL_STATE_MISSING (0x00) +#define MPI2_RAID_VOL_STATE_FAILED (0x01) +#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) +#define MPI2_RAID_VOL_STATE_ONLINE (0x03) +#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) +#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) + +/*values for RAID VolumeType */ +#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) +#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) +#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) +#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) +#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) + +/*values for RAID Volume Page 0 VolumeStatusFlags field */ +#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) +#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) +#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) +#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) +#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) +#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) +#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) +#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) + +/*values for RAID Volume Page 0 SupportedPhysDisks field */ +#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) +#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) +#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) +#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) + +/*values for RAID Volume Page 0 InactiveStatus field */ +#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) +#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) +#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) +#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) +#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) + + +/*RAID Volume Page 1 */ + +typedef struct 
_MPI2_CONFIG_PAGE_RAID_VOL_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U16 Reserved0; /*0x06 */ + U8 GUID[24]; /*0x08 */ + U8 Name[16]; /*0x20 */ + U64 WWID; /*0x30 */ + U32 Reserved1; /*0x38 */ + U32 Reserved2; /*0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t; + +#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +/*RAID Physical Disk Page 0 */ + +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved2; /*0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, + *PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, + *pMpi2RaidPhysDisk0Settings_t; + +/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ + +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; /*0x00 */ + U8 ProductID[16]; /*0x08 */ + U8 ProductRevLevel[4]; /*0x18 */ + U8 SerialNum[32]; /*0x1C */ +} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, + *pMpi2RaidPhysDisk0InquiryData_t; + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 PhysDiskNum; /*0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */ + U32 Reserved2; /*0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */ + U32 Reserved3; /*0x4C */ + U8 PhysDiskState; /*0x50 */ + U8 OfflineReason; /*0x51 */ + U8 IncompatibleReason; /*0x52 */ + U8 PhysDiskAttributes; /*0x53 */ + U32 PhysDiskStatusFlags;/*0x54 */ + U64 DeviceMaxLBA; /*0x58 */ + U64 HostMaxLBA; /*0x60 */ + U64 CoercedMaxLBA; /*0x68 */ + U16 BlockSize; /*0x70 */ + U16 Reserved5; /*0x72 */ + U32 Reserved6; /*0x74 */ +} MPI2_CONFIG_PAGE_RD_PDISK_0, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, + *pMpi2RaidPhysDiskPage0_t; + +#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) + +/*PhysDiskState defines */ +#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define MPI2_RAID_PD_STATE_OFFLINE (0x02) +#define MPI2_RAID_PD_STATE_ONLINE (0x03) +#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) +#define MPI2_RAID_PD_STATE_DEGRADED (0x05) +#define MPI2_RAID_PD_STATE_REBUILDING (0x06) +#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) + +/*OfflineReason defines */ +#define MPI2_PHYSDISK0_ONLINE (0x00) +#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) +#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) +#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) +#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) +#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) +#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) + +/*IncompatibleReason defines */ +#define MPI2_PHYSDISK0_COMPATIBLE (0x00) +#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) +#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) +#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) +#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) +#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) + +/*PhysDiskAttributes defines */ +#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) +#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) +#define 
MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) + +#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) +#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) +#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) + +/*PhysDiskStatusFlags defines */ +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) +#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) +#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) +#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) +#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) +#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) + + +/*RAID Physical Disk Page 1 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDiskPaths at runtime. + */ +#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX +#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) +#endif + +typedef struct _MPI2_RAIDPHYSDISK1_PATH { + U16 DevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U64 WWID; /*0x04 */ + U64 OwnerWWID; /*0x0C */ + U8 OwnerIdentifier; /*0x14 */ + U8 Reserved2; /*0x15 */ + U16 Flags; /*0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, + *pMpi2RaidPhysDisk1Path_t; + +/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) +#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) +#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhysDiskPaths; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 Reserved1; /*0x06 */ + U32 Reserved2; /*0x08 */ + MPI2_RAIDPHYSDISK1_PATH + PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */ +} MPI2_CONFIG_PAGE_RD_PDISK_1, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, + *pMpi2RaidPhysDiskPage1_t; + +#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) + + +/**************************************************************************** +* values for fields used by several types of SAS Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) +#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) +#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical and Logical Link Rate */ +#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) +#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C) + + +/*values for AttachedPhyInfo fields */ +#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) +#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) + +#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) +#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define 
MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) + + +/*values for PhyInfo fields */ +#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + +#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) +#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) +#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) +#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) + +#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) +#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) +#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) + +#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) +#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) +#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) + +#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) +#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) +#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) +#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) + + +/*values for SAS ProgrammedLinkRate fields */ +#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C) + + +/*values for SAS HwLinkRate fields */ +#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) +#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0) +#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) 
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) +#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C) + + + +/**************************************************************************** +* SAS IO Unit Config Pages +****************************************************************************/ + +/*SAS IO Unit Page 0 */ + +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 DiscoveryStatus; /*0x0C */ + U32 Reserved; /*0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, + *pMpi2SasIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT0_PHY_MAX +#define MPI2_SAS_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1;/*0x08 */ + U8 NumPhys; /*0x0C */ + U8 Reserved2;/*0x0D */ + U16 Reserved3;/*0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA + PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_0, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t; + +#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) + +/*values for SAS IO Unit Page 0 PortFlags */ +#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/*values for SAS IO Unit Page 0 DiscoveryStatus */ +#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) +#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) + + +/*SAS IO Unit Page 1 */ + +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 
ControllerPhyDeviceInfo; /*0x04 */ + U16 MaxTargetPortConnectTime; /*0x08 */ + U16 Reserved1; /*0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, + *pMpi2SasIOUnit1PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT1_PHY_MAX +#define MPI2_SAS_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 + ControlFlags; /*0x08 */ + U16 + SASNarrowMaxQueueDepth; /*0x0A */ + U16 + AdditionalControlFlags; /*0x0C */ + U16 + SASWideMaxQueueDepth; /*0x0E */ + U8 + NumPhys; /*0x10 */ + U8 + SATAMaxQDepth; /*0x11 */ + U8 + ReportDeviceMissingDelay; /*0x12 */ + U8 + IODeviceMissingDelay; /*0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA + PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_1, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t; + +#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) + +/*values for SAS IO Unit Page 1 ControlFlags */ +#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) + +#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) +#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) + +#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) + +/*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100) +#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) +#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) +#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) +#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) +#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) +#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) +#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) +#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) + +/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ +#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) + +/*values for SAS IO Unit Page 1 PortFlags */ +#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 1 PhyFlags */ +#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40) +#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20) +#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +/*values for SAS IO Unit Page 1 MaxMinLinkRate */ +#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) +#define 
MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) +#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) +#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) +#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) +#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0) +#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) +#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) +#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) +#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) +#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) +#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C) + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ + + +/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */ + +typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP { + U8 MaxTargetSpinup; /*0x00 */ + U8 SpinupDelay; /*0x01 */ + U8 SpinupFlags; /*0x02 */ + U8 Reserved1; /*0x03 */ +} MPI2_SAS_IOUNIT4_SPINUP_GROUP, + *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, + Mpi2SasIOUnit4SpinupGroup_t, + *pMpi2SasIOUnit4SpinupGroup_t; +/*defines for SAS IO Unit Page 4 SpinupFlags */ +#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT4_PHY_MAX +#define MPI2_SAS_IOUNIT4_PHY_MAX (4) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */ + MPI2_SAS_IOUNIT4_SPINUP_GROUP + SpinupGroupParameters[4]; /*0x08 */ + U32 + Reserved1; /*0x18 */ + U32 + Reserved2; /*0x1C */ + U32 + Reserved3; /*0x20 */ + U8 + BootDeviceWaitTime; /*0x24 */ + U8 + SATADeviceWaitTime; /*0x25 */ + U16 + Reserved5; /*0x26 */ + U8 + NumPhys; /*0x28 */ + U8 + PEInitialSpinupDelay; /*0x29 */ + U8 + PEReplyDelay; /*0x2A */ + U8 + Flags; /*0x2B */ + U8 + PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */ +} MPI2_CONFIG_PAGE_SASIOUNIT_4, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, + Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t; + +#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) + +/*defines for Flags field */ +#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) + +/*defines for PHY field */ +#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) + + +/*SAS IO Unit Page 5 */ + +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { + U8 ControlFlags; /*0x00 */ + U8 PortWidthModGroup; /*0x01 */ + U16 InactivityTimerExponent; /*0x02 */ + U8 SATAPartialTimeout; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 SATASlumberTimeout; /*0x06 */ + U8 Reserved3; /*0x07 */ + U8 SASPartialTimeout; /*0x08 */ + U8 Reserved4; /*0x09 */ + U8 SASSlumberTimeout; /*0x0A */ + U8 Reserved5; /*0x0B */ +} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, + *pMpi2SasIOUnit5PhyPmSettings_t; + +/*defines for ControlFlags field */ +#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) + +/*defines for PortWidthModeGroup field */ +#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) + +/*defines for InactivityTimerExponent field */ +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) +#define 
MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) + +#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) +#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) +#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) +#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) +#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) +#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x08 */ + U8 Reserved1;/*0x09 */ + U16 Reserved2;/*0x0A */ + U32 Reserved3;/*0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS + SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) + + +/*SAS IO Unit Page 6 */ + +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { + U8 CurrentStatus; /*0x00 */ + U8 CurrentModulation; /*0x01 */ + U8 CurrentUtilization; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ +} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + *pMpi2SasIOUnit6PortWidthModGroupStatus_t; + +/*defines for CurrentStatus field */ +#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) +#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) +#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) +#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) +#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) +#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) + +/*defines for CurrentModulation field */ +#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) +#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) +#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) +#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. 
+ */ +#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX +#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U8 NumGroups; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_6, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t; + +#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 7 */ + +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS { + U8 Flags; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 Threshold75Pct; /*0x04 */ + U8 Threshold50Pct; /*0x05 */ + U8 Threshold25Pct; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + *pMpi2SasIOUnit7PortWidthModGroupSettings_t; + +/*defines for Flags field */ +#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. + */ +#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX +#define MPI2_SAS_IOUNIT7_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 SamplingInterval; /*0x08 */ + U8 WindowLength; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U8 NumGroups; /*0x14 */ + U8 Reserved4; /*0x15 */ + U16 Reserved5; /*0x16 */ + MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS + PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_7, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, + Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t; + +#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U8 + TxRxSleepStatus; /*0x10 */ + U8 + Reserved2; /*0x11 */ + U16 + Reserved3; /*0x12 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_8, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, + Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t; + +#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) + +/*defines for PowerManagementCapabilities field */ +#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100) +#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) + +/*defines for TxRxSleepStatus field */ +#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00) +#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01) +#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02) +#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03) + + + +/*SAS IO Unit Page 16 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 { 
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER
+        Header; /*0x00 */
+    U64
+        TimeStamp; /*0x08 */
+    U32
+        Reserved1; /*0x10 */
+    U32
+        Reserved2; /*0x14 */
+    U32
+        FastPathPendedRequests; /*0x18 */
+    U32
+        FastPathUnPendedRequests; /*0x1C */
+    U32
+        FastPathHostRequestStarts; /*0x20 */
+    U32
+        FastPathFirmwareRequestStarts; /*0x24 */
+    U32
+        FastPathHostCompletions; /*0x28 */
+    U32
+        FastPathFirmwareCompletions; /*0x2C */
+    U32
+        NonFastPathRequestStarts; /*0x30 */
+    U32
+        NonFastPathHostCompletions; /*0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+    *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+    Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/*SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER
+        Header; /*0x00 */
+    U8
+        PhysicalPort; /*0x08 */
+    U8
+        ReportGenLength; /*0x09 */
+    U16
+        EnclosureHandle; /*0x0A */
+    U64
+        SASAddress; /*0x0C */
+    U32
+        DiscoveryStatus; /*0x14 */
+    U16
+        DevHandle; /*0x18 */
+    U16
+        ParentDevHandle; /*0x1A */
+    U16
+        ExpanderChangeCount; /*0x1C */
+    U16
+        ExpanderRouteIndexes; /*0x1E */
+    U8
+        NumPhys; /*0x20 */
+    U8
+        SASLevel; /*0x21 */
+    U16
+        Flags; /*0x22 */
+    U16
+        STPBusInactivityTimeLimit; /*0x24 */
+    U16
+        STPMaxConnectTimeLimit; /*0x26 */
+    U16
+        STP_SMP_NexusLossTime; /*0x28 */
+    U16
+        MaxNumRoutedSasAddresses; /*0x2A */
+    U64
+        ActiveZoneManagerSASAddress;/*0x2C */
+    U16
+        ZoneLockInactivityLimit; /*0x34 */
+    U16
+        Reserved1; /*0x36 */
+    U8
+        TimeToReducedFunc; /*0x38 */
+    U8
+        InitialTimeToReducedFunc; /*0x39 */
+    U8
+        MaxReducedFuncTime; /*0x3A */
+    U8
+        Reserved2; /*0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0,
+    *PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+    Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/*values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
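DiscoveryStatus is a pure bit-mask, so a table-driven decode keeps host diagnostics short. The sketch below is illustrative only, not part of mpt3sas: report_expander_discovery_status() is a hypothetical helper, the table covers only a subset of the MPI2_SAS_EXPANDER0_DS_* bits above, ARRAY_SIZE() is the usual Linux helper macro, and the caller is assumed to have already converted the field with le32_to_cpu().

/* Sketch: log a line for each discovery error bit set on an expander. */
static void report_expander_discovery_status(U32 ds)
{
	static const struct {
		U32 bit;
		const char *desc;
	} tab[] = {
		{ MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED, "loop detected" },
		{ MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE, "unaddressable device" },
		{ MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT, "SMP timeout" },
		{ MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR, "SMP CRC error" },
		{ MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK, "subtractive link" },
		{ MPI2_SAS_EXPANDER0_DS_TABLE_LINK, "table link" },
		{ MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE, "unsupported device" },
		{ MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED, "max devices exceeded" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tab); i++)
		if (ds & tab[i].bit)
			pr_info("expander discovery status: %s\n", tab[i].desc);
}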
+
+/*values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/*SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER
+        Header; /*0x00 */
+    U8
+        PhysicalPort; /*0x08 */
+    U8
+        Reserved1; /*0x09 */
+    U16
+        Reserved2; /*0x0A */
+    U8
+        NumPhys; /*0x0C */
+    U8
+        Phy; /*0x0D */
+    U16
+        NumTableEntriesProgrammed; /*0x0E */
+    U8
+        ProgrammedLinkRate; /*0x10 */
+    U8
+        HwLinkRate; /*0x11 */
+    U16
+        AttachedDevHandle; /*0x12 */
+    U32
+        PhyInfo; /*0x14 */
+    U32
+        AttachedDeviceInfo; /*0x18 */
+    U16
+        ExpanderDevHandle; /*0x1C */
+    U8
+        ChangeCount; /*0x1E */
+    U8
+        NegotiatedLinkRate; /*0x1F */
+    U8
+        PhyIdentifier; /*0x20 */
+    U8
+        AttachedPhyIdentifier; /*0x21 */
+    U8
+        Reserved3; /*0x22 */
+    U8
+        DiscoveryInfo; /*0x23 */
+    U32
+        AttachedPhyInfo; /*0x24 */
+    U8
+        ZoneGroup; /*0x28 */
+    U8
+        SelfConfigStatus; /*0x29 */
+    U16
+        Reserved4; /*0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1,
+    *PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+    Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines
+ *used for the AttachedDeviceInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/*SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
+    MPI2_CONFIG_EXTENDED_PAGE_HEADER
+        Header; /*0x00 */
+    U16
+        Slot; /*0x08 */
+    U16
+        EnclosureHandle; /*0x0A */
+    U64
+        SASAddress; /*0x0C */
+    U16
+        ParentDevHandle; /*0x14 */
+    U8
+        PhyNum; /*0x16 */
+    U8
+        AccessStatus; /*0x17 */
+    U16
+        DevHandle; /*0x18 */
+    U8
+        AttachedPhyIdentifier; /*0x1A */
+    U8
+        ZoneGroup; /*0x1B */
+    U32
+        DeviceInfo; /*0x1C */
+    U16
+        Flags; /*0x20 */
+    U8
+        PhysicalPort; /*0x22 */
+    U8
+        MaxPortConnections; /*0x23 */
+    U64
+        DeviceName; /*0x24 */
+    U8
+        PortGroups; /*0x2C */
+    U8
+        DmaGroup; /*0x2D */
+    U8
+        ControlGroup; /*0x2E */
+    U8
+        EnclosureLevel; /*0x2F */
+    U8
+        ConnectorName[4]; /*0x30 */
+    U32
+        Reserved3; /*0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0,
+    *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+    Mpi2SasDevicePage0_t,
+    *pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
+
+/*values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT
(0x03) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +/*specific values for SATA Init failures */ +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) + +/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ + +/*values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) +#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004) +#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + + +/*SAS Device Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + SASAddress; /*0x0C */ + U32 + Reserved2; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + Reserved3; /*0x1A */ + U8 + InitialRegDeviceFIS[20];/*0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, + *pMpi2SasDevicePage1_t; + +#define MPI2_SASDEVICE1_PAGEVERSION (0x01) + + +/**************************************************************************** +* SAS PHY Config Pages +****************************************************************************/ + +/*SAS PHY Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + OwnerDevHandle; /*0x08 */ + U16 + Reserved1; /*0x0A */ + U16 + AttachedDevHandle; /*0x0C */ + U8 + AttachedPhyIdentifier; /*0x0E */ + U8 + Reserved2; /*0x0F */ + U32 + AttachedPhyInfo; /*0x10 */ + U8 + ProgrammedLinkRate; /*0x14 */ + U8 + HwLinkRate; /*0x15 */ + U8 + ChangeCount; /*0x16 */ + U8 + Flags; /*0x17 */ + U32 + PhyInfo; /*0x18 */ + U8 + NegotiatedLinkRate; /*0x1C */ + U8 + Reserved3; /*0x1D */ + U16 + Reserved4; /*0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t; + +#define MPI2_SASPHY0_PAGEVERSION (0x03) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use 
MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*values for SAS PHY Page 0 Flags field */ +#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*SAS PHY Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + InvalidDwordCount; /*0x0C */ + U32 + RunningDisparityErrorCount; /*0x10 */ + U32 + LossDwordSynchCount; /*0x14 */ + U32 + PhyResetProblemCount; /*0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t; + +#define MPI2_SASPHY1_PAGEVERSION (0x01) + + +/*SAS PHY Page 2 */ + +typedef struct _MPI2_SASPHY2_PHY_EVENT { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 PhyEventInfo; /*0x04 */ +} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY2_PHY_EVENT_MAX +#define MPI2_SASPHY2_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY2_PHY_EVENT + PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, + *pMpi2SasPhyPage2_t; + +#define MPI2_SASPHY2_PAGEVERSION (0x00) + + +/*SAS PHY Page 3 */ + +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, + *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, + *pMpi2SasPhy3PhyEventConfig_t; + +/*values for PhyEventCode field */ +#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) +#define 
MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) +#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) + +/*Following codes are product specific and in MPI v2.6 and later */ +#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3) +#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4) +#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5) +#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6) +#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7) +#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9) +#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA) +#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB) +#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC) + + +/*values for the CounterType field */ +#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. 
+ */ +#ifndef MPI2_SASPHY3_PHY_EVENT_MAX +#define MPI2_SASPHY3_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG + PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t; + +#define MPI2_SASPHY3_PAGEVERSION (0x00) + + +/*SAS PHY Page 4 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Reserved1; /*0x08 */ + U8 + Reserved2; /*0x0A */ + U8 + Flags; /*0x0B */ + U8 + InitialFrame[28]; /*0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t; + +#define MPI2_SASPHY4_PAGEVERSION (0x00) + +/*values for the Flags field */ +#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) + + + + +/**************************************************************************** +* SAS Port Config Pages +****************************************************************************/ + +/*SAS Port Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PortNumber; /*0x08 */ + U8 + PhysicalPort; /*0x09 */ + U8 + PortWidth; /*0x0A */ + U8 + PhysicalPortWidth; /*0x0B */ + U8 + ZoneGroup; /*0x0C */ + U8 + Reserved1; /*0x0D */ + U16 + Reserved2; /*0x0E */ + U64 + SASAddress; /*0x10 */ + U32 + DeviceInfo; /*0x18 */ + U32 + Reserved3; /*0x1C */ + U32 + Reserved4; /*0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t; + +#define MPI2_SASPORT0_PAGEVERSION (0x00) + +/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ + + +/**************************************************************************** +* SAS Enclosure Config Pages +****************************************************************************/ + +/*SAS Enclosure Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U64 EnclosureLogicalID; /*0x0C */ + U16 Flags; /*0x14 */ + U16 EnclosureHandle; /*0x16 */ + U16 NumSlots; /*0x18 */ + U16 StartSlot; /*0x1A */ + U8 ChassisSlot; /*0x1C */ + U8 EnclosureLevel; /*0x1D */ + U16 SEPDevHandle; /*0x1E */ + U8 OEMRD; /*0x20 */ + U8 Reserved1a; /*0x21 */ + U16 Reserved2; /*0x22 */ + U32 Reserved3; /*0x24 */ +} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t, + MPI26_CONFIG_PAGE_ENCLOSURE_0, + *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0, + Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t; + +#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) + +/*values for SAS Enclosure Page 0 Flags field */ +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define 
MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + +#define MPI26_ENCLOSURE0_PAGEVERSION (0x04) + +/*Values for Enclosure Page 0 Flags field */ +#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080) +#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040) +#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) +#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + +/**************************************************************************** +* Log Config Page +****************************************************************************/ + +/*Log Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLogEntries at runtime. + */ +#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES +#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) +#endif + +#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_LOG_0_ENTRY { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 + LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */ +} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, *pMpi2Log0Entry_t; + +/*values for Log Page 0 LogEntry LogEntryQualifier field */ +#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) +#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) +#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) +#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) +#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) + +typedef struct _MPI2_CONFIG_PAGE_LOG_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 NumLogEntries;/*0x10 */ + U16 Reserved3; /*0x12 */ + MPI2_LOG_0_ENTRY + LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, *pMpi2LogPage0_t; + +#define MPI2_LOG_0_PAGEVERSION (0x02) + + +/**************************************************************************** +* RAID Config Page +****************************************************************************/ + +/*RAID Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumElements at runtime. 
+ */ +#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS +#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) +#endif + +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 HotSparePool; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, + *pMpi2RaidConfig0ConfigElement_t; + +/*values for the ElementFlags field */ +#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + + +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumHotSpares; /*0x08 */ + U8 NumPhysDisks; /*0x09 */ + U8 NumVolumes; /*0x0A */ + U8 ConfigNum; /*0x0B */ + U32 Flags; /*0x0C */ + U8 ConfigGUID[24]; /*0x10 */ + U32 Reserved1; /*0x28 */ + U8 NumElements; /*0x2C */ + U8 Reserved2; /*0x2D */ + U16 Reserved3; /*0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT + ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */ +} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, + *pMpi2RaidConfigurationPage0_t; + +#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) + +/*values for RAID Configuration Page 0 Flags field */ +#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) + + +/**************************************************************************** +* Driver Persistent Mapping Config Pages +****************************************************************************/ + +/*Driver Persistent Mapping Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY { + U64 PhysicalIdentifier; /*0x00 */ + U16 MappingInformation; /*0x08 */ + U16 DeviceIndex; /*0x0A */ + U32 PhysicalBitsMapping; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */ +} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t; + +#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) + +/*values for Driver Persistent Mapping Page 0 MappingInformation field */ +#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) +#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) +#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) + + +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/*Ethernet Page 0 */ + +/*IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR { + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumInterfaces; /*0x08 */ + U8 Reserved0; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Status; /*0x0C */ + U8 MediaState; /*0x10 */ + U8 Reserved2; /*0x11 */ + U16 Reserved3; /*0x12 */ + U8 MacAddress[6]; /*0x14 */ + U8 
Reserved4; /*0x1A */ + U8 Reserved5; /*0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/*values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/*values for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/*Ethernet Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved0; /*0x08 */ + U32 + Flags; /*0x0C */ + U8 + MediaState; /*0x10 */ + U8 + Reserved1; /*0x11 */ + U16 + Reserved2; /*0x12 */ + U8 + MacAddress[6]; /*0x14 */ + U8 + Reserved3; /*0x1A */ + U8 + Reserved4; /*0x1B */ + MPI2_ETHERNET_IP_ADDR + StaticIpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR + StaticSubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR + StaticGatewayIpAddress; /*0x3C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS2IpAddress; /*0x5C */ + U32 + Reserved5; /*0x6C */ + U32 + Reserved6; /*0x70 */ + U32 + Reserved7; /*0x74 */ + U32 + Reserved8; /*0x78 */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/*values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/*values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) 
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + *Generic structure to use for product-specific extended manufacturing pages + *(currently Extended Manufacturing Page 40 through Extended Manufacturing + *Page 60). + */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + ProductSpecificInfo; /*0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, + *pMpi2ExtManufacturingPagePS_t; + +/*PageVersion should be provided by product-specific code */ + + + +/**************************************************************************** +* values for fields used by several types of PCIe Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical Link Rate */ +#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00) +#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02) +#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03) +#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04) +#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05) + + +/**************************************************************************** +* PCIe IO Unit Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe IO Unit Page 0 */ + +typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA { + U8 Link; /*0x00 */ + U8 LinkFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 EnumerationStatus; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI26_PCIE_IO_UNIT0_PHY_DATA, + *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA, + Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX +#define MPI26_PCIE_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 NumPhys; /*0x0C */ + U8 InitStatus; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI26_PCIE_IO_UNIT0_PHY_DATA + PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI26_CONFIG_PAGE_PIOUNIT_0, + *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0, + Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t; + +#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00) + +/*values for PCIe IO Unit Page 0 LinkFlags */ +#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08) + +/*values for PCIe IO Unit Page 0 PhyFlags */ +#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo + *values + */ + +/*values for PCIe IO Unit Page 0 EnumerationStatus */ +#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000) +#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000) + + +/*PCIe IO Unit Page 1 */ + +typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA { + U8 Link; /*0x00 */ + U8 LinkFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo; /*0x04 */ + U32 Reserved1; /*0x08 */ +} MPI26_PCIE_IO_UNIT1_PHY_DATA, + *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA, + Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t; + +/*values for LinkFlags */ +#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01) +#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX +#define MPI26_PCIE_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 ControlFlags; /*0x08 */ + U16 Reserved; /*0x0A */ + U16 AdditionalControlFlags; /*0x0C */ + U16 NVMeMaxQueueDepth; /*0x0E */ + U8 NumPhys; /*0x10 */ + U8 DMDReportPCIe; /*0x11 */ + U16 Reserved2; /*0x12 */ + MPI26_PCIE_IO_UNIT1_PHY_DATA + PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */ +} MPI26_CONFIG_PAGE_PIOUNIT_1, + *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1, + Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t; + +#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00) + +/*values for PCIe IO Unit Page 1 PhyFlags */ +#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) +#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01) + +/*values for PCIe IO Unit Page 1 MaxMinLinkRate */ +#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0) +#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4) +#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20) +#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30) +#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) +#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) + +/*values for PCIe IO Unit Page 1 DMDReportPCIe */ +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00) +#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80) +#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F) + +/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo + *values + */ + + +/**************************************************************************** +* PCIe Switch Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Switch Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 PhysicalPort; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 DevHandle; /*0x0C */ + U16 ParentDevHandle; /*0x0E */ + U8 NumPorts; /*0x10 */ + U8 PCIeLevel; /*0x11 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ +} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0, + Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t; + +#define MPI26_PCIESWITCH0_PAGEVERSION (0x00) + + +/*PCIe Switch Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 PhysicalPort; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U8 NumPorts; /*0x0C */ + U8 PortNum; /*0x0D */ + U16 AttachedDevHandle; /*0x0E */ + U16 SwitchDevHandle; /*0x10 */ + U8 NegotiatedPortWidth; /*0x12 */ + U8 NegotiatedLinkRate; /*0x13 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ +} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1, + Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t; + +#define MPI26_PCIESWITCH1_PAGEVERSION (0x00) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/* defines for the Flags field */ +#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002) +#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001) + +/**************************************************************************** +* PCIe Device Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Device Page 0 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A 
*/ + U64 WWID; /*0x0C */ + U16 ParentDevHandle; /*0x14 */ + U8 PortNum; /*0x16 */ + U8 AccessStatus; /*0x17 */ + U16 DevHandle; /*0x18 */ + U8 PhysicalPort; /*0x1A */ + U8 Reserved1; /*0x1B */ + U32 DeviceInfo; /*0x1C */ + U32 Flags; /*0x20 */ + U8 SupportedLinkRates; /*0x24 */ + U8 MaxPortWidth; /*0x25 */ + U8 NegotiatedPortWidth; /*0x26 */ + U8 NegotiatedLinkRate; /*0x27 */ + U8 EnclosureLevel; /*0x28 */ + U8 Reserved2; /*0x29 */ + U16 Reserved3; /*0x2A */ + U8 ConnectorName[4]; /*0x2C */ + U32 Reserved4; /*0x30 */ + U32 Reserved5; /*0x34 */ +} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0, + Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t; + +#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01) + +/*values for PCIe Device Page 0 AccessStatus field */ +#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00) +#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04) +#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02) +#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07) +#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08) +#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09) +#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A) +#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10) + +#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30) +#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33) +#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34) +#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35) +#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36) +#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37) +#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38) + +#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F) + +/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo + *field + */ + +/*values for PCIe Device Page 0 Flags field*/ +#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000) +#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000) +#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000) +#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000) +#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000) +#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400) +#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200) +#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100) +#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080) +#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040) +#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x00000020) +#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010) +#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002) +#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001) + +/* values for PCIe Device Page 0 SupportedLinkRates field */ +#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08) +#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04) +#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02) +#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01) + +/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*PCIe Device Page 2 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x08 */ + U8 ControllerResetTO; /* 0x0A */ + U8 Reserved1; /* 0x0B */ + U32 MaximumDataTransferSize; /*0x0C */ + U32 Capabilities; /*0x10 */ + U16 NOIOB; /* 0x14 */ + U16 ShutdownLatency; /* 0x16 */ + U16 
VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ +} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, + Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; + +#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01) + +/*defines for PCIe Device Page 2 Capabilities field */ +#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008) +#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) +#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) +#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) + +/* Defines for the NOIOB field */ +#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000) + +/**************************************************************************** +* PCIe Link Config Pages (MPI v2.6 and later) +****************************************************************************/ + +/*PCIe Link Page 1 */ + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 Link; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 CorrectableErrorCount; /*0x0C */ + U16 NonFatalErrorCount; /*0x10 */ + U16 Reserved3; /*0x12 */ + U16 FatalErrorCount; /*0x14 */ + U16 Reserved4; /*0x16 */ +} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1, + Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t; + +#define MPI26_PCIELINK1_PAGEVERSION (0x00) + +/*PCIe Link Page 2 */ + +typedef struct _MPI26_PCIELINK2_LINK_EVENT { + U8 LinkEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 LinkEventInfo; /*0x04 */ +} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT, + Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t; + +/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLinkEvents at runtime. 
+ */ +#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX +#define MPI26_PCIELINK2_LINK_EVENT_MAX (1) +#endif + +typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 Link; /*0x08 */ + U8 Reserved1; /*0x09 */ + U16 Reserved2; /*0x0A */ + U8 NumLinkEvents; /*0x0C */ + U8 Reserved3; /*0x0D */ + U16 Reserved4; /*0x0E */ + MPI26_PCIELINK2_LINK_EVENT + LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */ +} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2, + Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t; + +#define MPI26_PCIELINK2_PAGEVERSION (0x00) + +/*PCIe Link Page 3 */ + +typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG { + U8 LinkEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG, + Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t; + +/*values for LinkEventCode field */ +#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00) +#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01) +#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02) +#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03) +#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04) +#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05) +#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06) +#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07) +#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08) +#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E) +#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F) +#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10) +#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11) +#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12) + +/*values for the CounterType field */ +#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01) +#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00) +#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01) +#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02) +#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLinkEvents at runtime. 
+ */
+#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
+#define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK3_LINK_EVENT_CONFIG
+ LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
+ Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
+
+#define MPI26_PCIELINK3_PAGEVERSION (0x00)
+
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
new file mode 100644
index 000000000..33b9c3a6f
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016-2020 Broadcom Limited. All rights reserved.
+ *
+ * Name: mpi2_image.h
+ * Description: Contains definitions for firmware and other component images
+ * Creation Date: 04/02/2018
+ * Version: 02.06.04
+ *
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 08-01-18 02.06.00 Initial version for MPI 2.6.5.
+ * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
+ * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
+ * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP
+ * Shorten some defines to be compatible with DOS
+ * 06-24-19 02.06.05 Whitespace adjustments to help with identifier
+ * checking tool.
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP
+ * Added MPI2_FLASH_REGION_COREDUMP
+ */
+#ifndef MPI2_IMAGE_H
+#define MPI2_IMAGE_H
+
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 BootFlags; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+ U32 PackageNameWhat; /*0xAC */
+ U8 PackageName[32]; /*0xB0 */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+/*Last byte is defined by architecture */
+#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
+/*legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
+#define MPI26_FW_HEADER_SIGNATURE0 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
+#define MPI26_FW_HEADER_SIGNATURE0_3516 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
+#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+
+#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
+#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001)
+#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002)
+/*This image has an auto-discovery version of SPI */
+#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004)
+
+
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+
+/****************************************************************************
+ * Component Image Format and related defines *
+ ****************************************************************************/
+
+/*Maximum number of Hash Exclusion entries in a Component Image Header */
+#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4)
+
+/*Hash Exclusion Format */
+typedef struct _MPI26_HASH_EXCLUSION_FORMAT {
+ U32 Offset; /*0x00 */
+ U32 Size; /*0x04 */
+} MPI26_HASH_EXCLUSION_FORMAT,
+ *PTR_MPI26_HASH_EXCLUSION_FORMAT,
+ Mpi26HashSxclusionFormat_t,
+ *pMpi26HashExclusionFormat_t;
+
+/*Component Image Header */
+typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
+ U32 Signature0; /*0x00 */
+ U32 LoadAddress; /*0x04 */
+ U32 DataSize; /*0x08 */
+ U32 StartAddress; /*0x0C */
+ U32 Signature1; /*0x10 */
+ U32 FlashOffset; /*0x14 */
+ U32 FlashSize; /*0x18 */
+ U32 VersionStringOffset; /*0x1C */
+ U32 BuildDateStringOffset; /*0x20 */
+ U32 BuildTimeStringOffset; /*0x24 */
+ U32 EnvironmentVariableOffset; /*0x28
*/ + U32 ApplicationSpecific; /*0x2C */ + U32 Signature2; /*0x30 */ + U32 HeaderSize; /*0x34 */ + U32 Crc; /*0x38 */ + U8 NotFlashImage; /*0x3C */ + U8 Compressed; /*0x3D */ + U16 Reserved3E; /*0x3E */ + U32 SecondaryFlashOffset; /*0x40 */ + U32 Reserved44; /*0x44 */ + U32 Reserved48; /*0x48 */ + MPI2_VERSION_UNION RMCInterfaceVersion; /*0x4C */ + MPI2_VERSION_UNION Reserved50; /*0x50 */ + MPI2_VERSION_UNION FWVersion; /*0x54 */ + MPI2_VERSION_UNION NvdataVersion; /*0x58 */ + MPI26_HASH_EXCLUSION_FORMAT + HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */ + U32 NextImageHeaderOffset; /*0x7C */ + U32 Reserved80[32]; /*0x80 -- 0xFC */ +} MPI26_COMPONENT_IMAGE_HEADER, + *PTR_MPI26_COMPONENT_IMAGE_HEADER, + Mpi26ComponentImageHeader_t, + *pMpi26ComponentImageHeader_t; + + +/**** Definitions for Signature0 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) + +/**** Definitions for Signature1 field ****/ +#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) +#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) +#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) +#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) +#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) +#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) +#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) +#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) +#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) +#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) +#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +/* little-endian "DUMP" */ +#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544) + +/**** Definitions for Signature2 field ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) + +/**** Offsets for Image Header Fields ****/ +#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00) +#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04) +#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08) +#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C) +#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10) +#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14) +#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18) +#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C) +#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20) +#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24) +#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28) +#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C) +#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30) +#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34) +#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38) +#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C) +#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D) +#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40) +#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C) +#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54) +#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C) +#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C) + + +#define MPI26_IMAGE_HEADER_SIZE (0x100) + + +/*Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Checksum; /*0x04 */ + U32 ImageSize; /*0x08 */ + U32 NextImageHeaderOffset; /*0x0C */ + U32 PackageVersion; /*0x10 */ + U32 Reserved3; /*0x14 */ + U32 Reserved4; /*0x18 */ + U32 Reserved5; /*0x1C */ + U8 IdentifyString[32]; /*0x20 */ +} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t; 
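Host code locates a component in a flash image by following this chain: NextImageHeaderOffset in the FW image header (offset 0x30) gives the byte offset of the first extended image header, and each extended header in turn points at the next. Below is a minimal sketch of that walk (an editor's example, not part of the patch); it assumes a flat in-memory copy of the image, offsets relative to the start of that copy, little-endian on-flash fields, and a zero NextImageHeaderOffset terminating the chain. find_ext_image is an illustrative name, not a driver function.

/* Illustrative sketch; not part of the patch. */
static PTR_MPI2_EXT_IMAGE_HEADER
find_ext_image(void *image, U32 image_size, U8 image_type)
{
	PTR_MPI2_FW_IMAGE_HEADER fw_hdr = (PTR_MPI2_FW_IMAGE_HEADER)image;
	U32 offset = le32_to_cpu(fw_hdr->NextImageHeaderOffset);

	/* Assumption: a zero NextImageHeaderOffset ends the chain. */
	while (offset && offset + MPI2_EXT_IMAGE_HEADER_SIZE <= image_size) {
		PTR_MPI2_EXT_IMAGE_HEADER ext_hdr =
		    (PTR_MPI2_EXT_IMAGE_HEADER)((U8 *)image + offset);

		if (ext_hdr->ImageType == image_type)
			return ext_hdr;
		offset = le32_to_cpu(ext_hdr->NextImageHeaderOffset);
	}
	return NULL;	/* no extended image of the requested type found */
}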
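The placeholder-length convention used throughout these headers (arrays declared with a compile-time length of one, with the real element count reported at runtime) also applies to the flash layout structures defined just below, where RegionsPerLayout and NumberOfLayouts give the true counts. Buffer sizing therefore has to account for the trailing elements beyond the first; here is a hedged sketch of the arithmetic, where flash_layout_size is an illustrative helper name and MPI2_FLASH_NUMBER_OF_REGIONS is assumed left at its default of 1.

/* Illustrative sketch; not part of the patch. */
static size_t flash_layout_size(U16 regions_per_layout)
{
	/*
	 * MPI2_FLASH_LAYOUT already embeds one MPI2_FLASH_REGION (the
	 * placeholder element), so only regions beyond the first add
	 * to the structure size.
	 */
	size_t size = sizeof(MPI2_FLASH_LAYOUT);

	if (regions_per_layout > 1)
		size += (size_t)(regions_per_layout - 1) *
			sizeof(MPI2_FLASH_REGION);
	return size;
}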
+ +/*useful offsets */ +#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) +#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10) + +#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) + +/*defines for the ImageType field */ +#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) +#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) +#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) +#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) + +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) + +/*FLASH Layout Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check RegionsPerLayout at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_REGIONS +#define MPI2_FLASH_NUMBER_OF_REGIONS (1) +#endif + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfLayouts at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS +#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) +#endif + +typedef struct _MPI2_FLASH_REGION { + U8 RegionType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 RegionOffset; /*0x04 */ + U32 RegionSize; /*0x08 */ + U32 Reserved3; /*0x0C */ +} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION, + Mpi2FlashRegion_t, *pMpi2FlashRegion_t; + +typedef struct _MPI2_FLASH_LAYOUT { + U32 FlashSize; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */ +} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT, + Mpi2FlashLayout_t, *pMpi2FlashLayout_t; + +typedef struct _MPI2_FLASH_LAYOUT_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 SizeOfRegion; /*0x02 */ + U8 Reserved2; /*0x03 */ + U16 NumberOfLayouts; /*0x04 */ + U16 RegionsPerLayout; /*0x06 */ + U16 MinimumSectorAlignment; /*0x08 */ + U16 Reserved3; /*0x0A */ + U32 Reserved4; /*0x0C */ + MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */ +} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA, + Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t; + +/*defines for the RegionType field */ +#define MPI2_FLASH_REGION_UNUSED (0x00) +#define MPI2_FLASH_REGION_FIRMWARE (0x01) +#define MPI2_FLASH_REGION_BIOS (0x02) +#define MPI2_FLASH_REGION_NVDATA (0x03) +#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) +#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) +#define MPI2_FLASH_REGION_CONFIG_1 (0x07) +#define MPI2_FLASH_REGION_CONFIG_2 (0x08) +#define MPI2_FLASH_REGION_MEGARAID (0x09) +#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) +#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) +#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) +#define MPI2_FLASH_REGION_SBR (0x0E) +#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F) +#define MPI2_FLASH_REGION_HIIM (0x10) +#define MPI2_FLASH_REGION_HIIA (0x11) +#define MPI2_FLASH_REGION_CTLR (0x12) +#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13) +#define MPI2_FLASH_REGION_MR_NVDATA (0x14) +#define MPI2_FLASH_REGION_CPLD (0x15) +#define MPI2_FLASH_REGION_PSOC (0x16) +#define 
MPI2_FLASH_REGION_COREDUMP (0x17) + +/*ImageRevision */ +#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) + +/*Supported Devices Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfDevices at runtime. + */ +#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES +#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) +#endif + +typedef struct _MPI2_SUPPORTED_DEVICE { + U16 DeviceID; /*0x00 */ + U16 VendorID; /*0x02 */ + U16 DeviceIDMask; /*0x04 */ + U16 Reserved1; /*0x06 */ + U8 LowPCIRev; /*0x08 */ + U8 HighPCIRev; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ +} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE, + Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t; + +typedef struct _MPI2_SUPPORTED_DEVICES_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 NumberOfDevices; /*0x02 */ + U8 Reserved2; /*0x03 */ + U32 Reserved3; /*0x04 */ + MPI2_SUPPORTED_DEVICE + SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */ +} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA, + Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t; + +/*ImageRevision */ +#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) + +/*Init Extended Image Data */ + +typedef struct _MPI2_INIT_IMAGE_FOOTER { + U32 BootFlags; /*0x00 */ + U32 ImageSize; /*0x04 */ + U32 Signature0; /*0x08 */ + U32 Signature1; /*0x0C */ + U32 Signature2; /*0x10 */ + U32 ResetVector; /*0x14 */ +} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER, + Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t; + +/*defines for the BootFlags field */ +#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) + +/*defines for the ImageSize field */ +#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) + +/*defines for the Signature0 field */ +#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) +#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) + +/*defines for the Signature1 field */ +#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) +#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) + +/*defines for the Signature2 field */ +#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) +#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) + +/*Signature fields as individual bytes */ +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) + +/*defines for the ResetVector field */ +#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + + +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { + U8 HashImageType; /*0x00 */ + U8 HashAlgorithm; /*0x01 */ + U8 EncryptionAlgorithm; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 EncryptedHash[1]; /*0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY, +Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) + +#define MPI26_HASH_IMAGE_TYPE_UNUSED 
(0x00)
+#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02)
+#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0)
+#define MPI26_HASH_ALGORITHM_VER_NONE (0x00)
+#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20)
+#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40)
+#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60)
+#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
+#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
+#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
+
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+#define MPI26_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI26_ENCRYPTION_ALG_RSA256 (0x01)
+#define MPI26_ENCRYPTION_ALG_RSA512 (0x02)
+#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03)
+#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04)
+#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+ U8 ImageVersion; /*0x00 */
+ U8 NumHash; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 Reserved2; /*0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+#endif /* MPI2_IMAGE_H */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
new file mode 100644
index 000000000..8f1b903fe
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.21
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. + * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command + * Priority to match SAM-4. + * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field. + * 11-18-14 02.00.16 Updated copyright information. + * 03-16-15 02.00.17 Updated for MPI v2.6. + * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. + * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and + * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. + * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. + * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. + * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. + * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to + * be unique within first 32 characters. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_INIT_H +#define MPI2_INIT_H + +/***************************************************************************** +* +* SCSI Initiator Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SCSI IO messages and associated structures +****************************************************************************/ + +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; /*0x00 */ + __be32 PrimaryReferenceTag; /*0x14 */ + U16 PrimaryApplicationTag; /*0x18 */ + U16 PrimaryApplicationTagMask; /*0x1A */ + U32 TransferLength; /*0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t; + +/*MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t; + +/*MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U16 SGLFlags; /*0x10 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U32 EEDPBlockSize; /*0x28 */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI2_SGE_IO_UNION SGL; /*0x60 */ + +} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t; + +/*SCSI IO MsgFlags bits */ + +/*MsgFlags for SenseBufferAddressSpace */ +#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) +#define 
MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) +#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) +#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08) + +/*SCSI IO SGLFlags bits */ + +/*base values for Data Location Address Space */ +#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) + +/*base values for Type */ +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) + +/*shift values for each sub-field */ +#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) + +/*number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + +/*SCSI IO IoFlags bits */ + +/*Large CDB Address Space */ +#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) +#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) +#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) + +#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) +#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) +#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*SCSI IO EEDPFlags bits */ + +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + +/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ + +/*SCSI IO Control bits */ +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) +#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) + +#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) +/*alternate name for the previous field; called Command Priority in SAM-4 */ +#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) + +#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI2_SCSIIO_CONTROL_ORDEREDQ 
(0x00000200) +#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) + +#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) +#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) +#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) +#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) + +/*MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; + +/*MPI v2.5/2.6 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U8 DMAFlags; /*0x10 */ + U8 Reserved5; /*0x11 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U16 EEDPBlockSize; /*0x28 */ + U16 Reserved6; /*0x2A */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI25_SGE_IO_UNION SGL; /*0x60 */ + +} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t; + +/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ + +/*Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. 
+ * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) + +/*number of SGLOffset fields */ +#define MPI25_SCSIIO_NUM_SGLOFFSETS (4) + +/*defines for the IoFlags field */ +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) + +#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000) +#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400) +#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*MPI v2.5 defines for the EEDPFlags bits */ +/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ +#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +/*use MPI2_LUN_ defines from mpi2.h for the LUN field */ + +/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */ + +/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so + * MPI2_SCSI_IO_REPLY is used for both. 
+ */ + +/*SCSI IO Error Reply Message */ +typedef struct _MPI2_SCSI_IO_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 SCSIStatus; /*0x0C */ + U8 SCSIState; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferCount; /*0x14 */ + U32 SenseCount; /*0x18 */ + U32 ResponseInfo; /*0x1C */ + U16 TaskTag; /*0x20 */ + U16 SCSIStatusQualifier; /* 0x22 */ + U32 BidirectionalTransferCount; /*0x24 */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPErrorOffset; /* 0x28 */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedAppTag; /* 0x2C */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedGuard; /* 0x2E */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPObservedRefTag; /* 0x30 */ +} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, + Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; + +/*SCSI IO Reply MsgFlags bits */ +#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01) +#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02) +#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04) + +/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ + +#define MPI2_SCSI_STATUS_GOOD (0x00) +#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI2_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI2_SCSI_STATUS_BUSY (0x08) +#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */ +#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) +#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) + +/*SCSI IO Reply SCSIState flags */ + +#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI2_SCSI_STATE_TERMINATED (0x08) +#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSI_RI_SHIFT_REASONCODE (0) + +#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) + +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved1; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 LUN[8]; /*0x0C */ + U32 Reserved4[7]; /*0x14 */ + U16 TaskMID; /*0x30 */ + U16 Reserved5; /*0x32 */ +} MPI2_SCSI_TASK_MANAGE_REQUEST, + *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, + Mpi2SCSITaskManagementRequest_t, + *pMpi2SCSITaskManagementRequest_t; + +/*TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define 
MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/*obsolete TaskType name */ +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \ + (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) + +/*MsgFlags bits */ + +#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) +#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) +#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) + +#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) +#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18) + +/*SCSI Task Management Reply Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 ResponseCode; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TerminationCount; /*0x14 */ + U32 ResponseInfo; /*0x18 */ +} MPI2_SCSI_TASK_MANAGE_REPLY, + *PTR_MPI2_SCSI_TASK_MANAGE_REPLY, + Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t; + +/*ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) + +/**************************************************************************** +* SCSI Enclosure Processor messages +****************************************************************************/ + +/*SCSI Enclosure Processor Request Message */ +typedef struct _MPI2_SEP_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 SlotStatus; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST, + Mpi2SepRequest_t, *pMpi2SepRequest_t; + +/*Action defines */ +#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01) + +/*Flags defines */ +#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) + +/*SlotStatus defines */ +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000) +#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define 
MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) + +/*SCSI Enclosure Processor Reply Message */ +typedef struct _MPI2_SEP_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 SlotStatus; /*0x14 */ + U32 Reserved4; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY, + Mpi2SepReply_t, *pMpi2SepReply_t; + +/*SlotStatus defines */ +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) +#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h new file mode 100644 index 000000000..2c5711517 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -0,0 +1,1811 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_ioc.h + * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages + * Creation Date: October 11, 2006 + * + * mpi2_ioc.h Version: 02.00.37 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to + * MaxTargets. + * Added TotalImageSize field to FWDownload Request. + * Added reserved words to FWUpload Request. + * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. 
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING + * for SAS Initiator Device Status Change Event data. + * Modified Reason Code defines for SAS Topology Change + * List Event data, including adding a bit for PHY Vacant + * status, and adding a mask for the Reason Code. + * Added define for + * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. + * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. + * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of + * the IOCFacts Reply. + * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Moved MPI2_VERSION_UNION to mpi2.h. + * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks + * instead of enables, and added SASBroadcastPrimitiveMasks + * field. + * Added Log Entry Added Event and related structure. + * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. + * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. + * Added MaxVolumes and MaxPersistentEntries fields to + * IOCFacts reply. + * Added ProtocolFlags and IOCCapabilities fields to + * MPI2_FW_IMAGE_HEADER. + * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. + * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to + * a U16 (from a U32). + * Removed extra 's' from EventMasks name. + * 06-27-08 02.00.08 Fixed an offset in a comment. + * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. + * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and + * renamed MinReplyFrameSize to ReplyFrameSize. + * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. + * Added two new RAIDOperation values for Integrated RAID + * Operations Status Event data. + * Added four new IR Configuration Change List Event data + * ReasonCode values. + * Added two new ReasonCode defines for SAS Device Status + * Change Event data. + * Added three new DiscoveryStatus bits for the SAS + * Discovery event data. + * Added Multiplexing Status Change bit to the PhyStatus + * field of the SAS Topology Change List event data. + * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. + * BootFlags are now product-specific. + * Added defines for the individual signature bytes + * for MPI2_INIT_IMAGE_FOOTER. + * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. + * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR + * define. + * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE + * define. + * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. + * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. + * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. + * Added two new reason codes for SAS Device Status Change + * Event. + * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. + * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. + * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. + * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. + * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. + * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. + * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. + * Added Host Based Discovery Phy Event data. + * Added defines for ProductID Product field + * (MPI2_FW_HEADER_PID_). + * Modified values for SAS ProductID Family + * (MPI2_FW_HEADER_PID_FAMILY_). + * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. 
+ * Added PowerManagementControl Request structures and + * defines. + * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. + * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. + * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added + * SASNotifyPrimitiveMasks field to + * MPI2_EVENT_NOTIFICATION_REQUEST. + * Added Temperature Threshold Event. + * Added Host Message Event. + * Added Send Host Message request and reply. + * 05-25-11 02.00.18 For Extended Image Header, added + * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and + * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. + * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. + * 08-24-11 02.00.19 Added PhysicalPort field to + * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. + * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. + * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. + * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. + * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. + * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. + * 11-18-14 02.00.25 Updated copyright information. + * 03-16-15 02.00.26 Updated for MPI v2.6. + * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and + * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. + * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and + * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. + * Added MPI26_CTRL_OP_SHUTDOWN. + * 08-25-15 02.00.27 Added IC ARCH Class based signature defines. + * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event. + * Added ConfigurationFlags field to IOCInit message to + * support NVMe SGL format control. + * Added PCIe SRIOV support. + * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support. + * Added PCIe 4 16.0 GT/sec speed support. + * Removed AHCI support. + * Removed SOP support. + * 07-01-16 02.00.29 Added Archclass for 4008 product. + * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED. + * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload + * Request Message. + * Added new defines for the ImageType field of FWUpload + * Request Message. + * Added new values for the RegionType field in the Layout + * Data sections of the FLASH Layout Extended Image Data. + * Added new defines for the ReasonCode field of + * Active Cable Exception Event. + * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and + * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE. + * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and + * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR. + * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. + * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related + * defines for the ReasonCode field. + * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD. + * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED + * to the ReasonCode field in PCIe Device Status Change + * Event Data. + * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC. 
+ * Moved FW image definitions into new mpi2_image.h + * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) + * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE + * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED + * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP + * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_IOC_H +#define MPI2_IOC_H + +/***************************************************************************** +* +* IOC Messages +* +*****************************************************************************/ + +/**************************************************************************** +* IOCInit message +****************************************************************************/ + +/*IOCInit Request message */ +typedef struct _MPI2_IOC_INIT_REQUEST { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 MsgVersion; /*0x0C */ + U16 HeaderVersion; /*0x0E */ + U32 Reserved5; /*0x10 */ + U16 ConfigurationFlags; /* 0x14 */ + U8 HostPageSize; /*0x16 */ + U8 HostMSIxVectors; /*0x17 */ + U16 Reserved8; /*0x18 */ + U16 SystemRequestFrameSize; /*0x1A */ + U16 ReplyDescriptorPostQueueDepth; /*0x1C */ + U16 ReplyFreeQueueDepth; /*0x1E */ + U32 SenseBufferAddressHigh; /*0x20 */ + U32 SystemReplyAddressHigh; /*0x24 */ + U64 SystemRequestFrameBaseAddress; /*0x28 */ + U64 ReplyDescriptorPostQueueAddress; /*0x30 */ + U64 ReplyFreeQueueAddress; /*0x38 */ + U64 TimeStamp; /*0x40 */ +} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t; + +/*WhoInit values */ +#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI2_WHOINIT_ROM_BIOS (0x02) +#define MPI2_WHOINIT_PCI_PEER (0x03) +#define MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_WHOINIT_MANUFACTURER (0x05) + +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + + +/*MsgVersion */ +#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) + +/*ConfigurationFlags */ +#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) +#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002) + +/*minimum depth for a Reply Descriptor Post Queue */ +#define MPI2_RDPQ_DEPTH_MIN (16) + +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry; + + +/*IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 
Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t; + +/**************************************************************************** +* IOCFacts message +****************************************************************************/ + +/*IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t; + +/*IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY { + U16 MsgVersion; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 HeaderVersion; /*0x04 */ + U8 IOCNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 IOCExceptions; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 MaxChainDepth; /*0x14 */ + U8 WhoInit; /*0x15 */ + U8 NumberOfPorts; /*0x16 */ + U8 MaxMSIxVectors; /*0x17 */ + U16 RequestCredit; /*0x18 */ + U16 ProductID; /*0x1A */ + U32 IOCCapabilities; /*0x1C */ + MPI2_VERSION_UNION FWVersion; /*0x20 */ + U16 IOCRequestFrameSize; /*0x24 */ + U16 IOCMaxChainSegmentSize; /*0x26 */ + U16 MaxInitiators; /*0x28 */ + U16 MaxTargets; /*0x2A */ + U16 MaxSasExpanders; /*0x2C */ + U16 MaxEnclosures; /*0x2E */ + U16 ProtocolFlags; /*0x30 */ + U16 HighPriorityCredit; /*0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */ + U8 ReplyFrameSize; /*0x36 */ + U8 MaxVolumes; /*0x37 */ + U16 MaxDevHandle; /*0x38 */ + U16 MaxPersistentEntries; /*0x3A */ + U16 MinDevHandle; /*0x3C */ + U8 CurrentHostPageSize; /* 0x3E */ + U8 Reserved4; /* 0x3F */ + U8 SGEModifierMask; /*0x40 */ + U8 SGEModifierValue; /*0x41 */ + U8 SGEModifierShift; /*0x42 */ + U8 Reserved5; /*0x43 */ +} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; + +/*MsgVersion */ +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) + +/*IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400) +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) +#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) + +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) + +#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) +#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) +#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) +#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) +#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) + +/*defines for WhoInit field are after the IOCInit Request */ + +/*ProductID field uses MPI2_FW_HEADER_PID_ */ + +/*IOCCapabilities */ +#define 
MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) +#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) +#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) +#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) +#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) +#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) +#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) +#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) +#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) + +/*ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) + +/**************************************************************************** +* PortFacts message +****************************************************************************/ + +/*PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ +} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST, + Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t; + +/*PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 Reserved5; /*0x14 */ + U8 PortType; /*0x15 */ + U16 Reserved6; /*0x16 */ + U16 MaxPostedCmdBuffers; /*0x18 */ + U16 Reserved7; /*0x1A */ +} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t; + +/*PortType values */ +#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) +#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) +#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) +#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) +#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) +#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40) + + +/**************************************************************************** +* PortEnable message +****************************************************************************/ + +/*PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t; + +/*PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY { + U16 
Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t; + +/**************************************************************************** +* EventNotification message +****************************************************************************/ + +/*EventNotification Request message */ +#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) + +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */ + U16 SASBroadcastPrimitiveMasks; /*0x24 */ + U16 SASNotifyPrimitiveMasks; /*0x26 */ + U32 Reserved8; /*0x28 */ +} MPI2_EVENT_NOTIFICATION_REQUEST, + *PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, + *pMpi2EventNotificationRequest_t; + +/*EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { + U16 EventDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 AckRequired; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 Event; /*0x14 */ + U16 Reserved4; /*0x16 */ + U32 EventContext; /*0x18 */ + U32 EventData[]; /*0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, + *pMpi2EventNotificationReply_t; + +/*AckRequired */ +#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) +#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) + +/*Event */ +#define MPI2_EVENT_LOG_DATA (0x0001) +#define MPI2_EVENT_STATE_CHANGE (0x0002) +#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) +#define MPI2_EVENT_EVENT_CHANGE (0x000A) +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */ +#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) +#define MPI2_EVENT_SAS_DISCOVERY (0x0016) +#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_IR_VOLUME (0x001E) +#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) +#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) +#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) +#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) +#define MPI2_EVENT_SAS_QUIESCE (0x0025) +#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) +#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) +#define MPI2_EVENT_HOST_MESSAGE (0x0028) +#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030) +#define MPI2_EVENT_PCIE_ENUMERATION (0x0031) +#define 
MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032) +#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033) +#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) +#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035) +#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) +#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) + +/*Log Entry Added Event data */ + +/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */ +} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, + *pMpi2EventDataLogEntryAdded_t; + +/*GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { + U8 GPIONum; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, + *pMpi2EventDataGpioInterrupt_t; + +/*Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE { + U16 Status; /*0x00 */ + U8 SensorNum; /*0x02 */ + U8 Reserved1; /*0x03 */ + U16 CurrentTemperature; /*0x04 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ +} MPI2_EVENT_DATA_TEMPERATURE, + *PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t; + +/*Temperature Threshold Event data Status bits */ +#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) +#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) +#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) +#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) + +/*Host Message Event data */ + +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { + U8 SourceVF_ID; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + U32 HostData[]; /*0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; + +/*Power Performance Change Event data */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { + U8 CurrentPowerMode; /*0x00 */ + U8 PreviousPowerMode; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_EVENT_DATA_POWER_PERF_CHANGE, + *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, + *pMpi2EventDataPowerPerfChange_t; + +/*defines for CurrentPowerMode and PreviousPowerMode fields */ +#define MPI2_EVENT_PM_INIT_MASK (0xC0) +#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_INIT_HOST (0x40) +#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) +#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) + +#define MPI2_EVENT_PM_MODE_MASK (0x07) +#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) +#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) +#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) +#define MPI2_EVENT_PM_MODE_STANDBY (0x06) + +/* Active Cable Exception Event data */ + +typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { + U32 ActiveCablePowerRequirement; /* 0x00 */ + U8 ReasonCode; /* 0x04 */ + U8 ReceptacleID; /* 0x05 */ + U16 Reserved1; /* 0x06 */ +} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi25EventDataActiveCableExcept_t, + *pMpi25EventDataActiveCableExcept_t, + MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + 
*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi26EventDataActiveCableExcept_t, + *pMpi26EventDataActiveCableExcept_t; + +/*MPI2.5 defines for the ReasonCode field */ +#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/* defines for ReasonCode field */ +#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) +#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) +#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02) + +/*Hard Reset Received Event data */ + +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { + U8 Reserved1; /*0x00 */ + U8 Port; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + *pMpi2EventDataHardResetReceived_t; + +/*Task Set Full Event data */ +/* this event is obsolete */ + +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL { + U16 DevHandle; /*0x00 */ + U16 CurrentDepth; /*0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t; + +/*SAS Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 SASAddress; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + *pMpi2EventDataSasDeviceStatusChange_t; + +/*SAS Device Status Change Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +/*Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { + U16 VolDevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 RAIDOperation; /*0x04 */ + U8 PercentComplete; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 ElapsedSeconds; /*0x08 */ +} MPI2_EVENT_DATA_IR_OPERATION_STATUS, + *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + *pMpi2EventDataIrOperationStatus_t; + +/*Integrated RAID Operation Status Event data RAIDOperation values */ +#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) +#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +/*Integrated RAID Volume Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_VOLUME { + U16 VolDevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 NewValue; /*0x04 */ + U32 
PreviousValue; /*0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t; + +/*Integrated RAID Volume Event data ReasonCode values */ +#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK { + U16 Reserved1; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysDiskNum; /*0x03 */ + U16 PhysDiskDevHandle; /*0x04 */ + U16 Reserved2; /*0x06 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A */ + U32 NewValue; /*0x0C */ + U32 PreviousValue; /*0x10 */ +} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, + *pMpi2EventDataIrPhysicalDisk_t; + +/*Integrated RAID Physical Disk Event data ReasonCode values */ +#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Configuration Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumElements at runtime. + */ +#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT +#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 ReasonCode; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t; + +/*IR Configuration Change List Event data ElementFlags values */ +#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) + +/*IR Configuration Change List Event data ReasonCode values */ +#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST { + U8 NumElements; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 Reserved2; /*0x02 */ + U8 ConfigNum; /*0x03 */ + U32 Flags; /*0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT + ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */ +} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + *pMpi2EventDataIrConfigChangeList_t; + +/*IR Configuration Change List Event data Flags values */ +#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) + +/*SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 DiscoveryStatus; /*0x04 */ +} MPI2_EVENT_DATA_SAS_DISCOVERY, + *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t; + +/*SAS Discovery Event data Flags values */ 
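+
+/*
+ * Illustrative sketch, not part of the MPI specification headers: one way
+ * host code might honor the "check NumElements at runtime" note above,
+ * given that ConfigElement[] is declared with a single entry. The helper
+ * name and its element_cb callback are hypothetical; the same pattern
+ * applies to the NumEntries fields of the SAS and PCIe topology change
+ * list event data later in this file.
+ */
+static inline void
+example_walk_ir_config_elements(pMpi2EventDataIrConfigChangeList_t event_data,
+	void (*element_cb)(pMpi2EventIrConfigElement_t element))
+{
+	U8 i;
+
+	/* the event buffer carries NumElements entries, not the declared one */
+	for (i = 0; i < event_data->NumElements; i++)
+		element_cb(&event_data->ConfigElement[i]);
+}
+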
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) +#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) + +/*SAS Discovery Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) + +/*SAS Discovery Event data DiscoveryStatus values */ +#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) +#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) + +/*SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 PortWidth; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + *pMpi2EventDataSasBroadcastPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) +#define MPI2_EVENT_PRIMITIVE_SES (0x02) +#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) +#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) +#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) +#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) + +/*SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 Reserved1; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + *pMpi2EventDataSasNotifyPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) +#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) +#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) +#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) + +/*SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE { + U8 ReasonCode; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U16 DevHandle; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + *pMpi2EventDataSasInitDevStatusChange_t; + +/*SAS Initiator Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) + +/*SAS 
Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW { + U16 MaxInit; /*0x00 */ + U16 CurrentInit; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + *pMpi2EventDataSasInitTableOverflow_t; + +/*SAS Topology Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. + */ +#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT +#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 LinkRate; /*0x02 */ + U8 PhyStatus; /*0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t; + +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 ExpanderDevHandle; /*0x02 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPhyNum; /*0x09 */ + U8 ExpStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY + PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */ +} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + *pMpi2EventDataSasTopologyChangeList_t; + +/*values for the ExpStatus field */ +#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) + +/*defines for the LinkRate field */ +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) + +#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) +#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) +#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C) + +/*values for the PhyStatus field */ +#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) +/*values for the PhyStatus ReasonCode sub-field */ +#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +/*SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE { + U16 EnclosureHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U64 EnclosureLogicalID; /*0x04 */ + U16 NumSlots; /*0x0C */ + U16 StartSlot; /*0x0E */ + U32 PhyBits; /*0x10 */ +} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + 
*PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + *pMpi2EventDataSasEnclDevStatusChange_t, + MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE, + Mpi26EventDataEnclDevStatusChange_t, + *pMpi26EventDataEnclDevStatusChange_t; + +/*SAS Enclosure Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +/*Enclosure Device Status Change event ReasonCode values */ +#define MPI26_EVENT_ENCL_RC_ADDED (0x01) +#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02) + + +typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR { + U16 DevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1[2]; /*0x04 */ + U64 SASAddress; /*0x0C */ + U32 Reserved2[2]; /*0x14 */ +} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR, + Mpi25EventDataSasDeviceDiscoveryError_t, + *pMpi25EventDataSasDeviceDiscoveryError_t; + +/*SAS Device Discovery Error Event data ReasonCode values */ +#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + +/*SAS PHY Counter Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 PhyEventCode; /*0x0C */ + U8 PhyNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 PhyEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI2_EVENT_DATA_SAS_PHY_COUNTER, + *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, + *pMpi2EventDataSasPhyCounter_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h + *for the PhyEventCode field */ + +/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h + *for the CounterType field */ + +/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h + *for the TimeUnits field */ + +/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h + *for the ThresholdFlags field */ + +/*SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE { + U8 ReasonCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ +} MPI2_EVENT_DATA_SAS_QUIESCE, + *PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t; + +/*SAS Quiesce Event data ReasonCode values */ +#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) + +/*Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS { + U8 Flags; /*0x00 */ + U8 NegotiatedLinkRate; /*0x01 */ + U8 PhyNum; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1; /*0x04 */ + U8 InitialFrame[28]; /*0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t; + +/*values for the Flags field */ +#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) +#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h + *for the NegotiatedLinkRate field */ + +typedef union _MPI2_EVENT_HBD_DESCRIPTOR { + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t; + +typedef struct _MPI2_EVENT_DATA_HBD_PHY { + U8 DescriptorType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; 
/*0x02 */ + U32 Reserved3; /*0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, + *pMpi2EventDataMpi2EventDataHbdPhy_t; + +/*values for the DescriptorType field */ +#define MPI2_EVENT_HBD_DT_SAS (0x01) + + +/*PCIe Device Status Change Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 WWID; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE, + Mpi26EventDataPCIeDeviceStatusChange_t, + *pMpi26EventDataPCIeDeviceStatusChange_t; + +/*PCIe Device Status Change Event data ReasonCode values */ +#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05) +#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10) +#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11) + + +/*PCIe Enumeration Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 EnumerationStatus; /*0x04 */ +} MPI26_EVENT_DATA_PCIE_ENUMERATION, + *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION, + Mpi26EventDataPCIeEnumeration_t, + *pMpi26EventDataPCIeEnumeration_t; + +/*PCIe Enumeration Event data Flags values */ +#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02) +#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01) + +/*PCIe Enumeration Event data ReasonCode values */ +#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01) +#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02) + +/*PCIe Enumeration Event data EnumerationStatus values */ +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000) +#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000) +#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000) + + +/*PCIe Topology Change List Event data (MPI v2.6 and later) */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. 
+ */ +#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT +#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1) +#endif + +typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 PortStatus; /*0x02 */ + U8 Reserved1; /*0x03 */ + U8 CurrentPortInfo; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 PreviousPortInfo; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY, + Mpi26EventPCIeTopoPortEntry_t, + *pMpi26EventPCIeTopoPortEntry_t; + +/*PCIe Topology Change List Event data PortStatus values */ +#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03) +#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04) +#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05) + +/*PCIe Topology Change List Event data defines for CurrentPortInfo and + *PreviousPortInfo + */ +#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0) +#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10) +#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20) +#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30) +#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40) +#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50) + +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04) +#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05) + +typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 SwitchDevHandle; /*0x02 */ + U8 NumPorts; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPortNum; /*0x09 */ + U8 SwitchStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI26_EVENT_PCIE_TOPO_PORT_ENTRY + PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */ +} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST, + Mpi26EventDataPCIeTopologyChangeList_t, + *pMpi26EventDataPCIeTopologyChangeList_t; + +/*PCIe Topology Change List Event data SwitchStatus values */ +#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00) +#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01) +#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02) +#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03) +#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04) + +/*PCIe Link Counter Event data (MPI v2.6 and later) */ + +typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 LinkEventCode; /*0x0C */ + U8 LinkNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 LinkEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER, + Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t; + + +/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode + *field + */ + +/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType + *field + */ + +/*use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits + *field + */ + +/*use MPI26_PCIELINK3_TFLAGS_ values 
from mpi2_cnfg.h for the ThresholdFlags + *field + */ + +/**************************************************************************** +* EventAck message +****************************************************************************/ + +/*EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Event; /*0x0C */ + U16 Reserved5; /*0x0E */ + U32 EventContext; /*0x10 */ +} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t; + +/*EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, *pMpi2EventAckReply_t; + +/**************************************************************************** +* SendHostMessage message +****************************************************************************/ + +/*SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { + U16 HostDataLength; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 Reserved4; /*0x0C */ + U8 DestVF_ID; /*0x0D */ + U16 Reserved5; /*0x0E */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 Reserved8; /*0x18 */ + U32 Reserved9; /*0x1C */ + U32 Reserved10; /*0x20 */ + U32 HostData[]; /*0x24 */ +} MPI2_SEND_HOST_MESSAGE_REQUEST, + *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, + *pMpi2SendHostMessageRequest_t; + +/*SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { + U16 HostDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t; + +/**************************************************************************** +* FWDownload message +****************************************************************************/ + +/*MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest; + +#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) + +#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) +#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) 
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) +#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E) +#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10) +#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11) +#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) +#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) +#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) +/*MPI v2.6 and newer */ +#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) +#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) +#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) + +/*MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t; + +/*MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST, + Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest; + +/*FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t; + +/**************************************************************************** +* FWUpload message +****************************************************************************/ + +/*MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t; + +#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) +#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) +#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) +#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) +#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) +#define 
MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
+#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E)
+#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F)
+#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10)
+#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11)
+#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12)
+#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13)
+#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14)
+
+
+/*MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t;
+
+/*MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t;
+
+/*FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ActualImageSize; /*0x14 */
+} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, *pMpi2FWUploadReply_t;
+
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/*PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Parameter1; /*0x0C */
+ U8 Parameter2; /*0x0D */
+ U8 Parameter3; /*0x0E */
+ U8 Parameter4; /*0x0F */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t;
+
+/*defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/*Parameter1 contains a PHY number */
+/*Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define 
MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) +#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION + * Feature */ +/*Parameter1 contains SAS port width modulation group number */ +/*Parameter2 indicates IOC action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) +#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) +#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) +/*Parameter3 indicates desired modulation level using these defines */ +#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) +#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) +#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) +#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) +/*Parameter4 is reserved */ + +/*this next set (_PCIE_LINK) is obsolete */ +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/*Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */ +/*Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */ +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/*Parameter1 indicates desired IOC hardware clock speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) +#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) +#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) +/*Parameter2, Parameter3, and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/ +/*Parameter1 indicates host action regarding global power management mode */ +#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) +#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) +#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) +/*Parameter2 indicates the requested global power management mode */ +#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) +#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) +#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) +/*Parameter3 and Parameter4 are reserved */ + +/*PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; + +/**************************************************************************** +* IO Unit Control messages (MPI v2.6 and later only.) 
+****************************************************************************/ + +/* IO Unit Control Request Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U8 PhyNum; /* 0x0E */ + U8 PrimFlags; /* 0x0F */ + U32 Primitive; /* 0x10 */ + U8 LookupMethod; /* 0x14 */ + U8 Reserved5; /* 0x15 */ + U16 SlotNumber; /* 0x16 */ + U64 LookupAddress; /* 0x18 */ + U32 IOCParameterValue; /* 0x20 */ + U32 Reserved7; /* 0x24 */ + U32 Reserved8; /* 0x28 */ +} MPI26_IOUNIT_CONTROL_REQUEST, + *PTR_MPI26_IOUNIT_CONTROL_REQUEST, + Mpi26IoUnitControlRequest_t, + *pMpi26IoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) +#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) +#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) +#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) +#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) +#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E) +#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F) +#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10) +#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11) +#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12) +#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13) +#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14) +#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15) +#define MPI26_CTRL_OP_SHUTDOWN (0x16) +#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) +#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) +#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) +#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A) +#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B) +#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/* values for the PrimFlags field */ +#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08) +#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02) +#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01) + +/* values for the LookupMethod field */ +#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01) +#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02) +#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + + +/* IO Unit Control Reply Message */ +typedef struct _MPI26_IOUNIT_CONTROL_REPLY { + U8 Operation; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 MsgLength; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 DevHandle; /* 0x04 */ + U8 IOCParameter; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved3; /* 0x0A */ + U16 Reserved4; /* 0x0C */ + U16 IOCStatus; /* 0x0E */ + U32 IOCLogInfo; /* 0x10 */ +} MPI26_IOUNIT_CONTROL_REPLY, + *PTR_MPI26_IOUNIT_CONTROL_REPLY, + Mpi26IoUnitControlReply_t, + *pMpi26IoUnitControlReply_t; + + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h new file mode 100644 index 000000000..bb7b79cfa --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h @@ -0,0 +1,113 @@ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_pci.h + * Title: MPI PCIe Attached Devices structures and definitions. + * Creation Date: October 9, 2012 + * + * mpi2_pci.h Version: 02.00.04 + * + * NOTE: Names (typedefs, defines, etc.) 
beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 03-16-15 02.00.00 Initial version.
+ * 02-17-16 02.00.01 Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
+ * NVME Encapsulated Request.
+ * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req
+ * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI
+ * Shorten some defines to be compatible with DOS
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_PCI_H
+#define MPI2_PCI_H
+
+
+/*
+ *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event
+ *data and PCIe Configuration pages.
+ */
+#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010)
+
+#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F)
+#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
+#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
+#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
+#define MPI26_PCIE_DEVINFO_SCSI (0x00000004)
+
+/****************************************************************************
+* NVMe Encapsulated message
+****************************************************************************/
+
+/*NVME Encapsulated Request Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ U64 ErrorResponseBaseAddress; /*0x10 */
+ U16 ErrorResponseAllocationLength; /*0x18 */
+ U16 Flags; /*0x1A */
+ U32 DataLength; /*0x1C */
+ U8 NVMe_Command[4]; /*0x20 */
+
+} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST,
+ Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t;
+
+/*defines for the Flags field */
+#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020)
+/*Submission Queue Type*/
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
+/*Error Response Address Space */
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008)
+/* Data Direction*/
+#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
+#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
+#define MPI26_NVME_FLAGS_WRITE (0x0001)
+#define MPI26_NVME_FLAGS_READ (0x0002)
+#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003)
+
+
+/*NVMe Encapsulated Reply Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 ErrorResponseCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+} MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ Mpi26NVMeEncapsulatedErrorReply_t,
+ 
*pMpi26NVMeEncapsulatedErrorReply_t; + + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h new file mode 100644 index 000000000..b770eb516 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 + * + * mpi2_raid.h Version: 02.00.11 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. + * 11-18-14 02.00.11 Updated copyright information. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RAID_H +#define MPI2_RAID_H + +/***************************************************************************** +* +* Integrated RAID Messages +* +*****************************************************************************/ + +/**************************************************************************** +* RAID Action messages +****************************************************************************/ + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + +/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) +#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) + +/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA { + U8 RateToChange; /*0x00 */ + U8 RateOrMode; /*0x01 */ + U16 DataScrubDuration; /*0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t; + +#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) +#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) +#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) + +/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_START_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + *pMpi2RaidActionStartRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_START_NEW (0x00) +#define MPI2_RAID_ACTION_START_RESUME (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + *pMpi2RaidActionStopRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_STOP_ABORT (0x00) +#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 DevHandle; /*0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t; + +/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; /*0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */ + U16 Reserved1; /*0x02 */ +} 
MPI2_RAID_ACTION_FW_UPDATE_MODE, + *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, + *pMpi2RaidActionFwUpdateMode_t; + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) +#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) + +typedef union _MPI2_RAID_ACTION_DATA { + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, *pMpi2RaidActionData_t; + +/*RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */ +} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t; + +/*RAID Action request Action values */ + +#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) +#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) +#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) +#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) +#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) +#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) +#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) +#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) +#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) +#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) +#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) +#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) +#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) +#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) +#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) +#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*RAID Volume Creation Structure */ + +/* + *The following define can be customized for the targeted product. 
+ */ +#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS +#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) +#endif + +typedef struct _MPI2_RAID_VOLUME_PHYSDISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U16 PhysDiskDevHandle; /*0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT { + U8 NumPhysDisks; /*0x00 */ + U8 VolumeType; /*0x01 */ + U16 Reserved1; /*0x02 */ + U32 VolumeCreationFlags; /*0x04 */ + U32 VolumeSettings; /*0x08 */ + U8 Reserved2; /*0x0C */ + U8 ResyncRate; /*0x0D */ + U16 DataScrubDuration; /*0x0E */ + U64 VolumeMaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U8 Name[16]; /*0x1C */ + MPI2_RAID_VOLUME_PHYSDISK + PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */ +} MPI2_RAID_VOLUME_CREATION_STRUCT, + *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, + Mpi2RaidVolumeCreationStruct_t, + *pMpi2RaidVolumeCreationStruct_t; + +/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ + +/*defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/*The following is an obsolete define. + *It must be shifted left 24 bits in order to set the proper bit. + */ +#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) + +/*RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION { + U32 Flags; /*0x00 */ + U16 DevHandle0; /*0x04 */ + U16 Reserved1; /*0x06 */ + U16 DevHandle1; /*0x08 */ + U16 Reserved2; /*0x0A */ +} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + *pMpi2RaidOnlineCapacityExpansion_t; + +/*RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT { + U16 SourceDevHandle; /*0x00 */ + U16 CandidateDevHandle; /*0x02 */ + U32 Flags; /*0x04 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ +} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + *pMpi2RaidCompatibilityInputStruct_t; + +/*defines for RAID Compatibility Structure Flags field */ +#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) +#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) + +/*RAID Volume Indicator Structure */ + +typedef struct _MPI2_RAID_VOL_INDICATOR { + U64 TotalBlocks; /*0x00 */ + U64 BlocksRemaining; /*0x08 */ + U32 Flags; /*0x10 */ + U32 ElapsedSeconds; /* 0x14 */ +} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; + +/*defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) +#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) +#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) +#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) +#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) +#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) +#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) + +/*RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; /*0x00 */ + U8 
Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 GenericAttributes; /*0x04 */ + U32 OEMSpecificAttributes; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ +} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + *pMpi2RaidCompatibilityResultStruct_t; + +/*defines for RAID Compatibility Result Structure State field */ +#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) +#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) + +/*defines for RAID Compatibility Result Structure GenericAttributes field */ +#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) + +#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) +#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) +#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) + +#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) +#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) +#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) + +/*RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA { + U32 Word[6]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t; + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */ +} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h new file mode 100644 index 000000000..16c922a8a --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 + * + * mpi2_sas.h Version: 02.00.10 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. 
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * 11-18-14 02.00.09 Updated copyright information. + * 03-16-15 02.00.10 Updated for MPI v2.6. + * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_SAS_H +#define MPI2_SAS_H + +/* + *Values for SASStatus. + */ +#define MPI2_SASSTATUS_SUCCESS (0x00) +#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI2_SASSTATUS_INVALID_FRAME (0x02) +#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + +/* + *Values for the SAS DeviceInfo field used in SAS Device Status Change Event + *data and SAS Configuration pages. + */ +#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/***************************************************************************** +* +* SAS Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SMP Passthrough messages +****************************************************************************/ + +/*SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 RequestDataLength; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U64 SASAddress; /*0x10 */ + U32 Reserved3; /*0x18 */ + U32 Reserved4; /*0x1C */ + MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */ +} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ResponseDataLength; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 Reserved3; /*0x14 */ + U8 ResponseData[4]; /*0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SATA Passthrough messages +****************************************************************************/ + +typedef union _MPI2_SATA_PT_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */ + MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t; + +/*SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 DataLength; /*0x18 */ + U8 CommandFIS[20]; /*0x1C */ + MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/ +} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, + *pMpi2SataPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040) +#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 StatusFIS[20]; /*0x14 */ + U32 StatusControlRegisters; /*0x28 */ + U32 TransferCount; /*0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t; + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SAS IO Unit Control messages +* (MPI v2.5 and earlier only. +* Replaced by IO Unit Control messages in MPI v2.6 and later.) 
+****************************************************************************/ + +/*SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U8 PhyNum; /*0x0E */ + U8 PrimFlags; /*0x0F */ + U32 Primitive; /*0x10 */ + U8 LookupMethod; /*0x14 */ + U8 Reserved5; /*0x15 */ + U16 SlotNumber; /*0x16 */ + U64 LookupAddress; /*0x18 */ + U32 IOCParameterValue; /*0x20 */ + U32 Reserved7; /*0x24 */ + U32 Reserved8; /*0x28 */ +} MPI2_SAS_IOUNIT_CONTROL_REQUEST, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, + *pMpi2SasIoUnitControlRequest_t; + +/*values for the Operation field */ +#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ +#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) +#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) +#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) +#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) +#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) +#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) +#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/*values for the PrimFlags field */ +#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) +#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) +#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) + +/*values for the LookupMethod field */ +#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) +#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) +#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + +/*SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SAS_IOUNIT_CONTROL_REPLY, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h new file mode 100644 index 000000000..17ef7f63b --- /dev/null +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -0,0 +1,565 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 + * + * mpi2_tool.h Version: 02.00.16 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. 
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. + * 11-18-14 02.00.13 Updated copyright information. + * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean + * Tool Request Message. + * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. + * Added option for DeviceInfo field in ISTWI tool. + * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TOOL_H +#define MPI2_TOOL_H + +/***************************************************************************** +* +* Toolbox Messages +* +*****************************************************************************/ + +/*defines for the Tools */ +#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI2_TOOLBOX_BEACON_TOOL (0x05) +#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) +#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08) + +/**************************************************************************** +* Toolbox reply +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t; + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Flags; /*0x0C */ +} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) +#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) 
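+
+/*
+ * A minimal usage sketch (the helper name is illustrative only): framing a
+ * Toolbox Clean request that targets the current firmware image. It assumes
+ * MPI2_FUNCTION_TOOLBOX from mpi2.h and <linux/string.h> for memset(); the
+ * remaining Flags values continue below.
+ */
+static inline void
+example_toolbox_clean_init(MPI2_TOOLBOX_CLEAN_REQUEST *request)
+{
+	memset(request, 0, sizeof(*request));
+	request->Function = MPI2_FUNCTION_TOOLBOX;	/* from mpi2.h */
+	request->Tool = MPI2_TOOLBOX_CLEAN_TOOL;
+	request->Flags = cpu_to_le32(MPI2_TOOLBOX_CLEAN_FW_CURRENT);
+}
+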
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) +#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) +#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) +#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000) +#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000) +#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000) +#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000) +#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000) +#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000) +#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000) +#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0) +#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010) +#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008) +#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) + +/**************************************************************************** +* Toolbox Memory Move request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t; + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 Flags; /*0x10 */ + U32 DataLength; /*0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */ +} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + *pMpi2ToolboxDiagDataUploadRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { + U32 DiagDataLength; /*00h */ + U8 FormatCode; /*04h */ + U8 Reserved1; /*05h */ + U16 Reserved2; /*06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t; + +/**************************************************************************** +* Toolbox ISTWI Read Write Tool +****************************************************************************/ + +/*Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 SGLFlags; /*0x16 */ + U8 Flags; /*0x17 */ + U16 TxDataLength; /*0x18 */ + U16 RxDataLength; /*0x1A */ + U32 Reserved8; /*0x1C */ + U32 Reserved9; /*0x20 */ + U32 
Reserved10; /*0x24 */ + U32 Reserved11; /*0x28 */ + U32 Reserved12; /*0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */ +} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + *pMpi2ToolboxIstwiReadWriteRequest_t; + +/*values for the Action field */ +#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) +#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) +#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) +#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) +#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) +#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*values for the Flags field */ +#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) +#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) + +/*MPI26 TOOLBOX Request MsgFlags defines */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) +/*Request uses Man Page 43 device index addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) +/*Request uses Man Page 43 device info struct addressing */ +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) + +/*Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 IstwiStatus; /*0x16 */ + U8 Reserved6; /*0x17 */ + U16 TxDataCount; /*0x18 */ + U16 RxDataCount; /*0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t; + +/**************************************************************************** +* Toolbox Beacon Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Reserved5; /*0x0C */ + U8 PhysicalPort; /*0x0D */ + U8 Reserved6; /*0x0E */ + U8 Flags; /*0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + +/**************************************************************************** +* Toolbox Diagnostic CLI Tool +****************************************************************************/ + +#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) + +/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, 
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + *pMpi2ToolboxDiagnosticCliRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ +} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + *pMpi25ToolboxDiagnosticCliRequest_t; + +/*Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ReturnedDataLength; /*0x14 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, + *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + *pMpi2ToolboxDiagnosticCliReply_t; + + +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 TextToDisplay[4]; /* 0x10 */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +Mpi2ToolboxTextDisplayRequest_t, +*pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + +/*************************************************************************** + * Toolbox Backend Lane Margining Tool + *************************************************************************** + */ + +/*Toolbox Backend Lane Margining Tool request message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Command; /*0x0C */ + U8 SwitchPort; /*0x0D */ + U16 DevHandle; /*0x0E */ + U8 RegisterOffset; /*0x10 */ + U8 Reserved5; /*0x11 */ + U16 DataLength; /*0x12 */ + MPI25_SGE_IO_UNION SGL; /*0x14 */ +} MPI26_TOOLBOX_LANE_MARGINING_REQUEST, + *PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST, + 
Mpi26ToolboxLaneMarginingRequest_t, + *pMpi2ToolboxLaneMarginingRequest_t; + +/* defines for the Command field */ +#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01) +#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02) +#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03) +#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04) + + +/*Toolbox Backend Lane Margining Tool reply message */ +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 ReturnedDataLength; /*0x14 */ + U16 Reserved6; /*0x16 */ +} MPI26_TOOLBOX_LANE_MARGINING_REPLY, + *PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY, + Mpi26ToolboxLaneMarginingReply_t, + *pMpi26ToolboxLaneMarginingReply_t; + + +/***************************************************************************** +* +* Diagnostic Buffer Messages +* +*****************************************************************************/ + +/**************************************************************************** +* Diagnostic Buffer Post request +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U64 BufferAddress; /*0x0C */ + U32 BufferLength; /*0x14 */ + U32 Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ + U32 Flags; /*0x20 */ + U32 ProductSpecific[23]; /*0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t; + +/*values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + +/*values for the BufferType field */ +#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) +/*count of the number of buffer types */ +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) + +/*values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferLength; /*0x14 */ +} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY, + Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t; + +/**************************************************************************** +* Diagnostic Release request +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REQUEST { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 
ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+
+/****************************************************************************
+* Diagnostic Release reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
new file mode 100644
index 000000000..36494439a
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.01
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 11-18-14 02.00.01 Updated copyright information.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+/*******************************************************************************
+ * Define MPI2_POINTER if it hasn't already been defined. By default
+ * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined
+ * as a far pointer by defining MPI2_POINTER as "far *" before this header
+ * file is included.
+ */
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__ ((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
+
+#endif
+
+#endif
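Editor's note: the typedefs above make the endianness contract explicit. U16/U32/U64 are little-endian wire types (__le16/__le32/__le64), so host code must convert every MPI frame field it touches. A minimal illustrative sketch; the variable names are hypothetical, though the field names appear in the driver code below:

	/* MPI frame fields are little-endian; convert on every access,
	 * as mpt3sas_base.c does throughout.
	 */
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	mpi_request->IOCParameterValue = cpu_to_le32(value);

Annotating the types this way lets sparse (the kernel's static checker) flag a direct assignment between a __le32 field and a plain u32, which is the point of the __le* typedefs.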
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
new file mode 100644
index 000000000..809be43f4
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -0,0 +1,8970 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/kdev_t.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/time.h> +#include <linux/ktime.h> +#include <linux/kthread.h> +#include <asm/page.h> /* To get host page size per arch */ +#include <linux/aer.h> + + +#include "mpt3sas_base.h" + +static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; + + +#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ + + /* maximum controller queue depth */ +#define MAX_HBA_QUEUE_DEPTH 30000 +#define MAX_CHAIN_DEPTH 100000 +static int max_queue_depth = -1; +module_param(max_queue_depth, int, 0444); +MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); + +static int max_sgl_entries = -1; +module_param(max_sgl_entries, int, 0444); +MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); + +static int msix_disable = -1; +module_param(msix_disable, int, 0444); +MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0444); +MODULE_PARM_DESC(max_msix_vectors, + " max msix vectors"); + +static int irqpoll_weight = -1; +module_param(irqpoll_weight, int, 0444); +MODULE_PARM_DESC(irqpoll_weight, + "irq poll weight (default= one fourth of HBA queue depth)"); + +static int mpt3sas_fwfault_debug; +MODULE_PARM_DESC(mpt3sas_fwfault_debug, + " enable detection of firmware fault and halt firmware - (default=0)"); + +static int perf_mode = -1; +module_param(perf_mode, int, 0444); +MODULE_PARM_DESC(perf_mode, + "Performance mode (only for Aero/Sea Generation), options:\n\t\t" + "0 - balanced: high iops mode is enabled &\n\t\t" + "interrupt coalescing is enabled only on high iops queues,\n\t\t" + "1 - iops: high iops mode is disabled &\n\t\t" + "interrupt coalescing is enabled on all queues,\n\t\t" + "2 - latency: high iops mode is disabled &\n\t\t" + "interrupt coalescing is enabled on all queues with timeout value 0xA,\n" + "\t\tdefault - default perf_mode is 'balanced'" + ); + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. &\n\t\t" + ); + +enum mpt3sas_perf_mode { + MPT_PERF_MODE_DEFAULT = -1, + MPT_PERF_MODE_BALANCED = 0, + MPT_PERF_MODE_IOPS = 1, + MPT_PERF_MODE_LATENCY = 2, +}; + +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, + u32 ioc_state, int timeout); +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc); + +static u32 +_base_readl_ext_retry(const volatile void __iomem *addr); + +/** + * mpt3sas_base_check_cmd_timeout - Function + * to check timeout and command termination due + * to Host reset. + * + * @ioc: per adapter object. + * @status: Status of issued command. + * @mpi_request:mf request pointer. + * @sz: size of buffer. 
+ *
+ * Return: 1 if the command timed out and a host reset should be issued;
+ * 0 if the command was terminated by a host reset.
+ */
+u8
+mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
+ u8 status, void *mpi_request, int sz)
+{
+ u8 issue_reset = 0;
+
+ if (!(status & MPT3_CMD_RESET))
+ issue_reset = 1;
+
+ ioc_err(ioc, "Command %s\n",
+ issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
+ _debug_dump_mf(mpi_request, sz);
+
+ return issue_reset;
+}
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: value written to the module parameter
+ * @kp: kernel_param structure backing mpt3sas_fwfault_debug
+ *
+ * Return: 0 on success, otherwise the error from param_set_int().
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ /* global ioc spinlock to protect controller list on list operations */
+ pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
+ spin_lock(&gioc_lock);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ spin_unlock(&gioc_lock);
+ return 0;
+}
+module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt3sas_fwfault_debug, 0644);
+
+/**
+ * _base_readl_aero - retry readl up to three times
+ * @addr: MPT Fusion system interface register address
+ *
+ * Retry the readl() up to three times if it returns a zero value
+ * while reading the system interface register.
+ */
+static inline u32
+_base_readl_aero(const volatile void __iomem *addr)
+{
+ u32 i = 0, ret_val;
+
+ do {
+ ret_val = readl(addr);
+ i++;
+ } while (ret_val == 0 && i < 3);
+
+ return ret_val;
+}
+
+static u32
+_base_readl_ext_retry(const volatile void __iomem *addr)
+{
+ u32 i, ret_val;
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+ if (ret_val != 0)
+ break;
+ }
+
+ return ret_val;
+}
+
+static inline u32
+_base_readl(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+
+/**
+ * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
+ * in BAR0 space.
+ *
+ * @ioc: per adapter object
+ * @reply: reply message frame(lower 32bit addr)
+ * @index: System request message index.
+ */
+static void
+_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
+ u32 index)
+{
+ /*
+ * The first 256 bytes of BAR0 are system registers; MPI frames
+ * start at offset 256. A maximum of 32 MPI frames is supported,
+ * so the request region is 32 * 128 = 4K. The clone of the reply
+ * free queue for the mCPU endpoint starts after that region.
+ */
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
+ MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
+
+ writel(reply, reply_free_iomem);
+}
+
+/**
+ * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
+ * to system/BAR0 region.
+ *
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
+ * @src: Pointer to the Source data.
+ * @size: Size of data to be copied.
+ */
+static void
+_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
+{
+ int i;
+ u32 *src_virt_mem = (u32 *)src;
+
+ for (i = 0; i < size/4; i++)
+ writel((u32)src_virt_mem[i],
+ (void __iomem *)dst_iomem + (i * 4));
+}
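Editor's note: a worked example of the BAR0 offset arithmetic above may help. A minimal sketch, assuming the values used in the driver's own comments (cmd_credit = 32, ioc->request_sz = 128, MPI_FRAME_START_OFFSET = 256); the numbers are illustrative only:

	/* Offset of reply-free entry 3 in the BAR0 clone:
	 * 256 (system registers)
	 * + 32 * 128 (request frame region, 4K)
	 * + 3 * sizeof(u32) (fourth 32-bit reply-free entry)
	 */
	u32 offset = 256 + (32 * 128) + (3 * 4); /* == 4364 */

This lands inside the "4352 - 4864 Reply_free pool" range of the memory-map comment in _clone_sg_entries() further down.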
+/**
+ * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
+ *
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
+ * @src: Pointer to the Source data.
+ * @size: Size of data to be copied.
+ */
+static void
+_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
+{
+ int i;
+ u32 *src_virt_mem = (u32 *)(src);
+
+ for (i = 0; i < size/4; i++)
+ writel((u32)src_virt_mem[i],
+ (void __iomem *)dst_iomem + (i * 4));
+}
+
+/**
+ * _base_get_chain - Calculates and returns the virtual chain address
+ * for the provided smid in BAR0 space.
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @sge_chain_count: Scatter gather chain count.
+ *
+ * Return: the chain address.
+ */
+static inline void __iomem*
+_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 sge_chain_count)
+{
+ void __iomem *base_chain, *chain_virt;
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+
+ base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) +
+ REPLY_FREE_POOL_SIZE;
+ chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
+ ioc->request_sz) + (sge_chain_count * ioc->request_sz);
+ return chain_virt;
+}
+
+/**
+ * _base_get_chain_phys - Calculates and returns the physical address
+ * in BAR0 for scatter gather chains, for
+ * the provided smid.
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @sge_chain_count: Scatter gather chain count.
+ *
+ * Return: Physical chain address.
+ */
+static inline phys_addr_t
+_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 sge_chain_count)
+{
+ phys_addr_t base_chain_phys, chain_phys;
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+
+ base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) +
+ REPLY_FREE_POOL_SIZE;
+ chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
+ ioc->request_sz) + (sge_chain_count * ioc->request_sz);
+ return chain_phys;
+}
+
+/**
+ * _base_get_buffer_bar0 - Calculates and returns the BAR0 mapped host
+ * buffer address for the provided smid.
+ * (Each smid gets a 64K region, starting at offset 17024.)
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: Pointer to buffer location in BAR0.
+ */
+
+static void __iomem *
+_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ /* Add an extra 1 to reach the end of the chain region. */
+ void __iomem *chain_end = _base_get_chain(ioc,
+ cmd_credit + 1,
+ ioc->facts.MaxChainDepth);
+ return chain_end + (smid * 64 * 1024);
+}
+
+/**
+ * _base_get_buffer_phys_bar0 - Calculates and returns the BAR0 mapped
+ * host buffer physical address for the provided smid.
+ * (Each smid gets a 64K region, starting at offset 17024.)
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: Physical address of the buffer location in BAR0.
+ */
+static phys_addr_t
+_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
+ cmd_credit + 1,
+ ioc->facts.MaxChainDepth);
+ return chain_end_phys + (smid * 64 * 1024);
+}
+
+/**
+ * _base_get_chain_buffer_dma_to_chain_buffer - Iterates the chain
+ * lookup list and provides the chain_buffer
+ * address for the matching dma address.
+ *
+ * @ioc: per adapter object
+ * @chain_buffer_dma: Chain buffer dma address.
+ *
+ * Return: Pointer to the chain buffer, or NULL on failure.
+ */ +static void * +_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, + dma_addr_t chain_buffer_dma) +{ + u16 index, j; + struct chain_tracker *ct; + + for (index = 0; index < ioc->scsiio_depth; index++) { + for (j = 0; j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[index].chains_per_smid[j]; + if (ct && ct->chain_buffer_dma == chain_buffer_dma) + return ct->chain_buffer; + } + } + ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n"); + return NULL; +} + +/** + * _clone_sg_entries - MPI EP's scsiio and config requests + * are handled here. Base function for + * double buffering, before submitting + * the requests. + * + * @ioc: per adapter object. + * @mpi_request: mf request pointer. + * @smid: system request message index. + */ +static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc, + void *mpi_request, u16 smid) +{ + Mpi2SGESimple32_t *sgel, *sgel_next; + u32 sgl_flags, sge_chain_count = 0; + bool is_write = false; + u16 i = 0; + void __iomem *buffer_iomem; + phys_addr_t buffer_iomem_phys; + void __iomem *buff_ptr; + phys_addr_t buff_ptr_phys; + void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO]; + void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO]; + phys_addr_t dst_addr_phys; + MPI2RequestHeader_t *request_hdr; + struct scsi_cmnd *scmd; + struct scatterlist *sg_scmd = NULL; + int is_scsiio_req = 0; + + request_hdr = (MPI2RequestHeader_t *) mpi_request; + + if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) { + Mpi25SCSIIORequest_t *scsiio_request = + (Mpi25SCSIIORequest_t *)mpi_request; + sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL; + is_scsiio_req = 1; + } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { + Mpi2ConfigRequest_t *config_req = + (Mpi2ConfigRequest_t *)mpi_request; + sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE; + } else + return; + + /* From smid we can get scsi_cmd, once we have sg_scmd, + * we just need to get sg_virt and sg_next to get virtual + * address associated with sgel->Address. + */ + + if (is_scsiio_req) { + /* Get scsi_cmd using smid */ + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (scmd == NULL) { + ioc_err(ioc, "scmd is NULL\n"); + return; + } + + /* Get sg_scmd from scmd provided */ + sg_scmd = scsi_sglist(scmd); + } + + /* + * 0 - 255 System register + * 256 - 4352 MPI Frame. (This is based on maxCredit 32) + * 4352 - 4864 Reply_free pool (512 byte is reserved + * considering maxCredit 32. Reply need extra + * room, for mCPU case kept four times of + * maxCredit). + * 4864 - 17152 SGE chain element. (32cmd * 3 chain of + * 128 byte size = 12288) + * 17152 - x Host buffer mapped with smid. + * (Each smid can have 64K Max IO.) + * BAR0+Last 1K MSIX Addr and Data + * Total size in use 2113664 bytes of 4MB BAR0 + */ + + buffer_iomem = _base_get_buffer_bar0(ioc, smid); + buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid); + + buff_ptr = buffer_iomem; + buff_ptr_phys = buffer_iomem_phys; + WARN_ON(buff_ptr_phys > U32_MAX); + + if (le32_to_cpu(sgel->FlagsLength) & + (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT)) + is_write = true; + + for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) { + + sgl_flags = + (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT); + + switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) { + case MPI2_SGE_FLAGS_CHAIN_ELEMENT: + /* + * Helper function which on passing + * chain_buffer_dma returns chain_buffer. 
Get
+ * the virtual address for sgel->Address.
+ */
+ sgel_next =
+ _base_get_chain_buffer_dma_to_chain_buffer(ioc,
+ le32_to_cpu(sgel->Address));
+ if (sgel_next == NULL)
+ return;
+ /*
+ * This is copying a 128 byte chain
+ * frame (not a host buffer)
+ */
+ dst_chain_addr[sge_chain_count] =
+ _base_get_chain(ioc,
+ smid, sge_chain_count);
+ src_chain_addr[sge_chain_count] =
+ (void *) sgel_next;
+ dst_addr_phys = _base_get_chain_phys(ioc,
+ smid, sge_chain_count);
+ WARN_ON(dst_addr_phys > U32_MAX);
+ sgel->Address =
+ cpu_to_le32(lower_32_bits(dst_addr_phys));
+ sgel = sgel_next;
+ sge_chain_count++;
+ break;
+ case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
+ if (is_write) {
+ if (is_scsiio_req) {
+ _base_clone_to_sys_mem(buff_ptr,
+ sg_virt(sg_scmd),
+ (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff));
+ /*
+ * FIXME: this relies on a zero
+ * PCI mem_offset.
+ */
+ sgel->Address =
+ cpu_to_le32((u32)buff_ptr_phys);
+ } else {
+ _base_clone_to_sys_mem(buff_ptr,
+ ioc->config_vaddr,
+ (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff));
+ sgel->Address =
+ cpu_to_le32((u32)buff_ptr_phys);
+ }
+ }
+ buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff);
+ buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
+ 0x00ffffff);
+ if ((le32_to_cpu(sgel->FlagsLength) &
+ (MPI2_SGE_FLAGS_END_OF_BUFFER
+ << MPI2_SGE_FLAGS_SHIFT)))
+ goto eob_clone_chain;
+ else {
+ /*
+ * Every single element in MPT will have an
+ * associated sg_next. Better to sanity-check
+ * that sg_next is not NULL; it would be a
+ * bug if it were.
+ */
+ if (is_scsiio_req) {
+ sg_scmd = sg_next(sg_scmd);
+ if (sg_scmd)
+ sgel++;
+ else
+ goto eob_clone_chain;
+ }
+ }
+ break;
+ }
+ }
+
+eob_clone_chain:
+ for (i = 0; i < sge_chain_count; i++) {
+ if (is_scsiio_req)
+ _base_clone_to_sys_mem(dst_chain_addr[i],
+ src_chain_addr[i], ioc->request_sz);
+ }
+}
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return:
+ * 0 if controller is removed from pci subsystem.
+ * -1 otherwise.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if (!ioc)
+ return -1;
+
+ pdev = ioc->pdev;
+ if (!pdev)
+ return -1;
+ pci_stop_and_remove_bus_device_locked(pdev);
+ return 0;
+}
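Editor's note: _base_sync_drv_fw_timestamp() below is one instance of the driver's recurring internal-command pattern. A condensed sketch of that handshake follows, for orientation only; it is not a standalone function, error handling and the request payload are elided, and the names come from the code below (timeout is in seconds):

	/* Serialize on the per-command mutex, mark the slot pending,
	 * send the frame, then wait for mpt3sas_base_done() to signal
	 * completion and inspect the resulting status bits.
	 */
	mutex_lock(&ioc->scsih_cmds.mutex);
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	/* ... build the request frame for this smid ... */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, timeout * HZ);
	/* check ioc->scsih_cmds.status for MPT3_CMD_COMPLETE/REPLY_VALID */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);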
+/**
+ * _base_sync_drv_fw_timestamp - Sync the driver/firmware timestamp.
+ * @ioc: Per Adapter Object
+ *
+ * Return: nothing.
+ */
+static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+ ktime_t current_time;
+ u64 TimeStamp = 0;
+ u8 issue_reset = 0;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
+ mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
+ current_time = ktime_get_real();
+ TimeStamp = ktime_to_ms(current_time);
+ mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
+ mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
+ TimeStamp));
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
+ goto issue_host_reset;
+ }
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ dinitprintk(ioc, ioc_info(ioc,
+ "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ }
+issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+out:
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ *
+ * Context: sleep.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
+ ioc->pci_error_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ ioc_err(ioc, "SAS host is non-operational !!!!\n");
+
+ /* It may be possible that EEH recovery can resolve some of
+ * the pci bus failure issues rather than removing the dead
+ * ioc function by considering the controller to be in a
+ * non-operational state. So here priority is given to EEH
+ * recovery. If it doesn't resolve the issue, the mpt3sas
+ * driver will consider the controller non-operational and
+ * remove the dead ioc function.
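+ * (Editor's note: EEH here refers to the PCI Extended Error
+ * Handling recovery mechanism; the non_operational_loop counter
+ * below gives it up to five polling intervals before the
+ * controller is declared dead.)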
+ */ + if (ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + + /* + * Call _scsih_flush_pending_cmds callback so that we flush all + * pending commands back to OS. This call is required to avoid + * deadlock at block layer. Dead IOC will fail to do diag reset, + * and this call is safe since dead ioc will never return any + * command back from HW. + */ + mpt3sas_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + /* + * Set remove_host flag early since kernel thread will + * take some time to execute. + */ + ioc->remove_host = 1; + /*Remove the Dead Host */ + p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n", + __func__); + else + ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n", + __func__); + return; /* don't rearm timer */ + } + + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + timeout /= (FAULT_POLLING_INTERVAL/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + /* do not accept any IOs and disable the interrupts */ + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); + _base_clear_outstanding_commands(ioc); + } + + ioc_info(ioc, "%s: CoreDump loop %d.", + __func__, ioc->ioc_coredump_loop); + + /* Wait until CoreDump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + goto rearm_timer; + } + } + + if (ioc->ioc_coredump_loop) { + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP) + ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + else + ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; + } + ioc->non_operational_loop = 0; + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc_warn(ioc, "%s: hard reset: %s\n", + __func__, rc == 0 ? 
"success" : "failed"); + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) + mpt3sas_print_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + if (rc && (doorbell & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_OPERATIONAL) + return; /* don't rearm timer */ + } + ioc->ioc_coredump_loop = 0; + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + _base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_start_watchdog - start the fault_reset_work_q + * @ioc: per adapter object + * + * Context: sleep. + */ +void +mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + + ioc->timestamp_update_count = 0; + /* initialize fault polling */ + + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q + * @ioc: per adapter object + * + * Context: sleep. + */ +void +mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpt3sas_base_fault_info - verbose translation of firmware FAULT code + * @ioc: per adapter object + * @fault_code: fault code + */ +void +mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state + * @ioc: per adapter object + * @fault_code: fault code + * + * Return: nothing. + */ +void +mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - Wait until coredump + * completes or times out + * @ioc: per adapter object + * @caller: caller function name + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? 
+ ioc->manu_pg11.CoreDumpTOSec :
+ MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+ int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+ timeout);
+
+ if (ioc_state)
+ ioc_err(ioc,
+ "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+ else
+ ioc_info(ioc,
+ "%s: CoreDump completed. (ioc_state=0x%x)\n",
+ caller, ioc_state);
+
+ return ioc_state;
+}
+
+/**
+ * mpt3sas_halt_firmware - halts mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xC0FFEE00
+ * to the doorbell register halts the controller firmware. The
+ * intent is to stop both the driver and the firmware; the end
+ * user can then obtain a ring buffer from the controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ } else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ ioc_err(ioc, "Firmware is halted due to command timeout\n");
+ }
+
+ if (ioc->fwfault_debug == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+ /*
+ * Older firmware versions don't support driver trigger pages,
+ * so skip displaying the 'config invalid type' error message.
+ */ + if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { + Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr; + + if ((rqst->ExtPageType == + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) && + !(ioc->logging_level & MPT_DEBUG_CONFIG)) { + return; + } + } + + switch (ioc_status) { + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case MPI2_IOCSTATUS_BUSY: + desc = "busy"; + break; + case MPI2_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case MPI2_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case MPI2_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; + case MPI2_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case MPI2_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + + case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE: + desc = "config invalid page"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config can't commit"; + break; + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc = "eedp app tag error"; + break; + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + + case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case MPI2_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target 
no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
+ ioc->sge_size;
+ func_str = "nvme_encapsulated";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware async events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ */
+static void
+_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ if (!ioc->hide_ir_msg)
+ desc = "IR Operation
Status"; + break; + case MPI2_EVENT_SAS_DISCOVERY: + { + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; + ioc_info(ioc, "Discovery: (%s)", + event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_cont(" discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_cont("\n"); + return; + } + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case MPI2_EVENT_IR_VOLUME: + if (!ioc->hide_ir_msg) + desc = "IR Volume"; + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + if (!ioc->hide_ir_msg) + desc = "IR Physical Disk"; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + if (!ioc->hide_ir_msg) + desc = "IR Configuration Change List"; + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + if (!ioc->hide_ir_msg) + desc = "Log Entry Added"; + break; + case MPI2_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + desc = "Cable Event"; + break; + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + desc = "PCIE Device Status Change"; + break; + case MPI2_EVENT_PCIE_ENUMERATION: + { + Mpi26EventDataPCIeEnumeration_t *event_data = + (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData; + ioc_info(ioc, "PCIE Enumeration: (%s)", + event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ? 
+ "start" : "stop"); + if (event_data->EnumerationStatus) + pr_cont("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + pr_cont("\n"); + return; + } + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + desc = "PCIE Topology Change List"; + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s\n", desc); +} + +/** + * _base_sas_log_info - verbose translation of firmware log info + * @ioc: per adapter object + * @log_info: log info + */ +static void +_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3 /*SAS*/) + return; + + /* each nexus loss loginfo */ + if (log_info == 0x31170000) + return; + + /* eat the loginfos associated with task aborts */ + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if (!ioc->hide_ir_msg) + originator_str = "IR"; + else + originator_str = "WarpDrive"; + break; + } + + ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", + log_info, + originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode); +} + +/** + * _base_display_reply_info - handle reply descriptors depending on IOC Status + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame (lower 32bit addr) + */ +static void +_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + if ((ioc_status & MPI2_IOCSTATUS_MASK) && + (ioc->logging_level & MPT_DEBUG_REPLY)) { + _base_sas_ioc_info(ioc, mpi_reply, + mpt3sas_base_get_msg_frame(ioc, smid)); + } + + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + _base_sas_log_info(ioc, loginfo); + } + + if (ioc_status || loginfo) { + ioc_status &= MPI2_IOCSTATUS_MASK; + mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); + } +} + +/** + * mpt3sas_base_done - base internal command completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return: + * 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */
+u8
+mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
+
+ if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware async events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return:
+ * 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+ struct _event_ack_list *delayed_event_ack;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return 1;
+
+ _base_display_event_data(ioc, mpi_reply);
+
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
+ GFP_ATOMIC);
+ if (!delayed_event_ack)
+ goto out;
+ INIT_LIST_HEAD(&delayed_event_ack->list);
+ delayed_event_ack->Event = mpi_reply->Event;
+ delayed_event_ack->EventContext = mpi_reply->EventContext;
+ list_add_tail(&delayed_event_ack->list,
+ &ioc->delayed_event_ack_list);
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+ le16_to_cpu(mpi_reply->Event)));
+ goto out;
+ }
+
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ ioc->put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt3sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt3sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return 1;
+}
+
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: callback index.
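+ * (0xFF is returned when the smid cannot be mapped to an owner.)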
+ */ +static u8 +_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + int i; + u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + u8 cb_idx = 0xFF; + + if (smid < ioc->hi_priority_smid) { + struct scsiio_tracker *st; + + if (smid < ctl_smid) { + st = _get_st_from_smid(ioc, smid); + if (st) + cb_idx = st->cb_idx; + } else if (smid == ctl_smid) + cb_idx = ioc->ctl_cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } + return cb_idx; +} + +/** + * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues + * when driver is flushing out the IOs. + * @ioc: per adapter object + * + * Pause polling on the mq poll (io uring) queues when driver is flushing + * out the IOs. Otherwise we may see the race condition of completing the same + * IO from two paths. + * + * Returns nothing. + */ +void +mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1); + + /* + * wait for current poll to complete. + */ + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) { + cpu_relax(); + udelay(500); + } + } +} + +/** + * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. + * @ioc: per adapter object + * + * Returns nothing. + */ +void +mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0); +} + +/** + * mpt3sas_base_mask_interrupts - disable interrupts + * @ioc: per adapter object + * + * Disabling ResetIRQ, Reply and Doorbell Interrupts + */ +void +mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); + him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->base_readl(&ioc->chip->HostInterruptMask); +} + +/** + * mpt3sas_base_unmask_interrupts - enable interrupts + * @ioc: per adapter object + * + * Enabling only Reply Interrupts + */ +void +mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); + him_register &= ~MPI2_HIM_RIM; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +static u32 base_mod64(u64 dividend, u32 divisor) +{ + u32 remainder; + + if (!divisor) + pr_err("mpt3sas: DIVISOR is zero, in div fn\n"); + remainder = do_div(dividend, divisor); + return remainder; +} + +/** + * _base_process_reply_queue - Process reply descriptors from reply + * descriptor post queue. + * @reply_q: per IRQ's reply queue object. + * + * Return: number of reply descriptors processed from reply + * descriptor queue. 
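+ *
+ * Note: processing is capped at ioc->thresh_hold descriptors per
+ * invocation; once the cap is reached the remainder is handed off
+ * to irq_poll (see the threshold handling below).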
+ */ +static int +_base_process_reply_queue(struct adapter_reply_queue *reply_q) +{ + union reply_descriptor rd; + u64 completed_cmds; + u8 request_descript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + Mpi2ReplyDescriptorsUnion_t *rpf; + u8 rc; + + completed_cmds = 0; + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return completed_cmds; + + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_descript_type = rpf->Default.ReplyFlags + & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + cb_idx = 0xFF; + do { + rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_descript_type == + MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS || + request_descript_type == + MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, 0); + if (rc) + mpt3sas_base_free_smid(ioc, smid); + } + } else if (request_descript_type == + MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = le32_to_cpu( + rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address || + reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, reply); + if (reply) + _base_display_reply_info(ioc, + smid, msix_index, reply); + if (rc) + mpt3sas_base_free_smid(ioc, + smid); + } + } else { + _base_async_event(ioc, msix_index, reply); + } + + /* reply free queue handling */ + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + if (ioc->is_mcpu_endpoint) + _base_clone_reply_to_sys_mem(ioc, + reply, + ioc->reply_free_host_index); + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 0 : + reply_q->reply_post_host_index + 1; + request_descript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index]. + Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + completed_cmds++; + /* Update the reply post host index after continuously + * processing the threshold number of Reply Descriptors. + * So that FW can find enough entries to post the Reply + * Descriptors in the reply descriptor post queue. 
+ */ + if (completed_cmds >= ioc->thresh_hold) { + if (ioc->combined_reply_queue) { + writel(reply_q->reply_post_host_index | + ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + } else { + writel(reply_q->reply_post_host_index | + (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + } + if (!reply_q->is_iouring_poll_q && + !reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = true; + irq_poll_sched(&reply_q->irqpoll); + } + atomic_dec(&reply_q->busy); + return completed_cmds; + } + if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) + goto out; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; + else + rpf++; + } while (1); + + out: + + if (!completed_cmds) { + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + if (ioc->is_warpdrive) { + writel(reply_q->reply_post_host_index, + ioc->reply_post_host_index[msix_index]); + atomic_dec(&reply_q->busy); + return completed_cmds; + } + + /* Update Reply Post Host Index. + * For those HBA's which support combined reply queue feature + * 1. Get the correct Supplemental Reply Post Host Index Register. + * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host + * Index Register address bank i.e replyPostRegisterIndex[], + * 2. Then update this register with new reply host index value + * in ReplyPostIndex field and the MSIxIndex field with + * msix_index value reduced to a value between 0 and 7, + * using a modulo 8 operation. Since each Supplemental Reply Post + * Host Index Register supports 8 MSI-X vectors. + * + * For other HBA's just update the Reply Post Host Index register with + * new reply host index value in ReplyPostIndex Field and msix_index + * value in MSIxIndex field. + */ + if (ioc->combined_reply_queue) + writel(reply_q->reply_post_host_index | ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + else + writel(reply_q->reply_post_host_index | (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + atomic_dec(&reply_q->busy); + return completed_cmds; +} + +/** + * mpt3sas_blk_mq_poll - poll the blk mq poll queue + * @shost: Scsi_Host object + * @queue_num: hw ctx queue number + * + * Return number of entries that has been processed from poll queue. + */ +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) + return 0; + + reply_q = ioc->io_uring_poll_queues[qid].reply_q; + + num_entries = _base_process_reply_queue(reply_q); + atomic_dec(&ioc->io_uring_poll_queues[qid].busy); + + return num_entries; +} + +/** + * _base_interrupt - MPT adapter (IOC) specific interrupt handler. + * @irq: irq number (not used) + * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure + * + * Return: IRQ_HANDLED if processed, else IRQ_NONE. + */ +static irqreturn_t +_base_interrupt(int irq, void *bus_id) +{ + struct adapter_reply_queue *reply_q = bus_id; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + if (reply_q->irq_poll_scheduled) + return IRQ_HANDLED; + return ((_base_process_reply_queue(reply_q) > 0) ? 
IRQ_HANDLED : IRQ_NONE); +} + +/** + * _base_irqpoll - IRQ poll callback handler + * @irqpoll: irq_poll object + * @budget: irq poll weight + * + * Return: number of reply descriptors processed + */ +static int +_base_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct adapter_reply_queue *reply_q; + int num_entries = 0; + + reply_q = container_of(irqpoll, struct adapter_reply_queue, + irqpoll); + if (reply_q->irq_line_enable) { + disable_irq_nosync(reply_q->os_irq); + reply_q->irq_line_enable = false; + } + num_entries = _base_process_reply_queue(reply_q); + if (num_entries < budget) { + irq_poll_complete(irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + /* + * Go for one more round of processing the + * reply descriptor post queue in case the HBA + * Firmware has posted some reply descriptors + * while reenabling the IRQ. + */ + _base_process_reply_queue(reply_q); + } + + return num_entries; +} + +/** + * _base_init_irqpolls - initialize IRQ polls + * @ioc: per adapter object + * + * Return: nothing + */ +static void +_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_iouring_poll_q) + continue; + irq_poll_init(&reply_q->irqpoll, + ioc->hba_queue_depth/4, _base_irqpoll); + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + reply_q->os_irq = pci_irq_vector(ioc->pdev, + reply_q->msix_index); + } +} + +/** + * _base_is_controller_msix_enabled - check whether the controller supports multi-reply queues + * @ioc: per adapter object + * + * Return: Whether or not MSI/X is enabled. + */ +static inline int +_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +/** + * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts + * @ioc: per adapter object + * @poll: poll over reply descriptor pools in case the interrupt for a + * timed-out SCSI command got delayed + * Context: non-ISR context + * + * Called when a Task Management request has completed. + */ +void +mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) +{ + struct adapter_reply_queue *reply_q; + + /* If MSIX capability is turned off + * then multi-queues are not enabled + */ + if (!_base_is_controller_msix_enabled(ioc)) + return; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) + return; + /* TMs are on msix_index == 0 */ + if (reply_q->msix_index == 0) + continue; + + if (reply_q->is_iouring_poll_q) { + _base_process_reply_queue(reply_q); + continue; + } + + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); + if (reply_q->irq_poll_scheduled) { + /* Calling irq_poll_disable will wait for any pending + * callbacks to have completed.
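+ * (irq_poll_disable() blocks until a concurrently running + * _base_irqpoll() callback has finished, so the scheduled/enable flags + * checked below cannot change underneath us; irq_poll_enable() then + * re-arms the poll object for future interrupts.)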
+ */ + irq_poll_disable(&reply_q->irqpoll); + irq_poll_enable(&reply_q->irqpoll); + /* check how the scheduled poll has ended, + * clean up only if necessary + */ + if (reply_q->irq_poll_scheduled) { + reply_q->irq_poll_scheduled = false; + reply_q->irq_line_enable = true; + enable_irq(reply_q->os_irq); + } + } + + if (poll) + _base_process_reply_queue(reply_q); + } +} + +/** + * mpt3sas_base_release_callback_handler - clear interrupt callback handler + * @cb_idx: callback index + */ +void +mpt3sas_base_release_callback_handler(u8 cb_idx) +{ + mpt_callbacks[cb_idx] = NULL; +} + +/** + * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler + * @cb_func: callback function + * + * Return: Index of @cb_func. + */ +u8 +mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) +{ + u8 cb_idx; + + for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) + if (mpt_callbacks[cb_idx] == NULL) + break; + + mpt_callbacks[cb_idx] = cb_func; + return cb_idx; +} + +/** + * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler + */ +void +mpt3sas_base_initialize_callback_handler(void) +{ + u8 cb_idx; + + for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) + mpt3sas_base_release_callback_handler(cb_idx); +} + + +/** + * _base_build_zero_len_sge - build zero length sg entry + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + */ +static void +_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | + MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << + MPI2_SGE_FLAGS_SHIFT); + ioc->base_add_sg_single(paddr, flags_length, -1); +} + +/** + * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple32_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + + +/** + * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple64_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_get_chain_buffer_tracker - obtain chain tracker + * @ioc: per adapter object + * @scmd: SCSI commands of the IO request + * + * Return: chain tracker from chain_lookup table using key as + * smid and smid's chain_offset. 
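+ * + * For illustration: a request on smid 5 that has already consumed two + * chain buffers has chain_lookup[4].chain_offset == 2, so the tracker + * returned is chain_lookup[4].chains_per_smid[2]; once chain_offset + * reaches chains_needed_per_io, NULL is returned instead.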
*/ +static struct chain_tracker * +_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct chain_tracker *chain_req; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + u16 smid = st->smid; + u8 chain_offset = + atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); + + if (chain_offset == ioc->chains_needed_per_io) + return NULL; + + chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; + atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); + return chain_req; +} + + +/** + * _base_build_sg - build generic sg + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + */ +static void +_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size; + + /* READ sgel last */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +/* IEEE format sgls */ + +/** + * _base_build_nvme_prp - This function is called for NVMe end devices to build + * a native SGL (NVMe PRP). + * @ioc: per adapter object + * @smid: system request message index for getting associated SGL + * @nvme_encap_request: the NVMe request msg frame pointer + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * The native SGL is built starting in the first PRP + * entry of the NVMe message (PRP1). If the data buffer is small enough to be + * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is + * used to describe a larger data buffer. If the data buffer is too large to + * describe using the two PRP entries inside the NVMe message, then PRP1 + * describes the first data memory segment, and PRP2 contains a pointer to a PRP + * list located elsewhere in memory to describe the remaining data memory + * segments. The PRP list will be contiguous. + * + * The native SGL for NVMe devices is a Physical Region Page (PRP).
A PRP + * consists of a list of PRP entries to describe a number of noncontiguous + * physical memory segments as a single memory buffer, just as an SGL does. Note, + * however, that this function is only used by the IOCTL call, so the memory + * given will be guaranteed to be contiguous. There is no need to translate a + * non-contiguous SGL into a PRP in this case. All PRPs will describe + * contiguous space that is one page size each. + * + * Each NVMe message contains two PRP entries. The first (PRP1) either contains + * a PRP list pointer or a PRP element, depending upon the command. PRP2 + * contains the second PRP element if the memory being described fits within 2 + * PRP entries, or a PRP list pointer if the PRP spans more than two entries. + * + * A PRP list pointer contains the address of a PRP list, structured as a linear + * array of PRP entries. Each PRP entry in this list describes a segment of + * physical memory. + * + * Each 64-bit PRP entry comprises an address and an offset field. The address + * always points at the beginning of a 4KB physical memory page, and the offset + * describes where within that 4KB page the memory segment begins. Only the + * first element in a PRP list may contain a non-zero offset, implying that all + * memory segments following the first begin at the start of a 4KB page. + * + * Each PRP element normally describes 4KB of physical memory, with exceptions + * for the first and last elements in the list. If the memory being described + * by the list begins at a non-zero offset within the first 4KB page, then the + * first PRP element will contain a non-zero offset indicating where the region + * begins within the 4KB page. The last memory segment may end before the end + * of the 4KB segment, depending upon the overall size of the memory being + * described by the PRP list. + * + * Since PRP entries lack any indication of size, the overall data buffer length + * is used to determine where the end of the data memory buffer is located, and + * how many PRP entries are required to describe it. + */ +static void +_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, + Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + int prp_size = NVME_PRP_SIZE; + __le64 *prp_entry, *prp1_entry, *prp2_entry; + __le64 *prp_page; + dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; + u32 offset, entry_len; + u32 page_mask_result, page_mask; + size_t length; + struct mpt3sas_nvme_cmd *nvme_cmd = + (void *)nvme_encap_request->NVMe_Command; + + /* + * Not all commands require a data transfer. If no data, just return + * without constructing any PRP. + */ + if (!data_in_sz && !data_out_sz) + return; + prp1_entry = &nvme_cmd->prp1; + prp2_entry = &nvme_cmd->prp2; + prp_entry = prp1_entry; + /* + * For the PRP entries, use the specially allocated buffer of + * contiguous memory. + */ + prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid); + prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + + /* + * Check if we are within 1 entry of a page boundary; we don't + * want our first entry to be a PRP List entry. + */ + page_mask = ioc->page_size - 1; + page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; + if (!page_mask_result) { + /* Bump up to next page boundary.
*/ + prp_page = (__le64 *)((u8 *)prp_page + prp_size); + prp_page_dma = prp_page_dma + prp_size; + } + + /* + * Set PRP physical pointer, which initially points to the current PRP + * DMA memory page. + */ + prp_entry_dma = prp_page_dma; + + /* Get physical address and length of the data buffer. */ + if (data_in_sz) { + dma_addr = data_in_dma; + length = data_in_sz; + } else { + dma_addr = data_out_dma; + length = data_out_sz; + } + + /* Loop while the length is not zero. */ + while (length) { + /* + * Check if we need to put a list pointer here if we are at + * page boundary - prp_size (8 bytes). + */ + page_mask_result = (prp_entry_dma + prp_size) & page_mask; + if (!page_mask_result) { + /* + * This is the last entry in a PRP List, so we need to + * put a PRP list pointer here. What this does is: + * - bump the current memory pointer to the next + * address, which will be the next full page. + * - set the PRP Entry to point to that page. This + * is now the PRP List pointer. + * - bump the PRP Entry pointer the start of the + * next page. Since all of this PRP memory is + * contiguous, no need to get a new page - it's + * just the next address. + */ + prp_entry_dma++; + *prp_entry = cpu_to_le64(prp_entry_dma); + prp_entry++; + } + + /* Need to handle if entry will be part of a page. */ + offset = dma_addr & page_mask; + entry_len = ioc->page_size - offset; + + if (prp_entry == prp1_entry) { + /* + * Must fill in the first PRP pointer (PRP1) before + * moving on. + */ + *prp1_entry = cpu_to_le64(dma_addr); + + /* + * Now point to the second PRP entry within the + * command (PRP2). + */ + prp_entry = prp2_entry; + } else if (prp_entry == prp2_entry) { + /* + * Should the PRP2 entry be a PRP List pointer or just + * a regular PRP pointer? If there is more than one + * more page of data, must use a PRP List pointer. + */ + if (length > ioc->page_size) { + /* + * PRP2 will contain a PRP List pointer because + * more PRP's are needed with this command. The + * list will start at the beginning of the + * contiguous buffer. + */ + *prp2_entry = cpu_to_le64(prp_entry_dma); + + /* + * The next PRP Entry will be the start of the + * first PRP List. + */ + prp_entry = prp_page; + } else { + /* + * After this, the PRP Entries are complete. + * This command uses 2 PRP's and no PRP list. + */ + *prp2_entry = cpu_to_le64(dma_addr); + } + } else { + /* + * Put entry in list and bump the addresses. + * + * After PRP1 and PRP2 are filled in, this will fill in + * all remaining PRP entries in a PRP List, one per + * each time through the loop. + */ + *prp_entry = cpu_to_le64(dma_addr); + prp_entry++; + prp_entry_dma++; + } + + /* + * Bump the phys address of the command's data buffer by the + * entry_len. + */ + dma_addr += entry_len; + + /* Decrement length accounting for last partial page. */ + if (entry_len > length) + length = 0; + else + length -= entry_len; + } +} + +/** + * base_make_prp_nvme - Prepare PRPs (Physical Region Page) - + * SGLs specific to NVMe drives only + * + * @ioc: per adapter object + * @scmd: SCSI command from the mid-layer + * @mpi_request: mpi request + * @smid: msg Index + * @sge_count: scatter gather element count. 
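+ * + * Worked example (illustrative numbers, assuming a 4 KiB NVMe page): a + * 12 KiB buffer at DMA address 0x10000200 yields PRP1 = 0x10000200 with + * first_prp_len = 0xE00, followed by the PRP entries 0x10001000, + * 0x10002000 and 0x10003000 built in the chain buffer, the last + * covering only the remaining 0x200 bytes.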
+ * + * Return: nothing; the PRP SGL is built in place in @mpi_request. + */ +static void +base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, + Mpi25SCSIIORequest_t *mpi_request, + u16 smid, int sge_count) +{ + int sge_len, num_prp_in_chain = 0; + Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl; + __le64 *curr_buff; + dma_addr_t msg_dma, sge_addr, offset; + u32 page_mask, page_mask_result; + struct scatterlist *sg_scmd; + u32 first_prp_len; + int data_len = scsi_bufflen(scmd); + u32 nvme_pg_size; + + nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE); + /* + * NVMe has a very convoluted PRP format. One PRP is required + * for each page or partial page. The driver needs to split up OS + * sg_list entries if they are longer than one page or cross a page + * boundary. The driver also has to insert a PRP list pointer entry as + * the last entry in each physical page of the PRP list. + * + * NOTE: The first PRP "entry" is actually placed in the first + * SGL entry in the main message as IEEE 64 format. The 2nd + * entry in the main message is the chain element, and the rest + * of the PRP entries are built in the contiguous pcie buffer. + */ + page_mask = nvme_pg_size - 1; + + /* + * Native SGL is needed. + * Put a chain element in main message frame that points to the first + * chain buffer. + * + * NOTE: The ChainOffset field must be 0 when using a chain pointer to + * a native SGL. + */ + + /* Set main message chain element pointer */ + main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; + /* + * For NVMe the chain element needs to be the 2nd SG entry in the main + * message. + */ + main_chain_element = (Mpi25IeeeSgeChain64_t *) + ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); + + /* + * For the PRP entries, use the specially allocated buffer of + * contiguous memory. Normal chain buffers can't be used + * because each chain buffer would need to be the size of an OS + * page (4k).
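+ * (This per-smid buffer is the one returned by + * mpt3sas_base_get_pcie_sgl() and mpt3sas_base_get_pcie_sgl_dma(), + * fetched immediately below.)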
*/ + curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid); + msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid); + + main_chain_element->Address = cpu_to_le64(msg_dma); + main_chain_element->NextChainOffset = 0; + main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; + + /* Build the first PRP; the SGE need not be page aligned */ + ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; + sg_scmd = scsi_sglist(scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + + offset = sge_addr & page_mask; + first_prp_len = nvme_pg_size - offset; + + ptr_first_sgl->Address = cpu_to_le64(sge_addr); + ptr_first_sgl->Length = cpu_to_le32(first_prp_len); + + data_len -= first_prp_len; + + if (sge_len > first_prp_len) { + sge_addr += first_prp_len; + sge_len -= first_prp_len; + } else if (data_len && (sge_len == first_prp_len)) { + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + for (;;) { + offset = sge_addr & page_mask; + + /* Put a PRP list pointer entry at the page boundary */ + page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask; + if (unlikely(!page_mask_result)) { + scmd_printk(KERN_NOTICE, + scmd, "page boundary curr_buff: 0x%p\n", + curr_buff); + msg_dma += 8; + *curr_buff = cpu_to_le64(msg_dma); + curr_buff++; + num_prp_in_chain++; + } + + *curr_buff = cpu_to_le64(sge_addr); + curr_buff++; + msg_dma += 8; + num_prp_in_chain++; + + sge_addr += nvme_pg_size; + sge_len -= nvme_pg_size; + data_len -= nvme_pg_size; + + if (data_len <= 0) + break; + + if (sge_len > 0) + continue; + + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + main_chain_element->Length = + cpu_to_le32(num_prp_in_chain * sizeof(u64)); + return; +} + +static bool +base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) +{ + u32 data_length = 0; + bool build_prp = true; + + data_length = scsi_bufflen(scmd); + if (pcie_device && + (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { + build_prp = false; + return build_prp; + } + + /* If the data length is <= 16K and the number of SGEs is <= 2, + * we build an IEEE SGL instead. + */ + if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2)) + build_prp = false; + + return build_prp; +} + +/** + * _base_check_pcie_native_sgl - This function is called for PCIe end devices to + * determine if the driver needs to build a native SGL. If so, that native + * SGL is built in the special contiguous buffers allocated especially for + * PCIe SGL creation. If the driver will not build a native SGL, return + * 1 and a normal IEEE SGL will be built. Currently this routine + * supports NVMe. + * @ioc: per adapter object + * @mpi_request: mf request pointer + * @smid: system request message index + * @scmd: scsi command + * @pcie_device: points to the PCIe device's info + * + * Return: 0 if native SGL was built, 1 if no SGL was built + */ +static int +_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, + Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, + struct _pcie_device *pcie_device) +{ + int sges_left; + + /* Get the SG list pointer and info. */ + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return 1; + + /* Check if we need to build a native SG list. */ + if (!base_is_prp_possible(ioc, pcie_device, + scmd, sges_left)) { + /* A native SG list is not possible for this I/O; unmap and + * let the caller build a normal IEEE SGL instead.
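+ * (base_is_prp_possible() above refuses PRP for pcie devices that + * talk SCSI, and for transfers small enough - at most 16K in no more + * than two SGEs - to fit the IEEE SGL in the main message.)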
*/ + goto out; + } + + /* + * Build native NVMe PRP. + */ + base_make_prp_nvme(ioc, scmd, mpi_request, + smid, sges_left); + + return 0; +out: + scsi_dma_unmap(scmd); + return 1; +} + +/** + * _base_add_sg_single_ieee - add sg element for IEEE format + * @paddr: virtual address for SGE + * @flags: SGE flags + * @chain_offset: number of 128 byte elements from start of segment + * @length: data transfer length + * @dma_addr: Physical address + */ +static void +_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, + dma_addr_t dma_addr) +{ + Mpi25IeeeSgeChain64_t *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + */ +static void +_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +/** + * _base_build_sg_scmd - main sg creation routine + * pcie_device is unused here! + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * @unused: unused pcie_device pointer + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. + * + * Return: 0 success, anything else error + */ +static int +_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused) +{ + Mpi2SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + u32 chain_flags; + int sges_left; + u32 sges_in_segment; + u32 sgl_flags; + u32 sgl_flags_last_element; + u32 sgl_flags_end_buffer; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; + if (scmd->sc_data_direction == DMA_TO_DEVICE) + sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; + sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + + sg_local = &mpi_request->SGL; + sges_in_segment = ioc->max_sges_in_main_message; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + + (sges_in_segment * ioc->sge_size))/4; + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += 
ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + /* initializing the chain flags and pointers */ + chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? + 0 : (sges_in_segment * ioc->sge_size)/4; + chain_length = sges_in_segment * ioc->sge_size; + if (chain_offset) { + chain_offset = chain_offset << + MPI2_SGE_CHAIN_OFFSET_SHIFT; + chain_length += ioc->sge_size; + } + ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | + chain_length, chain_dma); + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left) { + if (sges_left == 1) + ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be + * constructed on need. + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. + * + * Return: 0 success, anything else error + */ +static int +_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device) +{ + Mpi25SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + /* Check if we need to build a native SG list. */ + if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request, + smid, scmd, pcie_device) == 0)) { + /* We built a native SG list, just return. 
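+ * (_base_check_pcie_native_sgl() returned 0 here, meaning the NVMe + * PRP built in the per-smid PCIe buffer already describes the whole + * transfer, so no IEEE SGL is needed.)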
*/ + return 0; + } + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) + return -ENOMEM; + + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + + (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment > 1) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + /* initializing the pointers */ + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? + 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + _base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, chain_dma); + + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, scmd); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left > 0) { + if (sges_left == 1) + _base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_ieee - build generic sg for IEEE format + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + */ +static void +_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge_ieee(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size_ieee; + + /* READ sgel last */ + sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + 
_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } +} + +#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) + +/** + * _base_config_dma_addressing - set dma addressing + * @ioc: per adapter object + * @pdev: PCI device struct + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) +{ + struct sysinfo s; + u64 coherent_dma_mask, dma_mask; + + if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { + ioc->dma_mask = 32; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); + /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ + } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { + ioc->dma_mask = 63; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(63); + } else { + ioc->dma_mask = 64; + coherent_dma_mask = dma_mask = DMA_BIT_MASK(64); + } + + if (ioc->use_32bit_dma) + coherent_dma_mask = DMA_BIT_MASK(32); + + if (dma_set_mask(&pdev->dev, dma_mask) || + dma_set_coherent_mask(&pdev->dev, coherent_dma_mask)) + return -ENODEV; + + if (ioc->dma_mask > 32) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + } else { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + } + + si_meminfo(&s); + ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->dma_mask, convert_to_kb(s.totalram)); + + return 0; +} + +/** + * _base_check_enable_msix - check MSI-X capability + * @ioc: per adapter object + * + * Check to see if the card is capable of MSI-X, and set the number + * of available msix vectors + */ +static int +_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int base; + u16 message_control; + + /* Check whether the controller is a SAS2008 B0 controller; + * if it is, use IO-APIC instead of MSIX + */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { + return -EINVAL; + } + + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); + if (!base) { + dfailprintk(ioc, ioc_info(ioc, "msix not supported\n")); + return -EINVAL; + } + + /* get msix vector count */ + /* NUMA_IO not supported for older controllers */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) + ioc->msix_vector_count = 1; + else { + pci_read_config_word(ioc->pdev, base + 2, &message_control); + ioc->msix_vector_count = (message_control & 0x3FF) + 1; + } + dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n", + ioc->msix_vector_count)); + return 0; +} + +/** + * mpt3sas_base_free_irq - free irq + * @ioc: per adapter object + * + * Frees each reply_queue from the list.
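+ * (The affinity hint of each vector is cleared via + * irq_update_affinity_hint(irq, NULL) before free_irq() so that no + * stale hint outlives its reply queue.)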
+ */ +void +mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int irq; + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + if (reply_q->is_iouring_poll_q) { + kfree(reply_q); + continue; + } + + if (ioc->smp_affinity_enable) { + irq = pci_irq_vector(ioc->pdev, reply_q->msix_index); + irq_update_affinity_hint(irq, NULL); + } + free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), + reply_q); + kfree(reply_q); + } +} + +/** + * _base_request_irq - request irq + * @ioc: per adapter object + * @index: msix index into vector table + * + * Inserting respective reply_queue into the list. + */ +static int +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) +{ + struct pci_dev *pdev = ioc->pdev; + struct adapter_reply_queue *reply_q; + int r, qid; + + reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); + if (!reply_q) { + ioc_err(ioc, "unable to allocate memory %zu!\n", + sizeof(struct adapter_reply_queue)); + return -ENOMEM; + } + reply_q->ioc = ioc; + reply_q->msix_index = index; + + atomic_set(&reply_q->busy, 0); + + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", + ioc->driver_name, ioc->id, qid); + reply_q->is_iouring_poll_q = 1; + ioc->io_uring_poll_queues[qid].reply_q = reply_q; + goto out; + } + + + if (ioc->msix_enable) + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", + ioc->driver_name, ioc->id, index); + else + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", + ioc->driver_name, ioc->id); + r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, + IRQF_SHARED, reply_q->name, reply_q); + if (r) { + pr_err("%s: unable to allocate interrupt %d!\n", + reply_q->name, pci_irq_vector(pdev, index)); + kfree(reply_q); + return -EBUSY; + } +out: + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} + +/** + * _base_assign_reply_queues - assigning msix index for each cpu + * @ioc: per adapter object + * + * The enduser would need to set the affinity via /proc/irq/#/smp_affinity + */ +static void +_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int cpu, nr_cpus, nr_msix, index = 0, irq; + struct adapter_reply_queue *reply_q; + int iopoll_q_count = ioc->reply_queue_count - + ioc->iopoll_q_start_index; + const struct cpumask *mask; + + if (!_base_is_controller_msix_enabled(ioc)) + return; + + if (ioc->msix_load_balance) + return; + + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + + nr_cpus = num_online_cpus(); + nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, + ioc->facts.MaxMSIxVectors); + if (!nr_msix) + return; + + if (ioc->smp_affinity_enable) { + + /* + * set irq affinity to local numa node for those irqs + * corresponding to high iops queues. 
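+ * (For example, with the assumed default of eight high iops queues, + * vectors 0-7 are pinned to the CPUs of the HBA's local NUMA node + * while the remaining vectors keep the spread chosen by PCI IRQ + * affinity.)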
+ */ + if (ioc->high_iops_queues) { + mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev)); + for (index = 0; index < ioc->high_iops_queues; + index++) { + irq = pci_irq_vector(ioc->pdev, index); + irq_set_affinity_and_hint(irq, mask); + } + } + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + const cpumask_t *mask; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + + mask = pci_irq_get_affinity(ioc->pdev, + reply_q->msix_index); + if (!mask) { + ioc_warn(ioc, "no affinity for msi %x\n", + reply_q->msix_index); + goto fall_back; + } + + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } + } + return; + } + +fall_back: + cpu = cpumask_first(cpu_online_mask); + nr_msix -= (ioc->high_iops_queues - iopoll_q_count); + index = 0; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + unsigned int i, group = nr_cpus / nr_msix; + + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + + if (cpu >= nr_cpus) + break; + + if (index < nr_cpus % nr_msix) + group++; + + for (i = 0 ; i < group ; i++) { + ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} + +/** + * _base_check_and_enable_high_iops_queues - enable high iops mode + * @ioc: per adapter object + * @hba_msix_vector_count: msix vectors supported by HBA + * + * Enable high iops queues only if + * - HBA is a SEA/AERO controller and + * - MSI-Xs vector supported by the HBA is 128 and + * - total CPU count in the system >=16 and + * - loaded driver with default max_msix_vectors module parameter and + * - system booted in non kdump mode + * + * Return: nothing. + */ +static void +_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, + int hba_msix_vector_count) +{ + u16 lnksta, speed; + + /* + * Disable high iops queues if io uring poll queues are enabled. + */ + if (perf_mode == MPT_PERF_MODE_IOPS || + perf_mode == MPT_PERF_MODE_LATENCY || + ioc->io_uring_poll_queues) { + ioc->high_iops_queues = 0; + return; + } + + if (perf_mode == MPT_PERF_MODE_DEFAULT) { + + pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); + speed = lnksta & PCI_EXP_LNKSTA_CLS; + + if (speed < 0x4) { + ioc->high_iops_queues = 0; + return; + } + } + + if (!reset_devices && ioc->is_aero_ioc && + hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES && + num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES && + max_msix_vectors == -1) + ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; + else + ioc->high_iops_queues = 0; +} + +/** + * mpt3sas_base_disable_msix - disables msix + * @ioc: per adapter object + * + */ +void +mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; + pci_free_irq_vectors(ioc->pdev); + ioc->msix_enable = 0; + kfree(ioc->io_uring_poll_queues); +} + +/** + * _base_alloc_irq_vectors - allocate msix vectors + * @ioc: per adapter object + * + */ +static int +_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) +{ + int i, irq_flags = PCI_IRQ_MSIX; + struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; + struct irq_affinity *descp = &desc; + /* + * Don't allocate msix vectors for poll_queues. + * msix_vectors is always within a range of FW supported reply queue. 
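+ * (Poll queues are serviced by mpt3sas_blk_mq_poll() from process + * context rather than by an interrupt, so only the indexes below + * iopoll_q_start_index need a hardware vector.)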
+ */ + int nr_msix_vectors = ioc->iopoll_q_start_index; + + + if (ioc->smp_affinity_enable) + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; + else + descp = NULL; + + ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, + ioc->reply_queue_count, nr_msix_vectors); + + i = pci_alloc_irq_vectors_affinity(ioc->pdev, + ioc->high_iops_queues, + nr_msix_vectors, irq_flags, descp); + + return i; +} + +/** + * _base_enable_msix - enable msix, fall back to io_apic + * @ioc: per adapter object + * + */ +static int +_base_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int r; + int i, local_max_msix_vectors; + u8 try_msix = 0; + int iopoll_q_count = 0; + + ioc->msix_load_balance = false; + + if (msix_disable == -1 || msix_disable == 0) + try_msix = 1; + + if (!try_msix) + goto try_ioapic; + + if (_base_check_enable_msix(ioc) != 0) + goto try_ioapic; + + ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); + pr_info("\t no of cores: %d, max_msix_vectors: %d\n", + ioc->cpu_count, max_msix_vectors); + + ioc->reply_queue_count = + min_t(int, ioc->cpu_count, ioc->msix_vector_count); + + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + local_max_msix_vectors = (reset_devices) ? 1 : 8; + else + local_max_msix_vectors = max_msix_vectors; + + if (local_max_msix_vectors == 0) + goto try_ioapic; + + /* + * Enable msix_load_balance only if combined reply queue mode is + * disabled on SAS3 & above generation HBA devices. + */ + if (!ioc->combined_reply_queue && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc_info(ioc, + "combined ReplyQueue is off, Enabling msix load balance\n"); + ioc->msix_load_balance = true; + } + + /* + * The smp affinity setting is not needed when msix load balance + * is enabled. + */ + if (ioc->msix_load_balance) + ioc->smp_affinity_enable = 0; + + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + + /* + * Enable io uring poll queues only if host_tagset is enabled. + */ + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + + if (iopoll_q_count) { + ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct io_uring_poll_queue), GFP_KERNEL); + if (!ioc->io_uring_poll_queues) + iopoll_q_count = 0; + } + + if (ioc->is_aero_ioc) + _base_check_and_enable_high_iops_queues(ioc, + ioc->msix_vector_count); + + /* + * Add high iops queues count to reply queue count if high iops queues + * are enabled. + */ + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + ioc->high_iops_queues, + ioc->msix_vector_count); + + /* + * Adjust the reply queue count in case the reply queue count + * exceeds the user-provided MSIx vectors count. + */ + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + /* + * Add io uring poll queues count to reply queues count + * if io uring is enabled in driver. + */ + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) + iopoll_q_count = 0; + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + iopoll_q_count, + ioc->msix_vector_count); + } + + /* + * Starting index of io uring poll queues in reply queue list. + */ + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + + r = _base_alloc_irq_vectors(ioc); + if (r < 0) { + ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); + goto try_ioapic; + } + + /* + * Adjust the reply queue count if the allocated + * MSIx vectors are less than the requested number + * of MSIx vectors.
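+ * (Hypothetical numbers: 16 regular plus 4 poll queues requested but + * only 12 vectors granted leaves reply_queue_count = 16 with + * iopoll_q_start_index = 12.)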
+ */ + if (r < ioc->iopoll_q_start_index) { + ioc->reply_queue_count = r + iopoll_q_count; + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + } + + ioc->msix_enable = 1; + for (i = 0; i < ioc->reply_queue_count; i++) { + r = _base_request_irq(ioc, i); + if (r) { + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + goto try_ioapic; + } + } + + ioc_info(ioc, "High IOPs queues : %s\n", + ioc->high_iops_queues ? "enabled" : "disabled"); + + return 0; + +/* failback to io_apic interrupt routing */ + try_ioapic: + ioc->high_iops_queues = 0; + ioc_info(ioc, "High IOPs queues : disabled\n"); + ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; + r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); + if (r < 0) { + dfailprintk(ioc, + ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n", + r)); + } else + r = _base_request_irq(ioc, 0); + + return r; +} + +/** + * mpt3sas_base_unmap_resources - free controller resources + * @ioc: per adapter object + */ +static void +mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); + + kfree(ioc->replyPostRegisterIndex); + ioc->replyPostRegisterIndex = NULL; + + + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } +} + +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc); + +/** + * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state + * and if it is in fault state then issue diag reset. + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 ioc_state; + int rc = -EFAULT; + + dinitprintk(ioc, pr_info("%s\n", __func__)); + if (ioc->pci_error_recovery) + return 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_mask_interrupts(ioc); + rc = _base_diag_reset(ioc); + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + mpt3sas_base_mask_interrupts(ioc); + rc = _base_diag_reset(ioc); + } + + return rc; +} + +/** + * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0, rc; + u64 pio_chip = 0; + phys_addr_t chip_phys = 0; + struct adapter_reply_queue *reply_q; + int iopoll_q_count = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + ioc_warn(ioc, "pci_enable_device_mem: failed\n"); + ioc->bars = 0; + return -ENODEV; + } + + + if (pci_request_selected_regions(pdev, ioc->bars, + ioc->driver_name)) { + ioc_warn(ioc, "pci_request_selected_regions: failed\n"); + ioc->bars = 0; + r = -ENODEV; + goto out_fail; + } + +/* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + + if (_base_config_dma_addressing(ioc, pdev) != 0) { + ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + + for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && + (!memap_sz || !pio_sz); i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64)pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + } + } + + if (ioc->chip == NULL) { + ioc_err(ioc, + "unable to map adapter memory, or resource not found!\n"); + r = -EINVAL; + goto out_fail; + } + + mpt3sas_base_mask_interrupts(ioc); + + r = _base_get_ioc_facts(ioc); + if (r) { + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_ioc_facts(ioc))) + goto out_fail; + } + + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + + r = _base_enable_msix(ioc); + if (r) + goto out_fail; + + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); + atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); + } + + if (!ioc->is_driver_loading) + _base_init_irqpolls(ioc); + /* Use the Combined reply queue feature only for SAS3 C0 & higher + * revision HBAs and also only when reply queue count is greater than 8 + */ + if (ioc->combined_reply_queue) { + /* Determine the Supplemental Reply Post Host Index Register + * addresses. The Supplemental Reply Post Host Index Registers + * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and + * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET + * bytes from the previous one.
+ */ + ioc->replyPostRegisterIndex = kcalloc( + ioc->combined_reply_index_count, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->replyPostRegisterIndex) { + ioc_err(ioc, + "allocation for replyPostRegisterIndex failed!\n"); + r = -ENOMEM; + goto out_fail; + } + + for (i = 0; i < ioc->combined_reply_index_count; i++) { + ioc->replyPostRegisterIndex[i] = + (resource_size_t __iomem *) + ((u8 __force *)&ioc->chip->Doorbell + + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + + (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); + } + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = (resource_size_t __iomem *) + &ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = + (resource_size_t __iomem *) + ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s: enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + + pr_info("%s: %s enabled: IRQ %d\n", + reply_q->name, + ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", + pci_irq_vector(ioc->pdev, reply_q->msix_index)); + } + + ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", + &chip_phys, ioc->chip, memap_sz); + ioc_info(ioc, "ioport(0x%016llx), size(%d)\n", + (unsigned long long)pio_chip, pio_sz); + + /* Save PCI configuration state for recovery from PCI AER/EEH errors */ + pci_save_state(pdev); + return 0; + + out_fail: + mpt3sas_base_unmap_resources(ioc); + return r; +} + +/** + * mpt3sas_base_get_msg_frame - obtain request mf pointer + * @ioc: per adapter object + * @smid: system request message index(smid zero is invalid) + * + * Return: virt pointer to message frame. + */ +void * +mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +/** + * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: virt pointer to sense buffer. + */ +void * +mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: phys pointer to the low 32bit address of the sense buffer. + */ +__le32 +mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: virt pointer to a PCIe SGL. + */ +void * +mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); +} + +/** + * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Return: phys pointer to the address of the PCIe buffer. 
+ */
+dma_addr_t
+mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
+}
+
+/**
+ * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32 physical addr of the reply
+ *
+ * Converts 32bit lower physical addr into a virt address.
+ */
+void *
+mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
+{
+	if (!phys_addr)
+		return NULL;
+	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * _base_get_msix_index - get the msix index
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Return: msix index of general reply queues,
+ * i.e. the reply queue on which the IO request's reply
+ * should be posted by the HBA firmware.
+ */
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
+{
+	/* Enables reply_queue load balancing */
+	if (ioc->msix_load_balance)
+		return ioc->reply_queue_count ?
+		    base_mod64(atomic64_add_return(1,
+		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
+
+	if (scmd && ioc->shost->nr_hw_queues > 1) {
+		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+		return blk_mq_unique_tag_to_hwq(tag) +
+		    ioc->high_iops_queues;
+	}
+
+	return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * _base_get_high_iops_msix_index - get the msix index of
+ *				high iops queues
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Return: msix index of high iops reply queues,
+ * i.e. the high iops reply queue on which the IO request's
+ * reply should be posted by the HBA firmware.
+ */
+static inline u8
+_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
+{
+	/*
+	 * Round-robin the IO interrupts among the high-iops reply queues
+	 * in batches of MPT3SAS_HIGH_IOPS_BATCH_COUNT (16) when the number
+	 * of IOs outstanding on the target device is >= 8.
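+	 *
+	 * Editor's note, a hypothetical trace rather than driver code:
+	 * with a batch count of 16 and 8 high-iops queues, successive
+	 * values of high_iops_outstanding map to queue indexes
+	 *
+	 *	(counter / 16) % 8  ->  0,0,..,0, 1,1,..,1, ..., 7,7,..,7, 0,..
+	 *
+	 * so each queue receives 16 consecutive completions before the
+	 * index advances.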
+ */ + + if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) + return base_mod64(( + atomic64_add_return(1, &ioc->high_iops_outstanding) / + MPT3SAS_HIGH_IOPS_BATCH_COUNT), + MPT3SAS_HIGH_IOPS_REPLY_QUEUES); + + return _base_get_msix_index(ioc, scmd); +} + +/** + * mpt3sas_base_get_smid - obtain a free smid from internal queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + ioc_err(ioc, "%s: smid not available\n", __func__); + return 0; + } + + request = list_entry(ioc->internal_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue + * @ioc: per adapter object + * @cb_idx: callback index + * @scmd: pointer to scsi command object + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + struct scsiio_tracker *request = scsi_cmd_priv(scmd); + u16 smid; + u32 tag, unique_tag; + + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + tag = blk_mq_unique_tag_to_tag(unique_tag); + + /* + * Store hw queue number corresponding to the tag. + * This hw queue number is used later to determine + * the unique_tag using the logic below. This unique_tag + * is used to retrieve the scmd pointer corresponding + * to tag using scsi_host_find_tag() API. + * + * tag = smid - 1; + * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; + */ + ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); + + smid = tag + 1; + request->cb_idx = cb_idx; + request->smid = smid; + request->scmd = scmd; + INIT_LIST_HEAD(&request->chain_list); + return smid; +} + +/** + * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Return: smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + + request = list_entry(ioc->hpr_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +static void +_base_recovery_check(struct MPT3SAS_ADAPTER *ioc) +{ + /* + * See _wait_for_commands_to_complete() call with regards to this code. 
+	 */
+	if (ioc->shost_recovery && ioc->pending_io_count) {
+		ioc->pending_io_count = scsi_host_busy(ioc->shost);
+		if (ioc->pending_io_count == 0)
+			wake_up(&ioc->reset_wq);
+	}
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+			   struct scsiio_tracker *st)
+{
+	if (WARN_ON(st->smid == 0))
+		return;
+	st->cb_idx = 0xFF;
+	st->direct_io = 0;
+	st->scmd = NULL;
+	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
+	st->smid = 0;
+}
+
+/**
+ * mpt3sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ */
+void
+mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	int i;
+
+	if (smid < ioc->hi_priority_smid) {
+		struct scsiio_tracker *st;
+		void *request;
+
+		st = _get_st_from_smid(ioc, smid);
+		if (!st) {
+			_base_recovery_check(ioc);
+			return;
+		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
+		mpt3sas_base_clear_st(ioc, st);
+		_base_recovery_check(ioc);
+		ioc->io_queue_num[smid - 1] = 0;
+		return;
+	}
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	if (smid < ioc->internal_smid) {
+		/* hi-priority */
+		i = smid - ioc->hi_priority_smid;
+		ioc->hpr_lookup[i].cb_idx = 0xFF;
+		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
+	} else if (smid <= ioc->hba_queue_depth) {
+		/* internal queue */
+		i = smid - ioc->internal_smid;
+		ioc->internal_lookup[i].cb_idx = 0xFF;
+		list_add(&ioc->internal_lookup[i].tracker_list,
+		    &ioc->internal_free_list);
+	}
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_mpi_ep_writeq - 32 bit write to MMIO
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Special handling for the MPI EP to take care of 32-bit
+ * environments, where it is not guaranteed that the entire word
+ * is sent in one transfer.
+ */
+static inline void
+_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
+	spinlock_t *writeq_lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(writeq_lock, flags);
+	__raw_writel((u32)(b), addr);
+	__raw_writel((u32)(b >> 32), (addr + 4));
+	spin_unlock_irqrestore(writeq_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64-bit word to MMIO. This special handling
+ * takes care of 32-bit environments, where it is not guaranteed that the
+ * entire word is sent in one transfer.
+ */
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+	wmb();
+	__raw_writeq(b, addr);
+	barrier();
+}
+#else
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+	_base_mpi_ep_writeq(b, addr, writeq_lock);
+}
+#endif
+
+/**
+ * _base_set_and_get_msix_index - get the msix index and assign it to
+ *	the msix_io variable of the scsi tracker
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: msix index.
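+ *
+ * Editor's note, illustrative only: on a 32-bit build without a native
+ * writeq(), each 64-bit descriptor post in the functions below expands
+ * to two locked 32-bit writes, roughly (register names assumed from the
+ * system interface layout)
+ *
+ *	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ *	__raw_writel(lower_32_bits(b), &ioc->chip->RequestDescriptorPostLow);
+ *	__raw_writel(upper_32_bits(b), &ioc->chip->RequestDescriptorPostHigh);
+ *	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ *
+ * with the lock keeping the two halves of one descriptor together.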
+ */ +static u8 +_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct scsiio_tracker *st = NULL; + + if (smid < ioc->hi_priority_smid) + st = _get_st_from_smid(ioc, smid); + + if (st == NULL) + return _base_get_msix_index(ioc, NULL); + + st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); + return st->msix_io; +} + +/** + * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + void *mpi_req_iomem; + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + _clone_sg_entries(ioc, (void *) mfp, smid); + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_fast_path - send fast path request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + */ +static void +_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.SCSIIO.RequestFlags = + MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_hi_priority - send Task Management request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 + */ +static void +_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi2RequestDescriptorUnion_t descriptor; + void *mpi_req_iomem; + u64 *request; + + if (ioc->is_mcpu_endpoint) { + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + /* TBD 256 is offset within sys register. 
*/ + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + } + + request = (u64 *)&descriptor; + + descriptor.HighPriority.RequestFlags = + MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = msix_task; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + if (ioc->is_mcpu_endpoint) + _base_mpi_ep_writeq(*request, + &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); + else + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to + * firmware + * @ioc: per adapter object + * @smid: system request message index + */ +void +mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.Default.RequestFlags = + MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; + descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_default - Default, primarily used for config pages + * @ioc: per adapter object + * @smid: system request message index + */ +static void +_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + void *mpi_req_iomem; + u64 *request; + + if (ioc->is_mcpu_endpoint) { + __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); + + _clone_sg_entries(ioc, (void *) mfp, smid); + /* TBD 256 is offset within sys register */ + mpi_req_iomem = (void __force *)ioc->chip + + MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); + _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, + ioc->request_sz); + } + request = (u64 *)&descriptor; + descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + if (ioc->is_mcpu_endpoint) + _base_mpi_ep_writeq(*request, + &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); + else + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using + * Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle, unused in this function, for function type match + * + * Return: nothing. 
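+ *
+ * Editor's note, for contrast with the 64-bit descriptor posts above: an
+ * Atomic Request Descriptor is only 32 bits wide, so a single writel()
+ * posts it and no writeq()/locking fallback is needed; a sketch that
+ * mirrors the functions below:
+ *
+ *	Mpi26AtomicRequestDescriptor_t d = { 0 };
+ *	d.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ *	d.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+ *	d.SMID = cpu_to_le16(smid);
+ *	writel(*(u32 *)&d, &ioc->chip->AtomicRequestDescriptorPost);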
+ */ +static void +_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_fast_path_atomic - send fast path request to firmware + * using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle, unused in this function, for function type match + * Return: nothing + */ +static void +_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_hi_priority_atomic - send Task Management request to + * firmware using Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: msix_task will be same as msix of IO in case of task abort else 0 + * + * Return: nothing. + */ +static void +_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi26AtomicRequestDescriptor_t descriptor; + u32 *request = (u32 *)&descriptor; + + descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.MSIxIndex = msix_task; + descriptor.SMID = cpu_to_le16(smid); + + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +/** + * _base_put_smid_default_atomic - Default, primarily used for config pages + * use Atomic Request Descriptor + * @ioc: per adapter object + * @smid: system request message index + * + * Return: nothing. 
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_display_OEMs_branding - Display branding string
+ * @ioc: per adapter object
+ */
+static void
+_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+	switch (ioc->pdev->subsystem_vendor) {
+	case PCI_VENDOR_ID_INTEL:
+		switch (ioc->pdev->device) {
+		case MPI2_MFGPAGE_DEVID_SAS2008:
+			switch (ioc->pdev->subsystem_device) {
+			case MPT2SAS_INTEL_RMS2LL080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS2LL040_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
+				break;
+			case MPT2SAS_INTEL_SSD910_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_SSD910_BRANDING);
+				break;
+			default:
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				    ioc->pdev->subsystem_device);
+				break;
+			}
+			break;
+		case MPI2_MFGPAGE_DEVID_SAS2308_2:
+			switch (ioc->pdev->subsystem_device) {
+			case MPT2SAS_INTEL_RS25GB008_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RS25GB008_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25JB080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25JB040_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25KB080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25KB040_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25LB040_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
+				break;
+			case MPT2SAS_INTEL_RMS25LB080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
+				break;
+			default:
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				    ioc->pdev->subsystem_device);
+				break;
+			}
+			break;
+		case MPI25_MFGPAGE_DEVID_SAS3008:
+			switch (ioc->pdev->subsystem_device) {
+			case MPT3SAS_INTEL_RMS3JC080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT3SAS_INTEL_RMS3JC080_BRANDING);
+				break;
+			case MPT3SAS_INTEL_RS3GC008_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT3SAS_INTEL_RS3GC008_BRANDING);
+				break;
+			case MPT3SAS_INTEL_RS3FC044_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT3SAS_INTEL_RS3FC044_BRANDING);
+				break;
+			case MPT3SAS_INTEL_RS3UC080_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT3SAS_INTEL_RS3UC080_BRANDING);
+				break;
+			default:
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				    ioc->pdev->subsystem_device);
+				break;
+			}
+			break;
+		default:
+			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+			    ioc->pdev->subsystem_device);
+			break;
+		}
+		break;
+	case PCI_VENDOR_ID_DELL:
+		switch (ioc->pdev->device) {
+		case MPI2_MFGPAGE_DEVID_SAS2008:
+			switch (ioc->pdev->subsystem_device) {
+			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+				break;
+			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+				break;
+			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
+				ioc_info(ioc, "%s\n",
+				    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+				break;
+			case
MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_PERC_H200_BRANDING); + break; + case MPT2SAS_DELL_6GBPS_SAS_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_DELL_6GBPS_SAS_BRANDING); + break; + default: + ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_DELL_12G_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_DELL_12G_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case PCI_VENDOR_ID_CISCO: + switch (ioc->pdev->device) { + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_8E_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_8E_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_8I_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_8I_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3108_1: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: + ioc_info(ioc, "%s\n", + MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); + break; + default: + ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPT2SAS_HP_3PAR_SSVID: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); + break; + default: + ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_2_4_INTERNAL_BRANDING); + break; + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: + ioc_info(ioc, "%s\n", + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); + break; + default: + ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n", + ioc->pdev->subsystem_device); + break; + } + break; + default: + break; + } +} + +/** + * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg + * version from FW Image Header. 
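+ * (Editor's note on the version word layout, illustrative: a 32-bit
+ * package version 0xAABBCCDD prints as AA.BB.CC.DD in decimal, e.g.
+ *
+ *	0x10000200  ->  16.00.02.00
+ *
+ * matching the shift/mask arithmetic in the function body.)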
+ * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ + static int +_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2FWImageHeader_t *fw_img_hdr; + Mpi26ComponentImageHeader_t *cmp_img_hdr; + Mpi25FWUploadRequest_t *mpi_request; + Mpi2FWUploadReply_t mpi_reply; + int r = 0, issue_diag_reset = 0; + u32 package_version = 0; + void *fwpkg_data = NULL; + dma_addr_t fwpkg_data_dma; + u16 smid, ioc_status; + size_t data_length; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + data_length = sizeof(Mpi2FWImageHeader_t); + fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &fwpkg_data_dma, GFP_KERNEL); + if (!fwpkg_data) { + ioc_err(ioc, + "Memory allocation for fwpkg data failed at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + r = -EAGAIN; + goto out; + } + + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t)); + mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD; + mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH; + mpi_request->ImageSize = cpu_to_le32(data_length); + ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, + data_length); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for 15 seconds */ + wait_for_completion_timeout(&ioc->base_cmds.done, + FW_IMG_HDR_READ_TIMEOUT*HZ); + ioc_info(ioc, "%s: complete\n", __func__); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi25FWUploadRequest_t)/4); + issue_diag_reset = 1; + } else { + memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t)); + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) { + memcpy(&mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2FWUploadReply_t)); + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) { + cmp_img_hdr = + (Mpi26ComponentImageHeader_t *) + (fwpkg_data); + package_version = + le32_to_cpu( + cmp_img_hdr->ApplicationSpecific); + } else + package_version = + le32_to_cpu( + fw_img_hdr->PackageVersion.Word); + if (package_version) + ioc_info(ioc, + "FW Package Ver(%02d.%02d.%02d.%02d)\n", + ((package_version) & 0xFF000000) >> 24, + ((package_version) & 0x00FF0000) >> 16, + ((package_version) & 0x0000FF00) >> 8, + (package_version) & 0x000000FF); + } else { + _debug_dump_mf(&mpi_reply, + sizeof(Mpi2FWUploadReply_t)/4); + } + } + } + ioc->base_cmds.status = MPT3_CMD_NOT_USED; +out: + if (fwpkg_data) + dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, + fwpkg_data_dma); + if (issue_diag_reset) { + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +/** + * _base_display_ioc_capabilities - Display IOC's capabilities. 
+ * @ioc: per adapter object + */ +static void +_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + char desc[17] = {0}; + u32 iounit_pg1_flags; + u32 bios_version; + + bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + strncpy(desc, ioc->manu_pg0.ChipName, 16); + ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", + desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, + ioc->pdev->revision, + (bios_version & 0xFF000000) >> 24, + (bios_version & 0x00FF0000) >> 16, + (bios_version & 0x0000FF00) >> 8, + bios_version & 0x000000FF); + + _base_display_OEMs_branding(ioc); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { + pr_info("%sNVMe", i ? "," : ""); + i++; + } + + ioc_info(ioc, "Protocol=("); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_cont("Initiator"); + i++; + } + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_cont("%sTarget", i ? "," : ""); + i++; + } + + i = 0; + pr_cont("), Capabilities=("); + + if (!ioc->hide_ir_msg) { + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { + pr_cont("Raid"); + i++; + } + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { + pr_cont("%sTLR", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { + pr_cont("%sMulticast", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_cont("%sBIDI Target", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { + pr_cont("%sEEDP", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { + pr_cont("%sSnapshot Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { + pr_cont("%sDiag Trace Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { + pr_cont("%sDiag Extended Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_cont("%sTask Set Full", i ? "," : ""); + i++; + } + + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_cont("%sNCQ", i ? "," : ""); + i++; + } + + pr_cont(")\n"); +} + +/** + * mpt3sas_base_update_missing_delay - change the missing delay timers + * @ioc: per adapter object + * @device_missing_delay: amount of time till device is reported missing + * @io_missing_delay: interval IO is returned when there is a missing device + * + * Passed on the command line, this function will modify the device missing + * delay, as well as the io missing delay. This should be called at driver + * load time. 
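+ *
+ * Editor's note, an encoding example rather than normative text:
+ * ReportDeviceMissingDelay holds either a raw 0..0x7F second count or,
+ * when MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 is set, a count in units
+ * of 16 seconds, so e.g.
+ *
+ *	requested 300s -> min(300, 0x7F0) / 16 = 18, plus the UNIT_16 flag
+ *
+ * which reads back as 18 * 16 = 288 seconds.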
+ */
+void
+mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+	u16 device_missing_delay, u8 io_missing_delay)
+{
+	u16 dmd, dmd_new, dmd_original;
+	u8 io_missing_delay_original;
+	u16 sz;
+	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi2ConfigReply_t mpi_reply;
+	u8 num_phys = 0;
+	u16 ioc_status;
+
+	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+	if (!num_phys)
+		return;
+
+	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+	    sizeof(Mpi2SasIOUnit1PhyData_t));
+	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg1) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		goto out;
+	}
+	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+	    sas_iounit_pg1, sz))) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		goto out;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	/* device missing delay */
+	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+	else
+		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+	dmd_original = dmd;
+	if (device_missing_delay > 0x7F) {
+		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+		    device_missing_delay;
+		dmd = dmd / 16;
+		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+	} else
+		dmd = device_missing_delay;
+	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+	/* io missing delay */
+	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+	    sz)) {
+		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+			dmd_new = (dmd &
+			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+		else
+			dmd_new =
+			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+			 dmd_original, dmd_new);
+		ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
+			 io_missing_delay_original,
+			 io_missing_delay);
+		ioc->device_missing_delay = dmd_new;
+		ioc->io_missing_delay = io_missing_delay;
+	}
+
+out:
+	kfree(sas_iounit_pg1);
+}
+
+/**
+ * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
+ *	according to performance mode.
+ * @ioc: per adapter object
+ *
+ * Return: zero on success; otherwise an EAGAIN error code asking the
+ * caller to retry.
+ */
+static int
+_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2IOCPage1_t ioc_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+	int rc;
+
+	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+	if (rc)
+		return rc;
+	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
+
+	switch (perf_mode) {
+	case MPT_PERF_MODE_DEFAULT:
+	case MPT_PERF_MODE_BALANCED:
+		if (ioc->high_iops_queues) {
+			ioc_info(ioc,
+			    "Enable interrupt coalescing only for first %d reply queues\n",
+			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+			/*
+			 * If bit 31 is zero then interrupt coalescing is
+			 * enabled for all reply descriptor post queues.
+			 * If bit 31 is set then interrupt coalescing can be
+			 * enabled/disabled per reply descriptor post queue
+			 * group (of 8). So to enable interrupt coalescing
+			 * only on the first reply descriptor post queue
+			 * group, bit 31 and bit zero are set.
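+			 *
+			 * Editor's note, a worked example: with
+			 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES == 8 the
+			 * expression below evaluates to
+			 *
+			 *	0x80000000 | ((1 << 8/8) - 1) = 0x80000001
+			 *
+			 * i.e. per-group control (bit 31) plus coalescing
+			 * on group 0 only (bit zero).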
+			 */
+			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
+			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
+			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+			if (rc)
+				return rc;
+			ioc_info(ioc, "performance mode: balanced\n");
+			return 0;
+		}
+		fallthrough;
+	case MPT_PERF_MODE_LATENCY:
+		/*
+		 * Enable interrupt coalescing on all reply queues
+		 * with timeout value 0xA
+		 */
+		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		if (rc)
+			return rc;
+		ioc_info(ioc, "performance mode: latency\n");
+		break;
+	case MPT_PERF_MODE_IOPS:
+		/*
+		 * Enable interrupt coalescing on all reply queues.
+		 */
+		ioc_info(ioc,
+		    "performance mode: iops with coalescing timeout: 0x%x\n",
+		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		if (rc)
+			return rc;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * _base_get_event_diag_triggers - get event diag trigger values from
+ *				persistent pages
+ * @ioc: per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26DriverTriggerPage2_t trigger_pg2;
+	struct SL_WH_EVENT_TRIGGER_T *event_tg;
+	MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
+	Mpi2ConfigReply_t mpi_reply;
+	int r = 0, i = 0;
+	u16 count = 0;
+	u16 ioc_status;
+
+	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
+	    &trigger_pg2);
+	if (r)
+		return r;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dinitprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		return 0;
+	}
+
+	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
+		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
+		count = min_t(u16, NUM_VALID_ENTRIES, count);
+		ioc->diag_trigger_event.ValidEntries = count;
+
+		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
+		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
+		for (i = 0; i < count; i++) {
+			event_tg->EventValue = le16_to_cpu(
+			    mpi_event_tg->MPIEventCode);
+			event_tg->LogEntryQualifier = le16_to_cpu(
+			    mpi_event_tg->MPIEventCodeSpecific);
+			event_tg++;
+			mpi_event_tg++;
+		}
+	}
+	return 0;
+}
+
+/**
+ * _base_get_scsi_diag_triggers - get scsi diag trigger values from
+ *				persistent pages
+ * @ioc: per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26DriverTriggerPage3_t trigger_pg3;
+	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
+	MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
+	Mpi2ConfigReply_t mpi_reply;
+	int r = 0, i = 0;
+	u16 count = 0;
+	u16 ioc_status;
+
+	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
+	    &trigger_pg3);
+	if (r)
+		return r;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dinitprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		return 0;
+	}
+
+	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
+		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
+		count = min_t(u16, NUM_VALID_ENTRIES, count);
+		ioc->diag_trigger_scsi.ValidEntries = count;
+
+		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
+		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
+		for (i = 0; i < count; i++) {
+			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
+			scsi_tg->ASC = mpi_scsi_tg->ASC;
+			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
+
+			scsi_tg++;
+			mpi_scsi_tg++;
+		}
+	}
+	return 0;
+}
+
+/**
+ * _base_get_mpi_diag_triggers - get mpi diag trigger values from
+ *				persistent pages
+ * @ioc: per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26DriverTriggerPage4_t trigger_pg4;
+	struct SL_WH_MPI_TRIGGER_T *status_tg;
+	MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
+	Mpi2ConfigReply_t mpi_reply;
+	int r = 0, i = 0;
+	u16 count = 0;
+	u16 ioc_status;
+
+	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
+	    &trigger_pg4);
+	if (r)
+		return r;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dinitprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		return 0;
+	}
+
+	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
+		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
+		count = min_t(u16, NUM_VALID_ENTRIES, count);
+		ioc->diag_trigger_mpi.ValidEntries = count;
+
+		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
+		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
+
+		for (i = 0; i < count; i++) {
+			status_tg->IOCStatus = le16_to_cpu(
+			    mpi_status_tg->IOCStatus);
+			status_tg->IocLogInfo = le32_to_cpu(
+			    mpi_status_tg->LogInfo);
+
+			status_tg++;
+			mpi_status_tg++;
+		}
+	}
+	return 0;
+}
+
+/**
+ * _base_get_master_diag_triggers - get master diag trigger values from
+ *				persistent pages
+ * @ioc: per adapter object
+ *
+ * Return: 0 on success; otherwise return failure status.
+ */
+static int
+_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26DriverTriggerPage1_t trigger_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+	int r;
+	u16 ioc_status;
+
+	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
+	    &trigger_pg1);
+	if (r)
+		return r;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dinitprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		return 0;
+	}
+
+	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
+		ioc->diag_trigger_master.MasterData |=
+		    le32_to_cpu(
+		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
+	return 0;
+}
+
+/**
+ * _base_check_for_trigger_pages_support - checks whether HBA FW supports
+ *					driver trigger pages or not
+ * @ioc: per adapter object
+ * @trigger_flags: address where trigger page0's TriggerFlags value is copied
+ *
+ * Return: 0 on success, with the TriggerFlags mask copied to @trigger_flags,
+ * if the HBA FW supports driver trigger pages; -EFAULT if driver trigger
+ * pages are not supported by the FW; or -EAGAIN if a diag reset occurred
+ * due to a FW fault, asking the caller to retry the command.
+ */
+static int
+_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
+{
+	Mpi26DriverTriggerPage0_t trigger_pg0;
+	int r = 0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+
+	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
+	    &trigger_pg0);
+	if (r)
+		return r;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+		return -EFAULT;
+
+	*trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
+	return 0;
+}
+
+/**
+ * _base_get_diag_triggers - Retrieve diag trigger values from
+ *				persistent pages.
+ * @ioc: per adapter object
+ *
+ * Return: zero on success; otherwise an -EAGAIN error code asking the
+ * caller to retry.
+ */
+static int
+_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
+{
+	u32 trigger_flags;
+	int r;
+
+	/*
+	 * Default setting of master trigger.
+	 */
+	ioc->diag_trigger_master.MasterData =
+	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+
+	r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
+	if (r) {
+		if (r == -EAGAIN)
+			return r;
+		/*
+		 * Don't go for error handling when FW doesn't support
+		 * driver trigger pages.
+		 */
+		return 0;
+	}
+
+	ioc->supports_trigger_pages = 1;
+
+	/*
+	 * Retrieve master diag trigger values from driver trigger pg1
+	 * if master trigger bit enabled in TriggerFlags.
+	 */
+	if ((u16)trigger_flags &
+	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
+		r = _base_get_master_diag_triggers(ioc);
+		if (r)
+			return r;
+	}
+
+	/*
+	 * Retrieve event diag trigger values from driver trigger pg2
+	 * if event trigger bit enabled in TriggerFlags.
+	 */
+	if ((u16)trigger_flags &
+	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
+		r = _base_get_event_diag_triggers(ioc);
+		if (r)
+			return r;
+	}
+
+	/*
+	 * Retrieve scsi diag trigger values from driver trigger pg3
+	 * if scsi trigger bit enabled in TriggerFlags.
+	 */
+	if ((u16)trigger_flags &
+	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
+		r = _base_get_scsi_diag_triggers(ioc);
+		if (r)
+			return r;
+	}
+	/*
+	 * Retrieve mpi error diag trigger values from driver trigger pg4
+	 * if loginfo trigger bit enabled in TriggerFlags.
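+	 *
+	 * Editor's note, illustrative: each retrieval stage above and
+	 * below follows the same hypothetical skeleton
+	 *
+	 *	if (trigger_flags & <PGn valid flag>) {
+	 *		r = _base_get_<kind>_diag_triggers(ioc);
+	 *		if (r)
+	 *			return r;
+	 *	}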
+ */ + if ((u16)trigger_flags & + MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) { + r = _base_get_mpi_diag_triggers(ioc); + if (r) + return r; + } + return 0; +} + +/** + * _base_update_diag_trigger_pages - Update the driver trigger pages after + * online FW update, in case updated FW supports driver + * trigger pages. + * @ioc : per adapter object + * + * Return: nothing. + */ +static void +_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) +{ + + if (ioc->diag_trigger_master.MasterData) + mpt3sas_config_update_driver_trigger_pg1(ioc, + &ioc->diag_trigger_master, 1); + + if (ioc->diag_trigger_event.ValidEntries) + mpt3sas_config_update_driver_trigger_pg2(ioc, + &ioc->diag_trigger_event, 1); + + if (ioc->diag_trigger_scsi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg3(ioc, + &ioc->diag_trigger_scsi, 1); + + if (ioc->diag_trigger_mpi.ValidEntries) + mpt3sas_config_update_driver_trigger_pg4(ioc, + &ioc->diag_trigger_mpi, 1); +} + +/** + * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices. + * - On failure set default QD values. + * @ioc : per adapter object + * + * Returns 0 for success, non-zero for failure. + * + */ +static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + u16 depth; + int sz; + int rc = 0; + + ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; + ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; + if (!ioc->is_gen35_ioc) + goto out; + /* sas iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); + sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return rc; + } + rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); + ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); + ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = sas_iounit_pg1->SATAMaxQDepth; + ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH); + + /* pcie iounit page 1 */ + rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, + &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? + (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) : + MPT3SAS_NVME_QUEUE_DEPTH; +out: + dinitprintk(ioc, pr_err( + "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n", + ioc->max_wideport_qd, ioc->max_narrowport_qd, + ioc->max_sata_qd, ioc->max_nvme_qd)); + kfree(sas_iounit_pg1); + return rc; +} + +/** + * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1 + * + * @ioc : per adapter object + * @n : ptr to the ATTO nvram structure + * Return: 0 for success, non-zero for failure. 
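+ *
+ * Editor's note, the checksum model in brief (illustrative, mirrors the
+ * loop in the function body): the NVRAM is accepted when the 8-bit sum
+ * of every byte of the structure, seeded with ATTO_SASNVR_CKSUM_SEED,
+ * wraps to zero:
+ *
+ *	u8 sum = ATTO_SASNVR_CKSUM_SEED;
+ *	for (i = 0; i < sizeof(*n); i++)
+ *		sum += ((u8 *)n)[i];
+ *	valid = (sum == 0);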
+ */
+static int
+mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
+	struct ATTO_SAS_NVRAM *n)
+{
+	int r = -EINVAL;
+	union ATTO_SAS_ADDRESS *s1;
+	u32 len;
+	u8 *pb;
+	u8 ckSum;
+
+	/* validate nvram checksum */
+	pb = (u8 *) n;
+	ckSum = ATTO_SASNVR_CKSUM_SEED;
+	len = sizeof(struct ATTO_SAS_NVRAM);
+
+	while (len--)
+		ckSum = ckSum + pb[len];
+
+	if (ckSum) {
+		ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
+		return r;
+	}
+
+	s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
+
+	if (n->Signature[0] != 'E'
+	|| n->Signature[1] != 'S'
+	|| n->Signature[2] != 'A'
+	|| n->Signature[3] != 'S')
+		ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
+	else if (n->Version > ATTO_SASNVR_VERSION)
+		ioc_info(ioc, "Invalid ATTO NVRAM version\n");
+	else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
+			|| s1->b[0] != 0x50
+			|| s1->b[1] != 0x01
+			|| s1->b[2] != 0x08
+			|| (s1->b[3] & 0xF0) != 0x60
+			|| ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
+		ioc_err(ioc, "Invalid ATTO SAS address\n");
+	} else
+		r = 0;
+	return r;
+}
+
+/**
+ * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
+ *
+ * @ioc: per adapter object
+ * @sas_addr: return sas address
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
+{
+	Mpi2ManufacturingPage1_t mfg_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+	struct ATTO_SAS_NVRAM *nvram;
+	int r;
+	__be64 addr;
+
+	r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
+	if (r) {
+		ioc_err(ioc, "Failed to read manufacturing page 1\n");
+		return r;
+	}
+
+	/* validate nvram */
+	nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
+	r = mpt3sas_atto_validate_nvram(ioc, nvram);
+	if (r)
+		return r;
+
+	addr = *((__be64 *) nvram->SasAddr);
+	sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
+	return r;
+}
+
+/**
+ * mpt3sas_atto_init - perform initialization for ATTO branded
+ *	adapter.
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */ +static int +mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc) +{ + int sz = 0; + Mpi2BiosPage4_t *bios_pg4 = NULL; + Mpi2ConfigReply_t mpi_reply; + int r; + int ix; + union ATTO_SAS_ADDRESS sas_addr; + union ATTO_SAS_ADDRESS temp; + union ATTO_SAS_ADDRESS bias; + + r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr); + if (r) + return r; + + /* get header first to get size */ + r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0); + if (r) { + ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n"); + return r; + } + + sz = mpi_reply.Header.PageLength * sizeof(u32); + bios_pg4 = kzalloc(sz, GFP_KERNEL); + if (!bios_pg4) { + ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n"); + return -ENOMEM; + } + + /* read bios page 4 */ + r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); + if (r) { + ioc_err(ioc, "Failed to read ATTO bios page 4\n"); + goto out; + } + + /* Update bios page 4 with the ATTO WWID */ + bias.q = sas_addr.q; + bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS; + + for (ix = 0; ix < bios_pg4->NumPhys; ix++) { + temp.q = sas_addr.q; + temp.b[7] += ix; + bios_pg4->Phy[ix].ReassignmentWWID = temp.q; + bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q; + } + r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz); + +out: + kfree(bios_pg4); + return r; +} + +/** + * _base_static_config_pages - static start of day config pages + * @ioc: per adapter object + */ +static int +_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + u32 iounit_pg1_flags; + int tg_flags = 0; + int rc; + ioc->nvme_abort_timeout = 30; + + rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, + &ioc->manu_pg0); + if (rc) + return rc; + if (ioc->ir_firmware) { + rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); + if (rc) + return rc; + } + + if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { + rc = mpt3sas_atto_init(ioc); + if (rc) + return rc; + } + + /* + * Ensure correct T10 PI operation if vendor left EEDPTagMode + * flag unset in NVDATA. + */ + rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + if (rc) + return rc; + if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { + pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + ioc->name); + ioc->manu_pg11.EEDPTagMode &= ~0x3; + ioc->manu_pg11.EEDPTagMode |= 0x1; + mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + } + if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK) + ioc->tm_custom_handling = 1; + else { + ioc->tm_custom_handling = 0; + if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT) + ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT; + else if (ioc->manu_pg11.NVMeAbortTO > + NVME_TASK_ABORT_MAX_TIMEOUT) + ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT; + else + ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO; + } + ioc->time_sync_interval = + ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK; + if (ioc->time_sync_interval) { + if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK) + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_HOUR; + else + ioc->time_sync_interval = + ioc->time_sync_interval * SECONDS_PER_MIN; + dinitprintk(ioc, ioc_info(ioc, + "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n", + ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval & + MPT3SAS_TIMESYNC_UNIT_MASK) ? 
"Hour" : "Minute")); + } else { + if (ioc->is_gen35_ioc) + ioc_warn(ioc, + "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n"); + } + rc = _base_assign_fw_reported_qd(ioc); + if (rc) + return rc; + + /* + * ATTO doesn't use bios page 2 and 3 for bios settings. + */ + if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) + ioc->bios_pg3.BiosVersion = 0; + else { + rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); + if (rc) + return rc; + rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); + if (rc) + return rc; + } + + rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + if (rc) + return rc; + rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); + if (rc) + return rc; + _base_display_ioc_capabilities(ioc); + + /* + * Enable task_set_full handling in iounit_pg1 when the + * facts capabilities indicate that its supported. + */ + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) + iounit_pg1_flags &= + ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + else + iounit_pg1_flags |= + MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); + rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + if (rc) + return rc; + + if (ioc->iounit_pg8.NumSensors) + ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; + if (ioc->is_aero_ioc) { + rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc); + if (rc) + return rc; + } + if (ioc->is_gen35_ioc) { + if (ioc->is_driver_loading) { + rc = _base_get_diag_triggers(ioc); + if (rc) + return rc; + } else { + /* + * In case of online HBA FW update operation, + * check whether updated FW supports the driver trigger + * pages or not. + * - If previous FW has not supported driver trigger + * pages and newer FW supports them then update these + * pages with current diag trigger values. + * - If previous FW has supported driver trigger pages + * and new FW doesn't support them then disable + * support_trigger_pages flag. + */ + _base_check_for_trigger_pages_support(ioc, &tg_flags); + if (!ioc->supports_trigger_pages && tg_flags != -EFAULT) + _base_update_diag_trigger_pages(ioc); + else if (ioc->supports_trigger_pages && + tg_flags == -EFAULT) + ioc->supports_trigger_pages = 0; + } + } + return 0; +} + +/** + * mpt3sas_free_enclosure_list - release memory + * @ioc: per adapter object + * + * Free memory allocated during enclosure add. + */ +void +mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) +{ + struct _enclosure_node *enclosure_dev, *enclosure_dev_next; + + /* Free enclosure list */ + list_for_each_entry_safe(enclosure_dev, + enclosure_dev_next, &ioc->enclosure_list, list) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } +} + +/** + * _base_release_memory_pools - release memory + * @ioc: per adapter object + * + * Free memory allocated from _base_allocate_memory_pools. + */ +static void +_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + int j = 0; + int dma_alloc_count = 0; + struct chain_tracker *ct; + int count = ioc->rdpq_array_enable ? 
ioc->reply_queue_count : 1; + + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->request) { + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, + ioc_info(ioc, "request_pool(0x%p): free\n", + ioc->request)); + ioc->request = NULL; + } + + if (ioc->sense) { + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "sense_pool(0x%p): free\n", + ioc->sense)); + ioc->sense = NULL; + } + + if (ioc->reply) { + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "reply_pool(0x%p): free\n", + ioc->reply)); + ioc->reply = NULL; + } + + if (ioc->reply_free) { + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + dma_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, + ioc_info(ioc, "reply_free_pool(0x%p): free\n", + ioc->reply_free)); + ioc->reply_free = NULL; + } + + if (ioc->reply_post) { + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + for (i = 0; i < count; i++) { + if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 + && dma_alloc_count) { + if (ioc->reply_post[i].reply_post_free) { + dma_pool_free( + ioc->reply_post_free_dma_pool, + ioc->reply_post[i].reply_post_free, + ioc->reply_post[i].reply_post_free_dma); + dexitprintk(ioc, ioc_info(ioc, + "reply_post_free_pool(0x%p): free\n", + ioc->reply_post[i].reply_post_free)); + ioc->reply_post[i].reply_post_free = + NULL; + } + --dma_alloc_count; + } + } + dma_pool_destroy(ioc->reply_post_free_dma_pool); + if (ioc->reply_post_free_array && + ioc->rdpq_array_enable) { + dma_pool_free(ioc->reply_post_free_array_dma_pool, + ioc->reply_post_free_array, + ioc->reply_post_free_array_dma); + ioc->reply_post_free_array = NULL; + } + dma_pool_destroy(ioc->reply_post_free_array_dma_pool); + kfree(ioc->reply_post); + } + + if (ioc->pcie_sgl_dma_pool) { + for (i = 0; i < ioc->scsiio_depth; i++) { + dma_pool_free(ioc->pcie_sgl_dma_pool, + ioc->pcie_sg_lookup[i].pcie_sgl, + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; + } + dma_pool_destroy(ioc->pcie_sgl_dma_pool); + } + kfree(ioc->pcie_sg_lookup); + ioc->pcie_sg_lookup = NULL; + + if (ioc->config_page) { + dexitprintk(ioc, + ioc_info(ioc, "config_page(0x%p): free\n", + ioc->config_page)); + dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + + kfree(ioc->hpr_lookup); + ioc->hpr_lookup = NULL; + kfree(ioc->internal_lookup); + ioc->internal_lookup = NULL; + if (ioc->chain_lookup) { + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + if (ct && ct->chain_buffer) + dma_pool_free(ioc->chain_dma_pool, + ct->chain_buffer, + ct->chain_buffer_dma); + } + kfree(ioc->chain_lookup[i].chains_per_smid); + } + dma_pool_destroy(ioc->chain_dma_pool); + kfree(ioc->chain_lookup); + ioc->chain_lookup = NULL; + } + + kfree(ioc->io_queue_num); + ioc->io_queue_num = NULL; +} + +/** + * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are + * having same upper 32bits in their base memory address. 
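+ *
+ * Editor's note, the arithmetic in brief: a pool of size sz starting at
+ * dma address s stays inside one 4GB region exactly when
+ *
+ *	upper_32_bits(s) == upper_32_bits(s + sz - 1)
+ *
+ * e.g. s = 0x1FFFFF000 with sz = 0x2000 crosses a boundary (upper halves
+ * 0x1 vs 0x2) and is rejected by the helper below.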
+ * @start_address: Base address of a reply queue set
+ * @pool_sz: Size of a single Reply Descriptor Post Queue pool
+ *
+ * Return: 1 if all reply queues in a set have the same upper 32 bits in
+ * their base memory address, else 0.
+ */
+static int
+mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
+{
+	dma_addr_t end_address;
+
+	end_address = start_address + pool_sz - 1;
+
+	if (upper_32_bits(start_address) == upper_32_bits(end_address))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * _base_reduce_hba_queue_depth - Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ **/
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+	int reduce_sz = 64;
+
+	if ((ioc->hba_queue_depth - reduce_sz) >
+	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+		ioc->hba_queue_depth -= reduce_sz;
+		return 0;
+	} else
+		return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocate DMA'able memory
+ *	for PCIe SGL pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	int i = 0, j = 0;
+	struct chain_tracker *ct;
+
+	ioc->pcie_sgl_dma_pool =
+	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+	    ioc->page_size, 0);
+	if (!ioc->pcie_sgl_dma_pool) {
+		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+		return -ENOMEM;
+	}
+
+	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+	ioc->chains_per_prp_buffer =
+	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+	for (i = 0; i < ioc->scsiio_depth; i++) {
+		ioc->pcie_sg_lookup[i].pcie_sgl =
+		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+			return -EAGAIN;
+		}
+
+		if (!mpt3sas_check_same_4gb_region(
+		    ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
+			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+			    ioc->pcie_sg_lookup[i].pcie_sgl,
+			    (unsigned long long)
+			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+			ioc->use_32bit_dma = true;
+			return -EAGAIN;
+		}
+
+		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+			ct = &ioc->chain_lookup[i].chains_per_smid[j];
+			ct->chain_buffer =
+			    ioc->pcie_sg_lookup[i].pcie_sgl +
+			    (j * ioc->chain_segment_sz);
+			ct->chain_buffer_dma =
+			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+			    (j * ioc->chain_segment_sz);
+		}
+	}
+	dinitprintk(ioc, ioc_info(ioc,
+	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+	dinitprintk(ioc, ioc_info(ioc,
+	    "Number of chains can fit in a PRP page(%d)\n",
+	    ioc->chains_per_prp_buffer));
+	return 0;
+}
+
+/**
+ * _base_allocate_chain_dma_pool - Allocate DMA'able memory
+ *	for the chain dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */ +static int +_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) + return -ENOMEM; + + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region( + ctr->chain_buffer_dma, ioc->chain_segment_sz)) { + ioc_err(ioc, + "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n", + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + } + } + dinitprintk(ioc, ioc_info(ioc, + "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz))/1024)); + return 0; +} + +/** + * _base_allocate_sense_dma_pool - Allocating DMA'able memory + * for sense dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) + return -ENOMEM; + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, (unsigned long long) ioc->sense_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc_info(ioc, + "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", + ioc->sense, (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_pool - Allocating DMA'able memory + * for reply pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + /* reply pool, 4 byte align */ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) + return -ENOMEM; + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n", + ioc->reply, (unsigned long long) ioc->reply_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + ioc_info(ioc, + "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->reply, (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory + * for reply free dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. 
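+ *
+ * The caller sizes this pool as reply_free_queue_depth * 4 bytes (one
+ * 32-bit reply frame address per queue entry); e.g. an hba_queue_depth
+ * of 1000 gives a reply free queue depth of 1064 and a 4256-byte pool,
+ * comfortably inside a single 4GB region.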
+ */
+static int
+_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+	/* reply free queue, 16 byte align */
+	ioc->reply_free_dma_pool = dma_pool_create(
+	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
+	if (!ioc->reply_free_dma_pool)
+		return -ENOMEM;
+	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
+	    GFP_KERNEL, &ioc->reply_free_dma);
+	if (!ioc->reply_free)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
+		dinitprintk(ioc,
+		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	memset(ioc->reply_free, 0, sz);
+	dinitprintk(ioc, ioc_info(ioc,
+	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+	dinitprintk(ioc, ioc_info(ioc,
+	    "reply_free_dma (0x%llx)\n",
+	    (unsigned long long)ioc->reply_free_dma));
+	return 0;
+}
+
+/**
+ * _base_allocate_reply_post_free_array - Allocate DMA'able memory
+ *	for the reply post free array.
+ * @ioc: Adapter object
+ * @reply_post_free_array_sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
+	u32 reply_post_free_array_sz)
+{
+	ioc->reply_post_free_array_dma_pool =
+	    dma_pool_create("reply_post_free_array pool",
+	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+	if (!ioc->reply_post_free_array_dma_pool)
+		return -ENOMEM;
+	ioc->reply_post_free_array =
+	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
+	if (!ioc->reply_post_free_array)
+		return -EAGAIN;
+	if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
+	    reply_post_free_array_sz)) {
+		dinitprintk(ioc, pr_err(
+		    "Bad Reply Post Free Array! Array (0x%p) Array dma = (0x%llx)\n",
+		    ioc->reply_post_free_array,
+		    (unsigned long long) ioc->reply_post_free_array_dma));
+		ioc->use_32bit_dma = true;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+/**
+ * base_alloc_rdpq_dma_pool - Allocate DMA'able memory
+ *	for the reply queues.
+ * @ioc: per adapter object
+ * @sz: DMA Pool size
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
+{
+	int i = 0;
+	u32 dma_alloc_count = 0;
+	int reply_post_free_sz = ioc->reply_post_queue_depth *
+	    sizeof(Mpi2DefaultReplyDescriptor_t);
+	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
+
+	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
+	    GFP_KERNEL);
+	if (!ioc->reply_post)
+		return -ENOMEM;
+	/*
+	 * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
+	 * for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..)
+	 * should be within a 4GB boundary, i.e. the reply queues in a set
+	 * must have the same upper 32 bits in their memory address. So here
+	 * the driver allocates the DMA'able memory for the reply queues
+	 * accordingly. The driver uses the VENTURA_SERIES limitation to
+	 * manage INVADER_SERIES as well.
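+	 *
+	 * Worked example (assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16, as the
+	 * chunking below implies): with rdpq_array_enable set and
+	 * reply_queue_count = 24, dma_alloc_count = DIV_ROUND_UP(24, 16) = 2,
+	 * so two chunks are allocated from the pool; queues 0-15 are carved
+	 * out of the first chunk and queues 16-23 out of the second, and
+	 * each chunk is checked against the 4GB-boundary restriction.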
+ */ + dma_alloc_count = DIV_ROUND_UP(count, + RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", + &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) + return -ENOMEM; + for (i = 0; i < count; i++) { + if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) + return -ENOMEM; + /* + * Each set of RDPQ pool must satisfy 4gb boundary + * restriction. + * 1) Check if allocated resources for RDPQ pool are in + * the same 4GB range. + * 2) If #1 is true, continue with 64 bit DMA. + * 3) If #1 is false, return 1. which means free all the + * resources and set DMA mask to 32 and allocate. + */ + if (!mpt3sas_check_same_4gb_region( + ioc->reply_post[i].reply_post_free_dma, sz)) { + dinitprintk(ioc, + ioc_err(ioc, "bad Replypost free pool(0x%p)" + "reply_post_free_dma = (0x%llx)\n", + ioc->reply_post[i].reply_post_free, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + return -EAGAIN; + } + dma_alloc_count--; + + } else { + ioc->reply_post[i].reply_post_free = + (Mpi2ReplyDescriptorsUnion_t *) + ((long)ioc->reply_post[i-1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = + (dma_addr_t) + (ioc->reply_post[i-1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +/** + * _base_allocate_memory_pools - allocate start of day memory pools + * @ioc: per adapter object + * + * Return: 0 success, anything else error. + */ +static int +_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + struct mpt3sas_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; + u32 retry_sz; + u32 rdpq_sz = 0, sense_sz = 0; + u16 max_request_credit, nvme_blocks_needed; + unsigned short sg_tablesize; + u16 sge_size; + int i; + int ret = 0, rc = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + + retry_sz = 0; + facts = &ioc->facts; + + /* command line tunables for max sgl entries */ + if (max_sgl_entries != -1) + sg_tablesize = max_sgl_entries; + else { + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + sg_tablesize = MPT2SAS_SG_DEPTH; + else + sg_tablesize = MPT3SAS_SG_DEPTH; + } + + /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */ + if (reset_devices) + sg_tablesize = min_t(unsigned short, sg_tablesize, + MPT_KDUMP_MIN_PHYS_SEGMENTS); + + if (ioc->is_mcpu_endpoint) + ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS; + else { + if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) + sg_tablesize = MPT_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { + sg_tablesize = min_t(unsigned short, sg_tablesize, + SG_MAX_SEGMENTS); + ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n", + sg_tablesize, MPT_MAX_PHYS_SEGMENTS); + } + ioc->shost->sg_tablesize = sg_tablesize; + } + + ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), + (facts->RequestCredit / 4)); + if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { + if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n", + facts->RequestCredit); + return -ENOMEM; + } + ioc->internal_depth = 10; + } + + ioc->hi_priority_depth = ioc->internal_depth - (5); + /* command 
line tunables for max controller queue depth */
+	if (max_queue_depth != -1 && max_queue_depth != 0) {
+		max_request_credit = min_t(u16, max_queue_depth +
+		    ioc->internal_depth, facts->RequestCredit);
+		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+			max_request_credit = MAX_HBA_QUEUE_DEPTH;
+	} else if (reset_devices)
+		max_request_credit = min_t(u16, facts->RequestCredit,
+		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
+	else
+		max_request_credit = min_t(u16, facts->RequestCredit,
+		    MAX_HBA_QUEUE_DEPTH);
+
+	/* Firmware maintains an additional facts->HighPriorityCredit number
+	 * of credits for HighPriority Request messages, so the hba queue
+	 * depth will be the sum of max_request_credit and the high priority
+	 * queue depth.
+	 */
+	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
+
+	/* request frame size */
+	ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+	/* reply frame size */
+	ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+	/* chain segment size */
+	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+		if (facts->IOCMaxChainSegmentSize)
+			ioc->chain_segment_sz =
+			    facts->IOCMaxChainSegmentSize *
+			    MAX_CHAIN_ELEMT_SZ;
+		else
+			/* default to 128 bytes if IOCMaxChainSegmentSize is zero */
+			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
+			    MAX_CHAIN_ELEMT_SZ;
+	} else
+		ioc->chain_segment_sz = ioc->request_sz;
+
+	/* calculate the max scatter element size */
+	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
+
+ retry_allocation:
+	total_sz = 0;
+	/* calculate number of sg elements left over in the 1st frame */
+	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
+
+	/* now do the same for a chain buffer */
+	max_sge_elements = ioc->chain_segment_sz - sge_size;
+	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
+
+	/*
+	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+	 */
+	chains_needed_per_io = ((ioc->shost->sg_tablesize -
+	    ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+	    + 1;
+	if (chains_needed_per_io > facts->MaxChainDepth) {
+		chains_needed_per_io = facts->MaxChainDepth;
+		ioc->shost->sg_tablesize = min_t(u16,
+		    ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+		    * chains_needed_per_io), ioc->shost->sg_tablesize);
+	}
+	ioc->chains_needed_per_io = chains_needed_per_io;
+
+	/* reply free queue sizing - taking into account 64 FW events */
+	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+	/* mCPU manages single counters for simplicity */
+	if (ioc->is_mcpu_endpoint)
+		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
+	else {
+		/* calculate reply descriptor post queue depth */
+		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+		    ioc->reply_free_queue_depth + 1;
+		/* align the reply post queue on the next 16 count boundary */
+		if (ioc->reply_post_queue_depth % 16)
+			ioc->reply_post_queue_depth += 16 -
+			    (ioc->reply_post_queue_depth % 16);
+	}
+
+	if (ioc->reply_post_queue_depth >
+	    facts->MaxReplyDescriptorPostQueueDepth) {
+		ioc->reply_post_queue_depth =
+		    facts->MaxReplyDescriptorPostQueueDepth -
+		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
+		ioc->hba_queue_depth =
+		    ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+	}
+
+	ioc_info(ioc,
+	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
+	    "sge_per_io(%d), chains_per_io(%d)\n",
+	    ioc->max_sges_in_main_message,
+	    ioc->max_sges_in_chain_message,
ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); + + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK; + if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK)) + rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; + ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz); + if (ret == -EAGAIN) { + /* + * Free allocated bad RDPQ memory pools. + * Change dma coherent mask to 32 bit and reallocate RDPQ + */ + _base_release_memory_pools(ioc); + ioc->use_32bit_dma = true; + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + ioc_err(ioc, + "32 DMA mask failed %s\n", pci_name(ioc->pdev)); + return -ENODEV; + } + if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz)) + return -ENOMEM; + } else if (ret == -ENOMEM) + return -ENOMEM; + total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 : + DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK)); + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + + /* set the scsi host can_queue depth + * with some internal commands that could be outstanding + */ + ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; + dinitprintk(ioc, + ioc_info(ioc, "scsi host: can_queue depth (%d)\n", + ioc->shost->can_queue)); + + /* contiguous pool for request and chains, 16 byte align, one extra " + * "frame for smid=0 + */ + ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + + /* hi-priority queue */ + sz += (ioc->hi_priority_depth * ioc->request_sz); + + /* internal queue */ + sz += (ioc->internal_depth * ioc->request_sz); + + ioc->request_dma_sz = sz; + ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, + &ioc->request_dma, GFP_KERNEL); + if (!ioc->request) { + ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n", + ioc->hba_queue_depth, ioc->chains_needed_per_io, + ioc->request_sz, sz / 1024); + if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) + goto out; + retry_sz = 64; + ioc->hba_queue_depth -= retry_sz; + _base_release_memory_pools(ioc); + goto retry_allocation; + } + + if (retry_sz) + ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", + ioc->hba_queue_depth, ioc->chains_needed_per_io, + ioc->request_sz, sz / 1024); + + /* hi-priority queue */ + ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + + /* internal queue */ + ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * + ioc->request_sz); + ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * + ioc->request_sz); + + ioc_info(ioc, + "request pool(0x%p) - dma(0x%llx): " + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); + + total_sz += sz; + + dinitprintk(ioc, + ioc_info(ioc, "scsiio(0x%p): depth(%d)\n", + ioc->request, ioc->scsiio_depth)); + + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); + sz = ioc->scsiio_depth * sizeof(struct chain_lookup); + ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup) { + 
ioc_err(ioc, "chain_lookup: __get_free_pages failed\n"); + goto out; + } + + sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); + if (!ioc->chain_lookup[i].chains_per_smid) { + ioc_err(ioc, "chain_lookup: kzalloc failed\n"); + goto out; + } + } + + /* initialize hi-priority queue smid's */ + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + ioc_err(ioc, "hpr_lookup: kcalloc failed\n"); + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, + ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->hi_priority, + ioc->hi_priority_depth, ioc->hi_priority_smid)); + + /* initialize internal queue smid's */ + ioc->internal_lookup = kcalloc(ioc->internal_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->internal_lookup) { + ioc_err(ioc, "internal_lookup: kcalloc failed\n"); + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, + ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n", + ioc->internal, + ioc->internal_depth, ioc->internal_smid)); + + ioc->io_queue_num = kcalloc(ioc->scsiio_depth, + sizeof(u16), GFP_KERNEL); + if (!ioc->io_queue_num) + goto out; + /* + * The number of NVMe page sized blocks needed is: + * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 + * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry + * that is placed in the main message frame. 8 is the size of each PRP + * entry or PRP list pointer entry. 8 is subtracted from page_size + * because of the PRP list pointer entry at the end of a page, so this + * is not counted as a PRP entry. The 1 added page is a round up. + * + * To avoid allocation failures due to the amount of memory that could + * be required for NVMe PRP's, only each set of NVMe blocks will be + * contiguous, so a new set is allocated for each possible I/O. 
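+ *
+ * Worked example (illustrative values): with sg_tablesize = 128 and a
+ * 4096-byte page, nvme_blocks_needed = ((128 * 8) - 1) / (4096 - 8) + 1
+ * = 1023 / 4088 + 1 = 1, so a single page-sized block is allocated per
+ * possible I/O.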
+ */ + + ioc->chains_per_prp_buffer = 0; + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { + nvme_blocks_needed = + (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; + nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); + nvme_blocks_needed++; + + sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; + ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); + if (!ioc->pcie_sg_lookup) { + ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n"); + goto out; + } + sz = nvme_blocks_needed * ioc->page_size; + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz * ioc->scsiio_depth; + } + + rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * ioc->scsiio_depth); + dinitprintk(ioc, + ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->chain_depth, ioc->chain_segment_sz, + (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); + /* sense buffers, 4 byte align */ + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + rc = _base_allocate_sense_dma_pool(ioc, sense_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; + /* reply pool, 4 byte align */ + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + rc = _base_allocate_reply_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sz; + + /* reply free queue, 16 byte align */ + sz = ioc->reply_free_queue_depth * 4; + rc = _base_allocate_reply_free_dma_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + dinitprintk(ioc, + ioc_info(ioc, "reply_free_dma (0x%llx)\n", + (unsigned long long)ioc->reply_free_dma)); + total_sz += sz; + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + rc = _base_allocate_reply_post_free_array(ioc, + reply_post_free_array_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + } + ioc->config_page_sz = 512; + ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, + ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL); + if (!ioc->config_page) { + ioc_err(ioc, "config page: dma_pool_alloc failed\n"); + goto out; + } + + ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->config_page, (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); + total_sz += ioc->config_page_sz; + + ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", + total_sz / 1024); + ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", + ioc->shost->can_queue, facts->RequestCredit); + ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n", + ioc->shost->sg_tablesize); + return 0; + +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + + out: + return -ENOMEM; +} + +/** + * mpt3sas_base_get_iocstate - Get the current state of a MPT 
adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Return: all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+{
+	u32 s, sc;
+
+	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
+	sc = s & MPI2_IOC_STATE_MASK;
+	return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: per adapter object
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in seconds
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
+{
+	u32 count, cntdn;
+	u32 current_state;
+
+	count = 0;
+	cntdn = 1000 * timeout;
+	do {
+		current_state = mpt3sas_base_get_iocstate(ioc, 1);
+		if (current_state == ioc_state)
+			return 0;
+		if (count && current_state == MPI2_IOC_STATE_FAULT)
+			break;
+		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
+			break;
+
+		usleep_range(1000, 1500);
+		count++;
+	} while (--cntdn);
+
+	return current_state;
+}
+
+/**
+ * _base_dump_reg_set - print a hexdump of the register set.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static inline void
+_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
+{
+	unsigned int i, sz = 256;
+	u32 __iomem *reg = (u32 __iomem *)ioc->chip;
+
+	ioc_info(ioc, "System Register set:\n");
+	for (i = 0; i < (sz / sizeof(u32)); i++)
+		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
+ * by a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ *
+ * Return: 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+	u32 cntdn, count;
+	u32 int_status;
+
+	count = 0;
+	cntdn = 1000 * timeout;
+	do {
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
+		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+			dhsprintk(ioc,
+			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+				__func__, count, timeout));
+			return 0;
+		}
+
+		usleep_range(1000, 1500);
+		count++;
+	} while (--cntdn);
+
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+	    __func__, count, int_status);
+	return -EFAULT;
+}
+
+static int
+_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+	u32 cntdn, count;
+	u32 int_status;
+
+	count = 0;
+	cntdn = 2000 * timeout;
+	do {
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
+		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+			dhsprintk(ioc,
+			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+				__func__, count, timeout));
+			return 0;
+		}
+
+		udelay(500);
+		count++;
+	} while (--cntdn);
+
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+	    __func__, count, int_status);
+	return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ *
+ * Return: 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
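+ *
+ * While waiting for the ack, the loop below also samples the Doorbell
+ * for the FAULT and COREDUMP states and fails fast with the relevant
+ * dump rather than spinning out the full timeout.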
+ */ +static int +_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = 1000 * timeout; + do { + int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); + if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + out: + ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n", + __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use + * @ioc: per adapter object + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = 1000 * timeout; + do { + doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + if (!(doorbell_reg & MPI2_DOORBELL_USED)) { + dhsprintk(ioc, + ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", + __func__, count, timeout)); + return 0; + } + + usleep_range(1000, 1500); + count++; + } while (--cntdn); + + ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + __func__, count, doorbell_reg); + return -EFAULT; +} + +/** + * _base_send_ioc_reset - send doorbell reset + * @ioc: per adapter object + * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) +{ + u32 ioc_state; + int r = 0; + unsigned long flags; + + if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { + ioc_err(ioc, "%s: unknown reset_type\n", __func__); + return -EFAULT; + } + + if (!(ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + + ioc_info(ioc, "sending message unit reset !!\n"); + + writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 15))) { + r = -EFAULT; + goto out; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); + if (ioc_state) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + r = -EFAULT; + goto out; + } + out: + if (r != 0) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + /* + * Wait for IOC state CoreDump to clear only during + * HBA initialization & release time. 
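+	 * (When is_driver_loading is set, or fault_reset_work_q has not
+	 * been created yet, there is no watchdog worker to observe the
+	 * CoreDump state, so this path polls for its completion instead.)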
+ */ + if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || + ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_print_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + } + ioc_info(ioc, "message unit reset: %s\n", + r == 0 ? "SUCCESS" : "FAILED"); + return r; +} + +/** + * mpt3sas_wait_for_ioc - IOC's operational state is checked here. + * @ioc: per adapter object + * @timeout: timeout in seconds + * + * Return: Waits up to timeout seconds for the IOC to + * become operational. Returns 0 if IOC is present + * and operational; otherwise returns %-EFAULT. + */ + +int +mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + int wait_state_count = 0; + u32 ioc_state; + + do { + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state == MPI2_IOC_STATE_OPERATIONAL) + break; + + /* + * Watchdog thread will be started after IOC Initialization, so + * no need to wait here for IOC state to become operational + * when IOC Initialization is on. Instead the driver will + * return ETIME status, so that calling function can issue + * diag reset operation and retry the command. + */ + if (ioc->is_driver_loading) + return -ETIME; + + ssleep(1); + ioc_info(ioc, "%s: waiting for operational state(count=%d)\n", + __func__, ++wait_state_count); + } while (--timeout); + if (!timeout) { + ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__); + return -EFAULT; + } + if (wait_state_count) + ioc_info(ioc, "ioc is operational\n"); + return 0; +} + +/** + * _base_handshake_req_reply_wait - send request thru doorbell interface + * @ioc: per adapter object + * @request_bytes: request length + * @request: pointer having request payload + * @reply_bytes: reply length + * @reply: pointer to reply payload + * @timeout: timeout in second + * + * Return: 0 for success, non-zero for failure. 
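+ *
+ * The sequence, as implemented below: write the HANDSHAKE function to
+ * the Doorbell together with the request length in dwords, wait for the
+ * IOC to ack, stream the request out 32 bits at a time, then pull the
+ * reply back 16 bits per doorbell interrupt, using MsgLength from the
+ * reply header to know how many words to read. _base_get_port_facts()
+ * and _base_get_ioc_facts() below drive PortFacts/IOCFacts through this
+ * path.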
+ */ +static int +_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, + u32 *request, int reply_bytes, u16 *reply, int timeout) +{ + MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; + int i; + u8 failed; + __le32 *mfp; + + /* make sure doorbell is not in use */ + if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { + ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); + return -EFAULT; + } + + /* clear pending doorbell interrupts from previous state changes */ + if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & + MPI2_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + + /* send message to ioc */ + writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | + ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + + if ((_base_spin_on_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + + if ((_base_wait_for_doorbell_ack(ioc, 5))) { + ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* send message 32-bits at a time */ + for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { + writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 5))) + failed = 1; + } + + if (failed) { + ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* now wait for the reply */ + if ((_base_wait_for_doorbell_int(ioc, timeout))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + + /* read the first two 16-bits, it gives the total length of the reply */ + reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((_base_wait_for_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((_base_wait_for_doorbell_int(ioc, 5))) { + ioc_err(ioc, "doorbell handshake int failed (line=%d)\n", + __LINE__); + return -EFAULT; + } + if (i >= reply_bytes/2) /* overflow case */ + ioc->base_readl_ext_retry(&ioc->chip->Doorbell); + else + reply[i] = le16_to_cpu( + ioc->base_readl_ext_retry(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + + _base_wait_for_doorbell_int(ioc, 5); + if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) { + dhsprintk(ioc, + ioc_info(ioc, "doorbell is in use (line=%d)\n", + __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + mfp = (__le32 *)reply; + pr_info("\toffset:data\n"); + for (i = 0; i < reply_bytes/4; i++) + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + return 0; +} + +/** + * mpt3sas_base_sas_iounit_control - send sas iounit control to FW + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SAS IO Unit Control Request message allows the host to perform low-level + * operations, such as resets on the PHYs of the IO Unit, also allows the host + * to obtain the IOC assigned device handles for 
a device if it has other + * identifying information about the device, in addition allows the host to + * remove IOC resources associated with the device. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request) +{ + u16 smid; + u8 issue_reset = 0; + int rc; + void *request; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: base_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); + if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, + mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, + issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SasIoUnitControlReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * mpt3sas_base_scsi_enclosure_processor - sending request to sep device + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SCSI Enclosure Processor request message causes the IOC to + * communicate with SES devices to control LED status signals. + * + * Return: 0 for success, non-zero for failure. 
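+ *
+ * Example (hypothetical, abridged caller; the constants are the MPI2
+ * SEP definitions used elsewhere in this driver):
+ *
+ *	Mpi2SepRequest_t req = {0};
+ *	Mpi2SepReply_t reply;
+ *
+ *	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ *	req.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ *	req.SlotStatus = cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ *	(a real caller also addresses a device via Flags and DevHandle,
+ *	or enclosure handle and slot, before calling)
+ *	mpt3sas_base_scsi_enclosure_processor(ioc, &reply, &req);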
+ */ +int +mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) +{ + u16 smid; + u8 issue_reset = 0; + int rc; + void *request; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: base_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SepRequest_t)/4, issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SepReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * _base_get_port_facts - obtain port facts reply and save in ioc + * @ioc: per adapter object + * @port: ? + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) +{ + Mpi2PortFactsRequest_t mpi_request; + Mpi2PortFactsReply_t mpi_reply; + struct mpt3sas_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); + mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; + mpi_request.PortNumber = port; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + + return 0; +} + +/** + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL + * @ioc: per adapter object + * @timeout: + * + * Return: 0 for success, non-zero for failure. 
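+ * The timeout is in seconds, as in the other wait helpers above.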
+ */ +static int +_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->pci_error_recovery) { + dfailprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return -EFAULT; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, + ioc_info(ioc, "%s: ioc_state(0x%08x)\n", + __func__, ioc_state)); + + if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + ioc_info(ioc, + "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); + if (ioc_state) { + dfailprintk(ioc, + ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state)); + return -EFAULT; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc); + return rc; +} + +/** + * _base_get_ioc_facts - obtain ioc facts reply and save in ioc + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCFactsRequest_t mpi_request; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt3sas_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + r = _base_wait_for_iocstate(ioc, 10); + if (r) { + dfailprintk(ioc, + ioc_info(ioc, "%s: failed getting to correct state\n", + __func__)); + return r; + } + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); + mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + facts = &ioc->facts; + memset(facts, 0, sizeof(struct mpt3sas_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + if (ioc->msix_enable && (facts->MaxMSIxVectors <= + MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) + ioc->combined_reply_queue = 0; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices)) + ioc->rdpq_array_capable = 1; + if 
((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) + && ioc->is_aero_ioc) + ioc->atomic_desc_capable = 1; + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = + le16_to_cpu(mpi_reply.IOCRequestFrameSize); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + facts->IOCMaxChainSegmentSize = + le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + } + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = + le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; + + /* + * Get the Page Size from IOC Facts. If it's 0, default to 4k. + */ + ioc->page_size = 1 << facts->CurrentHostPageSize; + if (ioc->page_size == 1) { + ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n"); + ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; + } + dinitprintk(ioc, + ioc_info(ioc, "CurrentHostPageSize(%d)\n", + facts->CurrentHostPageSize)); + + dinitprintk(ioc, + ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n", + facts->RequestCredit, facts->MaxChainDepth)); + dinitprintk(ioc, + ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n", + facts->IOCRequestFrameSize * 4, + facts->ReplyFrameSize * 4)); + return 0; +} + +/** + * _base_send_ioc_init - send ioc_init to firmware + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
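+ *
+ * When rdpq_array_enable is set, the request below carries
+ * MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE and points the IOC at the
+ * reply_post_free_array built earlier (one RDPQ base address per reply
+ * queue) rather than directly at a single reply descriptor post queue.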
+ */ +static int +_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2IOCInitRequest_t mpi_request; + Mpi2IOCInitReply_t mpi_reply; + int i, r = 0; + ktime_t current_time; + u16 ioc_status; + u32 reply_post_free_array_sz = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); + mpi_request.Function = MPI2_FUNCTION_IOC_INIT; + mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; /* TODO */ + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); + mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K; + + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64)ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64)ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64)ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64)ioc->reply_free_dma); + + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + ioc->reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64( + (u64)ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); + } + + /* + * Set the flag to enable CoreDump state feature in IOC firmware. + */ + mpi_request.ConfigurationFlags |= + cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); + + /* This time stamp specifies number of milliseconds + * since epoch ~ midnight January 1, 1970. + */ + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + __le32 *mfp; + int i; + + mfp = (__le32 *)&mpi_request; + ioc_info(ioc, "\toffset:data\n"); + for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + + r = _base_handshake_req_reply_wait(ioc, + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); + + if (r != 0) { + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); + return r; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS || + mpi_reply.IOCLogInfo) { + ioc_err(ioc, "%s: failed\n", __func__); + r = -EIO; + } + + /* Reset TimeSync Counter*/ + ioc->timestamp_update_count = 0; + return r; +} + +/** + * mpt3sas_port_enable_done - command completion routine for port enable + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +u8 +mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + + if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) + return 1; + + ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; + ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; + ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + + if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) { + ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + mpt3sas_port_enable_complete(ioc); + return 1; + } else { + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + } + complete(&ioc->port_enable_cmds.done); + return 1; +} + +/** + * _base_send_port_enable - send port_enable(discovery stuff) to firmware + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. + */ +static int +_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + Mpi2PortEnableReply_t *mpi_reply; + int r = 0; + u16 smid; + u16 ioc_status; + + ioc_info(ioc, "sending port enable !!\n"); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + init_completion(&ioc->port_enable_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); + if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2PortEnableRequest_t)/4); + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n", + __func__, ioc_status); + r = -EFAULT; + goto out; + } + + out: + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED"); + return r; +} + +/** + * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
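+ *
+ * Unlike _base_send_port_enable() above, this variant does not block:
+ * MPT3_CMD_COMPLETE_ASYNC is set, so completion is reported through
+ * mpt3sas_port_enable_done(), which either calls
+ * mpt3sas_port_enable_complete() or records start_scan_failed.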
+ */
+int
+mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2PortEnableRequest_t *mpi_request;
+	u16 smid;
+
+	ioc_info(ioc, "sending port enable !!\n");
+
+	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
+		return -EAGAIN;
+	}
+
+	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+	if (!smid) {
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
+		return -EAGAIN;
+	}
+	ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
+	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->port_enable_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+	ioc->put_smid_default(ioc, smid);
+	return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - disposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait for discovery to complete. Used to either
+ * locate the boot device, or report volumes ahead of physical devices.
+ *
+ * Return: 1 for wait, 0 for don't wait.
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
+{
+	/* We wait for discovery to complete if IR firmware is loaded.
+	 * The sas topology events arrive before PD events, so we need time
+	 * to turn on the bit in ioc->pd_handles to indicate a PD.
+	 * Also, it may be required to report Volumes ahead of physical
+	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+	 */
+	if (ioc->ir_firmware)
+		return 1;
+
+	/* if no Bios, then we don't need to wait */
+	if (!ioc->bios_pg3.BiosVersion)
+		return 0;
+
+	/* If Bios is present, we drop down here.
+	 *
+	 * If there are any entries in Bios Page 2, then we wait
+	 * for discovery to complete.
+	 */
+
+	/* Current Boot Device */
+	if ((ioc->bios_pg2.CurrentBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+	    /* Request Boot Device */
+	    (ioc->bios_pg2.ReqBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+	    /* Alternate Request Boot Device */
+	    (ioc->bios_pg2.ReqAltBootDeviceForm &
+	    MPI2_BIOSPAGE2_FORM_MASK) ==
+	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+		return 0;
+
+	return 1;
+}
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+	u32 desired_event;
+
+	if (event >= 128)
+		return;
+
+	desired_event = (1 << (event % 32));
+
+	if (event < 32)
+		ioc->event_masks[0] &= ~desired_event;
+	else if (event < 64)
+		ioc->event_masks[1] &= ~desired_event;
+	else if (event < 96)
+		ioc->event_masks[2] &= ~desired_event;
+	else if (event < 128)
+		ioc->event_masks[3] &= ~desired_event;
+}
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
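+ *
+ * The EventMasks sent here are the ones prepared by _base_unmask_events()
+ * above: e.g. unmasking event 0x0016 (the MPI2 SAS discovery event)
+ * clears bit 22 of event_masks[0], i.e. event_masks[0] &= ~(1 << 22).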
+ */ +static int +_base_event_notification(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2EventNotificationRequest_t *mpi_request; + u16 smid; + int r = 0; + int i, issue_diag_reset = 0; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc_err(ioc, "%s: internal command already in use\n", __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return -EAGAIN; + } + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); + mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = + cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2EventNotificationRequest_t)/4); + if (ioc->base_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + issue_diag_reset = 1; + + } else + dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + if (issue_diag_reset) { + if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (mpt3sas_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +/** + * mpt3sas_base_validate_event_type - validating event types + * @ioc: per adapter object + * @event_type: firmware event + * + * This will turn on firmware event notification when application + * ask for that event. We don't mask events that are already enabled. + */ +void +mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) +{ + int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + + if (!send_update_to_fw) + return; + + mutex_lock(&ioc->base_cmds.mutex); + _base_event_notification(ioc); + mutex_unlock(&ioc->base_cmds.mutex); +} + +/** + * _base_diag_reset - the "big hammer" start of day reset + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
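+ *
+ * The body below unlocks the HostDiagnostic register by writing the
+ * six-key magic sequence, sets MPI2_DIAG_RESET_ADAPTER, and then polls
+ * for the bit to self-clear. The poll budget is roughly 300 seconds;
+ * a sketch of the arithmetic:
+ *
+ *	loops = 300000000 / MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC;
+ *	(each loop sleeps that same delay, so loops * delay = 300 s)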
+ */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + ioc_info(ioc, "sending diag reset !!\n"); + + pci_cfg_access_lock(ioc->pdev); + + drsprintk(ioc, ioc_info(ioc, "clear interrupts\n")); + + count = 0; + do { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + drsprintk(ioc, ioc_info(ioc, "write magic sequence\n")); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); + + /* wait 100 msec */ + msleep(100); + + if (count++ > 20) { + ioc_info(ioc, + "Stop writing magic sequence after 20 retries\n"); + _base_dump_reg_set(ioc); + goto out; + } + + host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); + drsprintk(ioc, + ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", + count, host_diagnostic)); + + } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); + + hcb_size = ioc->base_readl(&ioc->chip->HCBSize); + + drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); + writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); + + /*This delay allows the chip PCIe hardware time to finish reset tasks*/ + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { + + host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); + + if (host_diagnostic == 0xFFFFFFFF) { + ioc_info(ioc, + "Invalid host diagnostic register value\n"); + _base_dump_reg_set(ioc); + goto out; + } + if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) + break; + + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000); + } + + if (host_diagnostic & MPI2_DIAG_HCB_MODE) { + + drsprintk(ioc, + ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n")); + host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; + host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + + drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n")); + writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, + &ioc->chip->HCBSize); + } + + drsprintk(ioc, ioc_info(ioc, "restart the adapter\n")); + writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, + &ioc->chip->HostDiagnostic); + + drsprintk(ioc, + ioc_info(ioc, "disable writes to the diagnostic register\n")); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + + drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n")); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20); + if (ioc_state) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + _base_dump_reg_set(ioc); + goto out; + } + + pci_cfg_access_unlock(ioc->pdev); + ioc_info(ioc, "diag reset: SUCCESS\n"); + return 0; + + out: + pci_cfg_access_unlock(ioc->pdev); + ioc_err(ioc, "diag reset: FAILED\n"); + return -EFAULT; +} + +/** + * mpt3sas_base_make_ioc_ready - put controller in READY state + * @ioc: per adapter object + * @type: FORCE_BIG_HAMMER or 
SOFT_RESET + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + if (ioc->pci_error_recovery) + return 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, + ioc_info(ioc, "%s: ioc_state(0x%08x)\n", + __func__, ioc_state)); + + /* if in RESET state, it should move to READY state shortly */ + count = 0; + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { + while ((ioc_state & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_READY) { + if (count++ == 10) { + ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + } + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + ioc_info(ioc, "unexpected doorbell active!\n"); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + /* + * if host reset is invoked while watch dog thread is waiting + * for IOC state to be changed to Fault state then driver has + * to wait here for CoreDump state to clear otherwise reset + * will be issued to the FW and FW move the IOC state to + * reset state without copying the FW logs to coredump region. + */ + if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + if (!(_base_send_ioc_reset(ioc, + MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) { + return 0; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc); + return rc; +} + +/** + * _base_make_ioc_operational - put controller in OPERATIONAL state + * @ioc: per adapter object + * + * Return: 0 for success, non-zero for failure. 
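+ *
+ * Among the queue re-initialization done below, note that every Reply
+ * Post Free descriptor is primed with an all-ones sentinel so the
+ * interrupt handler can tell an unused slot from a posted reply; as a
+ * sketch:
+ *
+ *	reply_q->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
+ *	...
+ *	(in the ISR) if (descriptor.Words == cpu_to_le64(ULLONG_MAX))
+ *		stop scanning, no new reply yet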
+ */ +static int +_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) +{ + int r, i, index, rc; + unsigned long flags; + u32 reply_address; + u16 smid; + struct _tr_list *delayed_tr, *delayed_tr_next; + struct _sc_list *delayed_sc, *delayed_sc_next; + struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; + u8 hide_flag; + struct adapter_reply_queue *reply_q; + Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; + + dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + /* clean the delayed target reset list */ + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + list_for_each_entry_safe(delayed_sc, delayed_sc_next, + &ioc->delayed_sc_list, list) { + list_del(&delayed_sc->list); + kfree(delayed_sc); + } + + list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, + &ioc->delayed_event_ack_list, list) { + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + } + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + + /* hi-priority queue */ + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + + /* internal queue */ + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* initialize Reply Free Queue */ + for (i = 0, reply_address = (u32)ioc->reply_dma ; + i < ioc->reply_free_queue_depth ; i++, reply_address += + ioc->reply_sz) { + ioc->reply_free[i] = cpu_to_le32(reply_address); + if (ioc->is_mcpu_endpoint) + _base_clone_reply_to_sys_mem(ioc, + reply_address, i); + } + + /* initialize reply queues */ + if (ioc->is_driver_loading) + _base_assign_reply_queues(ioc); + + /* initialize Reply Post Free Queue */ + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. + */ + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + } + skip_init_reply_post_free_queue: + + r = _base_send_ioc_init(ioc); + if (r) { + /* + * No need to check IOC state for fault state & issue + * diag reset during host reset. This check is need + * only during driver load time. 
+ */
+		if (!ioc->is_driver_loading)
+			return r;
+
+		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_send_ioc_init(ioc)))
+			return r;
+	}
+
+	/* initialize reply free host index */
+	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+	/* initialize reply post host index */
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		if (ioc->combined_reply_queue)
+			writel((reply_q->msix_index & 7) <<
+			   MPI2_RPHI_MSIX_INDEX_SHIFT,
+			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
+		else
+			writel(reply_q->msix_index <<
+			   MPI2_RPHI_MSIX_INDEX_SHIFT,
+			   &ioc->chip->ReplyPostHostIndex);
+
+		if (!_base_is_controller_msix_enabled(ioc))
+			goto skip_init_reply_post_host_index;
+	}
+
+ skip_init_reply_post_host_index:
+
+	mpt3sas_base_unmask_interrupts(ioc);
+
+	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+		r = _base_display_fwpkg_version(ioc);
+		if (r)
+			return r;
+	}
+
+	r = _base_static_config_pages(ioc);
+	if (r)
+		return r;
+
+	r = _base_event_notification(ioc);
+	if (r)
+		return r;
+
+	if (!ioc->shost_recovery) {
+
+		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+		    == 0x80) {
+			hide_flag = (u8) (
+			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
+			    MFG_PAGE10_HIDE_SSDS_MASK);
+			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
+				ioc->mfg_pg10_hide_flag = hide_flag;
+		}
+
+		ioc->wait_for_discovery_to_complete =
+		    _base_determine_wait_on_discovery(ioc);
+
+		return r; /* scan_start and scan_finished support */
+	}
+
+	r = _base_send_port_enable(ioc);
+	if (r)
+		return r;
+
+	return r;
+}
+
+/**
+ * mpt3sas_base_free_resources - free controller resources
+ * @ioc: per adapter object
+ */
+void
+mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
+
+	/* synchronize freeing resources with the pci_access_mutex lock */
+	mutex_lock(&ioc->pci_access_mutex);
+	if (ioc->chip_phys && ioc->chip) {
+		mpt3sas_base_mask_interrupts(ioc);
+		ioc->shost_recovery = 1;
+		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+		ioc->shost_recovery = 0;
+	}
+
+	mpt3sas_base_unmap_resources(ioc);
+	mutex_unlock(&ioc->pci_access_mutex);
+	return;
+}
+
+/**
+ * mpt3sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
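+ *
+ * In outline (a sketch of the flow implemented below):
+ *
+ *	map resources -> get IOC facts -> make_ioc_ready(SOFT_RESET)
+ *	  -> per-port facts -> allocate memory pools and bitmaps
+ *	  -> set up internal command state -> unmask events
+ *	  -> _base_make_ioc_operational() (retried once on -EAGAIN)
+ *
+ * Any failure funnels to out_free_resources, which tears everything
+ * down again and flags the host for removal.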
+ */
+int
+mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+{
+	int r, i, rc;
+	int cpu_id, last_cpu_id = 0;
+
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
+
+	/* setup cpu_msix_table */
+	ioc->cpu_count = num_online_cpus();
+	for_each_online_cpu(cpu_id)
+		last_cpu_id = cpu_id;
+	ioc->cpu_msix_table_sz = last_cpu_id + 1;
+	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+	ioc->reply_queue_count = 1;
+	if (!ioc->cpu_msix_table) {
+		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+
+	if (ioc->is_warpdrive) {
+		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
+		    sizeof(resource_size_t *), GFP_KERNEL);
+		if (!ioc->reply_post_host_index) {
+			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
+			r = -ENOMEM;
+			goto out_free_resources;
+		}
+	}
+
+	ioc->smp_affinity_enable = smp_affinity_enable;
+
+	ioc->rdpq_array_enable_assigned = 0;
+	ioc->use_32bit_dma = false;
+	ioc->dma_mask = 64;
+	if (ioc->is_aero_ioc) {
+		ioc->base_readl = &_base_readl_aero;
+		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
+	} else {
+		ioc->base_readl = &_base_readl;
+		ioc->base_readl_ext_retry = &_base_readl;
+	}
+	r = mpt3sas_base_map_resources(ioc);
+	if (r)
+		goto out_free_resources;
+
+	pci_set_drvdata(ioc->pdev, ioc->shost);
+	r = _base_get_ioc_facts(ioc);
+	if (r) {
+		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_get_ioc_facts(ioc)))
+			goto out_free_resources;
+	}
+
+	switch (ioc->hba_mpi_version_belonged) {
+	case MPI2_VERSION:
+		ioc->build_sg_scmd = &_base_build_sg_scmd;
+		ioc->build_sg = &_base_build_sg;
+		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
+		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
+		break;
+	case MPI25_VERSION:
+	case MPI26_VERSION:
+		/*
+		 * In SAS3.0,
+		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
+		 * Target Status - all require the IEEE formatted scatter gather
+		 * elements.
+		 */
+		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
+		ioc->build_sg = &_base_build_sg_ieee;
+		ioc->build_nvme_prp = &_base_build_nvme_prp;
+		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
+		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+		if (ioc->high_iops_queues)
+			ioc->get_msix_index_for_smlio =
+			    &_base_get_high_iops_msix_index;
+		else
+			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
+		break;
+	}
+	if (ioc->atomic_desc_capable) {
+		ioc->put_smid_default = &_base_put_smid_default_atomic;
+		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+		ioc->put_smid_fast_path =
+		    &_base_put_smid_fast_path_atomic;
+		ioc->put_smid_hi_priority =
+		    &_base_put_smid_hi_priority_atomic;
+	} else {
+		ioc->put_smid_default = &_base_put_smid_default;
+		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+		if (ioc->is_mcpu_endpoint)
+			ioc->put_smid_scsi_io =
+			    &_base_put_smid_mpi_ep_scsi_io;
+		else
+			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+	}
+	/*
+	 * These are the function pointers for requests that don't
+	 * require IEEE scatter gather elements.
+	 *
+	 * For example, Configuration Pages and SAS IOUNIT Control don't.
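+	 *
+	 * A sketch of why two families exist: an MPI simple SGE packs
+	 * flags and length into a single dword next to the address,
+	 * while an IEEE simple SGE carries a separate length dword and
+	 * flags byte, so
+	 *
+	 *	sizeof(Mpi2SGESimple64_t)	== 12 bytes (MPI format)
+	 *	sizeof(Mpi2IeeeSgeSimple64_t)	== 16 bytes (IEEE format)
+	 *
+	 * which is why ioc->sge_size_ieee is recorded separately above.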
+ */ + ioc->build_sg_mpi = &_base_build_sg; + ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; + + r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + if (r) + goto out_free_resources; + + ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, + sizeof(struct mpt3sas_port_facts), GFP_KERNEL); + if (!ioc->pfacts) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { + r = _base_get_port_facts(ioc, i); + if (r) { + rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_port_facts(ioc, i))) + goto out_free_resources; + } + } + + r = _base_allocate_memory_pools(ioc); + if (r) + goto out_free_resources; + + if (irqpoll_weight > 0) + ioc->thresh_hold = irqpoll_weight; + else + ioc->thresh_hold = ioc->hba_queue_depth/4; + + _base_init_irqpolls(ioc); + init_waitqueue_head(&ioc->reset_wq); + + /* allocate memory pd handle bitmask list */ + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + + /* allocate memory for pending OS device add list */ + ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pend_os_device_add_sz++; + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, + GFP_KERNEL); + if (!ioc->pend_os_device_add) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; + ioc->device_remove_in_progress = + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + if (!ioc->device_remove_in_progress) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->fwfault_debug = mpt3sas_fwfault_debug; + + /* base internal command bits */ + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + /* port_enable command bits */ + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + + /* transport internal command bits */ + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + + /* scsih internal command bits */ + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + + /* task management internal command bits */ + ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + + /* config page internal command bits */ + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + + /* ctl module internal command bits */ + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || + !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || + !ioc->tm_cmds.reply || !ioc->config_cmds.reply || + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto 
out_free_resources; + } + + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + + /* here we enable the events we care about */ + _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); + _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); + _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); + _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); + _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); + _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); + _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { + if (ioc->is_gen35_ioc) { + _base_unmask_events(ioc, + MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION); + _base_unmask_events(ioc, + MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); + } + } + r = _base_make_ioc_operational(ioc); + if (r == -EAGAIN) { + r = _base_make_ioc_operational(ioc); + if (r) + goto out_free_resources; + } + + /* + * Copy current copy of IOCFacts in prev_fw_facts + * and it will be used during online firmware upgrade. + */ + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct mpt3sas_facts)); + + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; + ioc->got_task_abort_from_ioctl = 0; + return 0; + + out_free_resources: + + ioc->remove_host = 1; + + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + + +/** + * mpt3sas_base_detach - remove controller instance + * @ioc: per adapter object + */ +void +mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + mpt3sas_free_enclosure_list(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +/** + * _base_pre_reset_handler - pre reset handler + * @ioc: per 
adapter object
+ */
+static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+	mpt3sas_scsih_pre_reset_handler(ioc);
+	mpt3sas_ctl_pre_reset_handler(ioc);
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
+}
+
+/**
+ * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
+ * @ioc: per adapter object
+ */
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+	dtmprintk(ioc,
+	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
+	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+		ioc->transport_cmds.status |= MPT3_CMD_RESET;
+		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+		complete(&ioc->transport_cmds.done);
+	}
+	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+		ioc->base_cmds.status |= MPT3_CMD_RESET;
+		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+		complete(&ioc->base_cmds.done);
+	}
+	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+		ioc->port_enable_failed = 1;
+		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+		if (ioc->is_driver_loading) {
+			ioc->start_scan_failed =
+			    MPI2_IOCSTATUS_INTERNAL_ERROR;
+			ioc->start_scan = 0;
+		} else {
+			complete(&ioc->port_enable_cmds.done);
+		}
+	}
+	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+		ioc->config_cmds.status |= MPT3_CMD_RESET;
+		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+		ioc->config_cmds.smid = USHRT_MAX;
+		complete(&ioc->config_cmds.done);
+	}
+}
+
+/**
+ * _base_clear_outstanding_commands - clear all outstanding commands
+ * @ioc: per adapter object
+ */
+static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+	_base_clear_outstanding_mpt_commands(ioc);
+}
+
+/**
+ * _base_reset_done_handler - reset done handler
+ * @ioc: per adapter object
+ */
+static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+	mpt3sas_scsih_reset_done_handler(ioc);
+	mpt3sas_ctl_reset_done_handler(ioc);
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
+}
+
+/**
+ * mpt3sas_wait_for_commands_to_complete - wait for pending commands
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * This function waits up to 10 seconds for all pending commands to
+ * complete prior to putting the controller in reset.
+ */
+void
+mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
+{
+	u32 ioc_state;
+
+	ioc->pending_io_count = 0;
+
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+		return;
+
+	/* pending command count */
+	ioc->pending_io_count = scsi_host_busy(ioc->shost);
+
+	if (!ioc->pending_io_count)
+		return;
+
+	/* wait for pending commands to complete */
+	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
+ * attributes during online firmware upgrade and update the corresponding
+ * IOC variables accordingly.
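+ *
+ * A grown MaxDevHandle forces the per-device bitmaps to be enlarged.
+ * Note the krealloc()+memset() pairing used below: krealloc() does not
+ * zero the newly extended tail, so each grow follows the sketch
+ *
+ *	new = krealloc(old, new_sz, GFP_KERNEL);
+ *	if (!new)
+ *		return -ENOMEM;
+ *	memset(new + old_sz, 0, new_sz - old_sz);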
+ * + * @ioc: Pointer to MPT_ADAPTER structure + */ +static int +_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) +{ + u16 pd_handles_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + ioc_info(ioc, + "Unable to allocate the memory for pd_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + + blocking_handles = krealloc(ioc->blocking_handles, + pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + ioc_info(ioc, + "Unable to allocate the memory for " + "blocking_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + + pend_os_device_add = krealloc(ioc->pend_os_device_add, + pd_handles_sz, GFP_KERNEL); + if (!pend_os_device_add) { + ioc_info(ioc, + "Unable to allocate the memory for pend_os_device_add of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + + device_remove_in_progress = krealloc( + ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); + if (!device_remove_in_progress) { + ioc_info(ioc, + "Unable to allocate the memory for " + "device_remove_in_progress of sz: %d\n " + , pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + } + + memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); + return 0; +} + +/** + * mpt3sas_base_hard_reset_handler - reset controller + * @ioc: Pointer to MPT_ADAPTER structure + * @type: FORCE_BIG_HAMMER or SOFT_RESET + * + * Return: 0 for success, non-zero for failure. 
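+ *
+ * Serialized on reset_in_progress_mutex, the handler walks, in order
+ * (a sketch of the body below):
+ *
+ *	pre-reset notifiers -> wait up to 10 s for outstanding I/O
+ *	  -> mask interrupts, pause mq polling -> make_ioc_ready(type)
+ *	  -> clear outstanding commands -> re-read IOC facts
+ *	  -> _base_make_ioc_operational() -> reset-done notifiers
+ *
+ * with shost_recovery raised across the window so no new I/O reaches
+ * the firmware meanwhile.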
+ */ +int +mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, + enum reset_type type) +{ + int r; + unsigned long flags; + u32 ioc_state; + u8 is_fault = 0, is_trigger = 0; + + dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + + if (ioc->pci_error_recovery) { + ioc_err(ioc, "%s: pci error recovery reset\n", __func__); + r = 0; + goto out_unlocked; + } + + if (mpt3sas_fwfault_debug) + mpt3sas_halt_firmware(ioc); + + /* wait for an active reset in progress to complete */ + mutex_lock(&ioc->reset_in_progress_mutex); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) && + (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED))) { + is_trigger = 1; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + is_fault = 1; + ioc->htb_rel.trigger_info_dwords[1] = + (ioc_state & MPI2_DOORBELL_DATA_MASK); + } + } + _base_pre_reset_handler(ioc); + mpt3sas_wait_for_commands_to_complete(ioc); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); + r = mpt3sas_base_make_ioc_ready(ioc, type); + if (r) + goto out; + _base_clear_outstanding_commands(ioc); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = _base_get_ioc_facts(ioc); + if (r) + goto out; + + r = _base_check_ioc_facts_changes(ioc); + if (r) { + ioc_info(ioc, + "Some of the parameters got changed in this new firmware" + " image and it requires system reboot\n"); + goto out; + } + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic("%s: Issue occurred with flashing controller firmware." + "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); + + r = _base_make_ioc_operational(ioc); + if (!r) + _base_reset_done_handler(ioc); + + out: + ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED"); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); + mpt3sas_base_resume_mq_polling(ioc); + + out_unlocked: + if ((r == 0) && is_trigger) { + if (is_fault) + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); + else + mpt3sas_trigger_master(ioc, + MASTER_TRIGGER_ADAPTER_RESET); + } + dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__)); + return r; +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h new file mode 100644 index 000000000..10055c7e4 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -0,0 +1,2072 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. 
+ * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_BASE_H_INCLUDED +#define MPT3SAS_BASE_H_INCLUDED + +#include "mpi/mpi2_type.h" +#include "mpi/mpi2.h" +#include "mpi/mpi2_ioc.h" +#include "mpi/mpi2_cnfg.h" +#include "mpi/mpi2_init.h" +#include "mpi/mpi2_raid.h" +#include "mpi/mpi2_tool.h" +#include "mpi/mpi2_sas.h" +#include "mpi/mpi2_pci.h" +#include "mpi/mpi2_image.h" + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_transport_sas.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_eh.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/irq_poll.h> + +#include "mpt3sas_debug.h" +#include "mpt3sas_trigger_diag.h" +#include "mpt3sas_trigger_pages.h" + +/* driver versioning info */ +#define MPT3SAS_DRIVER_NAME "mpt3sas" +#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" +#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" +#define MPT3SAS_DRIVER_VERSION "43.100.00.00" +#define MPT3SAS_MAJOR_VERSION 43 +#define MPT3SAS_MINOR_VERSION 100 +#define MPT3SAS_BUILD_VERSION 0 +#define MPT3SAS_RELEASE_VERSION 00 + +#define MPT2SAS_DRIVER_NAME "mpt2sas" +#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" +#define MPT2SAS_DRIVER_VERSION "20.102.00.00" +#define MPT2SAS_MAJOR_VERSION 20 +#define MPT2SAS_MINOR_VERSION 102 +#define MPT2SAS_BUILD_VERSION 0 +#define MPT2SAS_RELEASE_VERSION 00 + +/* CoreDump: Default timeout */ +#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/ +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) +#define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */ +#define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */ +#define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */ +#define MPT3SAS_TIMESYNC_MASK (0x7F) /* 0 - 6 bits */ +#define SECONDS_PER_MIN (60) +#define SECONDS_PER_HOUR (3600) +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) +#define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81) + +/* + * Set MPT3SAS_SG_DEPTH value based on user input. 
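+ *
+ * For example (a sketch): building with CONFIG_SCSI_MPT3SAS_MAX_SGE=256
+ * in .config yields MPT3SAS_SG_DEPTH == 256, while leaving the option
+ * unset falls back to SG_CHUNK_SIZE via MPT_MAX_PHYS_SEGMENTS; the
+ * Kconfig range clamps user input to 16..256 either way.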
+ */ +#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE +#define MPT_MIN_PHYS_SEGMENTS 16 +#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32 + +#define MCPU_MAX_CHAINS_PER_IO 3 + +#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE +#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE +#else +#define MPT3SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +#ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE +#define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE +#else +#define MPT2SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +/* + * Generic Defines + */ +#define MPT3SAS_SATA_QUEUE_DEPTH 32 +#define MPT3SAS_SAS_QUEUE_DEPTH 254 +#define MPT3SAS_RAID_QUEUE_DEPTH 128 +#define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200 + +#define MPT3SAS_RAID_MAX_SECTORS 8192 +#define MPT3SAS_HOST_PAGE_SIZE_4K 12 +#define MPT3SAS_NVME_QUEUE_DEPTH 128 +#define MPT_NAME_LENGTH 32 /* generic length of strings */ +#define MPT_STRING_LENGTH 64 +#define MPI_FRAME_START_OFFSET 256 +#define REPLY_FREE_POOL_SIZE 512 /*(32 maxcredix *4)*(4 times)*/ + +#define MPT_MAX_CALLBACKS 32 + +#define MPT_MAX_HBA_NUM_PHYS 32 + +#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ +/* reserved for issuing internally framed scsi io cmds */ +#define INTERNAL_SCSIIO_CMDS_COUNT 3 + +#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ + +#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF + +#define MAX_CHAIN_ELEMT_SZ 16 +#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 + +#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 +#define FW_IMG_HDR_READ_TIMEOUT 15 + +#define IOC_OPERATIONAL_WAIT_COUNT 10 + +/* + * NVMe defines + */ +#define NVME_PRP_SIZE 8 /* PRP size */ +#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ +#define NVME_TASK_ABORT_MIN_TIMEOUT 6 +#define NVME_TASK_ABORT_MAX_TIMEOUT 60 +#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010) +#define NVME_PRP_PAGE_SIZE 4096 /* Page size */ + +struct mpt3sas_nvme_cmd { + u8 rsvd[24]; + __le64 prp1; + __le64 prp2; +}; + +/* + * logging format + */ +#define ioc_err(ioc, fmt, ...) \ + pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_notice(ioc, fmt, ...) \ + pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_warn(ioc, fmt, ...) \ + pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__) +#define ioc_info(ioc, fmt, ...) 
\ + pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__) + +/* + * WarpDrive Specific Log codes + */ + +#define MPT2_WARPDRIVE_LOGENTRY (0x8002) +#define MPT2_WARPDRIVE_LC_SSDT (0x41) +#define MPT2_WARPDRIVE_LC_SSDLW (0x43) +#define MPT2_WARPDRIVE_LC_SSDLF (0x44) +#define MPT2_WARPDRIVE_LC_BRMF (0x4D) + +/* + * per target private data + */ +#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 +#define MPT_TARGET_FLAGS_VOLUME 0x02 +#define MPT_TARGET_FLAGS_DELETED 0x04 +#define MPT_TARGET_FASTPATH_IO 0x08 +#define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10 + +#define SAS2_PCI_DEVICE_B0_REVISION (0x01) +#define SAS3_PCI_DEVICE_C0_REVISION (0x02) + +/* Atlas PCIe Switch Management Port */ +#define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2) + +/* + * Intel HBA branding + */ +#define MPT2SAS_INTEL_RMS25JB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB080" +#define MPT2SAS_INTEL_RMS25JB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB040" +#define MPT2SAS_INTEL_RMS25KB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB080" +#define MPT2SAS_INTEL_RMS25KB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB040" +#define MPT2SAS_INTEL_RMS25LB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB040" +#define MPT2SAS_INTEL_RMS25LB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB080" +#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ + "Intel Integrated RAID Module RMS2LL080" +#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ + "Intel Integrated RAID Module RMS2LL040" +#define MPT2SAS_INTEL_RS25GB008_BRANDING \ + "Intel(R) RAID Controller RS25GB008" +#define MPT2SAS_INTEL_SSD910_BRANDING \ + "Intel(R) SSD 910 Series" + +#define MPT3SAS_INTEL_RMS3JC080_BRANDING \ + "Intel(R) Integrated RAID Module RMS3JC080" +#define MPT3SAS_INTEL_RS3GC008_BRANDING \ + "Intel(R) RAID Controller RS3GC008" +#define MPT3SAS_INTEL_RS3FC044_BRANDING \ + "Intel(R) RAID Controller RS3FC044" +#define MPT3SAS_INTEL_RS3UC080_BRANDING \ + "Intel(R) RAID Controller RS3UC080" + +/* + * Intel HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516 +#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 +#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 +#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 +#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A +#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 +#define MPT2SAS_INTEL_SSD910_SSDID 0x3700 + +#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 +#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 +#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 +#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 + +/* + * Dell HBA branding + */ +#define MPT2SAS_DELL_BRANDING_SIZE 32 + +#define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA" +#define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter" +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated" +#define MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING "Dell PERC H200 Modular" +#define MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING "Dell PERC H200 Embedded" +#define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200" +#define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS" + +#define MPT3SAS_DELL_12G_HBA_BRANDING \ + "Dell 12Gbps HBA" + +/* + * Dell HBA SSDIDs + */ +#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C +#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E +#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F +#define 
MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID  0x1F20
+#define MPT2SAS_DELL_PERC_H200_SSDID           0x1F21
+#define MPT2SAS_DELL_6GBPS_SAS_SSDID           0x1F22
+
+#define MPT3SAS_DELL_12G_HBA_SSDID             0x1F46
+
+/*
+ * Cisco HBA branding
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \
+	"Cisco 9300-8E 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \
+	"Cisco 9300-8i 12G SAS HBA"
+#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \
+	"Cisco 12G Modular SAS Pass through Controller"
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \
+	"UCS C3X60 12G SAS Pass through Controller"
+/*
+ * Cisco HBA SSDIDs
+ */
+#define MPT3SAS_CISCO_12G_8E_HBA_SSDID                 0x14C
+#define MPT3SAS_CISCO_12G_8I_HBA_SSDID                 0x154
+#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID              0x155
+#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID   0x156
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED         (0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED           (0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET         (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED   (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED          (0x10)
+
+/*
+ * HP HBA branding
+ */
+#define MPT2SAS_HP_3PAR_SSVID                  0x1590
+
+#define MPT2SAS_HP_2_4_INTERNAL_BRANDING \
+	"HP H220 Host Bus Adapter"
+#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING \
+	"HP H221 Host Bus Adapter"
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING \
+	"HP H222 Host Bus Adapter"
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING \
+	"HP H220i Host Bus Adapter"
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING \
+	"HP H210i Host Bus Adapter"
+
+/*
+ * HP HBA SSDIDs
+ */
+#define MPT2SAS_HP_2_4_INTERNAL_SSDID                  0x0041
+#define MPT2SAS_HP_2_4_EXTERNAL_SSDID                  0x0042
+#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID     0x0043
+#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID         0x0044
+#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID         0x0046
+
+/*
+ * Combined Reply Queue constants.
+ * There are twelve Supplemental Reply Post Host Index Registers on
+ * Gen3 controllers (sixteen on Gen3.5), and each register is at an
+ * offset of 0x10 bytes from the previous one.
+ */
+#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ?
16 : 8) +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_MIN_IRQS 1 + +/* OEM Identifiers */ +#define MFG10_OEM_ID_INVALID (0x00000000) +#define MFG10_OEM_ID_DELL (0x00000001) +#define MFG10_OEM_ID_FSC (0x00000002) +#define MFG10_OEM_ID_SUN (0x00000003) +#define MFG10_OEM_ID_IBM (0x00000004) + +/* GENERIC Flags 0*/ +#define MFG10_GF0_OCE_DISABLED (0x00000001) +#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002) +#define MFG10_GF0_R10_DISPLAY (0x00000004) +#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) +#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) + +#define VIRTUAL_IO_FAILED_RETRY (0x32010081) + +/* High IOPs definitions */ +#define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH 8 +#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8 +#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16 +#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128 +#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16 + +/* OEM Specific Flags will come from OEM specific header files */ +struct Mpi2ManufacturingPage10_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + U8 OEMIdentifier; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 08h */ + U32 Reserved3; /* 0Ch */ + U32 GenericFlags0; /* 10h */ + U32 GenericFlags1; /* 14h */ + U32 Reserved4; /* 18h */ + U32 OEMSpecificFlags0; /* 1Ch */ + U32 OEMSpecificFlags1; /* 20h */ + U32 Reserved5[18]; /* 24h - 60h*/ +}; + + +/* Miscellaneous options */ +struct Mpi2ManufacturingPage11_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + __le32 Reserved1; /* 04h */ + u8 Reserved2; /* 08h */ + u8 EEDPTagMode; /* 09h */ + u8 Reserved3; /* 0Ah */ + u8 Reserved4; /* 0Bh */ + __le32 Reserved5[8]; /* 0Ch-2Ch */ + u16 AddlFlags2; /* 2Ch */ + u8 AddlFlags3; /* 2Eh */ + u8 Reserved6; /* 2Fh */ + __le32 Reserved7[7]; /* 30h - 4Bh */ + u8 NVMeAbortTO; /* 4Ch */ + u8 NumPerDevEvents; /* 4Dh */ + u8 HostTraceBufferDecrementSizeKB; /* 4Eh */ + u8 HostTraceBufferFlags; /* 4Fh */ + u16 HostTraceBufferMaxSizeKB; /* 50h */ + u16 HostTraceBufferMinSizeKB; /* 52h */ + u8 CoreDumpTOSec; /* 54h */ + u8 TimeSyncInterval; /* 55h */ + u16 Reserved9; /* 56h */ + __le32 Reserved10; /* 58h */ +}; + +/** + * struct MPT3SAS_TARGET - starget private hostdata + * @starget: starget object + * @sas_address: target sas address + * @raid_device: raid_device pointer to access volume data + * @handle: device handle + * @num_luns: number luns + * @flags: MPT_TARGET_FLAGS_XXX flags + * @deleted: target flaged for deletion + * @tm_busy: target is busy with TM request. 
+ * @port: hba port entry containing target's port number info + * @sas_dev: The sas_device associated with this target + * @pcie_dev: The pcie device associated with this target + */ +struct MPT3SAS_TARGET { + struct scsi_target *starget; + u64 sas_address; + struct _raid_device *raid_device; + u16 handle; + int num_luns; + u32 flags; + u8 deleted; + u8 tm_busy; + struct hba_port *port; + struct _sas_device *sas_dev; + struct _pcie_device *pcie_dev; +}; + + +/* + * per device private data + */ +#define MPT_DEVICE_FLAGS_INIT 0x01 + +#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) +#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) +#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) +#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) + +/** + * struct MPT3SAS_DEVICE - sdev private hostdata + * @sas_target: starget private hostdata + * @lun: lun number + * @flags: MPT_DEVICE_XXX flags + * @configured_lun: lun is configured + * @block: device is in SDEV_BLOCK state + * @tlr_snoop_check: flag used in determining whether to disable TLR + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device + */ +struct MPT3SAS_DEVICE { + struct MPT3SAS_TARGET *sas_target; + unsigned int lun; + u32 flags; + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; + u8 ignore_delay_remove; + /* Iopriority Command Handling */ + u8 ncq_prio_enable; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; + +}; + +#define MPT3_CMD_NOT_USED 0x8000 /* free */ +#define MPT3_CMD_COMPLETE 0x0001 /* completed */ +#define MPT3_CMD_PENDING 0x0002 /* pending */ +#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */ +#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */ +#define MPT3_CMD_COMPLETE_ASYNC 0x0010 /* tells whether cmd completes in same thread or not */ + +/** + * struct _internal_cmd - internal commands struct + * @mutex: mutex + * @done: completion + * @reply: reply message pointer + * @sense: sense data + * @status: MPT3_CMD_XXX status + * @smid: system message id + */ +struct _internal_cmd { + struct mutex mutex; + struct completion done; + void *reply; + void *sense; + u16 status; + u16 smid; +}; + + + +/** + * struct _sas_device - attached device information + * @list: sas device list + * @starget: starget object + * @sas_address: device sas address + * @device_name: retrieved from the SAS IDENTIFY frame. 
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @phy: phy identifier provided in sas device page 0
+ * @responding: used in _scsih_sas_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @pfa_led_on: flag for PFA LED status
+ * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
+ *	addition routine.
+ * @chassis_slot: chassis slot
+ * @is_chassis_slot_valid: chassis slot valid or not
+ * @port: hba port entry containing device's port number info
+ * @rphy: device's sas_rphy address used to identify this device structure in
+ *	target_alloc callback function
+ */
+struct _sas_device {
+	struct list_head list;
+	struct scsi_target *starget;
+	u64 sas_address;
+	u64 device_name;
+	u16 handle;
+	u64 sas_address_parent;
+	u16 enclosure_handle;
+	u64 enclosure_logical_id;
+	u16 volume_handle;
+	u64 volume_wwid;
+	u32 device_info;
+	int id;
+	int channel;
+	u16 slot;
+	u8 phy;
+	u8 responding;
+	u8 fast_path;
+	u8 pfa_led_on;
+	u8 pend_sas_rphy_add;
+	u8 enclosure_level;
+	u8 chassis_slot;
+	u8 is_chassis_slot_valid;
+	u8 connector_name[5];
+	struct kref refcount;
+	u8 port_type;
+	struct hba_port *port;
+	struct sas_rphy *rphy;
+};
+
+static inline void sas_device_get(struct _sas_device *s)
+{
+	kref_get(&s->refcount);
+}
+
+static inline void sas_device_free(struct kref *r)
+{
+	kfree(container_of(r, struct _sas_device, refcount));
+}
+
+static inline void sas_device_put(struct _sas_device *s)
+{
+	kref_put(&s->refcount, sas_device_free);
+}
+
+/**
+ * struct _pcie_device - attached PCIe device information
+ * @list: pcie device list
+ * @starget: starget object
+ * @wwid: device WWID
+ * @handle: device handle
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @port_num: port number
+ * @responding: used in _scsih_pcie_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for
+ *	NVMe device only
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @enclosure_level: The level of device's enclosure from the controller
+ * @connector_name: ASCII value of the Connector's name
+ * @serial_number: pointer of serial number string allocated runtime
+ * @access_status: Device's Access Status
+ * @shutdown_latency: NVMe device's RTD3 Entry Latency
+ * @refcount: reference count for deletion
+ */
+struct _pcie_device {
+	struct list_head list;
+	struct scsi_target *starget;
+	u64 wwid;
+	u16 handle;
+	u32 device_info;
+	int id;
+	int channel;
+	u16 slot;
+	u8 port_num;
+	u8 responding;
+	u8 fast_path;
+	u32 nvme_mdts;
+	u16 enclosure_handle;
+	u64 enclosure_logical_id;
+	u8 enclosure_level;
+	u8 connector_name[4];
+	u8 *serial_number;
+	u8 reset_timeout;
+	u8 access_status;
+	u16 shutdown_latency;
+	struct kref refcount;
+};
+/**
+ * pcie_device_get - Increment the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called it increments the reference
+ * count of the pcie device it was called for.
+ */
+static inline void pcie_device_get(struct _pcie_device *p)
+{
+	kref_get(&p->refcount);
+}
+
+/**
+ * pcie_device_free - Release the pcie device object
+ * @r: kref object
+ *
+ * Frees the pcie device object. It is called when the reference
+ * count reaches zero.
+ */
+static inline void pcie_device_free(struct kref *r)
+{
+	kfree(container_of(r, struct _pcie_device, refcount));
+}
+
+/**
+ * pcie_device_put - Decrement the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called it decrements the reference
+ * count of the pcie device it was called for.
+ *
+ * When the reference count reaches zero, pcie_device_free() is
+ * invoked to free the pcie_device object.
+ */
+static inline void pcie_device_put(struct _pcie_device *p)
+{
+	kref_put(&p->refcount, pcie_device_free);
+}
+/**
+ * struct _raid_device - raid volume linked list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @block_sz: block size of the volume
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: device info of the volume member disks
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ * @direct_io_enabled: whether direct io to PDs is allowed or not
+ * @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
+ * @max_lba: maximum LBA of the volume
+ * @stripe_sz: stripe size of the volume
+ * @pd_handle: array of handles of the physical drives for direct I/O in le16
+ */
+#define MPT_MAX_WARPDRIVE_PDS		8
+struct _raid_device {
+	struct list_head list;
+	struct scsi_target *starget;
+	struct scsi_device *sdev;
+	u64 wwid;
+	u16 handle;
+	u16 block_sz;
+	int id;
+	int channel;
+	u8 volume_type;
+	u8 num_pds;
+	u8 responding;
+	u8 percent_complete;
+	u8 direct_io_enabled;
+	u8 stripe_exponent;
+	u8 block_exponent;
+	u64 max_lba;
+	u32 stripe_sz;
+	u32 device_info;
+	u16 pd_handle[MPT_MAX_WARPDRIVE_PDS];
+};
+
+/**
+ * struct _boot_device - boot device info
+ *
+ * @channel: sas, raid, or pcie channel
+ * @device: holds pointer for struct _sas_device, struct _raid_device or
+ *	struct _pcie_device
+ */
+struct _boot_device {
+	int channel;
+	void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @hba_port: hba port entry containing port's port number info
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+	struct list_head port_list;
+	u8 num_phys;
+	struct sas_identify remote_identify;
+	struct sas_rphy *rphy;
+	struct sas_port *port;
+	struct hba_port *hba_port;
+	struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for the attached device
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ * @hba_vphy: flag marking an HBA virtual (vSES) phy
+ * @port: hba port entry containing port number info
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ u8 hba_vphy;
+ struct hba_port *port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number of phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle, when this node is a member of an
+ * enclosure
+ * @enclosure_logical_id: enclosure logical identifier
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @nr_phys_allocated: number of phys for which memory is allocated
+ * @port: hba port entry containing node's port number info
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ * @rphy: sas_rphy object of this expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ u8 nr_phys_allocated;
+ struct hba_port *port;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct _enclosure_node - enclosure information
+ * @list: list of enclosures
+ * @pg0: enclosure pg0
+ */
+struct _enclosure_node {
+ struct list_head list;
+ Mpi2SasEnclosurePage0_t pg0;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset; if that fails, fall back to the
+ * big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
+
+/**
+ * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O)
+ * @pcie_sgl: PCIe native SGL for NVMe devices
+ * @pcie_sgl_dma: physical address
+ */
+struct pcie_sg_list {
+ void *pcie_sgl;
+ dma_addr_t pcie_sgl_dma;
+};
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+};
+
+struct chain_lookup {
+ struct chain_tracker *chains_per_smid;
+ atomic_t chain_offset;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi command pointer
+ * @cb_idx: callback index
+ * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
+ * @pcie_sg_list: PCIe native SGL buffer (NVMe devices)
+ * @chain_list: list of associated firmware chain tracker
+ * @msix_io: IO's msix
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ u8 direct_io;
+ struct pcie_sg_list pcie_sg_list;
+ struct list_head chain_list;
+ u16 msix_io;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @list: list entry
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
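+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): _tr_list
+ * entries queue up target resets that could not be issued immediately.
+ * A hypothetical drain of such a delayed list pops entries in FIFO
+ * order:
+ *
+ *	struct _tr_list *delayed_tr;
+ *
+ *	while (!list_empty(&ioc->delayed_tr_list)) {
+ *		delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ *		    struct _tr_list, list);
+ *		list_del(&delayed_tr->list);
+ *		... issue the target reset for delayed_tr->handle ...
+ *		kfree(delayed_tr);
+ *	}
+ */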
+
+/**
+ * struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list
+ * @list: list entry
+ * @handle: device handle
+ */
+struct _sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+/**
+ * struct _event_ack_list - delayed event acknowledgment list
+ * @list: list entry
+ * @Event: Event ID
+ * @EventContext: used to track the event uniquely
+ */
+struct _event_ack_list {
+ struct list_head list;
+ U16 Event;
+ U32 EventContext;
+};
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @os_irq: irq number
+ * @irqpoll: irq_poll object
+ * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @irq_line_enable: Tells whether the irq line is enabled or not
+ * @is_iouring_poll_q: Tells whether this reply queue is assigned
+ * to an io uring poll queue or not
+ * @list: entry in the adapter's reply queue list
+ */
+struct adapter_reply_queue {
+ struct MPT3SAS_ADAPTER *ioc;
+ u8 msix_index;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ u32 os_irq;
+ struct irq_poll irqpoll;
+ bool irq_poll_scheduled;
+ bool irq_line_enable;
+ bool is_iouring_poll_q;
+ struct list_head list;
+};
+
+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether the io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on the io uring poll queue or not
+ * @reply_q: reply queue mapped for the io uring poll queue
+ */
+struct io_uring_poll_queue {
+ atomic_t busy;
+ atomic_t pause;
+ struct adapter_reply_queue *reply_q;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz,
+ dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+ void *paddr);
+
+/* SAS3.5 support */
+typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz);
+
+/* To support atomic and non atomic descriptors */
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
+typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
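+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): the
+ * PUT_SMID_* hooks above abstract over atomic and non-atomic request
+ * descriptors, so a hypothetical submission site only ever calls through
+ * the per adapter function pointers, e.g.:
+ *
+ *	ioc->put_smid_scsi_io(ioc, smid, handle);
+ *	ioc->put_smid_default(ioc, smid);
+ *
+ * and controllers that support atomic descriptors simply get different
+ * implementations assigned to these pointers at init time.
+ */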
+
+/*
+ * To get the high iops reply queue's msix index when high iops mode is
+ * enabled, else get the msix index of the general reply queues.
+ */
+typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd);
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt3sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi3_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 IOCMaxChainSegmentSize;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+ u8 CurrentHostPageSize;
+};
+
+struct mpt3sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+struct reply_post_struct {
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+};
+
+/**
+ * struct virtual_phy - vSES phy structure
+ * @list: list entry
+ * @sas_address: SAS Address of vSES device
+ * @phy_mask: vSES device's phy number
+ * @flags: flags used to manage this structure
+ */
+struct virtual_phy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flags;
+};
+
+#define MPT_VPHY_FLAG_DIRTY_PHY 0x01
+
+/**
+ * struct hba_port - Saves each HBA's Wide/Narrow port info
+ * @list: list entry
+ * @sas_address: sas address of this wide/narrow port's attached device
+ * @phy_mask: HBA PHYs belonging to this port
+ * @port_id: port number
+ * @flags: hba port flags
+ * @vphys_mask: mask of vSES devices Phy number
+ * @vphys_list: list containing vSES device structures
+ */
+struct hba_port {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 port_id;
+ u8 flags;
+ u32 vphys_mask;
+ struct list_head vphys_list;
+};
+
+/* hba port flags */
+#define HBA_PORT_FLAG_DIRTY_PORT 0x01
+#define HBA_PORT_FLAG_NEW_PORT 0x02
+
+#define MULTIPATH_DISABLED_PORT_ID 0xFF
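+
+/*
+ * Illustrative sketch (editor's note, not part of the driver):
+ * hba_port.phy_mask is a bitmap of HBA phy numbers, so membership tests
+ * are plain bit operations; a hypothetical check whether phy 3 belongs
+ * to a given port:
+ *
+ *	if (port->phy_mask & (1 << 3))
+ *		... phy 3 is part of this wide/narrow port ...
+ *
+ * vphys_mask works the same way for the vSES virtual phys tracked on
+ * vphys_list.
+ */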
+
+/**
+ * struct htb_rel_query - diagnostic buffer release reason
+ * @buffer_rel_condition: Release condition ioctl/sysfs/reset
+ * @reserved: reserved
+ * @trigger_type: Master/Event/scsi/MPI
+ * @trigger_info_dwords: Data corresponding to trigger type
+ */
+struct htb_rel_query {
+ u16 buffer_rel_condition;
+ u16 reserved;
+ u32 trigger_type;
+ u32 trigger_info_dwords[2];
+};
+
+/* Buffer_rel_condition bit fields */
+
+/* Bit 0 - Diag Buffer not Released */
+#define MPT3_DIAG_BUFFER_NOT_RELEASED (0x00)
+/* Bit 0 - Diag Buffer Released */
+#define MPT3_DIAG_BUFFER_RELEASED (0x01)
+
+/*
+ * Bit 1 - Diag Buffer Released by IOCTL,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_IOCTL (0x02 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 2 - Diag Buffer Released by Trigger,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_TRIGGER (0x04 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 3 - Diag Buffer Released by SysFs,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_SYSFS (0x08 | MPT3_DIAG_BUFFER_RELEASED)
+
+/* DIAG RESET Master trigger flags */
+#define MPT_DIAG_RESET_ISSUED_BY_DRIVER 0x00000000
+#define MPT_DIAG_RESET_ISSUED_BY_USER 0x00000001
+
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number of online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @chip: memory mapped register space
+ * @chip_phys: physical address prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @current_event: current processing firmware event
+ * @fw_events_cleanup: set to one while cleaning up the fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed, returns the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number of msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
+ * @timestamp_update_count: Counter to fire timeSync command
+ * @time_sync_interval: Time sync interval read from manufacturing page 11
+ * @high_iops_outstanding: used to load balance the interrupts
+ * within high iops reply queues
+ * @msix_load_balance: Enables load balancing of interrupts across
+ * the multiple MSIXs
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @thresh_hold: Max number of reply descriptors processed
+ * before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ * in reply queue list
+ * @drv_internal_flags: Bit map internal to driver
+ * @drv_support_bitmap: driver's supported feature bit map
+ * @use_32bit_dma: Flag to use 32 bit consistent dma mask
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: ctl internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: config page internal commands
+ * @tm_tr_cb_idx: device removal target reset handshake
+ * @tm_tr_volume_cb_idx: volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @max_shutdown_latency: timeout value for NVMe shutdown operation, equal
+ * to the maximum RTD3 Entry Latency reported among the attached
+ * NVMe drives
+ * @facts: static facts data
+ * @prev_fw_facts: previous fw facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @iounit_pg8: static iounit page 8
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @enclosure_list: enclosure object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @pcie_device_list: pcie device object list
+ * @pcie_device_init_list: pcie device object list (used only at init time)
+ * @pcie_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id: used for setting volume target IDs
+ * @pcie_target_id: used for setting pcie target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles: bitmask for PD handles
+ * @pd_handles_sz: size of pd_handles bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserved memory for the config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number of sg elements in main message
+ * @max_sges_in_chain_message: number of sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @chain_segment_sz: gives the max number of
+ * SGEs accommodated on a single chain buffer
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth
+ * @reply_sz: per reply frame size
+ * @reply: pool of replies
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replies
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_struct: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ * is assigned only once
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
+ * @combined_reply_queue: combined reply queue (96 MSI-X vector) support
+ * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue
+ * @delayed_tr_list: target reset link list
+ * @delayed_tr_volume_list: volume target reset link list
+ * @delayed_sc_list:
+ * @delayed_event_ack_list:
+ * @temp_sensors_count: number of temperature sensors
+ * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
+ * pci resource handling. PCI resource freeing will free
+ * vital hardware/memory resources, which might be in use by cli/sysfs
+ * path functions, resulting in a NULL pointer reference followed by a
+ * kernel crash. To avoid the above race condition we use mutex
+ * synchronization, which ensures synchronization between the cli/sysfs
+ * show paths.
+ * @atomic_desc_capable: Atomic Request Descriptor support.
+ * @GET_MSIX_INDEX: Get the msix index of high iops queues.
+ * @multipath_on_hba: flag indicating whether multipath on HBA is enabled
+ * @port_table_list: list containing the HBA's wide/narrow port info
+ */
+struct MPT3SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char driver_name[MPT_NAME_LENGTH - 8];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ phys_addr_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+ struct fw_event_work *current_event;
+ u8 fw_events_cleanup;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+ u8 got_task_abort_from_ioctl;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ u16 cpu_msix_table_sz;
+ resource_size_t __iomem **reply_post_host_index;
+ u32 ioc_reset_count;
+ MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+ u32 non_operational_loop;
+ u8 ioc_coredump_loop;
+ u32 timestamp_update_count;
+ u32 time_sync_interval;
+ atomic64_t total_io_cnt;
+ atomic64_t high_iops_outstanding;
+ bool msix_load_balance;
+ u16 thresh_hold;
+ u8 high_iops_queues;
+ u8 iopoll_q_start_index;
+ u32 drv_internal_flags;
+ u32 drv_support_bitmap;
+ u32 dma_mask;
+ bool enable_sdev_max_qd;
+ bool use_32bit_dma;
+ struct io_uring_poll_queue *io_uring_poll_queues;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* function ptr for either IEEE or MPI sg elements */
+ MPT_BUILD_SG_SCMD build_sg_scmd;
+ MPT_BUILD_SG build_sg;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
+ u16 sge_size_ieee;
+ u16 hba_mpi_version_belonged;
+
+ /* function ptr for MPI sg elements only */
+ MPT_BUILD_SG build_sg_mpi;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+
+ /* function ptr for NVMe PRP elements only */
+ NVME_BUILD_PRP build_nvme_prp;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ u8 tm_custom_handling;
+ u8 nvme_abort_timeout;
+ u16 max_shutdown_latency;
+ u16 max_wideport_qd;
+ u16 max_narrowport_qd;
+ u16 max_nvme_qd;
+ u8 max_sata_qd;
+
+ /* static config pages */
+ struct mpt3sas_facts facts;
+ struct mpt3sas_facts prev_fw_facts;
+ struct mpt3sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t
manu_pg0; + struct Mpi2ManufacturingPage10_t manu_pg10; + struct Mpi2ManufacturingPage11_t manu_pg11; + Mpi2BiosPage2_t bios_pg2; + Mpi2BiosPage3_t bios_pg3; + Mpi2IOCPage8_t ioc_pg8; + Mpi2IOUnitPage0_t iounit_pg0; + Mpi2IOUnitPage1_t iounit_pg1; + Mpi2IOUnitPage8_t iounit_pg8; + Mpi2IOCPage1_t ioc_pg1_copy; + + struct _boot_device req_boot_device; + struct _boot_device req_alt_boot_device; + struct _boot_device current_boot_device; + + /* sas hba, expander, and device list */ + struct _sas_node sas_hba; + struct list_head sas_expander_list; + struct list_head enclosure_list; + spinlock_t sas_node_lock; + struct list_head sas_device_list; + struct list_head sas_device_init_list; + spinlock_t sas_device_lock; + struct list_head pcie_device_list; + struct list_head pcie_device_init_list; + spinlock_t pcie_device_lock; + + struct list_head raid_device_list; + spinlock_t raid_device_lock; + u8 io_missing_delay; + u16 device_missing_delay; + int sas_id; + int pcie_target_id; + + void *blocking_handles; + void *pd_handles; + u16 pd_handles_sz; + + void *pend_os_device_add; + u16 pend_os_device_add_sz; + + /* config page */ + u16 config_page_sz; + void *config_page; + dma_addr_t config_page_dma; + void *config_vaddr; + + /* scsiio request */ + u16 hba_queue_depth; + u16 sge_size; + u16 scsiio_depth; + u16 request_sz; + u8 *request; + dma_addr_t request_dma; + u32 request_dma_sz; + struct pcie_sg_list *pcie_sg_lookup; + spinlock_t scsi_lookup_lock; + int pending_io_count; + wait_queue_head_t reset_wq; + u16 *io_queue_num; + + /* PCIe SGL */ + struct dma_pool *pcie_sgl_dma_pool; + /* Host Page Size */ + u32 page_size; + + /* chain */ + struct chain_lookup *chain_lookup; + struct list_head free_chain_list; + struct dma_pool *chain_dma_pool; + ulong chain_pages; + u16 max_sges_in_main_message; + u16 max_sges_in_chain_message; + u16 chains_needed_per_io; + u32 chain_depth; + u16 chain_segment_sz; + u16 chains_per_prp_buffer; + + /* hi-priority queue */ + u16 hi_priority_smid; + u8 *hi_priority; + dma_addr_t hi_priority_dma; + u16 hi_priority_depth; + struct request_tracker *hpr_lookup; + struct list_head hpr_free_list; + + /* internal queue */ + u16 internal_smid; + u8 *internal; + dma_addr_t internal_dma; + u16 internal_depth; + struct request_tracker *internal_lookup; + struct list_head internal_free_list; + + /* sense */ + u8 *sense; + dma_addr_t sense_dma; + struct dma_pool *sense_dma_pool; + + /* reply */ + u16 reply_sz; + u8 *reply; + dma_addr_t reply_dma; + u32 reply_dma_max_address; + u32 reply_dma_min_address; + struct dma_pool *reply_dma_pool; + + /* reply free queue */ + u16 reply_free_queue_depth; + __le32 *reply_free; + dma_addr_t reply_free_dma; + struct dma_pool *reply_free_dma_pool; + u32 reply_free_host_index; + + /* reply post queue */ + u16 reply_post_queue_depth; + struct reply_post_struct *reply_post; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; + struct dma_pool *reply_post_free_dma_pool; + struct dma_pool *reply_post_free_array_dma_pool; + Mpi2IOCInitRDPQArrayEntry *reply_post_free_array; + dma_addr_t reply_post_free_array_dma; + u8 reply_queue_count; + struct list_head reply_queue_list; + + u8 combined_reply_queue; + u8 combined_reply_index_count; + u8 smp_affinity_enable; + /* reply post register index */ + resource_size_t __iomem **replyPostRegisterIndex; + + struct list_head delayed_tr_list; + struct list_head delayed_tr_volume_list; + struct list_head delayed_sc_list; + struct list_head delayed_event_ack_list; + u8 temp_sensors_count; + 
struct mutex pci_access_mutex;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ struct htb_rel_query htb_rel;
+ u8 reset_from_user;
+ u8 is_warpdrive;
+ u8 is_mcpu_endpoint;
+ u8 hide_ir_msg;
+ u8 mfg_pg10_hide_flag;
+ u8 hide_drives;
+ spinlock_t diag_trigger_lock;
+ u8 diag_trigger_active;
+ u8 atomic_desc_capable;
+ BASE_READ_REG base_readl;
+ BASE_READ_REG base_readl_ext_retry;
+ struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+ struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+ u8 supports_trigger_pages;
+ void *device_remove_in_progress;
+ u16 device_remove_in_progress_sz;
+ u8 is_gen35_ioc;
+ u8 is_aero_ioc;
+ struct dentry *debugfs_root;
+ struct dentry *ioc_dump;
+ PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+ GET_MSIX_INDEX get_msix_index_for_smlio;
+
+ u8 multipath_on_hba;
+ struct list_head port_table_list;
+};
+
+struct mpt3sas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
+#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
+#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002
+
+#define MPT_DRV_INTERNAL_FIRST_PE_ISSUED 0x00000001
+
+typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+/*
+ * struct ATTO_SAS_NVRAM - ATTO NVRAM settings stored
+ * in Manufacturing page 1 used to get
+ * the ATTO SasAddr.
+ */
+struct ATTO_SAS_NVRAM {
+ u8 Signature[4];
+ u8 Version;
+#define ATTO_SASNVR_VERSION 0
+
+ u8 Checksum;
+#define ATTO_SASNVR_CKSUM_SEED 0x5A
+ u8 Pad[10];
+ u8 SasAddr[8];
+#define ATTO_SAS_ADDR_ALIGN 64
+ u8 Reserved[232];
+};
+
+#define ATTO_SAS_ADDR_DEVNAME_BIAS 63
+
+union ATTO_SAS_ADDRESS {
+ U8 b[8];
+ U16 w[4];
+ U32 d[2];
+ U64 q;
+};
+
+/* base shared API */
+extern struct list_head mpt3sas_ioc_list;
+extern char driver_name[MPT_NAME_LENGTH];
+/*
+ * Spinlock protecting list operations over the IOCs.
+ * Case: when multiple warpdrive cards (IOCs) are in use.
+ * Each IOC is added to the ioc list structure on initialization.
+ * Watchdog threads run at regular intervals to check each IOC for any
+ * fault conditions, which will trigger the dead_ioc thread to
+ * deallocate the pci resources, resulting in deletion of the IOC entry
+ * from the list; this deletion needs to be protected by a spinlock to
+ * ensure that IOC removal is synchronized. If it is not synchronized it
+ * might lead to list_del corruption, as the ioc list is traversed in the
+ * cli path.
+ */
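+
+/*
+ * Illustrative sketch (editor's note, not part of the driver):
+ * MPT_CALLBACK completion handlers are registered once and the returned
+ * index is stored in the matching *_cb_idx member of
+ * struct MPT3SAS_ADAPTER; hypothetically:
+ *
+ *	mpt3sas_base_initialize_callback_handler();
+ *	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+ *
+ * where _scsih_io_done stands in for whatever handler the caller
+ * provides.  The registration helpers are declared just below.
+ */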
+extern spinlock_t gioc_lock;
+
+void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc);
+
+int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
+ enum reset_type type);
+
+void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
+void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll);
+void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task);
+void mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+/* hi-priority queue */
+u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
+
+u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_initialize_callback_handler(void);
+u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt3sas_base_release_callback_handler(u8 cb_idx);
+
+u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
+ u32 phys_addr);
+
+u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
+
+void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_fault_code(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_fault_info(ioc, fault_code); } while (0)
+
+void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_coredump_info(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+ mpt3sas_base_coredump_info(ioc, fault_code); } while (0)
+
+int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+ const char *caller);
+int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request);
+int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+
+void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc,
+ u32 *event_type);
+
+void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
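+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): a
+ * hypothetical caller that suspects a fault reads the raw IOC state and,
+ * when the IOC is faulted, logs the fault code through the wrapper macro
+ * above:
+ *
+ *	u32 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ *
+ *	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ *		mpt3sas_print_fault_code(ioc,
+ *		    ioc_state & MPI2_DOORBELL_DATA_MASK);
+ */
+
+void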
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay); + +int mpt3sas_base_check_for_fault_and_issue_reset( + struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); + +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); + +u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, + u8 status, void *mpi_request, int sz); +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ +do { ioc_err(ioc, "In func: %s\n", __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \ + status, mpi_request, sz); } while (0) + +int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); +int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc); + +/* scsih shared API */ +struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply); +void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_clear_outstanding_scsi_tm_commands( + struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, u64 lun, u8 type, u16 smid_task, + u16 msix_task, u8 timeout, u8 tr_method); +int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, u64 lun, u8 type, u16 smid_task, + u16 msix_task, u8 timeout, u8 tr_method); + +void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port); +u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +struct hba_port * +mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, u8 port, + u8 bypass_dirty_port_flag); + +struct _sas_node *mpt3sas_scsih_expander_find_by_handle( + struct MPT3SAS_ADAPTER *ioc, u16 handle); +struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *__mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port); +struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, + u16 handle); +struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, + u16 handle); + +void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); +struct _raid_device * +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth); +struct _sas_device * +__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, struct sas_rphy *rphy); +struct virtual_phy * 
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port, u32 phy); + +/* config shared API */ +u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, + u8 *num_phys); +int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); +int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page); + +int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz); +int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page); + +int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); +int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); + +int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage2_t *config_page); +int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page); +int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_page); +int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_page); +int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage0_t *config_page); +int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz); +int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); +int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage8_t *config_page); +int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + 
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage1_t *config_page); +int mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage1_t *config_page); +int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage8_t *config_page); +int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page, + u32 phy_number, u16 handle); +int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number); +int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number); +int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle); +int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz); +int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, + u32 form, u32 form_specific); +int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle); +int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); +int +mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page); +int +mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page); +int +mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set); +int +mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set); + +/* ctl shared API */ +extern const struct attribute_group *mpt3sas_host_groups[]; +extern const struct attribute_group *mpt3sas_dev_groups[]; +void mpt3sas_ctl_init(ushort hbas_to_enumerate); +void mpt3sas_ctl_exit(ushort hbas_to_enumerate); +u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 
msix_index,
+ u32 reply);
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
+u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
+ u8 bits_to_register);
+int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/* transport shared API */
+extern struct scsi_transport_template *mpt3sas_transport_template;
+extern struct sas_function_template mpt3sas_transport_functions;
+u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address, struct hba_port *port);
+void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent, struct hba_port *port);
+int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev);
+void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate,
+ struct hba_port *port);
+void
+mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy);
+void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address, struct hba_port *port);
+/* trigger data externs */
+void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
+ u32 trigger_bitmask);
+void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier);
+void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
+ u8 asc, u8 ascq);
+void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
+ u32 loginfo);
+
+/* warpdrive APIs */
+u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device);
+void
+mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
+
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_debugfs(void);
+void mpt3sas_exit_debugfs(void);
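+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): the
+ * trigger entry points above are fed from the hot paths; for example a
+ * hypothetical sense-data handler could fire the SCSI trigger with the
+ * decoded sense triple:
+ *
+ *	mpt3sas_trigger_scsi(ioc, sense_key, asc, ascq);
+ *
+ * which, when it matches a configured SL_WH_SCSI_TRIGGERS_T entry, is
+ * expected to end with the diagnostic (host trace) buffer being released.
+ */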
+
+/**
+ * mpt3sas_scsih_is_pcie_scsi_device - determines whether the device is a
+ * PCIe SCSI device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Return: 1 if the device is a PCIe SCSI device, otherwise 0.
+ */
+static inline int
+mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
+{
+ if ((device_info &
+ MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI)
+ return 1;
+ else
+ return 0;
+}
+#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
new file mode 100644
index 000000000..d114ef381
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -0,0 +1,2794 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page.
*/ +#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ + | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \ + << MPI2_SGE_FLAGS_SHIFT) + +/** + * struct config_request - obtain dma memory via routine + * @sz: size + * @page: virt pointer + * @page_dma: phys pointer + * + */ +struct config_request { + u16 sz; + void *page; + dma_addr_t page_dma; +}; + +/** + * _config_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string pass from calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module. + */ +static void +_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { + case MPI2_CONFIG_PAGETYPE_IO_UNIT: + desc = "io_unit"; + break; + case MPI2_CONFIG_PAGETYPE_IOC: + desc = "ioc"; + break; + case MPI2_CONFIG_PAGETYPE_BIOS: + desc = "bios"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_VOLUME: + desc = "raid_volume"; + break; + case MPI2_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case MPI2_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case MPI2_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_PORT: + desc = "sas_port"; + break; + case MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING: + desc = "ext_manufacturing"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT: + desc = "pcie_io_unit"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH: + desc = "pcie_switch"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE: + desc = "pcie_device"; + break; + case MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK: + desc = "pcie_link"; + break; + } + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +/** + * _config_alloc_config_dma_memory - obtain physical memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper for obtaining dma-able memory for config page request. + * + * Return: 0 for success, non-zero for failure. 
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* use the preallocated ioc->config_page buffer */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ ioc->config_vaddr = mem->page;
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt,
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
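+
+/*
+ * Illustrative sketch (editor's note): every mpt3sas_config_get_*()
+ * wrapper below drives _config_request() twice, first a header read that
+ * fills in the page header/length, then the actual page read:
+ *
+ *	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ *	r = _config_request(ioc, &mpi_request, mpi_reply,
+ *	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ *	if (!r) {
+ *		mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *		r = _config_request(ioc, &mpi_request, mpi_reply,
+ *		    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ *		    sizeof(*config_page));
+ *	}
+ */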
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside ioc->config_cb_idx.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ struct config_request mem;
+ u32 ioc_status = UINT_MAX;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: config_cmd in use\n", __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ioc_info(ioc, "%s: attempting retry (%d)\n",
+ __func__, retry_count);
+ }
+
+ r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
+ if (r) {
+ if (r == -ETIME)
+ issue_host_reset = 1;
+ goto free_mem;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT3_CMD_PENDING;
+ config_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+ if (ioc->logging_level & MPT_DEBUG_CONFIG)
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+ init_completion(&ioc->config_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ _config_display_some_debug(ioc,
+ smid, "config_request", NULL);
+ ioc_err(ioc, "%s: command timeout\n", __func__);
+ mpt3sas_base_check_cmd_timeout(ioc, ioc->config_cmds.status,
+ mpi_request, sizeof(Mpi2ConfigRequest_t) / 4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt3sas_base_free_smid(ioc, smid);
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
+ goto retry_config;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
else
+			issue_host_reset = 1;
+		goto free_mem;
+	}
+
+	if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) {
+		memcpy(mpi_reply, ioc->config_cmds.reply,
+		    sizeof(Mpi2ConfigReply_t));
+
+		/* Reply Frame Sanity Checks to work around FW issues */
+		if ((mpi_request->Header.PageType & 0xF) !=
+		    (mpi_reply->Header.PageType & 0xF)) {
+			if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+				_config_display_some_debug(ioc,
+				    smid, "config_request", NULL);
+			_debug_dump_mf(mpi_request, ioc->request_sz/4);
+			_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
+			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+			    ioc->name, __func__,
+			    mpi_request->Header.PageType & 0xF,
+			    mpi_reply->Header.PageType & 0xF);
+		}
+
+		if (((mpi_request->Header.PageType & 0xF) ==
+		    MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+		    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+			if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+				_config_display_some_debug(ioc,
+				    smid, "config_request", NULL);
+			_debug_dump_mf(mpi_request, ioc->request_sz/4);
+			_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
+			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+			    ioc->name, __func__,
+			    mpi_request->ExtPageType,
+			    mpi_reply->ExtPageType);
+		}
+		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+		    & MPI2_IOCSTATUS_MASK;
+	}
+
+	if (retry_count)
+		ioc_info(ioc, "%s: retry (%d) completed!!\n",
+		    __func__, retry_count);
+
+	if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
+	    config_page && mpi_request->Action ==
+	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) {
+		u8 *p = (u8 *)mem.page;
+
+		/* Config Page Sanity Checks to work around FW issues */
+		if (p) {
+			if ((mpi_request->Header.PageType & 0xF) !=
+			    (p[3] & 0xF)) {
+				if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+					_config_display_some_debug(ioc,
+					    smid, "config_request", NULL);
+				_debug_dump_mf(mpi_request, ioc->request_sz/4);
+				_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
+				_debug_dump_config(p, min_t(u16, mem.sz,
+				    config_page_sz)/4);
+				panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+				    ioc->name, __func__,
+				    mpi_request->Header.PageType & 0xF,
+				    p[3] & 0xF);
+			}
+
+			if (((mpi_request->Header.PageType & 0xF) ==
+			    MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+			    (mpi_request->ExtPageType != p[6])) {
+				if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+					_config_display_some_debug(ioc,
+					    smid, "config_request", NULL);
+				_debug_dump_mf(mpi_request, ioc->request_sz/4);
+				_debug_dump_reply(mpi_reply, ioc->reply_sz/4);
+				_debug_dump_config(p, min_t(u16, mem.sz,
+				    config_page_sz)/4);
+				panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+				    ioc->name, __func__,
+				    mpi_request->ExtPageType, p[6]);
+			}
+		}
+		memcpy(config_page, mem.page, min_t(u16, mem.sz,
+		    config_page_sz));
+	}
+
+ free_mem:
+	if (config_page)
+		_config_free_config_dma_memory(ioc, &mem);
+ out:
+	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+	mutex_unlock(&ioc->config_cmds.mutex);
+
+	if (issue_host_reset) {
+		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) {
+			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+			r = -EFAULT;
+		} else {
+			if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+				return -EFAULT;
+			r = -EAGAIN;
+		}
+	}
+	return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the
config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
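+ *
+ * A minimal caller sketch (illustrative only, not part of the driver;
+ * num_phys and the connector-info sizing are assumptions based on the
+ * MPI2 headers, since this page ends in a variable-length array):
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2ManufacturingPage7_t *pg7;
+ *	u16 sz = offsetof(Mpi2ManufacturingPage7_t, ConnectorInfo) +
+ *	    (num_phys * sizeof(MPI2_MANPAGE7_CONNECTOR_INFO));
+ *
+ *	pg7 = kzalloc(sz, GFP_KERNEL);
+ *	if (pg7 && !mpt3sas_config_get_manufacturing_pg7(ioc, &mpi_reply,
+ *	    pg7, sz))
+ *		pr_info("manu pg7 flags 0x%08x\n", le32_to_cpu(pg7->Flags));
+ *	kfree(pg7);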
+ */ +int +mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 7; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
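+ *
+ * Manufacturing pages 10 and 11 are vendor specific. A typical
+ * read-modify-write of page 11 (illustrative sketch; EEDPTagMode is a
+ * field of the driver's struct Mpi2ManufacturingPage11_t):
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	struct Mpi2ManufacturingPage11_t manu_pg11;
+ *
+ *	if (!mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
+ *	    &manu_pg11) && manu_pg11.EEDPTagMode == 0) {
+ *		manu_pg11.EEDPTagMode |= 0x1;
+ *		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ *		    &manu_pg11);
+ *	}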
+ */ +int +mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg2 - obtain bios page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg3 - obtain bios page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. 
+ * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + + out: + return r; +} + +/** + * mpt3sas_config_set_bios_pg4 - write out bios page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz_config_pg: sizeof the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_pg) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION; + + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz_config_pg); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg4 - read bios page 4 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz_config_pg: sizeof the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page, + int sz_config_pg) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 4; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + /* + * The sizeof the page is variable. 
Allow for just the + * size to be returned + */ + if (config_page && sz_config_pg) { + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz_config_pg); + } + +out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_iounit_pg1 - set iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
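+ *
+ * The write only touches the CURRENT copy of the page. A typical
+ * read-modify-write (illustrative sketch; the flag is one example from
+ * the MPI2 headers):
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2IOUnitPage1_t iounit_pg1;
+ *
+ *	if (!mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &iounit_pg1)) {
+ *		iounit_pg1.Flags &= cpu_to_le32(
+ *		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING);
+ *		mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &iounit_pg1);
+ *	}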
+ */ +int +mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg3 - obtain iounit page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */
+int
+mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+	mpi_request.Header.PageNumber = 8;
+	mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_get_ioc_pg1 - obtain ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_IOCPAGE1_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_set_ioc_pg1 - modify ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_IOCPAGE1_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
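+ *
+ * With the GET_NEXT_HANDLE form the firmware returns the device that
+ * follows @handle, so all attached devices can be walked (illustrative
+ * sketch; start at 0xFFFF and stop on a non-success IOCStatus):
+ *
+ *	u16 handle = 0xFFFF;
+ *
+ *	while (!mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ *	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ *	    handle)) {
+ *		if ((le16_to_cpu(mpi_reply.IOCStatus) &
+ *		    MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+ *			break;
+ *		handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ *	}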
+ */ +int +mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
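+ *
+ * NVMe devices are walked the same way as the SAS device walk above,
+ * only the page address form differs (illustrative call):
+ *
+ *	mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ *	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ *	    handle);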
+ */ +int +mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +/** + * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT; + mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); +out: + return r; +} + +/** + * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE; + mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION; + mpi_request.Header.PageNumber = 2; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +/** + * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host + * @ioc: per adapter object + * @num_phys: pointer returned with the number of phys + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + u16 ioc_status; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2SasIOUnitPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. 
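+ *
+ * Sizing sketch (illustrative): the page ends in one PhyData element
+ * per host phy, so the buffer is derived from the phy count first:
+ *
+ *	u8 num_phys;
+ *	u16 sz;
+ *	Mpi2SasIOUnitPage0_t *sas_iounit_pg0;
+ *
+ *	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ *	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *	    (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);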
+ */ +int +mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Return: 0 for success, non-zero for failure. 
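+ *
+ * Unlike the iounit page 1 setter above, this write is issued twice,
+ * first WRITE_CURRENT and then WRITE_NVRAM, so per-phy settings persist
+ * across resets. Illustrative sketch (MaxMinLinkRate is one per-phy
+ * field from the MPI2 headers):
+ *
+ *	sas_iounit_pg1->PhyData[phy_number].MaxMinLinkRate =
+ *	    (MPI2_SAS_PRATE_MAX_RATE_6_0 | MPI2_SAS_PRATE_MIN_RATE_1_5);
+ *	mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply,
+ *	    sas_iounit_pg1, sz);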
+ */ +int +mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg0 - obtain expander page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg1 - obtain expander page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * @handle: expander handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
+ */
+int
+mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	*mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+	u16 handle)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.PageAddress =
+	    cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+	    (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: enclosure handle
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	*mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+	mpi_request.Header.PageNumber = 0;
+	mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.PageAddress = cpu_to_le32(form | handle);
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */ +int +mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg1 - obtain phy page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
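+ *
+ * Callers usually look a volume up by handle and read its WWID
+ * (illustrative sketch):
+ *
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2RaidVolPage1_t vol_pg1;
+ *
+ *	if (!mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, &vol_pg1,
+ *	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, volume_handle))
+ *		wwid = le64_to_cpu(vol_pg1.WWID);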
+ */ +int +mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume + * @ioc: per adapter object + * @handle: volume handle + * @num_pds: returns pds count + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds) +{ + Mpi2ConfigRequest_t mpi_request; + Mpi2RaidVolPage0_t config_page; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + *num_pds = 0; + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2RaidVolPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; + } + + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
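+ *
+ * Sizing sketch (illustrative): the trailing PhysDisk array is sized
+ * from the count returned by mpt3sas_config_get_number_pds():
+ *
+ *	u8 num_pds;
+ *	u16 sz;
+ *
+ *	mpt3sas_config_get_number_pds(ioc, volume_handle, &num_pds);
+ *	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *	    (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));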
+ */ +int +mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE + * @form_specific: specific to the form + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form, + u32 form_specific) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | form_specific); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_driver_trigger_pg0 - obtain driver trigger page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */
+int
+mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType =
+	    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+	mpi_request.Header.PageNumber = 0;
+	mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg0 - write driver trigger page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage0_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType =
+	    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+	mpi_request.Header.PageNumber = 0;
+	mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	_config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg0 - update driver trigger page 0
+ * @ioc: per adapter object
+ * @trigger_flag: trigger type bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
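+ *
+ * Page 0 acts as a validity bitmap over the other trigger pages: a
+ * trigger type is flagged valid here before its own page is written,
+ * and cleared again when the triggers are removed (illustrative call):
+ *
+ *	mpt3sas_config_update_driver_trigger_pg0(ioc,
+ *	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, true);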
+ */ +static int +mpt3sas_config_update_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, + u16 trigger_flag, bool set) +{ + Mpi26DriverTriggerPage0_t tg_pg0; + Mpi2ConfigReply_t mpi_reply; + int rc; + u16 flags, ioc_status; + + rc = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to get trigger pg0, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + if (set) + flags = le16_to_cpu(tg_pg0.TriggerFlags) | trigger_flag; + else + flags = le16_to_cpu(tg_pg0.TriggerFlags) & ~trigger_flag; + + tg_pg0.TriggerFlags = cpu_to_le16(flags); + + rc = _config_set_driver_trigger_pg0(ioc, &mpi_reply, &tg_pg0); + if (rc) + return rc; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dcprintk(ioc, + ioc_err(ioc, + "%s: Failed to update trigger pg0, ioc_status(0x%04x)\n", + __func__, ioc_status)); + return -EFAULT; + } + + return 0; +} + +/** + * mpt3sas_config_get_driver_trigger_pg1 - obtain driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = + MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * _config_set_driver_trigger_pg1 - write driver trigger page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */
+static int
+_config_set_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage1_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType =
+	    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	_config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg1 - update driver trigger page 1
+ * @ioc: per adapter object
+ * @master_tg: Master trigger bit map
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
+	struct SL_WH_MASTER_TRIGGER_T *master_tg, bool set)
+{
+	Mpi26DriverTriggerPage1_t tg_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+	int rc;
+	u16 ioc_status;
+
+	rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, set);
+	if (rc)
+		return rc;
+
+	rc = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+	if (rc)
+		goto out;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dcprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		rc = -EFAULT;
+		goto out;
+	}
+
+	if (set) {
+		tg_pg1.NumMasterTrigger = cpu_to_le16(1);
+		tg_pg1.MasterTriggers[0].MasterTriggerFlags = cpu_to_le32(
+		    master_tg->MasterData);
+	} else {
+		tg_pg1.NumMasterTrigger = 0;
+		tg_pg1.MasterTriggers[0].MasterTriggerFlags = 0;
+	}
+
+	rc = _config_set_driver_trigger_pg1(ioc, &mpi_reply, &tg_pg1);
+	if (rc)
+		goto out;
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		dcprintk(ioc,
+		    ioc_err(ioc,
+		    "%s: Failed to update trigger pg1, ioc_status(0x%04x)\n",
+		    __func__, ioc_status));
+		rc = -EFAULT;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	mpt3sas_config_update_driver_trigger_pg0(ioc,
+	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID, !set);
+
+	return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg2 - obtain driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg2 - write driver trigger page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg2 - update driver trigger page 2
+ * @ioc: per adapter object
+ * @event_tg: list of Event Triggers
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
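+ *
+ * Note: the MPI event trigger valid flag is set in driver trigger page 0
+ * before this page is touched; if any later step fails, that flag is
+ * rolled back by calling mpt3sas_config_update_driver_trigger_pg0()
+ * again with the inverted @set value.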
+ */
+int
+mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg, bool set)
+{
+ Mpi26DriverTriggerPage2_t tg_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = event_tg->ValidEntries;
+ tg_pg2.NumMPIEventTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg2.MPIEventTriggers[i].MPIEventCode =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].EventValue);
+ tg_pg2.MPIEventTriggers[i].MPIEventCodeSpecific =
+ cpu_to_le16(
+ event_tg->EventTriggerEntry[i].LogEntryQualifier);
+ }
+ } else {
+ tg_pg2.NumMPIEventTrigger = 0;
+ memset(&tg_pg2.MPIEventTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to update trigger pg2, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg3 - obtain driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg3 - write driver trigger page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
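+ *
+ * Note: as with the other trigger page setters, the page is committed
+ * with PAGE_WRITE_CURRENT and then PAGE_WRITE_NVRAM so the settings
+ * survive a reset.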
+ */
+static int
+_config_set_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg3 - update driver trigger page 3
+ * @ioc: per adapter object
+ * @scsi_tg: scsi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg, bool set)
+{
+ Mpi26DriverTriggerPage3_t tg_pg3;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = scsi_tg->ValidEntries;
+ tg_pg3.NumSCSISenseTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg3.SCSISenseTriggers[i].ASCQ =
+ scsi_tg->SCSITriggerEntry[i].ASCQ;
+ tg_pg3.SCSISenseTriggers[i].ASC =
+ scsi_tg->SCSITriggerEntry[i].ASC;
+ tg_pg3.SCSISenseTriggers[i].SenseKey =
+ scsi_tg->SCSITriggerEntry[i].SenseKey;
+ }
+ } else {
+ tg_pg3.NumSCSISenseTrigger = 0;
+ memset(&tg_pg3.SCSISenseTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to update trigger pg3, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_driver_trigger_pg4 - obtain driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
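+ *
+ * Illustrative sketch only (not part of the driver):
+ *
+ *     Mpi26DriverTriggerPage4_t tg_pg4;
+ *     Mpi2ConfigReply_t mpi_reply;
+ *
+ *     if (!mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4))
+ *             pr_info("%u IOCStatus/LogInfo triggers configured\n",
+ *                 le16_to_cpu(tg_pg4.NumIOCStatusLogInfoTrigger));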
+ */
+int
+mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * _config_set_driver_trigger_pg4 - write driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_set_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26DriverTriggerPage4_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType =
+ MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_update_driver_trigger_pg4 - update driver trigger page 4
+ * @ioc: per adapter object
+ * @mpi_tg: mpi trigger list
+ * @set: set or clear trigger values
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
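+ *
+ * Note: each entry pairs an IOCStatus value with an IocLogInfo value;
+ * the LOGINFO trigger valid flag set in page 0 is rolled back if this
+ * update fails.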
+ */
+int
+mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set)
+{
+ Mpi26DriverTriggerPage4_t tg_pg4;
+ Mpi2ConfigReply_t mpi_reply;
+ int rc, i, count;
+ u16 ioc_status;
+
+ rc = mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, set);
+ if (rc)
+ return rc;
+
+ rc = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (set) {
+ count = mpi_tg->ValidEntries;
+ tg_pg4.NumIOCStatusLogInfoTrigger = cpu_to_le16(count);
+ for (i = 0; i < count; i++) {
+ tg_pg4.IOCStatusLoginfoTriggers[i].IOCStatus =
+ cpu_to_le16(mpi_tg->MPITriggerEntry[i].IOCStatus);
+ tg_pg4.IOCStatusLoginfoTriggers[i].LogInfo =
+ cpu_to_le32(mpi_tg->MPITriggerEntry[i].IocLogInfo);
+ }
+ } else {
+ tg_pg4.NumIOCStatusLogInfoTrigger = 0;
+ memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0,
+ NUM_VALID_ENTRIES * sizeof(
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY));
+ }
+
+ rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
+ if (rc)
+ goto out;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dcprintk(ioc,
+ ioc_err(ioc,
+ "%s: Failed to update trigger pg4, ioc_status(0x%04x)\n",
+ __func__, ioc_status));
+ rc = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ mpt3sas_config_update_driver_trigger_pg0(ioc,
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID, !set);
+
+ return rc;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for a given hidden
+ * raid component
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
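+ *
+ * Illustrative sketch only (not part of the driver); pd_handle is assumed
+ * to be the device handle of a hidden raid component:
+ *
+ *     u16 volume_handle;
+ *
+ *     if (!mpt3sas_config_get_volume_handle(ioc, pd_handle,
+ *         &volume_handle) && volume_handle)
+ *             pr_info("handle(0x%04x) maps to volume(0x%04x)\n",
+ *                 pd_handle, volume_handle);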
+ */ +int +mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle) +{ + Mpi2RaidConfigurationPage0_t *config_page = NULL; + Mpi2ConfigRequest_t mpi_request; + Mpi2ConfigReply_t mpi_reply; + int r, i, config_page_sz; + u16 ioc_status; + int config_num; + u16 element_type; + u16 phys_disk_dev_handle; + + *volume_handle = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG; + mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); + config_page = kmalloc(config_page_sz, GFP_KERNEL); + if (!config_page) { + r = -1; + goto out; + } + + config_num = 0xff; + while (1) { + mpi_request.PageAddress = cpu_to_le32(config_num + + MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + config_page_sz); + if (r) + goto out; + r = -1; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < config_page->NumElements; i++) { + element_type = le16_to_cpu(config_page-> + ConfigElement[i].ElementFlags) & + MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE; + if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT || + element_type == + MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) { + phys_disk_dev_handle = + le16_to_cpu(config_page->ConfigElement[i]. + PhysDiskDevHandle); + if (phys_disk_dev_handle == pd_handle) { + *volume_handle = + le16_to_cpu(config_page-> + ConfigElement[i].VolDevHandle); + r = 0; + goto out; + } + } else if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { + *volume_handle = 0; + r = 0; + goto out; + } + } + config_num = config_page->ConfigNum; + } + out: + kfree(config_page); + return r; +} + +/** + * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle + * @ioc: per adapter object + * @volume_handle: volume handle + * @wwid: volume wwid + * Context: sleep. + * + * Return: 0 for success, non-zero for failure. 
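+ *
+ * Illustrative sketch only (not part of the driver); volume_handle would
+ * typically come from mpt3sas_config_get_volume_handle():
+ *
+ *     u64 wwid;
+ *
+ *     if (!mpt3sas_config_get_volume_wwid(ioc, volume_handle, &wwid))
+ *             pr_info("volume wwid(0x%016llx)\n",
+ *                 (unsigned long long)wwid);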
+ */ +int +mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, + u64 *wwid) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2RaidVolPage1_t raid_vol_pg1; + + *wwid = 0; + if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, + volume_handle))) { + *wwid = le64_to_cpu(raid_vol_pg1.WWID); + return 0; + } else + return -1; +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c new file mode 100644 index 000000000..0d8b1e942 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -0,0 +1,4189 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/compat.h> +#include <linux/poll.h> + +#include <linux/io.h> +#include <linux/uaccess.h> + +#include "mpt3sas_base.h" +#include "mpt3sas_ctl.h" + + +static struct fasync_struct *async_queue; +static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); + + +/** + * enum block_state - blocking state + * @NON_BLOCKING: non blocking + * @BLOCKING: blocking + * + * These states are for ioctls that need to wait for a response + * from firmware, so they probably require sleep. 
+ */ +enum block_state { + NON_BLOCKING, + BLOCKING, +}; + +/** + * _ctl_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string pass from calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module. + */ +static void +_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & MPT_DEBUG_IOCTL)) + return; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case MPI2_FUNCTION_SCSI_IO_REQUEST: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case MPI2_FUNCTION_IOC_INIT: + desc = "ioc_init"; + break; + case MPI2_FUNCTION_IOC_FACTS: + desc = "ioc_facts"; + break; + case MPI2_FUNCTION_CONFIG: + { + Mpi2ConfigRequest_t *config_request = + (Mpi2ConfigRequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_PORT_FACTS: + desc = "port_facts"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + desc = "port_enable"; + break; + case MPI2_FUNCTION_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case MPI2_FUNCTION_FW_DOWNLOAD: + desc = "fw_download"; + break; + case MPI2_FUNCTION_FW_UPLOAD: + desc = "fw_upload"; + break; + case MPI2_FUNCTION_RAID_ACTION: + desc = "raid_action"; + break; + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case MPI2_FUNCTION_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case MPI2_FUNCTION_DIAG_BUFFER_POST: + desc = "diag_buffer_post"; + break; + case MPI2_FUNCTION_DIAG_RELEASE: + desc = "diag_release"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + case MPI2_FUNCTION_TOOLBOX: + desc = "toolbox"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + desc = "nvme_encapsulated"; + break; + } + + if (!desc) + return; + + ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + Mpi2SCSIIOReply_t *scsi_reply = + (Mpi2SCSIIOReply_t *)mpi_reply; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, 
le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address,
+ sas_device->phy);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ sas_device_put(sas_device);
+ }
+ if (!sas_device) {
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (pcie_device) {
+ ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ pcie_device_put(pcie_device);
+ }
+ }
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+
+/**
+ * mpt3sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ /*
+ * Get Error Response data for NVMe device. The ctl_cmds.sense
+ * buffer is used to store the Error Response data.
+ */
+ if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_error_reply =
+ (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
+ sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
+ le16_to_cpu(nvme_error_reply->ErrorResponseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+ ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should
+ * be saved in the driver event_log. This bitmask is set by the
+ * application.
+ *
+ * Return: 1 when the event should be captured, or 0 when there is no match.
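+ *
+ * Example: event 0x21 is tracked by bit 1 (0x21 % 32) of
+ * ioc->event_type[1] (0x21 / 32), so it is logged only when the
+ * application has enabled that bit via MPT3EVENTENABLE.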
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT3_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* This aen_event_read_flag is set until the
+ * application has read the event log.
+ * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely copies the firmware event into the driver
+ * event_log via mpt3sas_ctl_add_to_event_log().
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply)
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from the application
+ * @ioc_number: ioc number to look up
+ * @iocpp: The ioc pointer is returned in this.
+ * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
+ * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
+ *
+ * Return: (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
+ int mpi_version)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ int version = 0;
+ /* global ioc lock to protect controller on list operations */
+ spin_lock(&gioc_lock);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ /* Check whether this ioctl command is from the right
+ * ioctl device or not, if not continue the search.
+ */
+ version = ioc->hba_mpi_version_belonged;
+ /* MPI25_VERSION and MPI26_VERSION use the same ioctl
+ * device.
+ */
+ if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
+ if ((version == MPI25_VERSION) ||
+ (version == MPI26_VERSION))
+ goto out;
+ else
+ continue;
+ } else {
+ if (version != mpi_version)
+ continue;
+ }
+out:
+ spin_unlock(&gioc_lock);
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ spin_unlock(&gioc_lock);
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+ u8 issue_reset;
+
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+
+ /*
+ * add a log message to indicate the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer due to adapter reset.",
+ __func__);
+ ioc->htb_rel.buffer_rel_condition =
+ MPT3_DIAG_BUFFER_REL_TRIGGER;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+}
+
+/**
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+}
+
+/**
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+}
+
+/**
+ * _ctl_fasync - async notification registration
+ * @fd: file descriptor
+ * @filep: file pointer
+ * @mode: on/off indicator
+ *
+ * Called when an application requests a fasync (SIGIO) callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_poll - poll handler for the ioctl event interface
+ * @filep: file pointer
+ * @wait: poll table
+ *
+ */
+static __poll_t
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ /* global ioc lock to protect controller on list operations */
+ spin_lock(&gioc_lock);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag) {
+ spin_unlock(&gioc_lock);
+ return EPOLLIN | EPOLLRDNORM;
+ }
+ }
+ spin_unlock(&gioc_lock);
+ return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg: (struct mpt3_ioctl_command)
+ * @tm_request: pointer to mf from user space
+ *
+ * Return: 0 when a smid is found, else 1.
+ * On failure, the reply frame is filled.
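+ *
+ * Note: outstanding smids are scanned from ioc->scsiio_depth downwards;
+ * when the application passes TaskMID 0, the first outstanding smid
+ * matching the device handle and LUN is selected.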
+ */
+static int
+_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ bool found = false;
+ u16 smid;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT3SAS_DEVICE *priv_data;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+ __le16 task_mid;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ st = scsi_cmd_priv(scmd);
+
+ /*
+ * If the given TaskMID from the user space is zero, then the
+ * first outstanding smid will be picked up. Otherwise,
+ * targeted smid will be the one.
+ */
+ task_mid = cpu_to_le16(st->smid);
+ if (!tm_request->TaskMID)
+ tm_request->TaskMID = task_mid;
+ found = tm_request->TaskMID == task_mid;
+ }
+
+ if (!found) {
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ desc, le16_to_cpu(tm_request->DevHandle),
+ lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
+ * @ioc: per adapter object
+ * @karg: (struct mpt3_ioctl_command)
+ * @mf: pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
+ struct _pcie_device *pcie_device = NULL;
+ u16 smid;
+ unsigned long timeout;
+ u8 issue_reset;
+ u32 sz, sz_arg;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+ u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto out;
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ ioc_err(ioc, "%s: failed obtaining memory for mpi_request\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if
(karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + + /* copy in request message frame from user */ + if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ret = -EAGAIN; + goto out; + } + } else { + /* Use first reserved smid for passthrough ioctls */ + smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; + } + + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, karg.data_sge_offset*4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { + + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); + if (!device_handle || (device_handle > + ioc->facts.MaxDevHandle)) { + ret = -EINVAL; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + /* obtain dma-able memory for data transfer */ + if (data_out_sz) /* WRITE */ { + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, + &data_out_dma, GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -EFAULT; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + if (data_in_sz) /* READ */ { + data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, + &data_in_dma, GFP_KERNEL); + if (!data_in) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + psge = (void *)request + (karg.data_sge_offset*4); + + /* send command to firmware */ + _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case MPI2_FUNCTION_NVME_ENCAPSULATED: + { + nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; + if (!ioc->pcie_sg_lookup) { + dtmprintk(ioc, ioc_info(ioc, + "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n" + )); + + if (ioc->logging_level & MPT_DEBUG_TM) + _debug_dump_mf(nvme_encap_request, + ioc->request_sz/4); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + /* + * Get the Physical Address of the sense buffer. + * Use Error Response buffer address field to hold the sense + * buffer address. + * Clear the internal sense buffer, which will potentially hold + * the Completion Queue Entry on return, or 0 if no Entry. + * Build the PRPs and set direction bits. + * Send the request. 
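+ *
+ * The 64-bit ErrorResponseBaseAddress below is composed from the upper
+ * 32 bits of the shared sense pool base (ioc->sense_dma) and the lower
+ * 32 bits of this smid's sense buffer, which assumes each per-smid
+ * buffer lives in the same 4GB window as the pool base.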
+ */
+ nvme_encap_request->ErrorResponseBaseAddress =
+ cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
+ nvme_encap_request->ErrorResponseBaseAddress |=
+ cpu_to_le64(le32_to_cpu(
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
+ nvme_encap_request->ErrorResponseAllocationLength =
+ cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
+ memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
+ ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
+ data_out_dma, data_out_sz, data_in_dma, data_in_sz);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ mpt3sas_base_put_smid_nvme_encap(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ ioc->put_smid_scsi_io(ioc, smid, device_handle);
+ else
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc,
+ ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ le16_to_cpu(tm_request->DevHandle),
+ tm_request->TaskType));
+ ioc->got_task_abort_from_ioctl = 1;
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ ioc->got_task_abort_from_ioctl = 0;
+ goto out;
+ }
+ }
+ ioc->got_task_abort_from_ioctl = 0;
+
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ if (!ioc->multipath_on_hba) {
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ }
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ ioc->build_sg(ioc, psge,
data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ {
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ {
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
+ ret = -EPERM;
+ break;
+ }
+ fallthrough;
+ }
+ case MPI2_FUNCTION_FW_UPLOAD:
+ {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_TOOLBOX:
+ {
+ Mpi2ToolboxCleanRequest_t *toolbox_request =
+ (Mpi2ToolboxCleanRequest_t *)mpi_request;
+
+ if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
+ || (toolbox_request->Tool ==
+ MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ else if (toolbox_request->Tool ==
+ MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
+ Mpi2ToolboxMemMoveRequest_t *mem_move_request =
+ (Mpi2ToolboxMemMoveRequest_t *)request;
+ Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
+
+ ioc->build_sg_mpi(ioc, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz);
+ if (data_out_sz && !data_in_sz) {
+ dst =
+ (Mpi2SGESimple64_t *)&mem_move_request->SGL;
+ src = (void *)dst + ioc->sge_size;
+
+ memcpy(&tmp, src, ioc->sge_size);
+ memcpy(src, dst, ioc->sge_size);
+ memcpy(dst, &tmp, ioc->sge_size);
+ }
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ ioc_info(ioc,
+ "Mpi2ToolboxMemMoveRequest_t request msg\n");
+ _debug_dump_mf(mem_move_request,
+ ioc->request_sz/4);
+ }
+ } else
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ /* drop to default case for posting the request */
+ }
+ fallthrough;
+ default:
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ ioc->put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+
karg.data_sge_offset, issue_reset); + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && + (ioc->logging_level & MPT_DEBUG_TM)) { + Mpi2SCSITaskManagementReply_t *tm_reply = + (Mpi2SCSITaskManagementReply_t *)mpi_reply; + + ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } + + /* copy out xdata to user */ + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, + data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out reply message frame to user */ + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out sense/NVMe Error Response to user */ + if (karg.max_sense_bytes && (mpi_request->Function == + MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function == + MPI2_FUNCTION_NVME_ENCAPSULATED)) { + if (karg.sense_data_ptr == NULL) { + ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n"); + goto out; + } + sz_arg = (mpi_request->Function == + MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE : + SCSI_SENSE_BUFFERSIZE; + sz = min_t(u32, karg.max_sense_bytes, sz_arg); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { + ioc_info(ioc, "issue target reset: handle = (0x%04x)\n", + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_halt_firmware(ioc); + pcie_device = mpt3sas_get_pdev_by_handle(ioc, + le16_to_cpu(mpi_request->FunctionDependent1)); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) + mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, pcie_device->reset_timeout, + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); + else + mpt3sas_scsih_issue_locked_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), + 0, 0, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + } else + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } + + out: + if (pcie_device) + pcie_device_put(pcie_device); + + /* free memory associated with sg buffers */ + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + + kfree(mpi_request); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return ret; +} + +/** + * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void 
__user *arg) +{ + struct mpt3_ioctl_iocinfo karg; + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + memset(&karg, 0 , sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + karg.hw_rev = ioc->pdev->revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strcpy(karg.driver_version, ioc->driver_name); + strcat(karg.driver_version, "-"); + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + if (ioc->is_warpdrive) + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; + else + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; + strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); + break; + case MPI25_VERSION: + case MPI26_VERSION: + if (ioc->is_gen35_ioc) + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35; + else + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); + break; + } + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + memcpy(ioc->event_type, karg.event_types, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + mpt3sas_base_validate_event_type(ioc, ioc->event_type); + + if (ioc->event_log) + return 0; + /* initialize event_log */ + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, + sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); + if (!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +/** + * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + 
struct mpt3_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct mpt3_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + number_bytes = karg.hdr.max_data_size - + sizeof(struct mpt3_ioctl_header); + max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); + max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); + + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if (!max || !ioc->event_log) + return -ENODATA; + + number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + /* reset flag so SIGIO can restart */ + ioc->aen_event_read_flag = 0; + return 0; +} + +/** + * _ctl_do_reset - main handler for MPT3HARDRESET opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; + + dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", + __func__)); + + ioc->reset_from_user = 1; + retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc_info(ioc, + "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED")); + return 0; +} + +/** + * _ctl_btdh_search_sas_device - searching for sas device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_pcie_device - searching for pcie device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->pcie_device_list)) + return rc; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == pcie_device->handle) { + btdh->bus = pcie_device->channel; + btdh->id = pcie_device->id; + rc = 1; + goto out; + } else if (btdh->bus == pcie_device->channel && btdh->id == + pcie_device->id && btdh->handle == 0xFFFF) { + btdh->handle = pcie_device->handle; + rc = 1; + goto out; + } 
+ } + out: + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_raid_device - searching for raid device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + */ +static long +_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + rc = _ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + rc = _ctl_btdh_search_pcie_device(ioc, &karg); + if (!rc) + _ctl_btdh_search_raid_device(ioc, &karg); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_diag_capability - return diag buffer capability + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * + * returns 1 when diag buffer support is enabled in firmware + */ +static u8 +_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) +{ + u8 rc = 0; + + switch (buffer_type) { + case MPI2_DIAG_BUF_TYPE_TRACE: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_SNAPSHOT: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_EXTENDED: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) + rc = 1; + } + + return rc; +} + +/** + * _ctl_diag_get_bufftype - return diag buffer type + * either TRACE, SNAPSHOT, or EXTENDED + * @ioc: per adapter object + * @unique_id: specifies the unique_id for the buffer + * + * returns MPT3_DIAG_UID_NOT_FOUND if the id not found + */ +static u8 +_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id) +{ + u8 index; + + for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { + if (ioc->unique_id[index] == unique_id) + return index; + } + + return MPT3_DIAG_UID_NOT_FOUND; +} + +/** + * _ctl_diag_register_2 - wrapper for registering diag buffer support + * @ioc: per adapter object + * @diag_register: the diag_register struct passed in from user space + * + */ +static long +_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_diag_register *diag_register) +{ + int rc, i; + void *request_data = NULL; + dma_addr_t request_data_dma; + u32 request_data_sz = 0; + Mpi2DiagBufferPostRequest_t *mpi_request; + 
Mpi2DiagBufferPostReply_t *mpi_reply; + u8 buffer_type; + u16 smid; + u16 ioc_status; + u32 ioc_state; + u8 issue_reset = 0; + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + ioc_err(ioc, "%s: failed due to ioc not operational\n", + __func__); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + buffer_type = diag_register->buffer_type; + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if (diag_register->unique_id == 0) { + ioc_err(ioc, + "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__, + diag_register->unique_id, buffer_type); + return -EINVAL; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED) && + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n", + __func__, buffer_type, ioc->unique_id[buffer_type]); + return -EINVAL; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + /* + * If driver posts buffer initially, then an application wants + * to Register that buffer (own it) without Releasing first, + * the application Register command MUST have the same buffer + * type and size in the Register command (obtained from the + * Query command). Otherwise that Register command will be + * failed. If the application has released the buffer but wants + * to re-register it, it should be allowed as long as the + * Unique-Id/Size match. + */ + + if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID && + ioc->diag_buffer_sz[buffer_type] == + diag_register->requested_buffer_size) { + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + dctlprintk(ioc, ioc_info(ioc, + "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n", + __func__, buffer_type, + ioc->unique_id[buffer_type], + diag_register->unique_id)); + + /* + * Application wants to own the buffer with + * the same size. 
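+ * The hand-off is just the unique_id update
+ * below; the buffer the driver already posted
+ * to firmware is reused as-is, with no new
+ * DIAG_BUFFER_POST being issued.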
+ */ + ioc->unique_id[buffer_type] = + diag_register->unique_id; + rc = 0; /* success */ + goto out; + } + } else if (ioc->unique_id[buffer_type] != + MPT3DIAGBUFFUNIQUEID) { + if (ioc->unique_id[buffer_type] != + diag_register->unique_id || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size || + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else { + ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + + if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size) { + + ioc_err(ioc, + "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n", + __func__, buffer_type, + ioc->diag_buffer_sz[buffer_type]); + return -EINVAL; + } + } + + if (diag_register->requested_buffer_size % 4) { + ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n", + __func__); + return -EINVAL; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + ioc->ctl_cmds.smid = smid; + + request_data = ioc->diag_buffer[buffer_type]; + request_data_sz = diag_register->requested_buffer_size; + ioc->unique_id[buffer_type] = diag_register->unique_id; + /* Reset ioc variables used for additional query commands */ + ioc->reset_from_user = 0; + memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query)); + ioc->diag_buffer_status[buffer_type] &= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + memcpy(ioc->product_specific[buffer_type], + diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); + ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; + + if (request_data) { + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { + dma_free_coherent(&ioc->pdev->dev, + ioc->diag_buffer_sz[buffer_type], + request_data, request_data_dma); + request_data = NULL; + } + } + + if (request_data == NULL) { + ioc->diag_buffer_sz[buffer_type] = 0; + ioc->diag_buffer_dma[buffer_type] = 0; + request_data = dma_alloc_coherent(&ioc->pdev->dev, + request_data_sz, &request_data_dma, GFP_KERNEL); + if (request_data == NULL) { + ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n", + __func__, request_data_sz); + mpt3sas_base_free_smid(ioc, smid); + rc = -ENOMEM; + goto out; + } + ioc->diag_buffer[buffer_type] = request_data; + ioc->diag_buffer_sz[buffer_type] = request_data_sz; + ioc->diag_buffer_dma[buffer_type] = request_data_dma; + } + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = diag_register->buffer_type; + mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); + mpi_request->BufferAddress = cpu_to_le64(request_data_dma); + mpi_request->BufferLength = cpu_to_le32(request_data_sz); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + dctlprintk(ioc, + ioc_info(ioc, 
"%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", + __func__, request_data, + (unsigned long long)request_data_dma, + le32_to_cpu(mpi_request->BufferLength))); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + ioc_err(ioc, "%s: no reply message\n", __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + } else { + ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + out: + + if (rc && request_data) { + dma_free_coherent(&ioc->pdev->dev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time + * @ioc: per adapter object + * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 + * + * This is called when command line option diag_buffer_enable is enabled + * at driver load time. + */ +void +mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) +{ + struct mpt3_diag_register diag_register; + u32 ret_val; + u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10; + u32 min_trace_buff_size = 0; + u32 decr_trace_buff_size = 0; + + memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); + + if (bits_to_register & 1) { + ioc_info(ioc, "registering trace buffer support\n"); + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION) ? + (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID); + + if (trace_buff_size != 0) { + diag_register.requested_buffer_size = trace_buff_size; + min_trace_buff_size = + ioc->manu_pg11.HostTraceBufferMinSizeKB<<10; + decr_trace_buff_size = + ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10; + + if (min_trace_buff_size > trace_buff_size) { + /* The buff size is not set correctly */ + ioc_err(ioc, + "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n", + min_trace_buff_size>>10, + trace_buff_size>>10); + ioc_err(ioc, + "Using zero Min Trace Buff Size\n"); + min_trace_buff_size = 0; + } + + if (decr_trace_buff_size == 0) { + /* + * retry the min size if decrement + * is not available. 
+ */ + decr_trace_buff_size = + trace_buff_size - min_trace_buff_size; + } + } else { + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + } + + do { + ret_val = _ctl_diag_register_2(ioc, &diag_register); + + if (ret_val == -ENOMEM && min_trace_buff_size && + (trace_buff_size - decr_trace_buff_size) >= + min_trace_buff_size) { + /* adjust the buffer size */ + trace_buff_size -= decr_trace_buff_size; + diag_register.requested_buffer_size = + trace_buff_size; + } else + break; + } while (true); + + if (ret_val == -ENOMEM) + ioc_err(ioc, + "Cannot allocate trace buffer memory. Last memory tried = %d KB\n", + diag_register.requested_buffer_size>>10); + else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] + & MPT3_DIAG_BUFFER_IS_REGISTERED) { + ioc_err(ioc, "Trace buffer memory %d KB allocated\n", + diag_register.requested_buffer_size>>10); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[ + MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } + } + + if (bits_to_register & 2) { + ioc_info(ioc, "registering snapshot buffer support\n"); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 4) { + ioc_info(ioc, "registering extended buffer support\n"); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } +} + +/** + * _ctl_diag_register - application register with driver + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +static long +_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_register karg; + long rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + rc = _ctl_diag_register_2(ioc, &karg); + + if (!rc && (ioc->diag_buffer_status[karg.buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + ioc->diag_buffer_status[karg.buffer_type] |= + MPT3_DIAG_BUFFER_IS_APP_OWNED; + + return rc; +} + +/** + * _ctl_diag_unregister - application unregister with driver + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. 
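+ *
+ * A minimal user-space sketch (fd on the control node, ioc_number 0 and
+ * my_unique_id are assumptions; unique_id must match the value used at
+ * register time):
+ *
+ *    struct mpt3_diag_unregister karg;
+ *
+ *    memset(&karg, 0, sizeof(karg));
+ *    karg.hdr.ioc_number = 0;
+ *    karg.unique_id = my_unique_id;
+ *    if (ioctl(fd, MPT3DIAGUNREGISTER, &karg) < 0)
+ *        perror("MPT3DIAGUNREGISTER");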
+ */ +static long +_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_unregister karg; + void *request_data; + dma_addr_t request_data_dma; + u32 request_data_sz; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n", + __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_APP_OWNED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_REGISTERED; + } else { + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + dma_free_coherent(&ioc->pdev->dev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + } + return 0; +} + +/** + * _ctl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. 
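+ *
+ * A sketch of the by-type lookup from user space (fd on the control
+ * node is an assumption):
+ *
+ *    struct mpt3_diag_query karg;
+ *
+ *    memset(&karg, 0, sizeof(karg));
+ *    karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ *    karg.unique_id = 0; /* 0x00 == look up by buffer_type */
+ *    if (ioctl(fd, MPT3DIAGQUERY, &karg) == 0)
+ *        printf("trace buffer: %u bytes\n", karg.total_buffer_size);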
+ */ +static long +_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_query karg; + void *request_data; + int i; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, ioc_info(ioc, "%s\n", + __func__)); + + karg.application_flags = 0; + buffer_type = karg.buffer_type; + + if (!_ctl_diag_capability(ioc, buffer_type)) { + ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EPERM; + } + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) { + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } + } + + if (karg.unique_id) { + if (karg.unique_id != ioc->unique_id[buffer_type]) { + ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", + __func__, karg.unique_id); + return -EINVAL; + } + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) + karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC; + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED)) + karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED; + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + karg.product_specific[i] = + ioc->product_specific[buffer_type][i]; + + karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; + karg.driver_added_buffer_size = 0; + karg.unique_id = ioc->unique_id[buffer_type]; + karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; + + if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { + ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n", + __func__, arg); + return -EFAULT; + } + return 0; +} + +/** + * mpt3sas_send_diag_release - Diag Release Message + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * @issue_reset: specifies whether host reset is required. 
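+ *
+ * Callers must act on @issue_reset themselves, as the ioctl and sysfs
+ * paths in this file do:
+ *
+ *    u8 issue_reset = 0;
+ *
+ *    rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+ *    if (issue_reset)
+ *        mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);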
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ u8 reset_needed = 0;
+
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED)
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: skipping due to FAULT state\n",
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
+ *issue_reset = reset_needed;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ ioc_err(ioc, "%s: no reply message\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
+ } else {
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to be returned to the
+ * driver, allowing an application to read the buffer without fear that
+ * firmware is overwriting information in the buffer.
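+ *
+ * A minimal user-space sketch (fd and my_unique_id are assumptions):
+ *
+ *    struct mpt3_diag_release karg;
+ *
+ *    memset(&karg, 0, sizeof(karg));
+ *    karg.unique_id = my_unique_id;
+ *    if (ioctl(fd, MPT3DIAGRELEASE, &karg) < 0)
+ *        perror("MPT3DIAGRELEASE");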
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
+
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ /* buffers were released due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+ __func__, buffer_type);
+ return 0;
+ }
+
+ rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_read_buffer karg;
+ struct mpt3_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
+
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ ioc_err(ioc, "%s: 
doesn't have buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -ENOMEM; + } + + request_size = ioc->diag_buffer_sz[buffer_type]; + + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { + ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n", + __func__); + return -EINVAL; + } + + if (karg.starting_offset > request_size) + return -EINVAL; + + diag_data = (void *)(request_data + karg.starting_offset); + dctlprintk(ioc, + ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n", + __func__, diag_data, karg.starting_offset, + karg.bytes_to_read)); + + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + + if (copy_to_user((void __user *)uarg->diagnostic_data, + diag_data, copy_size)) { + ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", + __func__, diag_data); + return -EFAULT; + } + + if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) + return 0; + + dctlprintk(ioc, + ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n", + __func__, buffer_type)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dctlprintk(ioc, + ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n", + __func__, buffer_type)); + return 0; + } + /* Get a free request frame and save the message context. + */ + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = buffer_type; + mpi_request->BufferLength = + cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); + mpi_request->BufferAddress = + cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + ioc_err(ioc, "%s: no reply message\n", __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); + } else { + ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + __func__, 
ioc_status, + le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + out: + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * _ctl_addnl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg: user space buffer containing ioctl content + * + * The application will send only unique_id. Driver will + * inspect unique_id first, if valid, fill the details related to cause + * for diag buffer release. + */ +static long +_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_addnl_diag_query karg; + u32 buffer_type = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); + if (karg.unique_id == 0) { + ioc_err(ioc, "%s: unique_id is(0x%08x)\n", + __func__, karg.unique_id); + return -EPERM; + } + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EPERM; + } + memset(&karg.rel_query, 0, sizeof(karg.rel_query)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + goto out; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n", + __func__, buffer_type); + return -EPERM; + } + memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query)); +out: + if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) { + ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n", + __func__, arg); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_COMPAT +/** + * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. + * @ioc: per adapter object + * @cmd: ioctl opcode + * @arg: (struct mpt3_ioctl_command32) + * + * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
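+ *
+ * Only the four user buffer pointers need widening; the scalar fields
+ * copy across unchanged, e.g.:
+ *
+ *    karg.data_in_size = karg32.data_in_size;
+ *    karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);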
+ */ +static long +_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, + void __user *arg) +{ + struct mpt3_ioctl_command32 karg32; + struct mpt3_ioctl_command32 __user *uarg; + struct mpt3_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) + return -EINVAL; + + uarg = (struct mpt3_ioctl_command32 __user *) arg; + + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return _ctl_do_mpt_command(ioc, karg, &uarg->mf); +} +#endif + +/** + * _ctl_ioctl_main - main ioctl entry point + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: user space data buffer + * @compat: handles 32 bit applications in 64bit os + * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & + * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. + */ +static long +_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, + u8 compat, u16 mpi_version) +{ + struct MPT3SAS_ADAPTER *ioc; + struct mpt3_ioctl_header ioctl_header; + enum block_state state; + long ret = -ENOIOCTLCMD; + + /* get IOCTL header */ + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct mpt3_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (_ctl_verify_adapter(ioctl_header.ioc_number, + &ioc, mpi_version) == -1 || !ioc) + return -ENODEV; + + /* pci_access_mutex lock acquired by ioctl path */ + mutex_lock(&ioc->pci_access_mutex); + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading || ioc->remove_host) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + + state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto out_unlock_pciaccess; + } + + + switch (cmd) { + case MPT3IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) + ret = _ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case MPT3COMMAND32: +#endif + case MPT3COMMAND: + { + struct mpt3_ioctl_command __user *uarg; + struct mpt3_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = _ctl_compat_mpt_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + + if (karg.hdr.ioc_number != ioctl_header.ioc_number) { + ret = -EINVAL; + break; + } + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { + uarg = arg; + ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); + } + break; + } + case MPT3EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) + ret = _ctl_eventquery(ioc, arg); + break; + case MPT3EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) + ret = _ctl_eventenable(ioc, arg); + break; + case MPT3EVENTREPORT: + ret = _ctl_eventreport(ioc, arg); + break; + case MPT3HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) + ret = _ctl_do_reset(ioc, arg); + break; + case MPT3BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) + ret = _ctl_btdh_mapping(ioc, arg); + break; + case MPT3DIAGREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) + ret = _ctl_diag_register(ioc, arg); + break; + case MPT3DIAGUNREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) + ret = _ctl_diag_unregister(ioc, arg); + break; + case MPT3DIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) + ret = _ctl_diag_query(ioc, arg); + break; + case MPT3DIAGRELEASE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) + ret = _ctl_diag_release(ioc, arg); + break; + case MPT3DIAGREADBUFFER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) + ret = _ctl_diag_read_buffer(ioc, arg); + break; + case MPT3ADDNLDIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query)) + ret = _ctl_addnl_diag_query(ioc, arg); + break; + default: + dctlprintk(ioc, + ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n", + cmd)); + break; + } + + mutex_unlock(&ioc->ctl_cmds.mutex); +out_unlock_pciaccess: + mutex_unlock(&ioc->pci_access_mutex); + return ret; +} + +/** + * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: ? + */ +static long +_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI25_VERSION | MPI26_VERSION value, + * to indicate that this ioctl cmd + * came from mpt3ctl ioctl device. + */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, + MPI25_VERSION | MPI26_VERSION); + return ret; +} + +/** + * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) + * @file: (struct file) + * @cmd: ioctl opcode + * @arg: ? + */ +static long +_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI2_VERSION value, to indicate that this ioctl cmd + * came from mpt2ctl ioctl device. 
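+ * (mpt2ctl is the control node exposed for SAS 2.0 controllers;
+ * SAS 3.0 and later controllers are reached through mpt3ctl. Both
+ * nodes are backed by this driver.)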
+ */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); + return ret; +} +#ifdef CONFIG_COMPAT +/** + * _ctl_ioctl_compat - main ioctl entry point (compat) + * @file: ? + * @cmd: ? + * @arg: ? + * + * This routine handles 32 bit applications in 64bit os. + */ +static long +_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, + MPI25_VERSION | MPI26_VERSION); + return ret; +} + +/** + * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat) + * @file: ? + * @cmd: ? + * @arg: ? + * + * This routine handles 32 bit applications in 64bit os. + */ +static long +_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); + return ret; +} +#endif + +/* scsi host attributes */ +/** + * version_fw_show - firmware version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_fw_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF); +} +static DEVICE_ATTR_RO(version_fw); + +/** + * version_bios_show - bios version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_bios_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (version & 0xFF000000) >> 24, + (version & 0x00FF0000) >> 16, + (version & 0x0000FF00) >> 8, + version & 0x000000FF); +} +static DEVICE_ATTR_RO(version_bios); + +/** + * version_mpi_show - MPI (message passing interface) version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_mpi_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", + ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); +} +static DEVICE_ATTR_RO(version_mpi); + +/** + * version_product_show - product name + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +version_product_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); +} +static DEVICE_ATTR_RO(version_product); + +/** + * version_nvdata_persistent_show - ndvata persistent version + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
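+ *
+ * Like the other shost attributes in this file it surfaces under
+ * sysfs, e.g. (host number is an assumption):
+ *
+ *    cat /sys/class/scsi_host/host0/version_nvdata_persistent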
+ */
+static ssize_t
+version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR_RO(version_nvdata_persistent);
+
+/**
+ * version_nvdata_default_show - nvdata default version
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+version_nvdata_default_show(struct device *cdev, struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR_RO(version_nvdata_default);
+
+/**
+ * board_name_show - board name
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR_RO(board_name);
+
+/**
+ * board_assembly_show - board assembly name
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR_RO(board_assembly);
+
+/**
+ * board_tracer_show - board tracer number
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR_RO(board_tracer);
+
+/**
+ * io_delay_show - io missing delay
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * This is for the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR_RO(io_delay);
+
+/**
+ * device_delay_show - device missing delay
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * This is for the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */ +static ssize_t +device_delay_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR_RO(device_delay); + +/** + * fw_queue_depth_show - global credits + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is firmware queue depth limit + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +/** + * host_sas_address_show - sas address + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * This is the controller sas address + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +host_sas_address_show(struct device *cdev, struct device_attribute *attr, + char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->sas_hba.sas_address); +} +static DEVICE_ATTR_RO(host_sas_address); + +/** + * logging_level_show - logging level + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +logging_level_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); +} +static ssize_t +logging_level_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%x", &val) != 1) + return -EINVAL; + + ioc->logging_level = val; + ioc_info(ioc, "logging_level=%08xh\n", + ioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +/** + * fwfault_debug_show - show/store fwfault_debug + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * mpt3sas_fwfault_debug is command line option + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +fwfault_debug_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} +static ssize_t +fwfault_debug_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->fwfault_debug = val; + ioc_info(ioc, "fwfault_debug=%d\n", + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR_RW(fwfault_debug); + +/** + * ioc_reset_count_show - ioc reset count + * @cdev: pointer to embedded class device + * @attr: ? 
+ * @buf: the buffer returned
+ *
+ * This is the number of host (IOC) resets the controller has gone through
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR_RO(ioc_reset_count);
+
+/**
+ * reply_queue_count_show - number of reply queues
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR_RO(reply_queue_count);
+
+/**
+ * BRM_status_show - Backup Rail Monitor Status
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * This is the Backup Rail Monitor status, supported only on warpdrive
+ * controllers
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+BRM_status_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2IOUnitPage3_t io_unit_pg3;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 backup_rail_monitor_status = 0;
+ u16 ioc_status;
+ int sz;
+ ssize_t rc = 0;
+
+ if (!ioc->is_warpdrive) {
+ ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+ __func__);
+ return 0;
+ }
+ /* pci_access_mutex lock acquired by sysfs show path */
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->pci_error_recovery || ioc->remove_host)
+ goto out;
+
+ sz = sizeof(io_unit_pg3);
+ memset(&io_unit_pg3, 0, sz);
+
+ if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
+ 0) {
+ ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+ __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+ __func__, ioc_status);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (io_unit_pg3.GPIOCount < 25) {
+ ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3.GPIOCount);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* BRM status is in bit zero of GPIOVal[24] */
+ backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
+
+ out:
+ mutex_unlock(&ioc->pci_access_mutex);
+ return rc;
+}
+static DEVICE_ATTR_RO(BRM_status);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
+
+/**
+ * host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
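+ *
+ * The reported size comes from the DIAG_BUFFER_START header that
+ * firmware writes at the start of the trace buffer; it is trusted only
+ * when Reserved3 reads 0x4742444c, i.e. the bytes "LDBG" interpreted
+ * as a little-endian dword.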
+ */ +static ssize_t +host_trace_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 size = 0; + struct DIAG_BUFFER_START *request_data; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + request_data = (struct DIAG_BUFFER_START *) + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; + if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01010000) && + le32_to_cpu(request_data->Reserved3) == 0x4742444c) + size = le32_to_cpu(request_data->Size); + + ioc->ring_buffer_sz = size; + return snprintf(buf, PAGE_SIZE, "%d\n", size); +} +static DEVICE_ATTR_RO(host_trace_buffer_size); + +/** + * host_trace_buffer_show - firmware ring buffer (trace only) + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. + * + * You will only be able to read 4k bytes of ring buffer at a time. + * In order to read beyond 4k bytes, you will have to write out the + * offset to the same attribute, it will move the pointer. + */ +static ssize_t +host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + void *request_data; + u32 size; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: host_trace_buffer is not registered\n", + __func__); + return 0; + } + + if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) + return 0; + + size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; + request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; + memcpy(buf, request_data, size); + return size; +} + +static ssize_t +host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->ring_buffer_offset = val; + return strlen(buf); +} +static DEVICE_ATTR_RW(host_trace_buffer); + + +/*****************************************/ + +/** + * host_trace_buffer_enable_show - firmware ring buffer (trace only) + * @cdev: pointer to embedded class device + * @attr: ? + * @buf: the buffer returned + * + * A sysfs 'read/write' shost attribute. 
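+ *
+ * e.g. (host number is an assumption):
+ *
+ *    echo post > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ *    echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable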
+ *
+ * This is a mechanism to post/release host_trace_buffers
+ */
+static ssize_t
+host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt3_diag_register diag_register;
+ u8 issue_reset = 0;
+
+ /* don't allow post/release to occur while recovery is active */
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery || ioc->is_driver_loading)
+ return -EBUSY;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+ ioc_info(ioc, "posting host trace buffers\n");
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+
+ if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+ /* post the same buffer allocated previously */
+ diag_register.requested_buffer_size =
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+ } else {
+ /*
+ * Free the diag buffer memory which was previously
+ * allocated by an application.
+ */
+ if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+ &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+ NULL;
+ }
+
+ diag_register.requested_buffer_size = (1024 * 1024);
+ }
+
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+		diag_register.unique_id =
+		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+		    (MPT2DIAGBUFFUNIQUEID) : (MPT3DIAGBUFFUNIQUEID);
+		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+		_ctl_diag_register_2(ioc,  &diag_register);
+		if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+		    MPT3_DIAG_BUFFER_IS_REGISTERED) {
+			ioc_info(ioc,
+			    "Trace buffer %d KB allocated through sysfs\n",
+			    diag_register.requested_buffer_size>>10);
+			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+				ioc->diag_buffer_status[
+				    MPI2_DIAG_BUF_TYPE_TRACE] |=
+				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+		}
+	} else if (!strcmp(str, "release")) {
+		/* exit out if host buffers are already released */
+		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+			goto out;
+		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+			goto out;
+		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+		    MPT3_DIAG_BUFFER_IS_RELEASED))
+			goto out;
+		ioc_info(ioc, "releasing host trace buffer\n");
+		ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
+		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+		    &issue_reset);
+	}
+
+ out:
+	return strlen(buf);
+}
+static DEVICE_ATTR_RW(host_trace_buffer_enable);
+
+/*********** diagnostic trigger support *********************************/
+
+/**
+ * diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_master_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
+	memcpy(buf, &ioc->diag_trigger_master, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * diag_trigger_master_store - store the diag_trigger_master attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the data written
+ * @count: number of bytes written
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_master_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct SL_WH_MASTER_TRIGGER_T *master_tg;
+	unsigned long flags;
+	ssize_t rc;
+	bool set = true;
+
+	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+
+	if (ioc->supports_trigger_pages) {
+		master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
+		    GFP_KERNEL);
+		if (!master_tg)
+			return -ENOMEM;
+
+		memcpy(master_tg, buf, rc);
+		if (!master_tg->MasterData)
+			set = false;
+		if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
+		    set)) {
+			kfree(master_tg);
+			return -EFAULT;
+		}
+		kfree(master_tg);
+	}
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	memset(&ioc->diag_trigger_master, 0,
+	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
+	memcpy(&ioc->diag_trigger_master, buf, rc);
+	ioc->diag_trigger_master.MasterData |=
+	    (MASTER_TRIGGER_FW_FAULT | MASTER_TRIGGER_ADAPTER_RESET);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+static DEVICE_ATTR_RW(diag_trigger_master);
+
+
+/**
+ * diag_trigger_event_show - show the diag_trigger_event attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
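+ *
+ * The returned data is the raw binary struct SL_WH_EVENT_TRIGGERS_T,
+ * intended for management applications rather than text tools.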
+ */
+static ssize_t
+diag_trigger_event_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
+	memcpy(buf, &ioc->diag_trigger_event, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * diag_trigger_event_store - store the diag_trigger_event attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the data written
+ * @count: number of bytes written
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_event_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct SL_WH_EVENT_TRIGGERS_T *event_tg;
+	unsigned long flags;
+	ssize_t sz;
+	bool set = true;
+
+	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+	if (ioc->supports_trigger_pages) {
+		event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
+		    GFP_KERNEL);
+		if (!event_tg)
+			return -ENOMEM;
+
+		memcpy(event_tg, buf, sz);
+		if (!event_tg->ValidEntries)
+			set = false;
+		if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
+		    set)) {
+			kfree(event_tg);
+			return -EFAULT;
+		}
+		kfree(event_tg);
+	}
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+	memset(&ioc->diag_trigger_event, 0,
+	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+	memcpy(&ioc->diag_trigger_event, buf, sz);
+	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return sz;
+}
+static DEVICE_ATTR_RW(diag_trigger_event);
+
+
+/**
+ * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_scsi_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+	memcpy(buf, &ioc->diag_trigger_scsi, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the data written
+ * @count: number of bytes written
+ *
+ * A sysfs 'read/write' shost attribute.
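+ *
+ * Writes larger than sizeof(struct SL_WH_SCSI_TRIGGERS_T) are truncated
+ * and ValidEntries is clamped to NUM_VALID_ENTRIES.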
+ */
+static ssize_t
+diag_trigger_scsi_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
+	unsigned long flags;
+	ssize_t sz;
+	bool set = true;
+
+	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+	if (ioc->supports_trigger_pages) {
+		scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
+		    GFP_KERNEL);
+		if (!scsi_tg)
+			return -ENOMEM;
+
+		memcpy(scsi_tg, buf, sz);
+		if (!scsi_tg->ValidEntries)
+			set = false;
+		if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
+		    set)) {
+			kfree(scsi_tg);
+			return -EFAULT;
+		}
+		kfree(scsi_tg);
+	}
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+	memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
+	memcpy(&ioc->diag_trigger_scsi, buf, sz);
+	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return sz;
+}
+static DEVICE_ATTR_RW(diag_trigger_scsi);
+
+
+/**
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_mpi_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+	memcpy(buf, &ioc->diag_trigger_mpi, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the data written
+ * @count: number of bytes written
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+diag_trigger_mpi_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
+	unsigned long flags;
+	ssize_t sz;
+	bool set = true;
+
+	sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+	if (ioc->supports_trigger_pages) {
+		mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
+		    GFP_KERNEL);
+		if (!mpi_tg)
+			return -ENOMEM;
+
+		memcpy(mpi_tg, buf, sz);
+		if (!mpi_tg->ValidEntries)
+			set = false;
+		if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
+		    set)) {
+			kfree(mpi_tg);
+			return -EFAULT;
+		}
+		kfree(mpi_tg);
+	}
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	memset(&ioc->diag_trigger_mpi, 0,
+	    sizeof(ioc->diag_trigger_mpi));
+	memcpy(&ioc->diag_trigger_mpi, buf, sz);
+	if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
+		ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return sz;
+}
+
+static DEVICE_ATTR_RW(diag_trigger_mpi);
+
+/*********** diagnostic trigger support *** END ****************************/
+
+/*****************************************/
+
+/**
+ * drv_support_bitmap_show - driver supported feature bitmap
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
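+ *
+ * Example (host0 is a placeholder; the value shown is illustrative):
+ *
+ *	# cat /sys/class/scsi_host/host0/drv_support_bitmap
+ *	0x00000001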
+ */
+static ssize_t
+drv_support_bitmap_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
+}
+static DEVICE_ATTR_RO(drv_support_bitmap);
+
+/**
+ * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the buffer returned
+ *
+ * A sysfs read/write shost attribute. When enabled, this attribute sets
+ * each target's queue depth to the HBA IO queue depth.
+ */
+static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
+}
+
+/**
+ * enable_sdev_max_qd_store - Enable/disable sdev max qd
+ * @cdev: pointer to embedded class device
+ * @attr: unused
+ * @buf: the data written
+ * @count: unused
+ *
+ * A sysfs read/write shost attribute. When enabled, this attribute sets
+ * each target's queue depth to the HBA IO queue depth. When disabled,
+ * each target falls back to its default queue depth.
+ */
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	int val = 0;
+	struct scsi_device *sdev;
+	struct _raid_device *raid_device;
+	int qdepth;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		ioc->enable_sdev_max_qd = 0;
+		shost_for_each_device(sdev, ioc->shost) {
+			sas_device_priv_data = sdev->hostdata;
+			if (!sas_device_priv_data)
+				continue;
+			sas_target_priv_data = sas_device_priv_data->sas_target;
+			if (!sas_target_priv_data)
+				continue;
+
+			if (sas_target_priv_data->flags &
+			    MPT_TARGET_FLAGS_VOLUME) {
+				raid_device =
+				    mpt3sas_raid_device_find_by_handle(ioc,
+				    sas_target_priv_data->handle);
+
+				switch (raid_device->volume_type) {
+				case MPI2_RAID_VOL_TYPE_RAID0:
+					if (raid_device->device_info &
+					    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+						qdepth =
+						    MPT3SAS_SAS_QUEUE_DEPTH;
+					else
+						qdepth =
+						    MPT3SAS_SATA_QUEUE_DEPTH;
+					break;
+				case MPI2_RAID_VOL_TYPE_RAID1E:
+				case MPI2_RAID_VOL_TYPE_RAID1:
+				case MPI2_RAID_VOL_TYPE_RAID10:
+				case MPI2_RAID_VOL_TYPE_UNKNOWN:
+				default:
+					qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+				}
+			} else if (sas_target_priv_data->flags &
+			    MPT_TARGET_FLAGS_PCIE_DEVICE)
+				qdepth = ioc->max_nvme_qd;
+			else
+				qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
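+
+			/* apply the recomputed per-device default depth */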
+			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+		}
+		break;
+	case 1:
+		ioc->enable_sdev_max_qd = 1;
+		shost_for_each_device(sdev, ioc->shost)
+			mpt3sas_scsih_change_queue_depth(sdev,
+			    shost->can_queue);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return strlen(buf);
+}
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
+
+static struct attribute *mpt3sas_host_attrs[] = {
+	&dev_attr_version_fw.attr,
+	&dev_attr_version_bios.attr,
+	&dev_attr_version_mpi.attr,
+	&dev_attr_version_product.attr,
+	&dev_attr_version_nvdata_persistent.attr,
+	&dev_attr_version_nvdata_default.attr,
+	&dev_attr_board_name.attr,
+	&dev_attr_board_assembly.attr,
+	&dev_attr_board_tracer.attr,
+	&dev_attr_io_delay.attr,
+	&dev_attr_device_delay.attr,
+	&dev_attr_logging_level.attr,
+	&dev_attr_fwfault_debug.attr,
+	&dev_attr_fw_queue_depth.attr,
+	&dev_attr_host_sas_address.attr,
+	&dev_attr_ioc_reset_count.attr,
+	&dev_attr_host_trace_buffer_size.attr,
+	&dev_attr_host_trace_buffer.attr,
+	&dev_attr_host_trace_buffer_enable.attr,
+	&dev_attr_reply_queue_count.attr,
+	&dev_attr_diag_trigger_master.attr,
+	&dev_attr_diag_trigger_event.attr,
+	&dev_attr_diag_trigger_scsi.attr,
+	&dev_attr_diag_trigger_mpi.attr,
+	&dev_attr_drv_support_bitmap.attr,
+	&dev_attr_BRM_status.attr,
+	&dev_attr_enable_sdev_max_qd.attr,
+	NULL,
+};
+
+static const struct attribute_group mpt3sas_host_attr_group = {
+	.attrs = mpt3sas_host_attrs
+};
+
+const struct attribute_group *mpt3sas_host_groups[] = {
+	&mpt3sas_host_attr_group,
+	NULL
+};
+
+/* device attributes */
+
+/**
+ * sas_address_show - sas address
+ * @dev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * sas_device_handle_show - device handle
+ * @dev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+sas_device_handle_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+	    sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR_RO(sas_device_handle);
+
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
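+
+/*
+ * Example (sdb is a placeholder for a SATA disk behind this HBA):
+ *
+ *	# cat /sys/block/sdb/device/sas_ncq_prio_supported
+ *	1
+ */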
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	    sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+	struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+	bool ncq_prio_enable = false;
+
+	if (kstrtobool(buf, &ncq_prio_enable))
+		return -EINVAL;
+
+	if (!scsih_ncq_prio_supp(sdev))
+		return -EINVAL;
+
+	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+	return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
+static struct attribute *mpt3sas_dev_attrs[] = {
+	&dev_attr_sas_address.attr,
+	&dev_attr_sas_device_handle.attr,
+	&dev_attr_sas_ncq_prio_supported.attr,
+	&dev_attr_sas_ncq_prio_enable.attr,
+	NULL,
+};
+
+static const struct attribute_group mpt3sas_dev_attr_group = {
+	.attrs = mpt3sas_dev_attrs
+};
+
+const struct attribute_group *mpt3sas_dev_groups[] = {
+	&mpt3sas_dev_attr_group,
+	NULL
+};
+
+/* file operations table for mpt3ctl device */
+static const struct file_operations ctl_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = _ctl_ioctl,
+	.poll = _ctl_poll,
+	.fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+/* file operations table for mpt2ctl device */
+static const struct file_operations ctl_gen2_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = _ctl_mpt2_ioctl,
+	.poll = _ctl_poll,
+	.fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = _ctl_mpt2_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+	.minor = MPT3SAS_MINOR,
+	.name = MPT3SAS_DEV_NAME,
+	.fops = &ctl_fops,
+};
+
+static struct miscdevice gen2_ctl_dev = {
+	.minor = MPT2SAS_MINOR,
+	.name = MPT2SAS_DEV_NAME,
+	.fops = &ctl_gen2_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ * @hbas_to_enumerate: 0 - both SAS 2.0 & SAS 3.0 HBAs, 1 - SAS 2.0 only,
+ *	2 - SAS 3.0 only
+ */
+void
+mpt3sas_ctl_init(ushort hbas_to_enumerate)
+{
+	async_queue = NULL;
+
+	/* Don't register the mpt3ctl ioctl device if
+	 * hbas_to_enumerate is one.
+	 */
+	if (hbas_to_enumerate != 1)
+		if (misc_register(&ctl_dev) < 0)
+			pr_err("%s can't register misc device [minor=%d]\n",
+			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+	/* Don't register the mpt2ctl ioctl device if
+	 * hbas_to_enumerate is two.
+	 */
+	if (hbas_to_enumerate != 2)
+		if (misc_register(&gen2_ctl_dev) < 0)
+			pr_err("%s can't register misc device [minor=%d]\n",
+			    MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
+
+	init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ * @hbas_to_enumerate: 0 - both SAS 2.0 & SAS 3.0 HBAs, 1 - SAS 2.0 only,
+ *	2 - SAS 3.0 only
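+ *
+ * Deregisters whichever misc devices were registered at init time and
+ * frees the per adapter diag buffers and event logs.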
+ */
+void
+mpt3sas_ctl_exit(ushort hbas_to_enumerate)
+{
+	struct MPT3SAS_ADAPTER *ioc;
+	int i;
+
+	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+
+		/* free memory associated with diag buffers */
+		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+			if (!ioc->diag_buffer[i])
+				continue;
+			dma_free_coherent(&ioc->pdev->dev,
+			    ioc->diag_buffer_sz[i],
+			    ioc->diag_buffer[i],
+			    ioc->diag_buffer_dma[i]);
+			ioc->diag_buffer[i] = NULL;
+			ioc->diag_buffer_status[i] = 0;
+		}
+
+		kfree(ioc->event_log);
+	}
+	if (hbas_to_enumerate != 1)
+		misc_deregister(&ctl_dev);
+	if (hbas_to_enumerate != 2)
+		misc_deregister(&gen2_ctl_dev);
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
new file mode 100644
index 000000000..8f6ffb402
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -0,0 +1,451 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
+ * Copyright (C) 2012-2014  LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_CTL_H_INCLUDED
+#define MPT3SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+#include "mpt3sas_base.h"
+
+#ifndef MPT2SAS_MINOR
+#define MPT2SAS_MINOR		(MPT_MINOR + 1)
+#endif
+#ifndef MPT3SAS_MINOR
+#define MPT3SAS_MINOR		(MPT_MINOR + 2)
+#endif
+#define MPT2SAS_DEV_NAME	"mpt2ctl"
+#define MPT3SAS_DEV_NAME	"mpt3ctl"
+#define MPT3_MAGIC_NUMBER	'L'
+#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT3IOCINFO	_IOWR(MPT3_MAGIC_NUMBER, 17, \
+	struct mpt3_ioctl_iocinfo)
+#define MPT3COMMAND	_IOWR(MPT3_MAGIC_NUMBER, 20, \
+	struct mpt3_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT3COMMAND32	_IOWR(MPT3_MAGIC_NUMBER, 20, \
+	struct mpt3_ioctl_command32)
+#endif
+#define MPT3EVENTQUERY	_IOWR(MPT3_MAGIC_NUMBER, 21, \
+	struct mpt3_ioctl_eventquery)
+#define MPT3EVENTENABLE	_IOWR(MPT3_MAGIC_NUMBER, 22, \
+	struct mpt3_ioctl_eventenable)
+#define MPT3EVENTREPORT	_IOWR(MPT3_MAGIC_NUMBER, 23, \
+	struct mpt3_ioctl_eventreport)
+#define MPT3HARDRESET	_IOWR(MPT3_MAGIC_NUMBER, 24, \
+	struct mpt3_ioctl_diag_reset)
+#define MPT3BTDHMAPPING	_IOWR(MPT3_MAGIC_NUMBER, 31, \
+	struct mpt3_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \
+	struct mpt3_diag_register)
+#define MPT3DIAGRELEASE	_IOWR(MPT3_MAGIC_NUMBER, 27, \
+	struct mpt3_diag_release)
+#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \
+	struct mpt3_diag_unregister)
+#define MPT3DIAGQUERY	_IOWR(MPT3_MAGIC_NUMBER, 29, \
+	struct mpt3_diag_query)
+#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
+	struct mpt3_diag_read_buffer)
+#define MPT3ADDNLDIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 32, \
+	struct mpt3_addnl_diag_query)
+
+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
+
+/**
+ * struct mpt3_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number of bytes to transfer on read
+ */
+struct mpt3_ioctl_header {
+	uint32_t ioc_number;
+	uint32_t port_number;
+	uint32_t max_data_size;
+};
+
+/**
+ * struct mpt3_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt3_ioctl_diag_reset {
+	struct mpt3_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt3_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt3_ioctl_pci_info {
+	union {
+		struct {
+			uint32_t device:5;
+			uint32_t function:3;
+			uint32_t bus:24;
+		} bits;
+		uint32_t word;
+	} u;
+	uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI	(0x00)
+#define MPT2_IOCTL_INTERFACE_FC		(0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP	(0x02)
+#define MPT2_IOCTL_INTERFACE_SAS	(0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2	(0x04)
+#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200	(0x05)
+#define MPT3_IOCTL_INTERFACE_SAS3	(0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35	(0x07)
+#define MPT2_IOCTL_VERSION_LENGTH	(32)
+
+/**
+ * struct mpt3_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @subsystem_device - PCI subsystem Device ID
+ * @subsystem_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt3_ioctl_iocinfo {
+	struct mpt3_ioctl_header hdr;
+	uint32_t adapter_type;
+	uint32_t port_number;
+	uint32_t pci_id;
+	uint32_t hw_rev;
+	uint32_t subsystem_device;
+	uint32_t subsystem_vendor;
+	uint32_t rsvd0;
+	uint32_t firmware_version;
+	uint32_t bios_version;
+	uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+	uint8_t rsvd1;
+	uint8_t scsi_id;
+	uint16_t rsvd2;
+	struct mpt3_ioctl_pci_info pci_information;
+};
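+
+/*
+ * Illustrative userspace sketch (not part of the driver), assuming
+ * /dev/mpt3ctl exists and the caller is permitted to open it:
+ *
+ *	struct mpt3_ioctl_iocinfo info = { 0 };
+ *	int fd = open("/dev/mpt3ctl", O_RDWR);
+ *
+ *	info.hdr.ioc_number = 0;
+ *	info.hdr.max_data_size = sizeof(info);
+ *	if (fd >= 0 && ioctl(fd, MPT3IOCINFO, &info) == 0)
+ *		printf("firmware version 0x%08x\n", info.firmware_version);
+ */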
+
+
+/* number of event log entries */
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (200)
+
+/**
+ * struct mpt3_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt3_ioctl_eventquery {
+	struct mpt3_ioctl_header hdr;
+	uint16_t event_entries;
+	uint16_t rsvd;
+	uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt3_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt3_ioctl_eventenable {
+	struct mpt3_ioctl_header hdr;
+	uint32_t event_types[4];
+};
+
+#define MPT3_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT3_IOCTL_EVENTS -
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT3_IOCTL_EVENTS {
+	uint32_t event;
+	uint32_t context;
+	uint8_t data[MPT3_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt3_ioctl_eventreport - returning event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT3_IOCTL_EVENTS)
+ */
+struct mpt3_ioctl_eventreport {
+	struct mpt3_ioctl_header hdr;
+	struct MPT3_IOCTL_EVENTS event_data[1];
+};
+
+/**
+ * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ *  value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ *	the first SGL
+ * @mf - message frame payload
+ */
+struct mpt3_ioctl_command {
+	struct mpt3_ioctl_header hdr;
+	uint32_t timeout;
+	void __user *reply_frame_buf_ptr;
+	void __user *data_in_buf_ptr;
+	void __user *data_out_buf_ptr;
+	void __user *sense_data_ptr;
+	uint32_t max_reply_bytes;
+	uint32_t data_in_size;
+	uint32_t data_out_size;
+	uint32_t max_sense_bytes;
+	uint32_t data_sge_offset;
+	uint8_t mf[1];
+};
+
+#ifdef CONFIG_COMPAT
+struct mpt3_ioctl_command32 {
+	struct mpt3_ioctl_header hdr;
+	uint32_t timeout;
+	uint32_t reply_frame_buf_ptr;
+	uint32_t data_in_buf_ptr;
+	uint32_t data_out_buf_ptr;
+	uint32_t sense_data_ptr;
+	uint32_t max_reply_bytes;
+	uint32_t data_in_size;
+	uint32_t data_out_size;
+	uint32_t max_sense_bytes;
+	uint32_t data_sge_offset;
+	uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt3_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain a bus/id the application sets
+ * handle to a valid handle, and bus/id to 0xFFFF.
+ *
+ * To obtain the device handle the application sets
+ * bus/id to a valid value, and the handle to 0xFFFF.
+ */
+struct mpt3_ioctl_btdh_mapping {
+	struct mpt3_ioctl_header hdr;
+	uint32_t id;
+	uint32_t bus;
+	uint16_t handle;
+	uint16_t rsvd;
+};
+
+
+
+/* application flags for mpt3_diag_register, mpt3_diag_query */
+#define MPT3_APP_FLAGS_APP_OWNED	(0x0001)
+#define MPT3_APP_FLAGS_BUFFER_VALID	(0x0002)
+#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS	(0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)
+
+/* flags for mpt3_diag_read_buffer */
+#define MPT3_FLAGS_REREGISTER	(0x0001)
+
+#define MPT3_PRODUCT_SPECIFIC_DWORDS	23
+
+/**
+ * struct mpt3_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffer size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ *  of the buffer.
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt3_diag_register {
+	struct mpt3_ioctl_header hdr;
+	uint8_t reserved;
+	uint8_t buffer_type;
+	uint16_t application_flags;
+	uint32_t diagnostic_flags;
+	uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+	uint32_t requested_buffer_size;
+	uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to clean up any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt3_diag_unregister {
+	struct mpt3_ioctl_header hdr;
+	uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. The driver
+ * will inspect unique_id first; if valid, it will fill in all the info.
+ * If unique_id is 0x00, the driver will return info specified by Buffer
+ * Type.
+ */
+struct mpt3_diag_query {
+	struct mpt3_ioctl_header hdr;
+	uint8_t reserved;
+	uint8_t buffer_type;
+	uint16_t application_flags;
+	uint32_t diagnostic_flags;
+	uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+	uint32_t total_buffer_size;
+	uint32_t driver_added_buffer_size;
+	uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+struct mpt3_diag_release {
+	struct mpt3_ioctl_header hdr;
+	uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - starting offset within the driver's buffer at which to
+ *	start reading data into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the driver's buffer into the
+ *	application buffer starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt3_diag_read_buffer {
+	struct mpt3_ioctl_header hdr;
+	uint8_t status;
+	uint8_t reserved;
+	uint16_t flags;
+	uint32_t starting_offset;
+	uint32_t bytes_to_read;
+	uint32_t unique_id;
+	uint32_t diagnostic_data[1];
+};
+
+/**
+ * struct mpt3_addnl_diag_query - diagnostic buffer release reason
+ * @hdr - generic header
+ * @unique_id - unique id associated with this buffer.
+ * @rel_query - release query.
+ * @reserved2 - reserved
+ */
+struct mpt3_addnl_diag_query {
+	struct mpt3_ioctl_header hdr;
+	uint32_t unique_id;
+	struct htb_rel_query rel_query;
+	uint32_t reserved2[2];
+};
+
+#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
new file mode 100644
index 000000000..cceeb2c16
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -0,0 +1,206 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
+ * Copyright (C) 2012-2014  LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#ifndef MPT3SAS_DEBUG_H_INCLUDED +#define MPT3SAS_DEBUG_H_INCLUDED + +#define MPT_DEBUG 0x00000001 +#define MPT_DEBUG_MSG_FRAME 0x00000002 +#define MPT_DEBUG_SG 0x00000004 +#define MPT_DEBUG_EVENTS 0x00000008 +#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010 +#define MPT_DEBUG_INIT 0x00000020 +#define MPT_DEBUG_EXIT 0x00000040 +#define MPT_DEBUG_FAIL 0x00000080 +#define MPT_DEBUG_TM 0x00000100 +#define MPT_DEBUG_REPLY 0x00000200 +#define MPT_DEBUG_HANDSHAKE 0x00000400 +#define MPT_DEBUG_CONFIG 0x00000800 +#define MPT_DEBUG_DL 0x00001000 +#define MPT_DEBUG_RESET 0x00002000 +#define MPT_DEBUG_SCSI 0x00004000 +#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_SAS 0x00020000 +#define MPT_DEBUG_TRANSPORT 0x00040000 +#define MPT_DEBUG_TASK_SET_FULL 0x00080000 + +#define MPT_DEBUG_TRIGGER_DIAG 0x00200000 + + +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \ +{ \ + if (IOC->logging_level & BITS) \ + CMD; \ +} + +/* + * debug macros + */ + +#define dprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG) + +#define dsgprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG) + +#define devtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS) + +#define dewtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK) + +#define dinitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT) + +#define dexitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT) + +#define dfailprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL) + +#define dtmprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM) + +#define dreplyprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY) + +#define dhsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE) + +#define dcprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG) + +#define 
ddlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL) + +#define drsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET) + +#define dsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI) + +#define dctlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) + +#define dsasprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS) + +#define dsastransport(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) + +#define dmfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME) + +#define dtsfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL) + +#define dtransportprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT) + +#define dTriggerDiagPrintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG) + + + +/* inline functions for dumping debug data*/ + +/** + * _debug_dump_mf - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_mf(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("mf:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_reply - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_reply(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("reply:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_config - print config page contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_config(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("config:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +#endif /* MPT3SAS_DEBUG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c new file mode 100644 index 000000000..a6ab1db81 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Debugfs interface Support for MPT (Message Passing Technology) based + * controllers. + * + * Copyright (C) 2020 Broadcom Inc. + * + * Authors: Broadcom Inc. 
+ *	Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ *	Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ *
+ **/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "mpt3sas_base.h"
+#include <linux/debugfs.h>
+
+static struct dentry *mpt3sas_debugfs_root;
+
+/*
+ * _debugfs_iocdump_read - copy ioc dump from debugfs buffer
+ * @filp: File Pointer
+ * @ubuf: Buffer to fill data
+ * @cnt: Length of the buffer
+ * @ppos: Offset in the file
+ */
+
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+	loff_t *ppos)
+
+{
+	struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+	if (!debug || !debug->buf)
+		return 0;
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+	struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+	struct mpt3sas_debugfs_buffer *debug;
+
+	debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->buf = (void *)ioc;
+	debug->len = sizeof(struct MPT3SAS_ADAPTER);
+	file->private_data = debug;
+	return 0;
+}
+
+/*
+ * _debugfs_iocdump_release : release the ioc_dump debugfs attribute
+ * @inode: inode structure of the corresponding device
+ * @file: File pointer
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+	struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	file->private_data = NULL;
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+	.owner		= THIS_MODULE,
+	.open           = _debugfs_iocdump_open,
+	.read           = _debugfs_iocdump_read,
+	.release        = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+	mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+	if (!mpt3sas_debugfs_root)
+		pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+	debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+	char name[64];
+
+	snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+	if (!ioc->debugfs_root) {
+		ioc->debugfs_root =
+		    debugfs_create_dir(name, mpt3sas_debugfs_root);
+		if (!ioc->debugfs_root) {
+			dev_err(&ioc->pdev->dev,
+			    "Cannot create per adapter debugfs directory\n");
+			return;
+		}
+	}
+
+	snprintf(name, sizeof(name), "ioc_dump");
+	ioc->ioc_dump =	debugfs_create_file(name, 0444,
+	    ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+	if (!ioc->ioc_dump) {
+		dev_err(&ioc->pdev->dev,
+		    "Cannot create ioc_dump debugfs file\n");
+		debugfs_remove(ioc->debugfs_root);
+		return;
+	}
+
+	snprintf(name, sizeof(name), "host_recovery");
+	debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
+
+}
+
+/*
+ * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void
mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc) +{ + debugfs_remove_recursive(ioc->debugfs_root); +} + diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c new file mode 100644 index 000000000..2ea3bdc63 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -0,0 +1,12943 @@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+#include <linux/blk-mq-pci.h>
+#include <asm/unaligned.h>
+
+#include "mpt3sas_base.h"
+
+#define RAID_CHANNEL 1
+
+#define PCIE_CHANNEL 2
+
+/* forward proto's */
+static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device);
+static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd);
+static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+	struct _pcie_device *pcie_device);
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
+
+/* global parameters */
+LIST_HEAD(mpt3sas_ioc_list);
+/* global ioc lock for list operations */
+DEFINE_SPINLOCK(gioc_lock);
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+MODULE_ALIAS("mpt2sas");
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt2_ids;
+static int mpt3_ids;
+
+static u8 tm_tr_cb_idx = -1;
+static u8 tm_tr_volume_cb_idx = -1;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level,
+	" bits for enabling additional logging info (default=0)");
+
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0444);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767, default=32767");
+
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0444);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
+/* scsi mid-layer global parameter is max_report_luns, which is 511 */
+#define MPT3SAS_MAX_LUN (16895)
+static u64 max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, ullong, 0444);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+static ushort hbas_to_enumerate;
+module_param(hbas_to_enumerate, ushort, 0444);
+MODULE_PARM_DESC(hbas_to_enumerate,
+		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
+		  1 - enumerates only SAS 2.0 generation HBAs\n \
+		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Any combination of these bits may be set.
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0444);
+MODULE_PARM_DESC(diag_buffer_enable,
+	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0444);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
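+
+/*
+ * Example (module load time; values are illustrative and may be
+ * combined):
+ *
+ *	# modprobe mpt3sas diag_buffer_enable=1 logging_level=0x8000
+ */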
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = -1;
+module_param(prot_mask, int, 0444);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+	"Enable sdev max qd as can_queue, def=disabled(0)");
+
+static int multipath_on_hba = -1;
+module_param(multipath_on_hba, int, 0);
+MODULE_PARM_DESC(multipath_on_hba,
+	"Multipath support to add same target device\n\t\t"
+	"as many times as it is visible to HBA from various paths\n\t\t"
+	"(by default:\n\t\t"
+	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
+	"\t SAS 3.5 HBA - This will be enabled)");
+
+static int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable,
+	"Shared host tagset enable/disable Default: enable(1)");
+
+/* raid transport support */
+static struct raid_template *mpt3sas_raid_template;
+static struct raid_template *mpt2sas_raid_template;
+
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+	u8 skey;
+	u8 asc;
+	u8 ascq;
+};
+
+#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
+#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @refcount: kref for this event
+ * @event_data: reply event data payload follows
+ *
+ * This object is stored on ioc->fw_event_list.
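+ * Its lifetime is managed through @refcount; see fw_event_work_get()
+ * and fw_event_work_put().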
+ */
+struct fw_event_work {
+	struct list_head list;
+	struct work_struct work;
+
+	struct MPT3SAS_ADAPTER *ioc;
+	u16 device_handle;
+	u8 VF_ID;
+	u8 VP_ID;
+	u8 ignore;
+	u16 event;
+	struct kref refcount;
+	char event_data[] __aligned(4);
+};
+
+static void fw_event_work_free(struct kref *r)
+{
+	kfree(container_of(r, struct fw_event_work, refcount));
+}
+
+static void fw_event_work_get(struct fw_event_work *fw_work)
+{
+	kref_get(&fw_work->refcount);
+}
+
+static void fw_event_work_put(struct fw_event_work *fw_work)
+{
+	kref_put(&fw_work->refcount, fw_event_work_free);
+}
+
+static struct fw_event_work *alloc_fw_event_work(int len)
+{
+	struct fw_event_work *fw_event;
+
+	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
+	if (!fw_event)
+		return NULL;
+
+	kref_init(&fw_event->refcount);
+	return fw_event;
+}
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi status
+ * @log_info: log information
+ * @transfer_length: data length transfer when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+	u16 handle;
+	u8 is_raid;
+	enum dma_data_direction dir;
+	u32 data_length;
+	dma_addr_t data_dma;
+	u8 sense[SCSI_SENSE_BUFFERSIZE];
+	u32 lun;
+	u8 cdb_length;
+	u8 cdb[32];
+	u8 timeout;
+	u8 VF_ID;
+	u8 VP_ID;
+	u8 valid_reply;
+	/* the following bits are only valid when 'valid_reply = 1' */
+	u32 sense_length;
+	u16 ioc_status;
+	u8 scsi_state;
+	u8 scsi_status;
+	u32 log_info;
+	u32 transfer_length;
+};
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: value of the logging_level module parameter
+ * @kp: kernel parameter descriptor
+ *
+ * Note: The logging levels are defined in mpt3sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
+{
+	int ret = param_set_int(val, kp);
+	struct MPT3SAS_ADAPTER *ioc;
+
+	if (ret)
+		return ret;
+
+	pr_info("setting logging_level(0x%08x)\n", logging_level);
+	spin_lock(&gioc_lock);
+	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+		ioc->logging_level = logging_level;
+	spin_unlock(&gioc_lock);
+	return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+	&logging_level, 0644);
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Return: 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+	Mpi2BootDeviceSasWwid_t *boot_device)
+{
+	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in IDENTIFY frame
+ * @boot_device: boot device object from bios page 2
+ *
+ * Return: 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+	Mpi2BootDeviceDeviceName_t *boot_device)
+{
+	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Return: 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+	Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+	return (enclosure_logical_id == le64_to_cpu(boot_device->
+	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+	    SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
+ *			  port number from port list
+ * @ioc: per adapter object
+ * @port_id: port number
+ * @bypass_dirty_port_flag: when set, return the matching hba port entry
+ *	even if that entry is marked as dirty.
+ *
+ * Search for the hba port entry corresponding to the provided port number;
+ * if found, return the port object, otherwise return NULL.
+ */
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
+	u8 port_id, u8 bypass_dirty_port_flag)
+{
+	struct hba_port *port, *port_next;
+
+	/*
+	 * When multipath_on_hba is disabled then
+	 * search the hba_port entry using default
+	 * port id i.e. 255
+	 */
+	if (!ioc->multipath_on_hba)
+		port_id = MULTIPATH_DISABLED_PORT_ID;
+
+	list_for_each_entry_safe(port, port_next,
+	    &ioc->port_table_list, list) {
+		if (port->port_id != port_id)
+			continue;
+		if (bypass_dirty_port_flag)
+			return port;
+		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
+			continue;
+		return port;
+	}
+
+	/*
+	 * Allocate hba_port object for default port id (i.e. 255)
+	 * when multipath_on_hba is disabled for the HBA.
+	 * And add this object to port_table_list.
+	 */
+	if (!ioc->multipath_on_hba) {
+		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
+		if (!port)
+			return NULL;
+
+		port->port_id = port_id;
+		ioc_info(ioc,
+		    "hba_port entry: %p, port: %d is added to hba_port list\n",
+		    port, port->port_id);
+		list_add_tail(&port->list,
+		    &ioc->port_table_list);
+		return port;
+	}
+	return NULL;
+}
+
+/**
+ * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
+ * @ioc: per adapter object
+ * @port: hba_port object
+ * @phy: phy number
+ *
+ * Return virtual_phy object corresponding to phy number.
+ */
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+	struct hba_port *port, u32 phy)
+{
+	struct virtual_phy *vphy, *vphy_next;
+
+	if (!port->vphys_mask)
+		return NULL;
+
+	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
+		if (vphy->phy_mask & (1 << phy))
+			return vphy;
+	}
+	return NULL;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in IDENTIFY frame
+ * @enclosure_logical_id: enclosure logical id
+ * @slot: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Return: 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+	u64 enclosure_logical_id, u16 slot, u8 form,
+	Mpi2BiosPage2BootDevice_t *boot_device)
+{
+	int rc = 0;
+
+	switch (form) {
+	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+		if (!sas_address)
+			break;
+		rc = _scsih_srch_boot_sas_address(
+		    sas_address, &boot_device->SasWwid);
+		break;
+	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+		if (!enclosure_logical_id)
+			break;
+		rc = _scsih_srch_boot_encl_slot(
+		    enclosure_logical_id,
+		    slot, &boot_device->EnclosureSlot);
+		break;
+	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+		if (!device_name)
+			break;
+		rc = _scsih_srch_boot_device_name(
+		    device_name, &boot_device->DeviceName);
+		break;
+	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * _scsih_get_sas_address - obtain the sas_address for the given device handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sas_address: resulting sas address (output)
+ *
+ * Return: 0 for success, non-zero on failure
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u64 *sas_address)
+{
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 ioc_status;
+
+	*sas_address = 0;
+
+	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		return -ENXIO;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+		/* For HBA, vSES doesn't return HBA SAS address. Instead return
+		 * vSES's sas address.
+		 */
+		if ((handle <= ioc->sas_hba.num_phys) &&
+		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
+		   MPI2_SAS_DEVICE_INFO_SEP)))
+			*sas_address = ioc->sas_hba.sas_address;
+		else
+			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+		return 0;
+	}
+
+	/* we hit this because the given parent handle doesn't exist */
+	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		return -ENXIO;
+
+	/* else error case */
+	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+		handle, ioc_status, __FILE__, __LINE__, __func__);
+	return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: sas_device or pcie_device object
+ * @channel: SAS or PCIe channel
+ *
+ * Determines whether this device should be the first device reported to
+ * scsi-ml or the sas transport; the purpose is persistent boot device
+ * support. There are primary, alternate, and current entries in bios
+ * page 2. The order of priority is primary, alternate, then current.
+ * This routine saves the corresponding device object.
+ * The saved data is used later in _scsih_probe_boot_devices().
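+ *
+ * Callers pass the freshly discovered device object together with its
+ * channel, e.g. (sketches taken from the init-add paths below):
+ *
+ *	_scsih_determine_boot_device(ioc, sas_device, 0);
+ *	_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);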
+ */ +static void +_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, + u32 channel) +{ + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _raid_device *raid_device; + u64 sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + /* only process this function when driver loads */ + if (!ioc->is_driver_loading) + return; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + if (channel == RAID_CHANNEL) { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else if (channel == PCIE_CHANNEL) { + pcie_device = device; + sas_address = pcie_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } + + if (!ioc->req_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.channel = channel; + } + } + + if (!ioc->req_alt_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.channel = channel; + } + } + + if (!ioc->current_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, + ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n", + __func__, (u64)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.channel = channel; + } + } +} + +static struct _sas_device * +__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + + ret = tgt_priv->sas_dev; + if (ret) + sas_device_get(ret); + + return ret; +} + +static struct _sas_device * +mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return ret; +} + +static struct _pcie_device * +__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _pcie_device *ret; + + assert_spin_locked(&ioc->pcie_device_lock); + + ret = tgt_priv->pcie_dev; + if (ret) + pcie_device_get(ret); + + return ret; +} + +/** + * mpt3sas_get_pdev_from_target - pcie device search + * @ioc: per adapter object + * @tgt_priv: starget private object + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. 
+ *
+ * This searches for the pcie_device from the target, then returns the
+ * pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+	struct MPT3SAS_TARGET *tgt_priv)
+{
+	struct _pcie_device *ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+	return ret;
+}
+
+
+/**
+ * __mpt3sas_get_sdev_by_rphy - sas device search
+ * @ioc: per adapter object
+ * @rphy: sas_rphy pointer
+ *
+ * Context: Calling function should already hold ioc->sas_device_lock;
+ * this variant only asserts that the lock is held.
+ *
+ * This searches for a sas_device matching the rphy object, then returns
+ * the sas_device object.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+	struct sas_rphy *rphy)
+{
+	struct _sas_device *sas_device;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+		if (sas_device->rphy != rphy)
+			continue;
+		sas_device_get(sas_device);
+		return sas_device;
+	}
+
+	sas_device = NULL;
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+		if (sas_device->rphy != rphy)
+			continue;
+		sas_device_get(sas_device);
+		return sas_device;
+	}
+
+	return NULL;
+}
+
+/**
+ * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
+ *			sas address from sas_device_list list
+ * @ioc: per adapter object
+ * @sas_address: device sas address
+ * @port: hba port entry
+ * Context: Calling function should already hold ioc->sas_device_lock.
+ *
+ * Search for the _sas_device object corresponding to the provided sas
+ * address; if available return the _sas_device object address, otherwise
+ * return NULL.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address, struct hba_port *port)
+{
+	struct _sas_device *sas_device;
+
+	if (!port)
+		return NULL;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+		if (sas_device->sas_address != sas_address)
+			continue;
+		if (sas_device->port != port)
+			continue;
+		sas_device_get(sas_device);
+		return sas_device;
+	}
+
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+		if (sas_device->sas_address != sas_address)
+			continue;
+		if (sas_device->port != port)
+			continue;
+		sas_device_get(sas_device);
+		return sas_device;
+	}
+
+	return NULL;
+}
+
+/**
+ * mpt3sas_get_sdev_by_addr - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ * Context: This function acquires ioc->sas_device_lock and releases it
+ * before returning.
+ *
+ * This searches for a sas_device based on sas_address & port number,
+ * then returns the sas_device object.
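+ *
+ * The returned object carries a reference taken via sas_device_get(),
+ * so a typical caller looks like (a sketch, assuming valid arguments):
+ *
+ *	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
+ *	if (sas_device) {
+ *		... use sas_device ...
+ *		sas_device_put(sas_device);
+ *	}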
+ */
+struct _sas_device *
+mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address, struct hba_port *port)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+	    sas_address, port);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+static struct _sas_device *
+__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+		if (sas_device->handle == handle)
+			goto found_device;
+
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+		if (sas_device->handle == handle)
+			goto found_device;
+
+	return NULL;
+
+found_device:
+	sas_device_get(sas_device);
+	return sas_device;
+}
+
+/**
+ * mpt3sas_get_sdev_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: This function acquires ioc->sas_device_lock and releases it
+ * before returning.
+ *
+ * This searches for a sas_device based on the handle, then returns the
+ * sas_device object.
+ */
+struct _sas_device *
+mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+/**
+ * _scsih_display_enclosure_chassis_info - display device location info
+ * @ioc: per adapter object
+ * @sas_device: per sas device object
+ * @sdev: scsi device struct
+ * @starget: scsi target struct
+ */
+static void
+_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device, struct scsi_device *sdev,
+	struct scsi_target *starget)
+{
+	if (sdev) {
+		if (sas_device->enclosure_handle != 0)
+			sdev_printk(KERN_INFO, sdev,
+			    "enclosure logical id (0x%016llx), slot(%d) \n",
+			    (unsigned long long)
+			    sas_device->enclosure_logical_id,
+			    sas_device->slot);
+		if (sas_device->connector_name[0] != '\0')
+			sdev_printk(KERN_INFO, sdev,
+			    "enclosure level(0x%04x), connector name( %s)\n",
+			    sas_device->enclosure_level,
+			    sas_device->connector_name);
+		if (sas_device->is_chassis_slot_valid)
+			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
+			    sas_device->chassis_slot);
+	} else if (starget) {
+		if (sas_device->enclosure_handle != 0)
+			starget_printk(KERN_INFO, starget,
+			    "enclosure logical id(0x%016llx), slot(%d) \n",
+			    (unsigned long long)
+			    sas_device->enclosure_logical_id,
+			    sas_device->slot);
+		if (sas_device->connector_name[0] != '\0')
+			starget_printk(KERN_INFO, starget,
+			    "enclosure level(0x%04x), connector name( %s)\n",
+			    sas_device->enclosure_level,
+			    sas_device->connector_name);
+		if (sas_device->is_chassis_slot_valid)
+			starget_printk(KERN_INFO, starget,
+			    "chassis slot(0x%04x)\n",
+			    sas_device->chassis_slot);
+	} else {
+		if (sas_device->enclosure_handle != 0)
+			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+				 (u64)sas_device->enclosure_logical_id,
+				 sas_device->slot);
+		if (sas_device->connector_name[0] != '\0')
+			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+				 sas_device->enclosure_level,
+				 sas_device->connector_name);
+		if (sas_device->is_chassis_slot_valid)
+			ioc_info(ioc, "chassis slot(0x%04x)\n",
+				 sas_device->chassis_slot);
+	}
+}
+
+/**
+ * _scsih_sas_device_remove
- remove sas_device from list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * If sas_device is on the list, remove it and decrement its reference count. + */ +static void +_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + if (!sas_device) + return; + ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", + sas_device->handle, (u64)sas_device->sas_address); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + + /* + * The lock serializes access to the list, but we still need to verify + * that nobody removed the entry while we were waiting on the lock. + */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_device_remove_by_handle - removing device object by handle + * @ioc: per adapter object + * @handle: device handle + */ +static void +_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + _scsih_remove_device(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * mpt3sas_device_remove_by_sas_address - removing device object by + * sas address & port number + * @ioc: per adapter object + * @sas_address: device sas_address + * @port: hba port entry + * + * Return nothing. + */ +void +mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port); + if (sas_device) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + _scsih_remove_device(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * _scsih_sas_device_add - insert sas_device to the list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * Adding new object to the ioc->sas_device_list. 
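+ *
+ * The list takes its own reference via sas_device_get(), so a caller
+ * that allocated @sas_device still owns, and must eventually drop, its
+ * initial reference (a sketch of the usual pattern):
+ *
+ *	_scsih_sas_device_add(ioc, sas_device);
+ *	sas_device_put(sas_device);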
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__, sas_device->handle,
+			    (u64)sas_device->sas_address));
+
+	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+	    NULL, NULL));
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
+	list_add_tail(&sas_device->list, &ioc->sas_device_list);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	if (ioc->hide_drives) {
+		clear_bit(sas_device->handle, ioc->pend_os_device_add);
+		return;
+	}
+
+	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+	     sas_device->sas_address_parent, sas_device->port)) {
+		_scsih_sas_device_remove(ioc, sas_device);
+	} else if (!sas_device->starget) {
+		/*
+		 * When async scanning is enabled, it's not possible to remove
+		 * devices while scanning is turned on due to an oops in
+		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+		 */
+		if (!ioc->is_driver_loading) {
+			mpt3sas_transport_port_remove(ioc,
+			    sas_device->sas_address,
+			    sas_device->sas_address_parent,
+			    sas_device->port);
+			_scsih_sas_device_remove(ioc, sas_device);
+		}
+	} else
+		clear_bit(sas_device->handle, ioc->pend_os_device_add);
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__, sas_device->handle,
+			    (u64)sas_device->sas_address));
+
+	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+	    NULL, NULL));
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
+	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+	_scsih_determine_boot_device(ioc, sas_device, 0);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+	struct _pcie_device *pcie_device;
+
+	assert_spin_locked(&ioc->pcie_device_lock);
+
+	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+		if (pcie_device->wwid == wwid)
+			goto found_device;
+
+	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+		if (pcie_device->wwid == wwid)
+			goto found_device;
+
+	return NULL;
+
+found_device:
+	pcie_device_get(pcie_device);
+	return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_wwid - pcie device search
+ * @ioc: per adapter object
+ * @wwid: wwid
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * it before returning the pcie_device object.
+ *
+ * This searches for pcie_device based on wwid, then return pcie_device object.
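+ *
+ * As with the sas_device lookups above, the caller owns a reference on
+ * the returned object and drops it with pcie_device_put() when done.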
+ */ +static struct _pcie_device * +mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + + +static struct _pcie_device * +__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + struct _pcie_device *pcie_device; + + assert_spin_locked(&ioc->pcie_device_lock); + + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) + if (pcie_device->id == id && pcie_device->channel == channel) + goto found_device; + + list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) + if (pcie_device->id == id && pcie_device->channel == channel) + goto found_device; + + return NULL; + +found_device: + pcie_device_get(pcie_device); + return pcie_device; +} + +static struct _pcie_device * +__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + + assert_spin_locked(&ioc->pcie_device_lock); + + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) + if (pcie_device->handle == handle) + goto found_device; + + list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) + if (pcie_device->handle == handle) + goto found_device; + + return NULL; + +found_device: + pcie_device_get(pcie_device); + return pcie_device; +} + + +/** + * mpt3sas_get_pdev_by_handle - pcie device search + * @ioc: per adapter object + * @handle: Firmware device handle + * + * Context: This function will acquire ioc->pcie_device_lock and will release + * before returning the pcie_device object. + * + * This searches for pcie_device based on handle, then return pcie_device + * object. + */ +struct _pcie_device * +mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + +/** + * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency. + * @ioc: per adapter object + * Context: This function will acquire ioc->pcie_device_lock + * + * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency + * which has reported maximum among all available NVMe drives. + * Minimum max_shutdown_latency will be six seconds. + */ +static void +_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (pcie_device->shutdown_latency) { + if (shutdown_latency < pcie_device->shutdown_latency) + shutdown_latency = + pcie_device->shutdown_latency; + } + } + ioc->max_shutdown_latency = shutdown_latency; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_pcie_device_remove - remove pcie_device from list. + * @ioc: per adapter object + * @pcie_device: the pcie_device object + * Context: This function will acquire ioc->pcie_device_lock. + * + * If pcie_device is on the list, remove it and decrement its reference count. 
+ */ +static void +_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + int was_on_pcie_device_list = 0; + u8 update_latency = 0; + + if (!pcie_device) + return; + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + pcie_device->handle, (u64)pcie_device->wwid); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + was_on_pcie_device_list = 1; + } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (was_on_pcie_device_list) { + kfree(pcie_device->serial_number); + pcie_device_put(pcie_device); + } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); +} + + +/** + * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle + * @ioc: per adapter object + * @handle: device handle + */ +static void +_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + int was_on_pcie_device_list = 0; + u8 update_latency = 0; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device) { + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + was_on_pcie_device_list = 1; + pcie_device_put(pcie_device); + } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (was_on_pcie_device_list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + pcie_device_put(pcie_device); + } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); +} + +/** + * _scsih_pcie_device_add - add pcie_device object + * @ioc: per adapter object + * @pcie_device: pcie_device object + * + * This is added to the pcie_device_list link list. 
+ */ +static void +_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", + __func__, pcie_device->enclosure_level, + pcie_device->connector_name)); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_list); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + clear_bit(pcie_device->handle, ioc->pend_os_device_add); + return; + } + if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { + _scsih_pcie_device_remove(ioc, pcie_device); + } else if (!pcie_device->starget) { + if (!ioc->is_driver_loading) { +/*TODO-- Need to find out whether this condition will occur or not*/ + clear_bit(pcie_device->handle, ioc->pend_os_device_add); + } + } else + clear_bit(pcie_device->handle, ioc->pend_os_device_add); +} + +/* + * _scsih_pcie_device_init_add - insert pcie_device to the init list. + * @ioc: per adapter object + * @pcie_device: the pcie_device object + * Context: This function will acquire ioc->pcie_device_lock. + * + * Adding new object at driver load time to the ioc->pcie_device_init_list. + */ +static void +_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", + __func__, pcie_device->enclosure_level, + pcie_device->connector_name)); + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); + if (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) + _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} +/** + * _scsih_raid_device_find_by_id - raid device search + * @ioc: per adapter object + * @id: sas device target id + * @channel: sas device channel + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on target id, then return raid_device + * object. 
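+ *
+ * A typical lookup under the documented lock (a sketch, as used by e.g.
+ * scsih_get_resync() below):
+ *
+ *	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ *	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ *	    sdev->channel);
+ *	if (raid_device)
+ *		handle = raid_device->handle;
+ *	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);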
+ */ +static struct _raid_device * +_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } + + out: + return r; +} + +/** + * mpt3sas_raid_device_find_by_handle - raid device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on handle, then return raid_device + * object. + */ +struct _raid_device * +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_find_by_wwid - raid device search + * @ioc: per adapter object + * @wwid: ? + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on wwid, then return raid_device + * object. + */ +static struct _raid_device * +_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_add - add raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + * This is added to the raid_device_list link list. + */ +static void +_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + raid_device->handle, (u64)raid_device->wwid)); + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_raid_device_remove - delete raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + */ +static void +_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * mpt3sas_scsih_expander_find_by_handle - expander device search + * @ioc: per adapter object + * @handle: expander handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for expander device based on handle, then returns the + * sas_node object. 
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_node *sas_expander, *r;
+
+	r = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->handle != handle)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
+ * @ioc: per adapter object
+ * @handle: enclosure handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for enclosure device based on handle, then returns the
+ * enclosure object.
+ */
+static struct _enclosure_node *
+mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _enclosure_node *enclosure_dev, *r;
+
+	r = NULL;
+	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
+		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
+			continue;
+		r = enclosure_dev;
+		goto out;
+	}
+out:
+	return r;
+}
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address & port number,
+ * then returns the sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address, struct hba_port *port)
+{
+	struct _sas_node *sas_expander, *r = NULL;
+
+	if (!port)
+		return r;
+
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->sas_address != sas_address)
+			continue;
+		if (sas_expander->port != port)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the expander (sas_node) object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_node *sas_expander)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Return: 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+	    ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+	    (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+	    (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * _scsih_is_nvme_pciescsi_device - determines if
+ * device is a pcie nvme/scsi device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Return: 1 if device is a pcie device of type nvme/scsi.
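+ *
+ * Typical use against PCIe device page 0 data (a sketch; the variable
+ * name pcie_device_pg0 is illustrative, the DeviceInfo field follows
+ * the MPI headers used elsewhere in this driver):
+ *
+ *	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ *	if (!_scsih_is_nvme_pciescsi_device(device_info))
+ *		return;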
+ */ +static int +_scsih_is_nvme_pciescsi_device(u32 device_info) +{ + if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_NVME) || + ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_SCSI)) + return 1; + else + return 0; +} + +/** + * _scsih_scsi_lookup_find_by_target - search for matching channel:id + * @ioc: per adapter object + * @id: target id + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; + smid <= ioc->shost->can_queue; smid++) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel) + return 1; + } + return 0; +} + +/** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + +/** + * mpt3sas_scsih_scsi_lookup_get - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Return: the smid stored scmd pointer. + * Then will dereference the stored scmd pointer. + */ +struct scsi_cmnd * +mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + struct scsiio_tracker *st; + Mpi25SCSIIORequest_t *mpi_request; + u16 tag = smid - 1; + + if (smid > 0 && + smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { + u32 unique_tag = + ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* + * If SCSI IO request is outstanding at driver level then + * DevHandle filed must be non-zero. If DevHandle is zero + * then it means that this smid is free at driver level, + * so return NULL. + */ + if (!mpi_request->DevHandle) + return scmd; + + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = scsi_cmd_priv(scmd); + if (st->cb_idx == 0xFF || st->smid == 0) + scmd = NULL; + } + } + return scmd; +} + +/** + * scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Return: queue depth. + */ +static int +scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + /* + * limit max device queue for SATA to 32 if enable_sdev_max_qd + * is disabled. 
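+ *
+ * (Background: SATA NCQ allows at most 32 outstanding commands per
+ * device, which is what MPT3SAS_SATA_QUEUE_DEPTH reflects; a deeper
+ * queue would only be clamped by the device anyway.)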
+ */ + if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc) + goto not_sata; + + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) + goto not_sata; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = MPT3SAS_SATA_QUEUE_DEPTH; + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + not_sata: + + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_change_queue_depth(sdev, qdepth); + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); + return sdev->queue_depth; +} + +/** + * mpt3sas_scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns nothing. + */ +void +mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + + scsih_change_queue_depth(sdev, qdepth); +} + +/** + * scsih_target_alloc - target add routine + * @starget: scsi target struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. + */ +static int +scsih_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), + GFP_KERNEL); + if (!sas_target_priv_data) + return -ENOMEM; + + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + + /* RAID volumes */ + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; + if (ioc->is_warpdrive) + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + + /* PCIe devices */ + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id, + starget->channel); + if (pcie_device) { + sas_target_priv_data->handle = pcie_device->handle; + sas_target_priv_data->sas_address = pcie_device->wwid; + sas_target_priv_data->port = NULL; + sas_target_priv_data->pcie_dev = pcie_device; + pcie_device->starget = starget; + pcie_device->id = starget->id; + pcie_device->channel = starget->channel; + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_PCIE_DEVICE; + if 
(pcie_device->fast_path) + sas_target_priv_data->flags |= + MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + return 0; + } + + /* sas/sata devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->port = sas_device->port; + sas_target_priv_data->sas_dev = sas_device; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= + MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return 0; +} + +/** + * scsih_target_destroy - target destroy routine + * @starget: scsi target struct + */ +static void +scsih_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, + sas_target_priv_data); + if (pcie_device && (pcie_device->starget == starget) && + (pcie_device->id == starget->id) && + (pcie_device->channel == starget->channel)) + pcie_device->starget = NULL; + + if (pcie_device) { + /* + * Corresponding get() is in _scsih_target_alloc() + */ + sas_target_priv_data->pcie_dev = NULL; + pcie_device_put(pcie_device); + pcie_device_put(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + goto out; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) && + (sas_device->id == starget->id) && + (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + + if (sas_device) { + /* + * Corresponding get() is in _scsih_target_alloc() + */ + sas_target_priv_data->sas_dev = NULL; + sas_device_put(sas_device); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +/** + * scsih_slave_alloc - device add routine + * @sdev: scsi device struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. 
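+ *
+ * Like the neighbouring scsih_* entry points, this is not called
+ * directly by the driver; the SCSI midlayer invokes it through the
+ * host template, roughly (a sketch; the exact template lives elsewhere
+ * in this file):
+ *
+ *	.slave_alloc = scsih_slave_alloc,
+ *	.slave_configure = scsih_slave_configure,
+ *	.slave_destroy = scsih_slave_destroy,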
+ */ +static int +scsih_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct _raid_device *raid_device; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), + GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, + starget->id, starget->channel); + if (raid_device) + raid_device->sdev = sdev; /* raid is single lun */ + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + if (starget->channel == PCIE_CHANNEL) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, + sas_target_priv_data->sas_address); + if (pcie_device && (pcie_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : pcie_device->starget set to starget @ %d\n", + __func__, __LINE__); + pcie_device->starget = starget; + } + + if (pcie_device) + pcie_device_put(pcie_device); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + + if (sas_device) + sas_device_put(sas_device); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return 0; +} + +/** + * scsih_slave_destroy - device destroy routine + * @sdev: scsi device struct + */ +static void +scsih_slave_destroy(struct scsi_device *sdev) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + unsigned long flags; + + if (!sdev->hostdata) + return; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, + sas_target_priv_data); + if (pcie_device && !sas_target_priv_data->num_luns) + pcie_device->starget = NULL; + + if (pcie_device) + pcie_device_put(pcie_device); + + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, + sas_target_priv_data); + if 
(sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + + if (sas_device) + sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/** + * _scsih_display_sata_capabilities - sata capabilities + * @ioc: per adapter object + * @handle: device handle + * @sdev: scsi device struct + */ +static void +_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 device_info; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " + "sw_preserve(%s)\n", + (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : + "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n"); +} + +/* + * raid transport support - + * Enabled for SLES11 and newer, in older kernels the driver will panic when + * unloading the driver followed by a load - I believe that the subroutine + * raid_class_release() is not cleaning up properly. + */ + +/** + * scsih_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object + */ +static int +scsih_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + + if (ioc->is_warpdrive) + return 0; + return (sdev->channel == RAID_CHANNEL) ? 1 : 0; +} + +static int +scsih_is_nvme(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == PCIE_CHANNEL) ? 
1 : 0; +} + +/** + * scsih_get_resync - get raid volume resync percent complete + * @dev: the device struct object + */ +static void +scsih_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + static struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + if (ioc->is_warpdrive) + goto out; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!handle) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; + + out: + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_resync(mpt2sas_raid_template, dev, percent_complete); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_resync(mpt3sas_raid_template, dev, percent_complete); + break; + } +} + +/** + * scsih_get_state - get raid volume level + * @dev: the device struct object + */ +static void +scsih_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + static struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) + handle = raid_device->handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!raid_device) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + + switch (vol_pg0.VolumeState) { + case MPI2_RAID_VOL_STATE_OPTIMAL: + case MPI2_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case MPI2_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case MPI2_RAID_VOL_STATE_FAILED: + case MPI2_RAID_VOL_STATE_MISSING: + state = RAID_STATE_OFFLINE; + break; + } + out: + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_state(mpt2sas_raid_template, dev, state); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_state(mpt3sas_raid_template, dev, state); + break; + } +} + +/** + * _scsih_set_level - set raid level + * @ioc: ? 
+ * @sdev: scsi device struct + * @volume_type: volume type + */ +static void +_scsih_set_level(struct MPT3SAS_ADAPTER *ioc, + struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + level = RAID_LEVEL_10; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_1E; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + raid_set_level(mpt2sas_raid_template, + &sdev->sdev_gendev, level); + break; + case MPI25_VERSION: + case MPI26_VERSION: + raid_set_level(mpt3sas_raid_template, + &sdev->sdev_gendev, level); + break; + } +} + + +/** + * _scsih_get_volume_capabilities - volume capabilities + * @ioc: per adapter object + * @raid_device: the raid_device object + * + * Return: 0 for success, else 1 + */ +static int +_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + raid_device->num_pds = num_pds; + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + kfree(vol_pg0); + return 1; + } + + raid_device->volume_type = vol_pg0->VolumeType; + + /* figure out what the underlying devices are by + * obtaining the device_info bits for the 1st device + */ + if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + + kfree(vol_pg0); + return 0; +} + +/** + * _scsih_enable_tlr - setting TLR flags + * @ioc: per adapter object + * @sdev: scsi device struct + * + * Enabling Transaction Layer Retries for tape devices when + * vpd page 0x90 is present + * + */ +static void +_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) +{ + + /* only for TAPE */ + if (sdev->type != TYPE_TAPE) + return; + + if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) + return; + + sas_enable_tlr(sdev); + sdev_printk(KERN_INFO, sdev, "TLR %s\n", + sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); + return; + +} + +/** + * scsih_slave_configure - device configure routine. + * @sdev: scsi device struct + * + * Return: 0 if ok. Any other return is assumed to be an error and + * the device is ignored. 
+ */ +static int +scsih_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + + /* raid volume handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + if (_scsih_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + /* + * WARPDRIVE: Initialize the required data for Direct IO + */ + mpt3sas_init_warpdrive_properties(ioc, raid_device); + + /* RAID Queue Depth Support + * IS volume = underlying qdepth of drive type, either + * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH + * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) + */ + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + MFG10_GF0_R10_DISPLAY) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + + if (!ioc->hide_ir_msg) + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx)," + " pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + + if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + MPT3SAS_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + MPT3SAS_RAID_MAX_SECTORS); + } + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + + /* raid transport support */ + if (!ioc->is_warpdrive) + _scsih_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + + /* non-raid handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { + if (mpt3sas_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, 
__func__)); + return 1; + } + if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, + volume_handle, &volume_wwid)) { + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + } + + /* PCIe handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, + sas_device_priv_data->sas_target->sas_address); + if (!pcie_device) { + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + qdepth = ioc->max_nvme_qd; + ds = "NVMe"; + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", + ds, handle, (unsigned long long)pcie_device->wwid, + pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "%s: enclosure logical id(0x%016llx), slot(%d)\n", + ds, + (unsigned long long)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "%s: enclosure level(0x%04x)," + "connector name( %s)\n", ds, + pcie_device->enclosure_level, + pcie_device->connector_name); + + if (pcie_device->nvme_mdts) + blk_queue_max_hw_sectors(sdev->request_queue, + pcie_device->nvme_mdts/512); + + pcie_device_put(pcie_device); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be + ** merged and can eliminate holes created during merging + ** operation. + **/ + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, + sdev->request_queue); + blk_queue_virt_boundary(sdev->request_queue, + ioc->page_size - 1); + return 0; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, + ioc_warn(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__)); + return 1; + } + + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = (sas_device->port_type > 1) ? 
+ ioc->max_wideport_qd : ioc->max_narrowport_qd; + ssp_target = 1; + if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; + } else { + qdepth = ioc->max_sata_qd; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, (unsigned long long)sas_device->device_name); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); + + sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!ssp_target) + _scsih_display_sata_capabilities(ioc, handle, sdev); + + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + + if (ssp_target) { + sas_read_port_mode_page(sdev); + _scsih_enable_tlr(ioc, sdev); + } + + return 0; +} + +/** + * scsih_bios_param - fetch head, sector, cylinder info for a disk + * @sdev: scsi device struct + * @bdev: pointer to block device context + * @capacity: device size (in 512 byte sectors) + * @params: three element array to place output: + * params[0] number of heads (max 255) + * params[1] number of sectors (max 63) + * params[2] number of cylinders + */ +static int +scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + + /* return result */ + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + + return 0; +} + +/** + * _scsih_response_code - translation of device response code + * @ioc: per adapter object + * @response_code: response code returned by the device + */ +static void +_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); +} + +/** + * _scsih_tm_done - tm completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + 
* Context: none.
+ *
+ * The callback handler when using mpt3sas_scsih_issue_tm().
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+  return 1;
+ if (ioc->tm_cmds.smid != smid)
+  return 1;
+ ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+  memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+  ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+  if (skip)
+   continue;
+  sas_device_priv_data = sdev->hostdata;
+  if (!sas_device_priv_data)
+   continue;
+  if (sas_device_priv_data->sas_target->handle == handle) {
+   sas_device_priv_data->sas_target->tm_busy = 1;
+   skip = 1;
+   ioc->ignore_loginfos = 1;
+  }
+ }
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+  if (skip)
+   continue;
+  sas_device_priv_data = sdev->hostdata;
+  if (!sas_device_priv_data)
+   continue;
+  if (sas_device_priv_data->sas_target->handle == handle) {
+   sas_device_priv_data->sas_target->tm_busy = 0;
+   skip = 1;
+   ioc->ignore_loginfos = 0;
+  }
+ }
+}
+
+/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Look at whether TM has aborted the timed-out SCSI command; if TM has
+ * aborted the IO then return SUCCESS, else return FAILED.
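+ *
+ * Note: an smid_task greater than shost->can_queue denotes a driver
+ * internal command (scsih_cmds or ctl_cmds), which is checked against
+ * its internal command state rather than the scsi lookup tables.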
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task)
+{
+
+ if (smid_task <= ioc->shost->can_queue) {
+  switch (type) {
+  case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+   if (!(_scsih_scsi_lookup_find_by_target(ioc,
+       id, channel)))
+    return SUCCESS;
+   break;
+  case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+  case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+   if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+       lun, channel)))
+    return SUCCESS;
+   break;
+  default:
+   return SUCCESS;
+  }
+ } else if (smid_task == ioc->scsih_cmds.smid) {
+  if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+      (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+   return SUCCESS;
+ } else if (smid_task == ioc->ctl_cmds.smid) {
+  if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+      (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+   return SUCCESS;
+ }
+
+ return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues it is possible that the interrupt for an aborted IO has not
+ * been received yet. So before returning a failure status, poll the
+ * reply descriptor pools for the reply of the timed-out SCSI command.
+ * Return FAILED if the reply for the timed-out IO is not received,
+ * otherwise return SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+ int rc;
+
+ rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+ if (rc == SUCCESS)
+  return rc;
+
+ ioc_info(ioc,
+     "Poll ReplyDescriptor queues for completion of"
+     " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+     smid_task, type, handle);
+
+ /*
+  * Due to interrupt latency issues, the driver may receive the
+  * interrupt for the TM first and then for the aborted SCSI IO
+  * command. So, poll all the ReplyDescriptor pools before returning
+  * the FAILED status to the SML.
+  */
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 1);
+ mpt3sas_base_unmask_interrupts(ioc);
+
+ return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
+
+/**
+ * mpt3sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
+ * @timeout: timeout in seconds
+ * @tr_method: Target Reset Method
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible for checking for outstanding commands.
+ *
+ * Return: SUCCESS or FAILED.
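+ *
+ * Illustrative call shape (values are hypothetical), as issued by the
+ * eh callbacks through mpt3sas_scsih_issue_locked_tm():
+ *
+ *   rc = mpt3sas_scsih_issue_locked_tm(ioc, handle, channel, id, lun,
+ *       MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30, 0);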
+ */ +int +mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task, + u8 timeout, u8 tr_method) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi25SCSIIORequest_t *request; + u16 smid = 0; + u32 ioc_state; + int rc; + u8 issue_reset = 0; + + lockdep_assert_held(&ioc->tm_cmds.mutex); + + if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { + ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__); + return FAILED; + } + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return FAILED; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + return FAILED; + } + + dtmprintk(ioc, + ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n", + handle, type, smid_task, timeout, tr_method)); + ioc->tm_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->MsgFlags = tr_method; + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + mpt3sas_scsih_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); + ioc->put_smid_hi_priority(ioc, smid, msix_task); + wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); + if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); + if (issue_reset) { + rc = mpt3sas_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto out; + } + } + + /* sync IRQs in case those were busy during flush. 
*/
+ mpt3sas_base_sync_reply_irqs(ioc, 0);
+
+ if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+  mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+  mpi_reply = ioc->tm_cmds.reply;
+  dtmprintk(ioc,
+     ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+       le16_to_cpu(mpi_reply->IOCStatus),
+       le32_to_cpu(mpi_reply->IOCLogInfo),
+       le32_to_cpu(mpi_reply->TerminationCount)));
+  if (ioc->logging_level & MPT_DEBUG_TM) {
+   _scsih_response_code(ioc, mpi_reply->ResponseCode);
+   if (mpi_reply->IOCStatus)
+    _debug_dump_mf(mpi_request,
+        sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+  }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+  rc = SUCCESS;
+  /*
+   * If the DevHandle field in smid_task's entry of the request
+   * pool doesn't match the device handle on which this task
+   * abort TM was received, then TM has successfully aborted the
+   * timed-out command, since smid_task's entry in the request
+   * pool is memset to zero once the timed-out command is
+   * returned to the SML. If the command was not aborted then
+   * smid_task's entry won't be cleared, it will still hold the
+   * DevHandle on which this task abort TM was received, and the
+   * driver will return the TM status as FAILED.
+   */
+  request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+  if (le16_to_cpu(request->DevHandle) != handle)
+   break;
+
+  ioc_info(ioc, "Task abort tm failed: handle(0x%04x), "
+      "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+      handle, timeout, tr_method, smid_task, msix_task);
+  rc = FAILED;
+  break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+  rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+      type, smid_task);
+  break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+  rc = SUCCESS;
+  break;
+ default:
+  rc = FAILED;
+  break;
+ }
+
+out:
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method)
+{
+ int ret;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+   smid_task, msix_task, timeout, tr_method);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return ret;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
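+ *
+ * Prints the SCSI command along with the identity of the device it was
+ * issued to: volume handle/wwid, NVMe wwid/port/enclosure info, or SAS
+ * address/phy/enclosure info.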
+ */ +static void +_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + scsi_print_command(scmd); + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + starget_printk(KERN_INFO, starget, + "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, priv_target->handle, + device_str, (unsigned long long)priv_target->sas_address); + + } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target); + if (pcie_device) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), wwid(0x%016llx), port(%d)\n", + pcie_device->handle, + (unsigned long long)pcie_device->wwid, + pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long) + pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + pcie_device_put(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), " + "volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy); + + _scsih_display_enclosure_chassis_info(NULL, sas_device, + NULL, starget); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +/** + * scsih_abort - eh threads main abort routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_abort(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + u16 handle; + int r; + + u8 timeout = 30; + struct _pcie_device *pcie_device = NULL; + sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" + "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* check for completed command */ + if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, "No reference found at " + "driver, assuming scmd(0x%p) might have completed\n", scmd); + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + + /* for hidden raid components and volumes this is not supported */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + mpt3sas_halt_firmware(ioc); + + handle = sas_device_priv_data->sas_target->handle; + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) + timeout = ioc->nvme_abort_timeout; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, st->msix_io, timeout, 0); + /* Command must be cleared after abort */ + if (r == SUCCESS && st->cb_idx != 0xFF) + r = FAILED; + out: + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (pcie_device) + pcie_device_put(pcie_device); + return r; +} + +/** + * scsih_dev_reset - eh threads main device reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_dev_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(0x%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, + tr_timeout, tr_method); + /* Check for busy commands after reset */ + if (r == SUCCESS && scsi_device_busy(scmd->device)) + r = FAILED; + out: + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); + + return r; +} + +/** + * scsih_target_reset - eh threads main target reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_target_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, + "attempting target reset! scmd(0x%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { + starget_printk(KERN_INFO, starget, + "target been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); + + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { + tr_timeout = pcie_device->reset_timeout; + tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; + } else + tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, + MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, + tr_timeout, tr_method); + /* Check for busy commands after reset */ + if (r == SUCCESS && atomic_read(&starget->target_busy)) + r = FAILED; + out: + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + if (pcie_device) + pcie_device_put(pcie_device); + return r; +} + + +/** + * scsih_host_reset - eh threads main host reset routine + * @scmd: pointer to scsi command object + * + * Return: SUCCESS if command aborted else FAILED + */ +static int +scsih_host_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + int r, retval; + + ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd); + scsi_print_command(scmd); + + if (ioc->is_driver_loading || ioc->remove_host) { + ioc_info(ioc, "Blocking the host reset\n"); + r = FAILED; + goto out; + } + + retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + r = (retval < 0) ? FAILED : SUCCESS; +out: + ioc_info(ioc, "host reset: %s scmd(0x%p)\n", + r == SUCCESS ? "SUCCESS" : "FAILED", scmd); + + return r; +} + +/** + * _scsih_fw_event_add - insert and queue up fw_event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * This adds the firmware event object into link list, then queues it up to + * be processed from user context. + */ +static void +_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, _firmware_event_work); + fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_fw_event_del_from_list - delete fw_event from the list + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * If the fw_event is on the fw_event_list, remove it and do a put. 
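+ *
+ * Note: _scsih_fw_event_add() takes two references, one for the list
+ * and one for the queued work; this routine drops only the list
+ * reference.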
+ */ +static void +_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work + *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + + + /** + * mpt3sas_send_trigger_data_event - send event for processing trigger data + * @ioc: per adapter object + * @event_data: trigger event data + */ +void +mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + struct fw_event_work *fw_event; + u16 sz; + + if (ioc->is_driver_loading) + return; + sz = sizeof(*event_data); + fw_event = alloc_fw_event_work(sz); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; + fw_event->ioc = ioc; + memcpy(fw_event->event_data, event_data, sizeof(*event_data)); + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_error_recovery_delete_devices - remove devices not responding + * @ioc: per adapter object + */ +static void +_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * mpt3sas_port_enable_complete - port enable completed (fake event) + * @ioc: per adapter object + */ +void +mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct fw_event_work, list); + list_del_init(&fw_event->list); + fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + + return fw_event; +} + +/** + * _scsih_fw_event_cleanup_queue - cleanup event queue + * @ioc: per adapter object + * + * Walk the firmware event queue, either killing timers, or waiting + * for outstanding events to complete + * + * Context: task, can sleep + */ +static void +_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || + !ioc->firmware_event_thread) + return; + /* + * Set current running event as ignore, so that + * current running event will exit quickly. + * As diag reset has occurred it is of no use + * to process remaining stale event data entries. + */ + if (ioc->shost_recovery && ioc->current_event) + ioc->current_event->ignore = 1; + + ioc->fw_events_cleanup = 1; + while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { + + /* + * Don't call cancel_work_sync() for current_event + * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + * otherwise we may observe deadlock if current + * hard reset issued as part of processing the current_event. 
+ *
+ * The original logic of cleaning the current_event was added to
+ * handle back-to-back host resets issued by the user, i.e. during
+ * back-to-back host resets the driver used to process the two
+ * instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES event back
+ * to back, and this made the driver unregister the devices from
+ * the SML.
+ */
+
+  if (fw_event == ioc->current_event &&
+      ioc->current_event->event !=
+      MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+   ioc->current_event = NULL;
+   continue;
+  }
+
+  /*
+   * Driver has to clear ioc->start_scan flag when
+   * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
+   * otherwise scsi_scan_host() API waits for the
+   * 5 minute timer to expire. If we exit from
+   * scsi_scan_host() early then we can issue the
+   * new port enable request as part of current diag reset.
+   */
+  if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+   ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+   ioc->start_scan = 0;
+  }
+
+  /*
+   * Wait on the fw_event to complete. If this returns 1, then
+   * the event was never executed, and we need a put for the
+   * reference the work had on the fw_event.
+   *
+   * If it did execute, we wait for it to finish, and the put will
+   * happen from _firmware_event_work()
+   */
+  if (cancel_work_sync(&fw_event->work))
+   fw_event_work_put(fw_event);
+
+ }
+ ioc->fw_events_cleanup = 0;
+}
+
+/**
+ * _scsih_internal_device_block - block the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is blocked without error; if not,
+ * print an error.
+ */
+static void
+_scsih_internal_device_block(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
+     sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+
+ r = scsi_internal_device_block_nowait(sdev);
+ if (r == -EINVAL)
+  sdev_printk(KERN_WARNING, sdev,
+      "device_block failed with return(%d) for handle(0x%04x)\n",
+      r, sas_device_priv_data->sas_target->handle);
+}
+
+/**
+ * _scsih_internal_device_unblock - unblock the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is unblocked without error; if not, retry
+ * by blocking and then unblocking.
+ */
+static void
+_scsih_internal_device_unblock(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
+     "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 0;
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (r == -EINVAL) {
+  /* The device has been set to SDEV_RUNNING by SD layer during
+   * device addition but the request queue is still stopped by
+   * our earlier block call.
We need to perform a block again + * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ + + sdev_printk(KERN_WARNING, sdev, + "device_unblock failed with return(%d) for handle(0x%04x) " + "performing a block followed by an unblock\n", + r, sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + r = scsi_internal_device_block_nowait(sdev); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_block " + "failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); + + sas_device_priv_data->block = 0; + r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_unblock" + " failed with return(%d) for handle(0x%04x)\n", + r, sas_device_priv_data->sas_target->handle); + } +} + +/** + * _scsih_ublock_io_all_device - unblock every device + * @ioc: per adapter object + * + * change the device state from block to running + */ +static void +_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (!sas_device_priv_data->block) + continue; + + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, + "device_running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + _scsih_internal_device_unblock(sdev, sas_device_priv_data); + } +} + + +/** + * _scsih_ublock_io_device - prepare device to be deleted + * @ioc: per adapter object + * @sas_address: sas address + * @port: hba port entry + * + * unblock then put device in offline state + */ +static void +_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address) + continue; + if (sas_device_priv_data->sas_target->port != port) + continue; + if (sas_device_priv_data->block) + _scsih_internal_device_unblock(sdev, + sas_device_priv_data); + } +} + +/** + * _scsih_block_io_all_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * + * During device pull we need to appropriately set the sdev state. + */ +static void +_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device_priv_data->ignore_delay_remove) { + sdev_printk(KERN_INFO, sdev, + "%s skip device_block for SES handle(0x%04x)\n", + __func__, sas_device_priv_data->sas_target->handle); + continue; + } + _scsih_internal_device_block(sdev, sas_device_priv_data); + } +} + +/** + * _scsih_block_io_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * @handle: device handle + * + * During device pull we need to appropriately set the sdev state. 
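+ *
+ * Devices whose sas_rphy addition is still pending (pend_sas_rphy_add)
+ * are deliberately skipped by this routine.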
+ */
+static void
+_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ struct _sas_device *sas_device;
+
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+
+ shost_for_each_device(sdev, ioc->shost) {
+  sas_device_priv_data = sdev->hostdata;
+  if (!sas_device_priv_data)
+   continue;
+  if (sas_device_priv_data->sas_target->handle != handle)
+   continue;
+  if (sas_device_priv_data->block)
+   continue;
+  if (sas_device && sas_device->pend_sas_rphy_add)
+   continue;
+  if (sas_device_priv_data->ignore_delay_remove) {
+   sdev_printk(KERN_INFO, sdev,
+       "%s skip device_block for SES handle(0x%04x)\n",
+       __func__, sas_device_priv_data->sas_target->handle);
+   continue;
+  }
+  _scsih_internal_device_block(sdev, sas_device_priv_data);
+ }
+
+ if (sas_device)
+  sas_device_put(sas_device);
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex - block IOs to an expander's children
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. This function is called when the expander
+ * is pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+  return;
+
+ list_for_each_entry(mpt3sas_port,
+    &sas_expander->sas_port_list, port_list) {
+  if (mpt3sas_port->remote_identify.device_type ==
+      SAS_END_DEVICE) {
+   spin_lock_irqsave(&ioc->sas_device_lock, flags);
+   sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+       mpt3sas_port->remote_identify.sas_address,
+       mpt3sas_port->hba_port);
+   if (sas_device) {
+    set_bit(sas_device->handle,
+        ioc->blocking_handles);
+    sas_device_put(sas_device);
+   }
+   spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+  }
+ }
+
+ list_for_each_entry(mpt3sas_port,
+    &sas_expander->sas_port_list, port_list) {
+
+  if (mpt3sas_port->remote_identify.device_type ==
+      SAS_EDGE_EXPANDER_DEVICE ||
+      mpt3sas_port->remote_identify.device_type ==
+      SAS_FANOUT_EXPANDER_DEVICE) {
+   expander_sibling =
+       mpt3sas_scsih_expander_find_by_sas_address(
+       ioc, mpt3sas_port->remote_identify.sas_address,
+       mpt3sas_port->hba_port);
+   _scsih_block_io_to_children_attached_to_ex(ioc,
+       expander_sibling);
+  }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly - block IOs to direct children
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * directly attached during device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+  handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+  if (!handle)
+   continue;
+  reason_code = event_data->PHY[i].PhyStatus &
+      MPI2_EVENT_SAS_TOPO_RC_MASK;
+  if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+   _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_block_io_to_pcie_children_attached_directly - block IOs to direct PCIe children
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * directly attached during device pull/reconnect.
+ */
+static void
+_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+  handle =
+      le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+  if (!handle)
+   continue;
+  reason_code = event_data->PortEntry[i].PortStatus;
+  if (reason_code ==
+      MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
+   _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code initiates the device removal handshake protocol
+ * with the controller firmware. This function will issue a target reset
+ * using the high priority request queue. It will send a sas iounit
+ * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
+ *
+ * This is designed to send multiple task management requests to the
+ * fifo at the same time. If the fifo is full, we will append the
+ * request and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+ u8 tr_method = 0;
+ struct hba_port *port = NULL;
+
+ if (ioc->pci_error_recovery) {
+  dewtprintk(ioc,
+     ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+       __func__, handle));
+  return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+  dewtprintk(ioc,
+     ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+       __func__, handle));
+  return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+  return;
+
+ clear_bit(handle, ioc->pend_os_device_add);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+     sas_device->starget->hostdata) {
+  sas_target_priv_data = sas_device->starget->hostdata;
+  sas_target_priv_data->deleted = 1;
+  sas_address = sas_device->sas_address;
+  port = sas_device->port;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device) {
+  spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+  pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+  if (pcie_device && pcie_device->starget &&
+      pcie_device->starget->hostdata) {
+   sas_target_priv_data = pcie_device->starget->hostdata;
+   sas_target_priv_data->deleted = 1;
+   sas_address = pcie_device->wwid;
+  }
+  spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+  if (pcie_device && (!ioc->tm_custom_handling) &&
+      (!(mpt3sas_scsih_is_pcie_scsi_device(
+      pcie_device->device_info))))
+   tr_method =
+       MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+  else
+   tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ }
+ if (sas_target_priv_data) {
+  dewtprintk(ioc,
+     ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+       handle, (u64)sas_address));
+  if (sas_device) {
+   if (sas_device->enclosure_handle != 0)
+    dewtprintk(ioc,
+       ioc_info(ioc, "setting delete flag: enclosure logical id(0x%016llx), slot(%d)\n",
+         (u64)sas_device->enclosure_logical_id,
+         sas_device->slot));
+   if (sas_device->connector_name[0] != '\0')
+    dewtprintk(ioc,
+       ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+         sas_device->enclosure_level,
+         sas_device->connector_name));
+  } else if (pcie_device) {
+   if (pcie_device->enclosure_handle != 0)
+    dewtprintk(ioc,
+       ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+         (u64)pcie_device->enclosure_logical_id,
+         pcie_device->slot));
+   if (pcie_device->connector_name[0] != '\0')
+    dewtprintk(ioc,
+       ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+         pcie_device->enclosure_level,
+         pcie_device->connector_name));
+  }
+  _scsih_ublock_io_device(ioc, sas_address, port);
+  sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
+  delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+  if (!delayed_tr)
+   goto out;
+  INIT_LIST_HEAD(&delayed_tr->list);
+  delayed_tr->handle = handle;
+  list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+  dewtprintk(ioc,
+     ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+       handle));
+  goto out;
+ }
+
+ dewtprintk(ioc,
+    ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+      handle, smid, ioc->tm_tr_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpi_request->MsgFlags = tr_method;
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+
+out:
+ if (sas_device)
+  sas_device_put(sas_device);
+ if (pcie_device)
+  pcie_device_put(pcie_device);
+}
+
+/**
+ * _scsih_tm_tr_complete - target reset completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE).
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
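+ *
+ * If no free smid is available for the follow-up sas iounit control
+ * request, the request is parked on delayed_sc_list and issued from a
+ * later internal command completion.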
+ */ +static u8 +_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + Mpi2SasIoUnitControlRequest_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + struct _sc_list *delayed_sc; + + if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return 1; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host is not operational\n", + __func__)); + return 1; + } + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, + ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + dewtprintk(ioc, + ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); + if (!delayed_sc) + return _scsih_check_for_pending_tm(ioc, smid); + INIT_LIST_HEAD(&delayed_sc->list); + delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); + list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n", + handle)); + return _scsih_check_for_pending_tm(ioc, smid); + } + + dewtprintk(ioc, + ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = mpi_request_tm->DevHandle; + ioc->put_smid_default(ioc, smid_sas_ctrl); + + return _scsih_check_for_pending_tm(ioc, smid); +} + +/** _scsih_allow_scmd_to_device - check whether scmd needs to + * issue to IOC or not. + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * + * Returns true if scmd can be issued to IOC otherwise returns false. + */ +inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + + if (ioc->pci_error_recovery) + return false; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { + if (ioc->remove_host) + return false; + + return true; + } + + if (ioc->remove_host) { + + switch (scmd->cmnd[0]) { + case SYNCHRONIZE_CACHE: + case START_STOP: + return true; + default: + return false; + } + } + + return true; +} + +/** + * _scsih_sas_control_complete - completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. 
+ *
+ * This is the sas iounit control completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+     mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (likely(mpi_reply)) {
+  dewtprintk(ioc,
+     ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+       le16_to_cpu(mpi_reply->DevHandle), smid,
+       le16_to_cpu(mpi_reply->IOCStatus),
+       le32_to_cpu(mpi_reply->IOCLogInfo)));
+  if (le16_to_cpu(mpi_reply->IOCStatus) ==
+      MPI2_IOCSTATUS_SUCCESS) {
+   clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+       ioc->device_remove_in_progress);
+  }
+ } else {
+  ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+      __FILE__, __LINE__, __func__);
+ }
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send multiple task management requests to the
+ * fifo at the same time. If the fifo is full, we will append the
+ * request and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->pci_error_recovery) {
+  dewtprintk(ioc,
+     ioc_info(ioc, "%s: host reset in progress!\n",
+       __func__));
+  return;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+  delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+  if (!delayed_tr)
+   return;
+  INIT_LIST_HEAD(&delayed_tr->list);
+  delayed_tr->handle = handle;
+  list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+  dewtprintk(ioc,
+     ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+       handle));
+  return;
+ }
+
+ dewtprintk(ioc,
+    ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+      handle, smid, ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ ioc->put_smid_hi_priority(ioc, smid, 0);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */ +static u8 +_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host reset in progress!\n", + __func__)); + return 1; + } + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, + ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + handle, le16_to_cpu(mpi_reply->DevHandle), + smid)); + return 0; + } + + dewtprintk(ioc, + ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + return _scsih_check_for_pending_tm(ioc, smid); +} + +/** + * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages + * @ioc: per adapter object + * @smid: system request message index + * @event: Event ID + * @event_context: used to track events uniquely + * + * Context - processed in interrupt context. + */ +static void +_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, + U32 event_context) +{ + Mpi2EventAckRequest_t *ack_request; + int i = smid - ioc->internal_smid; + unsigned long flags; + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, + ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", + le16_to_cpu(event), smid, ioc->base_cb_idx)); + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = event; + ack_request->EventContext = event_context; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); +} + +/** + * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed + * sas_io_unit_ctrl messages + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Context - processed in interrupt context. 
+ */ +static void +_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 handle) +{ + Mpi2SasIoUnitControlRequest_t *mpi_request; + u32 ioc_state; + int i = smid - ioc->internal_smid; + unsigned long flags; + + if (ioc->remove_host) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host has been removed\n", + __func__)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host in pci error recovery\n", + __func__)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, + ioc_info(ioc, "%s: host is not operational\n", + __func__)); + return; + } + + /* Without releasing the smid just update the + * call back index and reuse the same smid for + * processing this delayed request + */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + dewtprintk(ioc, + ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + handle, smid, ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = cpu_to_le16(handle); + ioc->put_smid_default(ioc, smid); +} + +/** + * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages + * @ioc: per adapter object + * @smid: system request message index + * + * Context: Executed in interrupt context + * + * This will check the delayed internal messages list, and process the + * next request. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _sc_list *delayed_sc; + struct _event_ack_list *delayed_event_ack; + + if (!list_empty(&ioc->delayed_event_ack_list)) { + delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, + struct _event_ack_list, list); + _scsih_issue_delayed_event_ack(ioc, smid, + delayed_event_ack->Event, delayed_event_ack->EventContext); + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + return 0; + } + + if (!list_empty(&ioc->delayed_sc_list)) { + delayed_sc = list_entry(ioc->delayed_sc_list.next, + struct _sc_list, list); + _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, + delayed_sc->handle); + list_del(&delayed_sc->list); + kfree(delayed_sc); + return 0; + } + return 1; +} + +/** + * _scsih_check_for_pending_tm - check for pending task management + * @ioc: per adapter object + * @smid: system request message index + * + * This will check the delayed target reset list, and feed the + * next request. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function.
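+ * + * Illustrative example: if a target reset for a device handle was queued + * on ioc->delayed_tr_list because no high-priority smid was available, + * the current smid is freed here and the delayed reset is re-sent via + * _scsih_tm_tr_send(); 0 is then returned so the caller does not free + * the frame again.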
+ */ +static u8 +_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _tr_list *delayed_tr; + + if (!list_empty(&ioc->delayed_tr_volume_list)) { + delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + if (!list_empty(&ioc->delayed_tr_list)) { + delayed_tr = list_entry(ioc->delayed_tr_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + return 1; +} + +/** + * _scsih_check_topo_delete_events - sanity check on topo events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This routine was added to better handle cable breaker events. + * + * This handles the case where the driver receives multiple expander + * add and delete events in a single shot. When there is a delete event + * the routine will void any pending add events waiting in the event queue. + */ +static void +_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi2EventDataSasTopologyChangeList_t *local_event_data; + u16 expander_handle; + struct _sas_node *sas_expander; + unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0 ; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + _scsih_block_io_to_children_attached_directly(ioc, event_data); + return; + } + if (event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + /* put expander attached devices into blocking state */ + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + expander_handle); + _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + _scsih_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) + _scsih_block_io_to_children_attached_directly(ioc, event_data); + + if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) + fw_event->event_data; + if (local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, + ioc_info(ioc, "setting ignoring flag\n")); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * 
_scsih_check_pcie_topo_remove_events - sanity check on topo + * events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This handles the case where the driver receives multiple switch + * or device add and delete events in a single shot. When there + * is a delete event the routine will void any pending add + * events waiting in the event queue. + */ +static void +_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, + Mpi26EventDataPCIeTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi26EventDataPCIeTopologyChangeList_t *local_event_data; + unsigned long flags; + int i, reason_code; + u16 handle, switch_handle; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = + le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PortEntry[i].PortStatus; + if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + switch_handle = le16_to_cpu(event_data->SwitchDevHandle); + if (!switch_handle) { + _scsih_block_io_to_pcie_children_attached_directly( + ioc, event_data); + return; + } + /* TODO We are not supporting cascaded PCIe Switch removal yet*/ + if ((event_data->SwitchStatus + == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || + (event_data->SwitchStatus == + MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) + _scsih_block_io_to_pcie_children_attached_directly( + ioc, event_data); + + if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = + (Mpi26EventDataPCIeTopologyChangeList_t *) + fw_event->event_data; + if (local_event_data->SwitchStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->SwitchStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->SwitchDevHandle) == + switch_handle) { + dewtprintk(ioc, + ioc_info(ioc, "setting ignoring flag for switch event\n")); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_set_volume_delete_flag - setting volume delete flag + * @ioc: per adapter object + * @handle: device handle + * + * This returns nothing. + */ +static void +_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, + ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", + handle, (u64)raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_set_volume_handle_for_tr - set handle for target reset to volume + * @handle: input handle + * @a: handle for volume a + * @b: handle for volume b + * + * IR firmware only supports two raid volumes. The purpose of this + * routine is to store the given non-zero input handle in either a or b, + * provided it has not already been recorded in a or b.
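+ * + * Usage sketch (hypothetical handles): with a = 0 and b = 0, a first call + * with handle 0x0143 sets a = 0x0143, a second call with 0x0144 sets + * b = 0x0144, and any further call, repeated or with a third distinct + * handle, leaves both unchanged.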
+ */ +static void +_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +/** + * _scsih_check_ir_config_unhide_events - check for UNHIDE events + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This routine will send target reset to volume, followed by target + * resets to the PDs. This is called when a PD has been removed, or a + * volume has been deleted or removed. When the target reset is sent + * to volume, the PD target resets need to be queued to start upon + * completion of the volume target reset. + */ +static void +_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u16 handle, volume_handle, a, b; + struct _tr_list *delayed_tr; + + a = 0; + b = 0; + + if (ioc->is_warpdrive) + return; + + /* Volume Resets for Deleted or Removed */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_delete_flag(ioc, volume_handle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + /* Volume Resets for UNHIDE events */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + if (a) + _scsih_tm_tr_volume_send(ioc, a); + if (b) + _scsih_tm_tr_volume_send(ioc, b); + + /* PD target resets */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + _scsih_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, + ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", + handle)); + } else + _scsih_tm_tr_send(ioc, handle); + } +} + + +/** + * _scsih_check_volume_delete_events - set delete flag for volumes + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This will handle the case when the cable connected to an entire volume + * is pulled. We will take care of setting the deleted flag so normal IO + * will not be sent.
+ */ +static void +_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrVolume_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == MPI2_RAID_VOL_STATE_MISSING || state == + MPI2_RAID_VOL_STATE_FAILED) + _scsih_set_volume_delete_flag(ioc, + le16_to_cpu(event_data->VolDevHandle)); +} + +/** + * _scsih_temp_threshold_events - display temperature threshold exceeded events + * @ioc: per adapter object + * @event_data: the temp threshold event data + * Context: interrupt time. + */ +static void +_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataTemperature_t *event_data) +{ + u32 doorbell; + if (ioc->temp_sensors_count >= event_data->SensorNum) { + ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", + le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", + le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ", + le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ", + le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ", + event_data->SensorNum); + ioc_err(ioc, "Current Temp In Celsius: %d\n", + event_data->CurrentTemperature); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } + } + } +} + +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) +{ + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; +} + +/** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object + * + * The flushing out of all pending scmd commands following host reset, + * where all IO is dropped to the floor. + */ +static void +_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) +{ + struct scsi_cmnd *scmd; + struct scsiio_tracker *st; + u16 smid; + int count = 0; + + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + count++; + _scsih_set_satl_pending(scmd, false); + st = scsi_cmd_priv(scmd); + mpt3sas_base_clear_st(ioc, st); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery || ioc->remove_host) + scmd->result = DID_NO_CONNECT << 16; + else + scmd->result = DID_RESET << 16; + scsi_done(scmd); + } + dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); +} + +/** + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO request message frame + * + * Supporting protection 1 and 3. 
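+ * + * Example (illustrative): for SCSI_PROT_WRITE_INSERT with + * SCSI_PROT_GUARD_CHECK, SCSI_PROT_REF_CHECK and SCSI_PROT_REF_INCREMENT + * set in scmd->prot_flags, eedp_flags becomes INSERT_OP | CHECK_GUARD | + * CHECK_REFTAG | INC_PRI_REFTAG and CDB.EEDP32.PrimaryReferenceTag is + * seeded from scsi_prot_ref_tag(scmd).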
+ */ +static void +_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi25SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + Mpi25SCSIIORequest_t *mpi_request_3v = + (Mpi25SCSIIORequest_t *)mpi_request; + + switch (scsi_get_prot_op(scmd)) { + case SCSI_PROT_READ_STRIP: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + break; + case SCSI_PROT_WRITE_INSERT: + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + break; + default: + return; + } + + if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + if (scmd->prot_flags & SCSI_PROT_REF_CHECK) + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG; + + if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) { + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG; + + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_prot_ref_tag(scmd)); + } + + mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd)); + + if (ioc->is_gen35_ioc) + eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; + mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq); + set_host_byte(scmd, DID_ABORT); +} + +/** + * scsih_qcmd - main scsi request entry point + * @shost: SCSI host pointer + * @scmd: pointer to scsi command object + * + * The callback index is set inside `ioc->scsi_io_cb_idx`. + * + * Return: 0 on success. 
If there's a failure, return either: + * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or + * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full + */ +static int +scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _raid_device *raid_device; + struct request *rq = scsi_cmd_to_rq(scmd); + int class; + Mpi25SCSIIORequest_t *mpi_request; + struct _pcie_device *pcie_device = NULL; + u32 mpi_control; + u16 smid; + u16 handle; + + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ + handle = sas_target_priv_data->handle; + + /* + * Avoid error handling escalation when device is disconnected + */ + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) { + if (scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); + scsi_done(scmd); + return 0; + } + } + + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } + + + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { + /* host recovery or link resets sent via IOCTLs */ + return SCSI_MLQUEUE_HOST_BUSY; + } else if (sas_target_priv_data->deleted) { + /* device has been deleted */ + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + return 0; + } else if (sas_target_priv_data->tm_busy || + sas_device_priv_data->block) { + /* device busy with task management */ + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + /* + * Bug workaround for firmware SATL handling. The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) + return SCSI_MLQUEUE_DEVICE_BUSY; + } while (_scsih_set_satl_pending(scmd, true)); + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; + else + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; + + /* set tags */ + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + /* NCQ Prio supported, make sure control indicated high priority */ + if (sas_device_priv_data->ncq_prio_enable) { + class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); + if (class == IOPRIO_CLASS_RT) + mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; + } + /* Make sure Device is not raid volume. + * We do not expose raid functionality to upper layer for warpdrive.
+ */ + if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) + && !scsih_is_nvme(&scmd->device->sdev_gendev)) + && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) + mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + _scsih_set_satl_pending(scmd, false); + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, ioc->request_sz); + _scsih_setup_eedp(ioc, scmd, mpi_request); + + if (scmd->cmd_len == 32) + mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + + if (mpi_request->DataLength) { + pcie_device = sas_target_priv_data->pcie_dev; + if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { + mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + + raid_device = sas_target_priv_data->raid_device; + if (raid_device && raid_device->direct_io_enabled) + mpt3sas_setup_direct_io(ioc, scmd, + raid_device, mpi_request); + + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | + MPI25_SCSIIO_IOFLAGS_FAST_PATH); + ioc->put_smid_fast_path(ioc, smid, handle); + } else + ioc->put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->DevHandle)); + } else + ioc->put_smid_default(ioc, smid); + return 0; + + out: + return SCSI_MLQUEUE_HOST_BUSY; +} + +/** + * _scsih_normalize_sense - normalize descriptor and fixed format sense data + * @sense_buffer: sense data returned by target + * @data: normalized skey/asc/ascq + */ +static void +_scsih_normalize_sense(char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + /* descriptor format */ + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + /* fixed format */ + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +/** + * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_reply: reply mf payload returned from firmware + * @smid: system request message index
+ * + * scsi_status - SCSI Status code returned from target device + * scsi_state - state info associated with SCSI_IO determined by ioc + * ioc_status - ioc supplied status info + */ +static void +_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIOReply_t *mpi_reply, u16 smid) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + u8 scsi_status = mpi_reply->SCSIStatus; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct _sas_device *sas_device = NULL; + struct _pcie_device *pcie_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + if (log_info == 0x31170000) + return; + + switch (ioc_status) { + case MPI2_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + + switch (scsi_status) { + case MPI2_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case MPI2_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case MPI2_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case MPI2_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case MPI2_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case 
MPI2_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case MPI2_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case MPI2_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + + scsi_print_command(scmd); + + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + ioc_warn(ioc, "\t%s wwid(0x%016llx)\n", + device_str, (u64)priv_target->sas_address); + } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { + pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target); + if (pcie_device) { + ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n", + (u64)pcie_device->wwid, pcie_device->port_num); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0]) + ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + pcie_device_put(pcie_device); + } + } else { + sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n", + (u64)sas_device->sas_address, sas_device->phy); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL); + + sas_device_put(sas_device); + } + } + + ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + le16_to_cpu(mpi_reply->DevHandle), + desc_ioc_state, ioc_status, smid); + ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n", + scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); + ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + _scsih_normalize_sense(scmd->sense_buffer, &data); + ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + data.skey, data.asc, data.ascq, + le32_to_cpu(mpi_reply->SenseCount)); + } + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *)&response_info; + _scsih_response_code(ioc, response_bytes[0]); + } +} + +/** + * _scsih_turn_on_pfa_led - illuminate PFA LED + * @ioc: per adapter object + * @handle: device handle + * Context: process + */ +static void +_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + struct _sas_device *sas_device; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = 
MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, + ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + sas_device_put(sas_device); +} + +/** + * _scsih_turn_off_pfa_led - turn off Fault LED + * @ioc: per adapter object + * @sas_device: sas device whose PFA LED has to be turned off + * Context: process + */ +static void +_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, + ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_send_event_to_turn_on_pfa_led - fire delayed event + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + */ +static void +_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_smart_predicted_fault - process smart errors + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt.
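+ * + * Summary of the flow below: look up the sas device for @handle, skip + * RAID components and volumes, fire the delayed PFA LED event on IBM + * subsystems, then synthesize an MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE + * (SMART data, ASC 0x5D) entry for the event log.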
+ */ +static void +_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + Mpi2EventNotificationReply_t *event_reply; + Mpi2EventDataSasDeviceStatusChange_t *event_data; + struct _sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + /* only handle non-raid devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || + ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) + goto out_unlock; + + _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); + + /* insert into event log */ + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(Mpi2EventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_ATOMIC); + if (!event_reply) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz/4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); + event_data = (Mpi2EventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + mpt3sas_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +/** + * _scsih_io_done - scsi request callback + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when using _scsih_qcmd. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +static u8 +_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + Mpi25SCSIIORequest_t *mpi_request; + Mpi2SCSIIOReply_t *mpi_reply; + struct scsi_cmnd *scmd; + struct scsiio_tracker *st; + u16 ioc_status; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (scmd == NULL) + return 1; + + _scsih_set_satl_pending(scmd, false); + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + /* + * WARPDRIVE: If direct_io is set then it is directIO, + * the failed direct I/O should be redirected to volume + */ + st = scsi_cmd_priv(scmd); + if (st->direct_io && + ((ioc_status & MPI2_IOCSTATUS_MASK) + != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { + st->direct_io = 0; + st->scmd = scmd; + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + ioc->put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + /* turning off TLR */ + scsi_state = mpi_reply->SCSIState; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = + le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((!ioc->is_warpdrive && + !scsih_is_raid(&scmd->device->sdev_gendev) && + !scsih_is_nvme(&scmd->device->sdev_gendev)) + && sas_is_tlr_enabled(scmd->device) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { + sas_disable_tlr(scmd->device); + sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); + } + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + scsi_status = mpi_reply->SCSIStatus; + + if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && + (scsi_status == MPI2_SCSI_STATUS_BUSY || + scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || + scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = MPI2_IOCSTATUS_SUCCESS; + } + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + _scsih_normalize_sense(scmd->sense_buffer, &data); + /* failure prediction threshold exceeded */ + if (data.asc == 0x5D) + _scsih_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle)); + mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); + + if ((ioc->logging_level & MPT_DEBUG_REPLY) && + ((scmd->sense_buffer[2] == UNIT_ATTENTION) || + (scmd->sense_buffer[2] == MEDIUM_ERROR) || + (scmd->sense_buffer[2] == HARDWARE_ERROR))) + _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); + } + switch (ioc_status) { + case MPI2_IOCSTATUS_BUSY: + case 
MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + + if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) + break; + + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, + 0x20, 0); + } + break; + + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + _scsih_eedp_error_handling(scmd, ioc_status); + break; + + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_INVALID_FUNCTION: + case MPI2_IOCSTATUS_INVALID_SGL: + case MPI2_IOCSTATUS_INTERNAL_ERROR: + case MPI2_IOCSTATUS_INVALID_FIELD: + case MPI2_IOCSTATUS_INVALID_STATE: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + + } + + if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY)) + _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid); + + out: + + scsi_dma_unmap(scmd); + mpt3sas_base_free_smid(ioc, smid); + scsi_done(scmd); + return 0; +} + +/** + * _scsih_update_vphys_after_reset - update the Port's + * vphys_list after reset + * @ioc: per adapter object + * + * Returns nothing. 
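+ * + * High-level flow (summary): mark every virtual_phy dirty, re-read + * SASIOUnitPage0, and for each phy attached to a vSES device re-bind the + * matching dirty virtual_phy to the hba_port that now owns its Port ID, + * clearing the dirty flags along the way.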
+ */ +static void +_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_id; + Mpi2SasPhyPage0_t phy_pg0; + struct hba_port *port, *port_next, *mport; + struct virtual_phy *vphy, *vphy_next; + struct _sas_device *sas_device; + + /* + * Mark all the vphys objects as dirty. + */ + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY; + } + } + + /* + * Read SASIOUnitPage0 to get each HBA Phy's data. + */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + /* + * Loop over each HBA Phy. + */ + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + /* + * Check whether Phy's Negotiated Link Rate is > 1.5G or not. + */ + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + /* + * Check whether the Phy is connected to a SEP device or not. + * If it is a SEP device then read the Phy's SASPHYPage0 data to + * determine whether the Phy is a virtual Phy or not. If it is a + * virtual phy then it is confirmed that the attached remote + * device is an HBA's vSES device. + */ + if (!(le32_to_cpu( + sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP)) + continue; + + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + if (!(le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + /* + * Get the vSES device's SAS Address. + */ + attached_handle = le16_to_cpu( + sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (_scsih_get_sas_address(ioc, attached_handle, + &attached_sas_addr) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + found = 0; + port = port_next = NULL; + /* + * Loop over each virtual_phy object from + * each port's vphys_list. + */ + list_for_each_entry_safe(port, + port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + /* + * Continue with next virtual_phy object + * if the object is not marked as dirty. + */ + if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY)) + continue; + + /* + * Continue with next virtual_phy object + * if the object's SAS Address is not equal + * to the current Phy's vSES device SAS Address. + */ + if (vphy->sas_address != attached_sas_addr) + continue; + /* + * Enable current Phy number bit in object's + * phy_mask field. + */ + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + /* + * Get hba_port object from hba_port table + * corresponding to current phy's Port ID. + * If there is no hba_port object corresponding + * to Phy's Port ID then create a new hba_port + * object & add to hba_port table. 
+ */ + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = mpt3sas_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = kzalloc( + sizeof(struct hba_port), GFP_KERNEL); + if (!mport) + break; + mport->port_id = port_id; + ioc_info(ioc, + "%s: hba_port entry: %p, port: %d is added to hba_port list\n", + __func__, mport, mport->port_id); + list_add_tail(&mport->list, + &ioc->port_table_list); + } + /* + * If mport & port pointers are not pointing to + * same hba_port object then it means that vSES + * device's Port ID got changed after reset and + * hence move current virtual_phy object from + * port's vphys_list to mport's vphys_list. + */ + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD( + &mport->vphys_list); + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, + &mport->vphys_list); + sas_device = mpt3sas_get_sdev_by_addr( + ioc, attached_sas_addr, port); + if (sas_device) + sas_device->port = mport; + } + /* + * Earlier, while updating the hba_port table, + * it was determined that there is no other + * direct attached device with mport's Port ID; + * hence mport was marked as dirty. Only the vSES + * device has this Port ID, so unmark the mport + * as dirty. + */ + if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) { + mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= + ~HBA_PORT_FLAG_DIRTY_PORT; + } + /* + * Unmark current virtual_phy object as dirty. + */ + vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_get_port_table_after_reset - Construct temporary port table + * @ioc: per adapter object + * @port_table: address where port table needs to be constructed + * + * return number of HBA port entries available after reset.
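+ * + * Result sketch (hypothetical): on an 8-phy HBA with a wide port on phys + * 0-3 (port id 0) and another on phys 4-7 (port id 1), the table would + * hold two entries, e.g. { port_id 0, phy_mask 0x0f } and { port_id 1, + * phy_mask 0xf0 }, each with its attached SAS address.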
+ */ +static int +_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_count = 0, port_id; + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return port_count; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + MPI2_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (_scsih_get_sas_address( + ioc, attached_handle, &attached_sas_addr) != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if (port_table[j].port_id == port_id && + port_table[j].sas_address == attached_sas_addr) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + + if (found) + continue; + + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; + +/** + * _scsih_look_and_get_matched_port_entry - Get matched hba port entry + * from HBA port table + * @ioc: per adapter object + * @port_entry: hba port entry from temporary port table which needs to be + * searched for matched entry in the HBA port table + * @matched_port_entry: save matched hba port entry here + * @count: count of matched entries + * + * return type of matched entry found. 
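+ * + * Matching precedence (as implemented): an exact sas_address + phy_mask + * match wins immediately; otherwise address + sub-phymask + port id is + * preferred over address + sub-phymask, which is preferred over an + * address-only match. For address-only matches, @count reports how many + * dirty entries share the SAS address so callers can detect ambiguity.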
+ */ +static enum hba_port_matched_codes +_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_entry, + struct hba_port **matched_port_entry, int *count) +{ + struct hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; + int lcount = 0; + *matched_port_entry = NULL; + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT)) + continue; + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask == port_entry->phy_mask)) { + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; + break; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask) + && (port_table_entry->port_id == port_entry->port_id)) { + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; + continue; + } + + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask)) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; + continue; + } + + if (port_table_entry->sas_address == port_entry->sas_address) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; + lcount++; + } + } + + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) + *count = lcount; + return matched_code; +} + +/** + * _scsih_del_phy_part_of_anther_port - remove phy if it + * is a part of another port + *@ioc: per adapter object + *@port_table: port table after reset + *@index: hba port entry index + *@port_count: number of ports available after host reset + *@offset: HBA phy bit offset + * + */ +static void +_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct _sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) { + if (i == index) + continue; + + if (port_table[i].phy_mask & (1 << offset)) { + mpt3sas_transport_del_phy_from_an_existing_port( + ioc, sas_node, &sas_node->phy[offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +/** + * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from + * right port + *@ioc: per adapter object + *@hba_port_entry: hba port table entry + *@port_table: temporary port table + *@index: hba port entry index + *@port_count: number of ports available after host reset + * + */ +static void +_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct hba_port *hba_port_entry, struct hba_port *port_table, + int index, int port_count) +{ + u32 phy_mask, offset = 0; + struct _sas_node *sas_node = &ioc->sas_hba; + + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + _scsih_del_phy_part_of_anther_port( + ioc, port_table, index, port_count, + offset); + continue; + } + if (sas_node->phy[offset].phy_belongs_to_port) + mpt3sas_transport_del_phy_from_an_existing_port( + 
ioc, sas_node, &sas_node->phy[offset]); + mpt3sas_transport_add_phy_to_an_existing_port( + ioc, sas_node, &sas_node->phy[offset], + hba_port_entry->sas_address, + hba_port_entry); + } + } +} + +/** + * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty. + * @ioc: per adapter object + * + * Returns nothing. + */ +static void +_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + struct virtual_phy *vphy, *vphy_next; + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) { + drsprintk(ioc, ioc_info(ioc, + "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", + vphy, port->port_id, + vphy->phy_mask)); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + if (!port->vphys_mask && !port->sas_address) + port->flags |= HBA_PORT_FLAG_DIRTY_PORT; + } +} + +/** + * _scsih_del_dirty_port_entries - delete dirty port entries from port list + * after host reset + *@ioc: per adapter object + * + */ +static void +_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc) +{ + struct hba_port *port, *port_next; + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) || + port->flags & HBA_PORT_FLAG_NEW_PORT) + continue; + + drsprintk(ioc, ioc_info(ioc, + "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n", + port, port->port_id, port->phy_mask)); + list_del(&port->list); + kfree(port); + } +} + +/** + * _scsih_sas_port_refresh - Update HBA port table after host reset + * @ioc: per adapter object + */ +static void +_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u32 port_count = 0; + struct hba_port *port_table; + struct hba_port *port_table_entry; + struct hba_port *port_entry = NULL; + int i, j, count = 0, lcount = 0; + int ret; + u64 sas_addr; + u8 num_phys; + + drsprintk(ioc, ioc_info(ioc, + "updating ports for sas_host(0x%016llx)\n", + (unsigned long long)ioc->sas_hba.sas_address)); + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + + port_table = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct hba_port), GFP_KERNEL); + if (!port_table) + return; + + port_count = _scsih_get_port_table_after_reset(ioc, port_table); + if (!port_count) + return; + + drsprintk(ioc, ioc_info(ioc, "New Port table\n")); + for (j = 0; j < port_count; j++) + drsprintk(ioc, ioc_info(ioc, + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + port_table[j].port_id, + port_table[j].phy_mask, port_table[j].sas_address)); + + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) + port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; + + drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); + port_table_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + drsprintk(ioc, ioc_info(ioc, + "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + port_table_entry->port_id, + port_table_entry->phy_mask, + port_table_entry->sas_address)); + } + + for (j = 0; j < port_count; j++) { + ret = _scsih_look_and_get_matched_port_entry(ioc, + 
&port_table[j], &port_entry, &count);
+        if (!port_entry) {
+            drsprintk(ioc, ioc_info(ioc,
+                "No Matched entry for sas_addr(0x%016llx), Port:%d\n",
+                port_table[j].sas_address,
+                port_table[j].port_id));
+            continue;
+        }
+
+        switch (ret) {
+        case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
+        case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
+            _scsih_add_or_del_phys_from_existing_port(ioc,
+                port_entry, port_table, j, port_count);
+            break;
+        case MATCHED_WITH_ADDR:
+            sas_addr = port_table[j].sas_address;
+            for (i = 0; i < port_count; i++) {
+                if (port_table[i].sas_address == sas_addr)
+                    lcount++;
+            }
+
+            if (count > 1 || lcount > 1)
+                port_entry = NULL;
+            else
+                _scsih_add_or_del_phys_from_existing_port(ioc,
+                    port_entry, port_table, j, port_count);
+        }
+
+        if (!port_entry)
+            continue;
+
+        if (port_entry->port_id != port_table[j].port_id)
+            port_entry->port_id = port_table[j].port_id;
+        port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
+        port_entry->phy_mask = port_table[j].phy_mask;
+    }
+
+    port_table_entry = NULL;
+}
+
+/**
+ * _scsih_alloc_vphy - allocate virtual_phy object
+ * @ioc: per adapter object
+ * @port_id: Port ID number
+ * @phy_num: HBA Phy number
+ *
+ * Returns allocated virtual_phy object.
+ */
+static struct virtual_phy *
+_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+{
+    struct virtual_phy *vphy;
+    struct hba_port *port;
+
+    port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+    if (!port)
+        return NULL;
+
+    vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
+    if (!vphy) {
+        vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
+        if (!vphy)
+            return NULL;
+
+        if (!port->vphys_mask)
+            INIT_LIST_HEAD(&port->vphys_list);
+
+        /*
+         * Enable the bit corresponding to this HBA phy number in the
+         * parent hba_port object's vphys_mask field.
+         */
+        port->vphys_mask |= (1 << phy_num);
+        vphy->phy_mask |= (1 << phy_num);
+
+        list_add_tail(&vphy->list, &port->vphys_list);
+
+        ioc_info(ioc,
+            "vphy entry: %p, port id: %d, phy: %d is added to port's vphys_list\n",
+            vphy, port->port_id, phy_num);
+    }
+    return vphy;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, the firmware sends topology events for every device.
+ * It is possible that the handles have changed from the previous setting,
+ * so this code keeps the handles updated as they change.
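+ *
+ * The SAS IO Unit Page 0 read below is variable-length: the allocation is
+ * the fixed header up to PhyData plus one PhyData element per HBA phy. As
+ * a stand-alone illustration of that sizing idiom (hypothetical types,
+ * not the MPI headers):
+ *
+ *    #include <stddef.h>
+ *    struct phy_data { unsigned int info; };
+ *    struct iounit_pg0 { int header; struct phy_data phy_data[]; };
+ *    static size_t pg0_size(unsigned int num_phys)
+ *    {
+ *        return offsetof(struct iounit_pg0, phy_data) +
+ *            num_phys * sizeof(struct phy_data);
+ *    }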
+ */ +static void +_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz; + u16 ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u8 link_rate, port_id; + struct hba_port *port; + Mpi2SasPhyPage0_t phy_pg0; + + dtmprintk(ioc, + ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", + (u64)ioc->sas_hba.sas_address)); + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu( + sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + ioc_info(ioc, + "hba_port entry: %p, port: %d is added to hba_port list\n", + port, port->port_id); + if (ioc->shost_recovery) + port->flags = HBA_PORT_FLAG_NEW_PORT; + list_add_tail(&port->list, &ioc->port_table_list); + } + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & + MPI2_SAS_DEVICE_INFO_SEP && + (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if (!(le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + /* + * Allocate a virtual_phy object for vSES device, if + * this vSES device is hot added. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + + /* + * Add new HBA phys to STL if these new phys got added as part + * of HBA Firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. + AttachedDevHandle); + if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate, + ioc->sas_hba.phy[i].port); + } + /* + * Clear the phy details if this phy got disabled as part of + * HBA Firmware upgrade/downgrade operation. 
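+     * The loop below therefore only walks the tail of the phy array,
+     * from num_phys up to nr_phys_allocated, i.e. phys that were
+     * allocated for a previous firmware image but are no longer
+     * reported; any such phy that is still registered is forced to the
+     * PHY_DISABLED link rate so the transport layer can tear it down.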
+ */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } + out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_sas_host_add - create sas host object + * @ioc: per adapter object + * + * Creating host side data object, stored in ioc->sas_hba + */ +static void +_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + u8 num_phys, port_id; + struct hba_port *port; + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.num_phys = num_phys; + + /* sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + ioc->io_missing_delay = + sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = + sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + ioc->device_missing_delay = device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if 
(ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + ioc_info(ioc, + "hba_port entry: %p, port: %d is added to hba_port list\n", + port, port->port_id); + list_add_tail(&port->list, + &ioc->port_table_list); + } + + /* + * Check whether current Phy belongs to HBA vSES device or not. + */ + if ((le32_to_cpu(phy_pg0.PhyInfo) & + MPI2_SAS_PHYINFO_VIRTUAL_PHY) && + (phy_pg0.NegotiatedLinkRate >> 4) >= + MPI2_SAS_NEG_LINK_RATE_1_5) { + /* + * Allocate a virtual_phy object for vSES device. + */ + if (!_scsih_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, ioc->sas_hba.parent_dev); + } + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->sas_hba.handle, + (u64)ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); + + if (ioc->sas_hba.enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +/** + * _scsih_expander_add - creating expander object + * @ioc: per adapter object + * @handle: expander handle + * + * Creating expander object, stored in ioc->sas_expander_list. + * + * Return: 0 for success, else error. 
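+ *
+ * Topology events can arrive out of order, so the function recurses on
+ * ParentDevHandle first: an expander is only added once all of its
+ * ancestors exist. A minimal sketch of that parent-first pattern, with
+ * hypothetical helpers standing in for the driver's lookups:
+ *
+ *    int add_node(u16 handle)
+ *    {
+ *        u16 parent = lookup_parent(handle);     // hypothetical
+ *        int rc;
+ *
+ *        if (parent && !node_known(parent)) {    // hypothetical
+ *            rc = add_node(parent);              // ancestors first
+ *            if (rc)
+ *                return rc;
+ *        }
+ *        return attach_node(handle, parent);     // hypothetical
+ *    }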
+ */ +static int +_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander; + struct _enclosure_node *enclosure_dev; + Mpi2ConfigReply_t mpi_reply; + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ExpanderPage1_t expander_pg1; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port = NULL; + u8 port_id; + + int rc = 0; + + if (!handle) + return -1; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + + if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + /* handle out of order topology events */ + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) + != 0) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + port_id = expander_pg0.PhysicalPort; + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = _scsih_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (sas_expander) + return 0; + + sas_expander = kzalloc(sizeof(struct _sas_node), + GFP_KERNEL); + if (!sas_expander) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!sas_expander->port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + handle, parent_handle, + (u64)sas_expander->sas_address, sas_expander->num_phys); + + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + INIT_LIST_HEAD(&sas_expander->sas_port_list); + mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, + sas_address_parent, sas_expander->port); + if (!mpt3sas_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &mpt3sas_port->rphy->dev; + sas_expander->rphy = mpt3sas_port->rphy; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + 
&expander_pg1, i, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].port = + mpt3sas_get_port_by_id(ioc, port_id, 0); + + if ((mpt3sas_transport_add_expander_phy(ioc, + &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + + if (sas_expander->enclosure_handle) { + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + sas_expander->enclosure_handle); + if (enclosure_dev) + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + } + + _scsih_expander_node_add(ioc, sas_expander); + return 0; + + out_fail: + + if (mpt3sas_port) + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_address_parent, sas_expander->port); + kfree(sas_expander); + return rc; +} + +/** + * mpt3sas_expander_remove - removing expander object + * @ioc: per adapter object + * @sas_address: expander sas_address + * @port: hba port entry + */ +void +mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + struct hba_port *port) +{ + struct _sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + if (!port) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + _scsih_expander_node_remove(ioc, sas_expander); +} + +/** + * _scsih_done - internal SCSI_IO callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated SCSI_IO. + * The callback index passed is `ioc->scsih_cb_idx` + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
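+ *
+ * This is the usual fire-and-wait pattern for internally generated
+ * requests: the issuer marks ioc->scsih_cmds pending and sleeps on a
+ * completion, and this callback validates the smid, copies the reply
+ * frame, then wakes the issuer. In outline (a sketch only, not the
+ * exact driver sequence):
+ *
+ *    // issuer, process context
+ *    ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+ *    init_completion(&ioc->scsih_cmds.done);
+ *    // ... fire the request for smid ...
+ *    wait_for_completion_timeout(&ioc->scsih_cmds.done, timeout);
+ *
+ *    // interrupt path: this callback, see the body below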
+ */ +static u8 +_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->scsih_cmds.smid != smid) + return 1; + ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->scsih_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->scsih_cmds.done); + return 1; +} + + + + +#define MPT3_MAX_LUNS (255) + + +/** + * _scsih_check_access_status - check access flags + * @ioc: per adapter object + * @sas_address: sas address + * @handle: sas device handle + * @access_status: errors returned during discovery of the device + * + * Return: 0 for success, else failure + */ +static u8 +_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS: + case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: + rc = 0; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: + desc = "sata capability failed"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: + desc = "sata affiliation conflict"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: + desc = "route not addressable"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: + desc = "smp error not addressable"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: + desc = "device blocked"; + break; + case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: + case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX: + desc = "sata initialization failed"; + break; + default: + desc = "unknown"; + break; + } + + if (!rc) + return 0; + + ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", + desc, (u64)sas_address, handle); + return rc; +} + +/** + * _scsih_check_device - checking device responsiveness + * @ioc: per adapter object + * @parent_sas_address: sas address of parent expander or sas host + * @handle: attached device handle + * @phy_number: phy number + * @link_rate: new link rate + */ +static void +_scsih_check_device(struct MPT3SAS_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + struct _sas_device *sas_device = NULL; + struct _enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + struct hba_port *port; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* wide port handling ~ we need 
only handle device once for the phy that + * is matched in sas device page zero + */ + if (phy_number != sas_device_pg0.PhyNum) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); + if (!port) + goto out_unlock; + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address, port); + + if (!sas_device) + goto out_unlock; + + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, + sas_device->enclosure_handle); + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + } + + /* check if device is present */ + if (!(le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", + handle); + goto out_unlock; + } + + /* check if there were any issues with discovery */ + if (_scsih_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + _scsih_ublock_io_device(ioc, sas_address, port); + + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + sas_device_put(sas_device); +} + +/** + * _scsih_add_device - creating sas device object + * @ioc: per adapter object + * @handle: sas device handle + * @phy_num: phy number end device attached to + * @is_pd: is this hidden raid component + * + * Creating end device object, stored in ioc->sas_device_list. + * + * Return: 0 for success, non-zero for failure. 
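+ *
+ * All config page fields arrive little-endian, so every test below goes
+ * through le16_to_cpu()/le32_to_cpu()/le64_to_cpu() before masking. The
+ * shape of such a test, as a stand-alone sketch (the flag bit shown is
+ * hypothetical):
+ *
+ *    #include <stdint.h>
+ *    #include <endian.h>     // glibc; le16toh is the le16_to_cpu analogue
+ *    #define FLAGS_DEVICE_PRESENT 0x0001     // hypothetical bit
+ *    static int device_present(uint16_t le_flags)
+ *    {
+ *        return (le16toh(le_flags) & FLAGS_DEVICE_PRESENT) != 0;
+ *    }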
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+    u8 is_pd)
+{
+    Mpi2ConfigReply_t mpi_reply;
+    Mpi2SasDevicePage0_t sas_device_pg0;
+    struct _sas_device *sas_device;
+    struct _enclosure_node *enclosure_dev = NULL;
+    u32 ioc_status;
+    u64 sas_address;
+    u32 device_info;
+    u8 port_id;
+
+    if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+        MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        return -1;
+    }
+
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        return -1;
+    }
+
+    /* check if this is end device */
+    device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+    if (!(_scsih_is_end_device(device_info)))
+        return -1;
+    set_bit(handle, ioc->pend_os_device_add);
+    sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+    /* check if device is present */
+    if (!(le16_to_cpu(sas_device_pg0.Flags) &
+        MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+        ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
+            handle);
+        return -1;
+    }
+
+    /* check if there were any issues with discovery */
+    if (_scsih_check_access_status(ioc, sas_address, handle,
+        sas_device_pg0.AccessStatus))
+        return -1;
+
+    port_id = sas_device_pg0.PhysicalPort;
+    sas_device = mpt3sas_get_sdev_by_addr(ioc,
+        sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
+    if (sas_device) {
+        clear_bit(handle, ioc->pend_os_device_add);
+        sas_device_put(sas_device);
+        return -1;
+    }
+
+    if (sas_device_pg0.EnclosureHandle) {
+        enclosure_dev =
+            mpt3sas_scsih_enclosure_find_by_handle(ioc,
+            le16_to_cpu(sas_device_pg0.EnclosureHandle));
+        if (enclosure_dev == NULL)
+            ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+                sas_device_pg0.EnclosureHandle);
+    }
+
+    sas_device = kzalloc(sizeof(struct _sas_device),
+        GFP_KERNEL);
+    if (!sas_device) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        /* allocation failure is a failure, per the Return convention */
+        return -1;
+    }
+
+    kref_init(&sas_device->refcount);
+    sas_device->handle = handle;
+    if (_scsih_get_sas_address(ioc,
+        le16_to_cpu(sas_device_pg0.ParentDevHandle),
+        &sas_device->sas_address_parent) != 0)
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+    sas_device->enclosure_handle =
+        le16_to_cpu(sas_device_pg0.EnclosureHandle);
+    if (sas_device->enclosure_handle != 0)
+        sas_device->slot =
+            le16_to_cpu(sas_device_pg0.Slot);
+    sas_device->device_info = device_info;
+    sas_device->sas_address = sas_address;
+    sas_device->phy = sas_device_pg0.PhyNum;
+    sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+        MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ?
1 : 0; + sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); + if (!sas_device->port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out; + } + + if (le16_to_cpu(sas_device_pg0.Flags) + & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + /* get enclosure_logical_id & chassis_slot*/ + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + + /* get device name */ + sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + ioc_info(ioc, + "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + handle, sas_device->sas_address, sas_device->port_type); + + if (ioc->wait_for_discovery_to_complete) + _scsih_sas_device_init_add(ioc, sas_device); + else + _scsih_sas_device_add(ioc, sas_device); + +out: + sas_device_put(sas_device); + return 0; +} + +/** + * _scsih_remove_device - removing sas device object + * @ioc: per adapter object + * @sas_device: the sas_device object + */ +static void +_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (sas_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, + sas_device->handle, (u64)sas_device->sas_address)); + + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, sas_device->sas_address, + sas_device->port); + sas_target_priv_data->handle = + MPT3SAS_INVALID_DEVICE_HANDLE; + } + + if (!ioc->hide_drives) + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + + ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", + sas_device->handle, (u64)sas_device->sas_address); + + _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, + sas_device->handle, (u64)sas_device->sas_address)); + dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); +} + +/** + * _scsih_sas_topology_change_event_debug - debug for topology event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
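+ *
+ * Each PHY entry packs two link rates into one LinkRate byte: the new
+ * rate in the upper nibble and the previous rate in the lower nibble,
+ * which is why the loop below uses ">> 4" and "& 0xF". Stand-alone
+ * sketch of the decode:
+ *
+ *    #include <stdint.h>
+ *    static void decode_link_rate(uint8_t link_rate, uint8_t *new_rate,
+ *        uint8_t *old_rate)
+ *    {
+ *        *new_rate = link_rate >> 4;     // upper nibble
+ *        *old_rate = link_rate & 0xF;    // lower nibble
+ *    }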
+ */ +static void +_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case MPI2_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + ioc_info(ioc, "sas topology change: (%s)\n", status_str); + pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPhyNum, event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ + " link rate: new(0x%02x), old(0x%02x)\n", phy_number, + handle, status_str, link_rate, prev_link_rate); + + } +} + +/** + * _scsih_sas_topology_change_event - handle topology changes + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
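+ *
+ * The per-phy loop below masks PhyStatus with MPI2_EVENT_SAS_TOPO_RC_MASK
+ * to get the reason code, and skips entries flagged vacant unless the
+ * reason is "target not responding", since even a vacant phy may still
+ * carry a device-removal notification that has to be processed.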
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+    struct fw_event_work *fw_event)
+{
+    int i;
+    u16 parent_handle, handle;
+    u16 reason_code;
+    u8 phy_number, max_phys;
+    struct _sas_node *sas_expander;
+    u64 sas_address;
+    unsigned long flags;
+    u8 link_rate, prev_link_rate;
+    struct hba_port *port;
+    Mpi2EventDataSasTopologyChangeList_t *event_data =
+        (Mpi2EventDataSasTopologyChangeList_t *)
+        fw_event->event_data;
+
+    if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+        _scsih_sas_topology_change_event_debug(ioc, event_data);
+
+    if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+        return 0;
+
+    if (!ioc->sas_hba.num_phys)
+        _scsih_sas_host_add(ioc);
+    else
+        _scsih_sas_host_refresh(ioc);
+
+    if (fw_event->ignore) {
+        dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
+        return 0;
+    }
+
+    parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+    port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
+
+    /* handle expander add */
+    if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+        if (_scsih_expander_add(ioc, parent_handle) != 0)
+            return 0;
+
+    spin_lock_irqsave(&ioc->sas_node_lock, flags);
+    sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+        parent_handle);
+    if (sas_expander) {
+        sas_address = sas_expander->sas_address;
+        max_phys = sas_expander->num_phys;
+        port = sas_expander->port;
+    } else if (parent_handle < ioc->sas_hba.num_phys) {
+        sas_address = ioc->sas_hba.sas_address;
+        max_phys = ioc->sas_hba.num_phys;
+    } else {
+        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+        return 0;
+    }
+    spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+    /* handle sibling events */
+    for (i = 0; i < event_data->NumEntries; i++) {
+        if (fw_event->ignore) {
+            dewtprintk(ioc,
+                ioc_info(ioc, "ignoring expander event\n"));
+            return 0;
+        }
+        if (ioc->remove_host || ioc->pci_error_recovery)
+            return 0;
+        phy_number = event_data->StartPhyNum + i;
+        if (phy_number >= max_phys)
+            continue;
+        reason_code = event_data->PHY[i].PhyStatus &
+            MPI2_EVENT_SAS_TOPO_RC_MASK;
+        if ((event_data->PHY[i].PhyStatus &
+            MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+            MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+            continue;
+        handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+        if (!handle)
+            continue;
+        link_rate = event_data->PHY[i].LinkRate >> 4;
+        prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+        switch (reason_code) {
+        case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+            if (ioc->shost_recovery)
+                break;
+
+            if (link_rate == prev_link_rate)
+                break;
+
+            mpt3sas_transport_update_links(ioc, sas_address,
+                handle, phy_number, link_rate, port);
+
+            if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+                break;
+
+            _scsih_check_device(ioc, sas_address, handle,
+                phy_number, link_rate);
+
+            if (!test_bit(handle, ioc->pend_os_device_add))
+                break;
+
+            fallthrough;
+
+        case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+            if (ioc->shost_recovery)
+                break;
+
+            mpt3sas_transport_update_links(ioc, sas_address,
+                handle, phy_number, link_rate, port);
+
+            _scsih_add_device(ioc, handle, phy_number, 0);
+
+            break;
+        case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+            _scsih_device_remove_by_handle(ioc, handle);
+            break;
+        }
+    }
+
+    /* handle expander removal */
+    if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+        sas_expander)
+        mpt3sas_expander_remove(ioc, sas_address, port);
+
+    return 0;
+}
+
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload + * Context: user. + */ +static void +_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + reason_str, le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", + event_data->ASC, event_data->ASCQ); + pr_cont("\n"); +} + +/** + * _scsih_sas_device_status_change_event - handle device status change + * @ioc: per adapter object + * @event_data: The fw event + * Context: user. + */ +static void +_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + struct MPT3SAS_TARGET *target_priv_data; + struct _sas_device *sas_device; + u64 sas_address; + unsigned long flags; + + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. 
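+     * (The revision byte checked here lives in the upper half of
+     * HeaderVersion, hence the ">> 8" below: e.g. a HeaderVersion of
+     * 0x0C00 decodes to 0xC, i.e. MPI Revision K.)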
+     */
+    if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+        return;
+
+    if (event_data->ReasonCode !=
+        MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+        event_data->ReasonCode !=
+        MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+        return;
+
+    spin_lock_irqsave(&ioc->sas_device_lock, flags);
+    sas_address = le64_to_cpu(event_data->SASAddress);
+    sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+        sas_address,
+        mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
+
+    if (!sas_device || !sas_device->starget)
+        goto out;
+
+    target_priv_data = sas_device->starget->hostdata;
+    if (!target_priv_data)
+        goto out;
+
+    if (event_data->ReasonCode ==
+        MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+        target_priv_data->tm_busy = 1;
+    else
+        target_priv_data->tm_busy = 0;
+
+    if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+        ioc_info(ioc,
+            "%s tm_busy flag for handle(0x%04x)\n",
+            (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
+            target_priv_data->handle);
+
+out:
+    if (sas_device)
+        sas_device_put(sas_device);
+
+    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+/**
+ * _scsih_check_pcie_access_status - check access flags
+ * @ioc: per adapter object
+ * @wwid: wwid
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return: 0 for success, else failure
+ */
+static u8
+_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+    u16 handle, u8 access_status)
+{
+    u8 rc = 1;
+    char *desc = NULL;
+
+    switch (access_status) {
+    case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
+    case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
+        rc = 0;
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
+        desc = "PCIe device capability failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
+        desc = "PCIe device blocked";
+        ioc_info(ioc,
+            "Device with Access Status (%s): wwid(0x%016llx), "
+            "handle(0x%04x), will only be added to the internal list\n",
+            desc, (u64)wwid, handle);
+        rc = 0;
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
+        desc = "PCIe device mem space access failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
+        desc = "PCIe device unsupported";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
+        desc = "PCIe device MSIx Required";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
+        desc = "PCIe device init fail max";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
+        desc = "PCIe device status unknown";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
+        desc = "nvme ready timeout";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
+        desc = "nvme device configuration unsupported";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
+        desc = "nvme identify failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
+        desc = "nvme qconfig failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
+        desc = "nvme qcreation failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
+        desc = "nvme eventcfg failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
+        desc = "nvme get feature stat failed";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
+        desc = "nvme idle timeout";
+        break;
+    case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
+        desc = "nvme failure status";
+        break;
+    default:
+        ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+            access_status, (u64)wwid, handle);
+        return rc;
+    }
+
+    if (!rc)
+        return rc;
+
+    ioc_info(ioc, "NVMe 
discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", + desc, (u64)wwid, handle); + return rc; +} + +/** + * _scsih_pcie_device_remove_from_sml - removing pcie device + * from SML and free up associated memory + * @ioc: per adapter object + * @pcie_device: the pcie_device object + */ +static void +_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", + __func__, + pcie_device->enclosure_level, + pcie_device->connector_name)); + + if (pcie_device->starget && pcie_device->starget->hostdata) { + sas_target_priv_data = pcie_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + } + + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + pcie_device->handle, (u64)pcie_device->wwid); + if (pcie_device->enclosure_handle != 0) + ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot); + if (pcie_device->connector_name[0] != '\0') + ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", + pcie_device->enclosure_level, + pcie_device->connector_name); + + if (pcie_device->starget && (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) + scsi_remove_target(&pcie_device->starget->dev); + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", + __func__, + pcie_device->handle, (u64)pcie_device->wwid)); + if (pcie_device->enclosure_handle != 0) + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", + __func__, + (u64)pcie_device->enclosure_logical_id, + pcie_device->slot)); + if (pcie_device->connector_name[0] != '\0') + dewtprintk(ioc, + ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", + __func__, + pcie_device->enclosure_level, + pcie_device->connector_name)); + + kfree(pcie_device->serial_number); +} + + +/** + * _scsih_pcie_check_device - checking device responsiveness + * @ioc: per adapter object + * @handle: attached device handle + */ +static void +_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi26PCIeDevicePage0_t pcie_device_pg0; + u32 ioc_status; + struct _pcie_device *pcie_device; + u64 wwid; + unsigned long flags; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + + if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, + &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); + if (!(_scsih_is_nvme_pciescsi_device(device_info))) + return; + + wwid = le64_to_cpu(pcie_device_pg0.WWID); + spin_lock_irqsave(&ioc->pcie_device_lock, 
flags);
+    pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+    if (!pcie_device) {
+        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+        return;
+    }
+
+    if (unlikely(pcie_device->handle != handle)) {
+        starget = pcie_device->starget;
+        sas_target_priv_data = starget->hostdata;
+        pcie_device->access_status = pcie_device_pg0.AccessStatus;
+        starget_printk(KERN_INFO, starget,
+            "handle changed from(0x%04x) to (0x%04x)!!!\n",
+            pcie_device->handle, handle);
+        sas_target_priv_data->handle = handle;
+        pcie_device->handle = handle;
+
+        if (le32_to_cpu(pcie_device_pg0.Flags) &
+            MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+            pcie_device->enclosure_level =
+                pcie_device_pg0.EnclosureLevel;
+            memcpy(&pcie_device->connector_name[0],
+                &pcie_device_pg0.ConnectorName[0], 4);
+        } else {
+            pcie_device->enclosure_level = 0;
+            pcie_device->connector_name[0] = '\0';
+        }
+    }
+
+    /* check if device is present */
+    if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+        MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+        ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+            handle);
+        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+        pcie_device_put(pcie_device);
+        return;
+    }
+
+    /* check if there were any issues with discovery */
+    if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+        pcie_device_pg0.AccessStatus)) {
+        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+        pcie_device_put(pcie_device);
+        return;
+    }
+
+    spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+    pcie_device_put(pcie_device);
+
+    _scsih_ublock_io_device(ioc, wwid, NULL);
+
+    return;
+}
+
+/**
+ * _scsih_pcie_add_device - creating pcie device object
+ * @ioc: per adapter object
+ * @handle: pcie device handle
+ *
+ * Creating end device object, stored in ioc->pcie_device_list.
+ *
+ * Return: 1 means queue the event later, 0 means complete the event
+ */
+static int
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+    Mpi26PCIeDevicePage0_t pcie_device_pg0;
+    Mpi26PCIeDevicePage2_t pcie_device_pg2;
+    Mpi2ConfigReply_t mpi_reply;
+    struct _pcie_device *pcie_device;
+    struct _enclosure_node *enclosure_dev;
+    u32 ioc_status;
+    u64 wwid;
+
+    if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+        &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        return 0;
+    }
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        return 0;
+    }
+
+    set_bit(handle, ioc->pend_os_device_add);
+    wwid = le64_to_cpu(pcie_device_pg0.WWID);
+
+    /* check if device is present */
+    if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+        MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+        ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
+            handle);
+        return 0;
+    }
+
+    /* check if there were any issues with discovery */
+    if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+        pcie_device_pg0.AccessStatus))
+        return 0;
+
+    if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+        (pcie_device_pg0.DeviceInfo))))
+        return 0;
+
+    pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
+    if (pcie_device) {
+        clear_bit(handle, ioc->pend_os_device_add);
+        pcie_device_put(pcie_device);
+        return 0;
+    }
+
+    /* PCIe Device Page 2 contains read-only information about a
+     * specific NVMe device; therefore, this page is only valid for
+     * NVMe devices and is skipped for PCIe devices of type SCSI.
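+     * For a PCIe device of type SCSI the pg2 read is skipped entirely
+     * and a default reset_timeout of 30 is used instead, as the
+     * fallback branch further down shows.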
+     */
+    if (!(mpt3sas_scsih_is_pcie_scsi_device(
+        le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+        if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+            &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+            handle)) {
+            ioc_err(ioc,
+                "failure at %s:%d/%s()!\n", __FILE__,
+                __LINE__, __func__);
+            return 0;
+        }
+
+        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+            MPI2_IOCSTATUS_MASK;
+        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+            ioc_err(ioc,
+                "failure at %s:%d/%s()!\n", __FILE__,
+                __LINE__, __func__);
+            return 0;
+        }
+    }
+
+    pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
+    if (!pcie_device) {
+        ioc_err(ioc, "failure at %s:%d/%s()!\n",
+            __FILE__, __LINE__, __func__);
+        return 0;
+    }
+
+    kref_init(&pcie_device->refcount);
+    pcie_device->id = ioc->pcie_target_id++;
+    pcie_device->channel = PCIE_CHANNEL;
+    pcie_device->handle = handle;
+    pcie_device->access_status = pcie_device_pg0.AccessStatus;
+    pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+    pcie_device->wwid = wwid;
+    pcie_device->port_num = pcie_device_pg0.PortNum;
+    pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
+        MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+    pcie_device->enclosure_handle =
+        le16_to_cpu(pcie_device_pg0.EnclosureHandle);
+    if (pcie_device->enclosure_handle != 0)
+        pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
+
+    if (le32_to_cpu(pcie_device_pg0.Flags) &
+        MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+        pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
+        memcpy(&pcie_device->connector_name[0],
+            &pcie_device_pg0.ConnectorName[0], 4);
+    } else {
+        pcie_device->enclosure_level = 0;
+        pcie_device->connector_name[0] = '\0';
+    }
+
+    /* get enclosure_logical_id */
+    if (pcie_device->enclosure_handle) {
+        enclosure_dev =
+            mpt3sas_scsih_enclosure_find_by_handle(ioc,
+            pcie_device->enclosure_handle);
+        if (enclosure_dev)
+            pcie_device->enclosure_logical_id =
+                le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+    }
+    /* TODO -- Add device name once FW supports it */
+    if (!(mpt3sas_scsih_is_pcie_scsi_device(
+        le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+        pcie_device->nvme_mdts =
+            le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+        pcie_device->shutdown_latency =
+            le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+        /*
+         * Set IOC's max_shutdown_latency to the drive's RTD3 Entry
+         * Latency if the drive's RTD3 Entry Latency is greater than
+         * IOC's max_shutdown_latency.
+         */
+        if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+            ioc->max_shutdown_latency =
+                pcie_device->shutdown_latency;
+        if (pcie_device_pg2.ControllerResetTO)
+            pcie_device->reset_timeout =
+                pcie_device_pg2.ControllerResetTO;
+        else
+            pcie_device->reset_timeout = 30;
+    } else
+        pcie_device->reset_timeout = 30;
+
+    if (ioc->wait_for_discovery_to_complete)
+        _scsih_pcie_device_init_add(ioc, pcie_device);
+    else
+        _scsih_pcie_device_add(ioc, pcie_device);
+
+    pcie_device_put(pcie_device);
+    return 0;
+}
+
+/**
+ * _scsih_pcie_topology_change_event_debug - debug for topology
+ *    event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
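+ *
+ * Unlike the SAS variant above, the PCIe entries keep the current and
+ * previous rates in two separate fields (CurrentPortInfo and
+ * PreviousPortInfo), each masked with MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK
+ * instead of being split out of one byte by nibble.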
+ */
+static void
+_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+    Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+    int i;
+    u16 handle;
+    u16 reason_code;
+    u8 port_number;
+    char *status_str = NULL;
+    u8 link_rate, prev_link_rate;
+
+    switch (event_data->SwitchStatus) {
+    case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
+        status_str = "add";
+        break;
+    case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+        status_str = "remove";
+        break;
+    case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
+    case 0:
+        status_str = "responding";
+        break;
+    case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+        status_str = "remove delay";
+        break;
+    default:
+        status_str = "unknown status";
+        break;
+    }
+    ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
+    pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
+        "start_port(%02d), count(%d)\n",
+        le16_to_cpu(event_data->SwitchDevHandle),
+        le16_to_cpu(event_data->EnclosureHandle),
+        event_data->StartPortNum, event_data->NumEntries);
+    for (i = 0; i < event_data->NumEntries; i++) {
+        handle =
+            le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+        if (!handle)
+            continue;
+        port_number = event_data->StartPortNum + i;
+        reason_code = event_data->PortEntry[i].PortStatus;
+        switch (reason_code) {
+        case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+            status_str = "target add";
+            break;
+        case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+            status_str = "target remove";
+            break;
+        case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+            status_str = "delay target remove";
+            break;
+        case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+            status_str = "link rate change";
+            break;
+        case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+            status_str = "target responding";
+            break;
+        default:
+            status_str = "unknown";
+            break;
+        }
+        link_rate = event_data->PortEntry[i].CurrentPortInfo &
+            MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+        prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
+            MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+        pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
+            " link rate: new(0x%02x), old(0x%02x)\n", port_number,
+            handle, status_str, link_rate, prev_link_rate);
+    }
+}
+
+/**
+ * _scsih_pcie_topology_change_event - handle PCIe topology
+ *    changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
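+ *
+ * One notable trick in the handler below: when a device that was stuck
+ * returning BUSY disappears from the driver's list, the entry's
+ * PortStatus is rewritten in place (clear the low nibble, then OR in the
+ * "device added" code) so that execution can fall through into the add
+ * path. As arithmetic, the rewrite is simply:
+ *
+ *    #include <stdint.h>
+ *    static uint8_t convert_to_dev_added(uint8_t port_status,
+ *        uint8_t dev_added_code)    // low-nibble reason code
+ *    {
+ *        return (port_status & 0xF0) | dev_added_code;
+ *    }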
+ *
+ */
+static void
+_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+    struct fw_event_work *fw_event)
+{
+    int i;
+    u16 handle;
+    u16 reason_code;
+    u8 link_rate, prev_link_rate;
+    unsigned long flags;
+    int rc;
+    Mpi26EventDataPCIeTopologyChangeList_t *event_data =
+        (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
+    struct _pcie_device *pcie_device;
+
+    if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+        _scsih_pcie_topology_change_event_debug(ioc, event_data);
+
+    if (ioc->shost_recovery || ioc->remove_host ||
+        ioc->pci_error_recovery)
+        return;
+
+    if (fw_event->ignore) {
+        dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
+        return;
+    }
+
+    /* handle sibling events */
+    for (i = 0; i < event_data->NumEntries; i++) {
+        if (fw_event->ignore) {
+            dewtprintk(ioc,
+                ioc_info(ioc, "ignoring switch event\n"));
+            return;
+        }
+        if (ioc->remove_host || ioc->pci_error_recovery)
+            return;
+        reason_code = event_data->PortEntry[i].PortStatus;
+        handle =
+            le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+        if (!handle)
+            continue;
+
+        link_rate = event_data->PortEntry[i].CurrentPortInfo
+            & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+        prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
+            & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+
+        switch (reason_code) {
+        case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+            if (ioc->shost_recovery)
+                break;
+            if (link_rate == prev_link_rate)
+                break;
+            if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+                break;
+
+            _scsih_pcie_check_device(ioc, handle);
+
+            /* The code from this point on handles the case where a
+             * device was added but kept returning BUSY for some
+             * time, and then, before the Device Missing Delay
+             * expired and the device became READY, the device was
+             * removed and added back.
+             */
+            spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+            pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+            spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+            if (pcie_device) {
+                pcie_device_put(pcie_device);
+                break;
+            }
+
+            if (!test_bit(handle, ioc->pend_os_device_add))
+                break;
+
+            dewtprintk(ioc,
+                ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+                    handle));
+            event_data->PortEntry[i].PortStatus &= 0xF0;
+            event_data->PortEntry[i].PortStatus |=
+                MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+            fallthrough;
+        case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+            if (ioc->shost_recovery)
+                break;
+            if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+                break;
+
+            rc = _scsih_pcie_add_device(ioc, handle);
+            if (!rc) {
+                /* mark entry vacant */
+                /* TODO: This needs to be reviewed and fixed;
+                 * we don't have an entry state
+                 * to make an event void like vacant
+                 */
+                event_data->PortEntry[i].PortStatus |=
+                    MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
+            }
+            break;
+        case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+            _scsih_pcie_device_remove_by_handle(ioc, handle);
+            break;
+        }
+    }
+}
+
+/**
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
+{
+	char *reason_str = NULL;
+
+	switch (event_data->ReasonCode) {
+	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
+		reason_str = "smart data";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
+		reason_str = "unsupported device discovered";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
+		reason_str = "internal device reset";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
+		reason_str = "internal task abort";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+		reason_str = "internal task abort set";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+		reason_str = "internal clear task set";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
+		reason_str = "internal query task";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
+		reason_str = "device init failure";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+		reason_str = "internal device reset complete";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+		reason_str = "internal task abort complete";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
+		reason_str = "internal async notification";
+		break;
+	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
+		reason_str = "pcie hot reset failed";
+		break;
+	default:
+		reason_str = "unknown reason";
+		break;
+	}
+
+	ioc_info(ioc, "PCIE device status change: (%s)\n"
+		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+		 reason_str, le16_to_cpu(event_data->DevHandle),
+		 (u64)le64_to_cpu(event_data->WWID),
+		 le16_to_cpu(event_data->TaskTag));
+	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
+		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
+			event_data->ASC, event_data->ASCQ);
+	pr_cont("\n");
+}
+
+/**
+ * _scsih_pcie_device_status_change_event - handle device status
+ * change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ */
+static void
+_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+	struct fw_event_work *fw_event)
+{
+	struct MPT3SAS_TARGET *target_priv_data;
+	struct _pcie_device *pcie_device;
+	u64 wwid;
+	unsigned long flags;
+	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
+	    (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
+
+	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+		_scsih_pcie_device_status_change_event_debug(ioc,
+			event_data);
+
+	if (event_data->ReasonCode !=
+	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+	    event_data->ReasonCode !=
+	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+		return;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	wwid = le64_to_cpu(event_data->WWID);
+	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+	if (!pcie_device || !pcie_device->starget)
+		goto out;
+
+	target_priv_data = pcie_device->starget->hostdata;
+	if (!target_priv_data)
+		goto out;
+
+	if (event_data->ReasonCode ==
+	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
+		target_priv_data->tm_busy = 1;
+	else
+		target_priv_data->tm_busy = 0;
+out:
+	if (pcie_device)
+		pcie_device_put(pcie_device);
+
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */ +static void +_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + + ioc_info(ioc, "enclosure status change: (%s)\n" + "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n", + reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (u64)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +/** + * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2ConfigReply_t mpi_reply; + struct _enclosure_node *enclosure_dev = NULL; + Mpi2EventDataSasEnclDevStatusChange_t *event_data = + (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data; + int rc; + u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle); + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_enclosure_dev_status_change_event_debug(ioc, + (Mpi2EventDataSasEnclDevStatusChange_t *) + fw_event->event_data); + if (ioc->shost_recovery) + return; + + if (enclosure_handle) + enclosure_dev = + mpt3sas_scsih_enclosure_find_by_handle(ioc, + enclosure_handle); + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + if (!enclosure_dev) { + enclosure_dev = + kzalloc(sizeof(struct _enclosure_node), + GFP_KERNEL); + if (!enclosure_dev) { + ioc_info(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_dev->pg0, + MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + enclosure_handle); + + if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + + list_add_tail(&enclosure_dev->list, + &ioc->enclosure_list); + } + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + if (enclosure_dev) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } + break; + default: + break; + } +} + +/** + * _scsih_sas_broadcast_primitive_event - handle broadcast events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
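+ *
+ * In outline: with tm_cmds.mutex held, every active smid is checked with
+ * a QUERY_TASK; I/Os the IOC still owns (TM succeeded / IO queued on IOC)
+ * are left alone, anything else is aborted via ABORT_TASK (up to 60
+ * attempts per command), and the whole pass restarts (up to 5 times) when
+ * a query fails or another broadcast AEN arrives in the meantime.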
+ */ +static void +_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + struct scsiio_tracker *st; + u16 smid, handle; + u32 lun; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi2EventDataSasBroadcastPrimitive_t *event_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + + mutex_lock(&ioc->tm_cmds.mutex); + ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n", + __func__, event_data->PhyNum, event_data->PortWidth); + + _scsih_block_io_all_device(ioc); + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; + broadcast_aen_retry: + + /* sanity checks for retrying this loop */ + if (max_retries++ == 5) { + dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, + ioc_info(ioc, "%s: %d retry\n", + __func__, max_retries - 1)); + + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = scsi_cmd_priv(scmd); + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + /* skip hidden raid components */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + /* skip volumes */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_VOLUME) + continue; + /* skip PCIe devices */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) + continue; + + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + + if (ioc->shost_recovery) + goto out; + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, + st->msix_io, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: FAILED when sending " + "QUERY_TASK: scmd(%p)\n", scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + /* see if IO is still owned by IOC and target */ + if (mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; + tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, + ioc_info(ioc, "%s: ABORT_TASK: giving up\n", + __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + if (ioc->shost_recovery) + goto out_no_lock; + + r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, st->msix_io, 30, 0); + if (r == FAILED || st->cb_idx != 0xFF) { + sdev_printk(KERN_WARNING, sdev, + 
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " + "scmd(%p)\n", scmd); + goto tm_retry; + } + + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" + " scmd(%p)\n", + task_abort_retries - 1, scmd); + + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, + ioc_info(ioc, + "%s: loop back due to pending AEN\n", + __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } + + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + out_no_lock: + + dewtprintk(ioc, + ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n", + __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + _scsih_ublock_io_all_device(ioc); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +/** + * _scsih_sas_discovery_event - handle discovery events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + ioc_info(ioc, "discovery event: (%s)", + event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_cont("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_cont("\n"); + } + + if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + /* Wait for the reset to complete */ + while (ioc->shost_recovery) + ssleep(1); + } + _scsih_sas_host_add(ioc); + } +} + +/** + * _scsih_sas_device_discovery_error_event - display SAS device discovery error + * events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi25EventDataSasDeviceDiscoveryError_t *event_data = + (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data; + + switch (event_data->ReasonCode) { + case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED: + ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n", + le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: + ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n", + le16_to_cpu(event_data->DevHandle), + (u64)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + default: + break; + } +} + +/** + * _scsih_pcie_enumeration_event - handle enumeration events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi26EventDataPCIeEnumeration_t *event_data = + (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data; + + if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)) + return; + + ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x", + (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? 
+ "started" : "completed", + event_data->Flags); + if (event_data->EnumerationStatus) + pr_cont("enumeration_status(0x%08x)", + le32_to_cpu(event_data->EnumerationStatus)); + pr_cont("\n"); +} + +/** + * _scsih_ir_fastpath - turn on fastpath for IR physdisk + * @ioc: per adapter object + * @handle: device handle for physical disk + * @phys_disk_num: physical disk number + * + * Return: 0 for success, else failure. + */ +static int +_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + u8 issue_reset = 0; + int rc = 0; + u16 ioc_status; + u32 log_info; + + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + return rc; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; + mpi_request->PhysDiskNum = phys_disk_num; + + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", + handle, phys_disk_num)); + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); + rc = -EFAULT; + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->scsih_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", + ioc_status, log_info)); + rc = -EFAULT; + } else + dewtprintk(ioc, + ioc_info(ioc, "IR RAID_ACTION: completed successfully\n")); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return rc; +} + +/** + * _scsih_reprobe_lun - reprobing lun + * @sdev: scsi device struct + * @no_uld_attach: sdev->no_uld_attach flag setting + * + **/ +static void +_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) +{ + sdev->no_uld_attach = no_uld_attach ? 1 : 0; + sdev_printk(KERN_INFO, sdev, "%s raid component\n", + sdev->no_uld_attach ? "hiding" : "exposing"); + WARN_ON(scsi_device_reprobe(sdev)); +} + +/** + * _scsih_sas_volume_add - add new volume + * @ioc: per adapter object + * @element: IR config element data + * Context: user. 
+ */ +static void +_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + return; + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + _scsih_determine_boot_device(ioc, raid_device, 1); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_sas_volume_delete - delete volume + * @ioc: per adapter object + * @handle: volume device handle + * Context: user. + */ +static void +_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + unsigned long flags; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + raid_device->handle, (u64)raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +/** + * _scsih_sas_pd_expose - expose pd component to /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + */ +static void +_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + struct scsi_target *starget = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device->volume_handle = 0; + sas_device->volume_wwid = 0; + clear_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags &= + ~MPT_TARGET_FLAGS_RAID_COMPONENT; + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + + /* exposing raid component */ + if (starget) + starget_for_each_device(starget, NULL, _scsih_reprobe_lun); + + sas_device_put(sas_device); +} + +/** + * _scsih_sas_pd_hide - hide pd component from /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. 
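+ *
+ * Hiding a physical disk means: record the owning volume handle/wwid,
+ * set MPT_TARGET_FLAGS_RAID_COMPONENT plus the pd_handles bit, enable IR
+ * fastpath for the disk, and reprobe every LUN with no_uld_attach set so
+ * the upper-level drivers detach:
+ *
+ *	starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);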
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2EventIrConfigElement_t *element)
+{
+	struct _sas_device *sas_device;
+	struct scsi_target *starget = NULL;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	unsigned long flags;
+	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+	u16 volume_handle = 0;
+	u64 volume_wwid = 0;
+
+	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+	if (volume_handle)
+		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+		    &volume_wwid);
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		set_bit(handle, ioc->pd_handles);
+		if (sas_device->starget && sas_device->starget->hostdata) {
+			starget = sas_device->starget;
+			sas_target_priv_data = starget->hostdata;
+			sas_target_priv_data->flags |=
+			    MPT_TARGET_FLAGS_RAID_COMPONENT;
+			sas_device->volume_handle = volume_handle;
+			sas_device->volume_wwid = volume_wwid;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (!sas_device)
+		return;
+
+	/* hiding raid component */
+	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+
+	if (starget)
+		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+
+	sas_device_put(sas_device);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2EventIrConfigElement_t *element)
+{
+	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+	_scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ */
+static void
+_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2EventIrConfigElement_t *element)
+{
+	struct _sas_device *sas_device;
+	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	u32 ioc_status;
+	u64 sas_address;
+	u16 parent_handle;
+
+	set_bit(handle, ioc->pd_handles);
+
+	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+		sas_device_put(sas_device);
+		return;
+	}
+
+	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		return;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
+		return;
+	}
+
+	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+		mpt3sas_transport_update_links(ioc, sas_address, handle,
+		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+		    mpt3sas_get_port_by_id(ioc,
+		    sas_device_pg0.PhysicalPort, 0));
+
+	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+	_scsih_add_device(ioc, handle, 0, 1);
+}
+
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
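+ *
+ * Example output (illustrative values only):
+ *
+ *	mpt3sas_cm0: raid config change: (native), elements(1)
+ *	(volume:add), vol handle(0x0113), pd handle(0x0000), pd num(0x00)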
+ */ +static void +_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + + ioc_info(ioc, "raid config change: (%s), elements(%d)\n", + le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? + "foreign" : "native", + event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info("\t(%s:%s), vol handle(0x%04x), " \ + "pd handle(0x%04x), pd num(0x%02x)\n", element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +/** + * _scsih_sas_ir_config_change_event - handle ir configuration change events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u8 foreign_config; + Mpi2EventDataIrConfigChangeList_t *event_data = + (Mpi2EventDataIrConfigChangeList_t *) + fw_event->event_data; + + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && + (!ioc->hide_ir_msg)) + _scsih_sas_ir_config_change_event_debug(ioc, event_data); + + foreign_config = (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + if (ioc->shost_recovery && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) + _scsih_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + + for (i = 0; i < event_data->NumElements; i++, element++) { + + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + _scsih_sas_volume_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + _scsih_sas_volume_delete(ioc, + le16_to_cpu(element->VolDevHandle)); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_hide(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_expose(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_delete(ioc, element); + break; + } + } +} + +/** + * _scsih_sas_ir_volume_event - IR volume event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct _raid_device *raid_device; + u16 handle; + u32 state; + int rc; + Mpi2EventDataIrVolume_t *event_data = + (Mpi2EventDataIrVolume_t *) fw_event->event_data; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->hide_ir_msg) + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case MPI2_RAID_VOL_STATE_MISSING: + case MPI2_RAID_VOL_STATE_FAILED: + _scsih_sas_volume_delete(ioc, handle); + break; + + case MPI2_RAID_VOL_STATE_ONLINE: + case MPI2_RAID_VOL_STATE_DEGRADED: + case MPI2_RAID_VOL_STATE_OPTIMAL: + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + break; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + break; + } + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + break; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + break; + + case MPI2_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +/** + * _scsih_sas_ir_physical_disk_event - PD event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
+ */ +static void +_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct _sas_device *sas_device; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + Mpi2EventDataIrPhysicalDisk_t *event_data = + (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + + if (!ioc->hide_ir_msg) + dewtprintk(ioc, + ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + + switch (state) { + case MPI2_RAID_PD_STATE_ONLINE: + case MPI2_RAID_PD_STATE_DEGRADED: + case MPI2_RAID_PD_STATE_REBUILDING: + case MPI2_RAID_PD_STATE_OPTIMAL: + case MPI2_RAID_PD_STATE_HOT_SPARE: + + if (!ioc->is_warpdrive) + set_bit(handle, ioc->pd_handles); + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, 0)); + + _scsih_add_device(ioc, handle, 0, 1); + + break; + + case MPI2_RAID_PD_STATE_OFFLINE: + case MPI2_RAID_PD_STATE_NOT_CONFIGURED: + case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +/** + * _scsih_sas_ir_operation_status_event_debug - debug for IR op event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + */ +static void +_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrOperationStatus_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->RAIDOperation) { + case MPI2_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + + if (!reason_str) + return; + + ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", + reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +/** + * _scsih_sas_ir_operation_status_event - handle RAID operation events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
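+ *
+ * Only MPI2_EVENT_IR_RAIDOP_RESYNC is acted on here: the matching
+ * raid_device's percent_complete is refreshed under raid_device_lock so
+ * the raid transport class can report resync progress.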
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+	struct fw_event_work *fw_event)
+{
+	Mpi2EventDataIrOperationStatus_t *event_data =
+	    (Mpi2EventDataIrOperationStatus_t *)
+	    fw_event->event_data;
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	u16 handle;
+
+	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
+	    (!ioc->hide_ir_msg))
+		_scsih_sas_ir_operation_status_event_debug(ioc,
+		    event_data);
+
+	/* code added for raid transport support */
+	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
+		handle = le16_to_cpu(event_data->VolDevHandle);
+		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
+		if (raid_device)
+			raid_device->percent_complete =
+			    event_data->PercentComplete;
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+	}
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (sas_device_priv_data && sas_device_priv_data->sas_target)
+			sas_device_priv_data->sas_target->deleted = 1;
+	}
+}
+
+/**
+ * _scsih_update_device_qdepth - Update QD during Reset.
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	struct _sas_device *sas_device;
+	struct scsi_device *sdev;
+	u16 qdepth;
+
+	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+			sas_target_priv_data = sas_device_priv_data->sas_target;
+			sas_device = sas_device_priv_data->sas_target->sas_dev;
+			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+				qdepth = ioc->max_nvme_qd;
+			else if (sas_device &&
+			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+				qdepth = (sas_device->port_type > 1) ?
+				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
+			else if (sas_device &&
+			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+				qdepth = ioc->max_sata_qd;
+			else
+				continue;
+			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+		}
+	}
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_device as responding
+ * @ioc: per adapter object
+ * @sas_device_pg0: SAS Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
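+ *
+ * A device only counts as responding when SASAddress, Slot and hba_port
+ * all match a cached sas_device; enclosure data is then refreshed and,
+ * when the firmware handed out a new DevHandle, the cached handle (and
+ * the starget private data) is updated in place.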
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2SasDevicePage0_t *sas_device_pg0)
+{
+	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+	struct scsi_target *starget;
+	struct _sas_device *sas_device = NULL;
+	struct _enclosure_node *enclosure_dev = NULL;
+	unsigned long flags;
+	struct hba_port *port = mpt3sas_get_port_by_id(
+	    ioc, sas_device_pg0->PhysicalPort, 0);
+
+	if (sas_device_pg0->EnclosureHandle) {
+		enclosure_dev =
+			mpt3sas_scsih_enclosure_find_by_handle(ioc,
+			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
+		if (enclosure_dev == NULL)
+			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+				 sas_device_pg0->EnclosureHandle);
+	}
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+		if (sas_device->sas_address != le64_to_cpu(
+		    sas_device_pg0->SASAddress))
+			continue;
+		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
+			continue;
+		if (sas_device->port != port)
+			continue;
+		sas_device->responding = 1;
+		starget = sas_device->starget;
+		if (starget && starget->hostdata) {
+			sas_target_priv_data = starget->hostdata;
+			sas_target_priv_data->tm_busy = 0;
+			sas_target_priv_data->deleted = 0;
+		} else
+			sas_target_priv_data = NULL;
+		if (starget) {
+			starget_printk(KERN_INFO, starget,
+			    "handle(0x%04x), sas_addr(0x%016llx)\n",
+			    le16_to_cpu(sas_device_pg0->DevHandle),
+			    (unsigned long long)
+			    sas_device->sas_address);
+
+			if (sas_device->enclosure_handle != 0)
+				starget_printk(KERN_INFO, starget,
+				    "enclosure logical id(0x%016llx), slot(%d)\n",
+				    (unsigned long long)
+				    sas_device->enclosure_logical_id,
+				    sas_device->slot);
+		}
+		if (le16_to_cpu(sas_device_pg0->Flags) &
+		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+			sas_device->enclosure_level =
+			    sas_device_pg0->EnclosureLevel;
+			memcpy(&sas_device->connector_name[0],
+			    &sas_device_pg0->ConnectorName[0], 4);
+		} else {
+			sas_device->enclosure_level = 0;
+			sas_device->connector_name[0] = '\0';
+		}
+
+		sas_device->enclosure_handle =
+		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
+		sas_device->is_chassis_slot_valid = 0;
+		if (enclosure_dev) {
+			sas_device->enclosure_logical_id = le64_to_cpu(
+			    enclosure_dev->pg0.EnclosureLogicalID);
+			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+				sas_device->is_chassis_slot_valid = 1;
+				sas_device->chassis_slot =
+				    enclosure_dev->pg0.ChassisSlot;
+			}
+		}
+
+		if (sas_device->handle == le16_to_cpu(
+		    sas_device_pg0->DevHandle))
+			goto out;
+		pr_info("\thandle changed from(0x%04x)!!!\n",
+		    sas_device->handle);
+		sas_device->handle = le16_to_cpu(
+		    sas_device_pg0->DevHandle);
+		if (sas_target_priv_data)
+			sas_target_priv_data->handle =
+			    le16_to_cpu(sas_device_pg0->DevHandle);
+		goto out;
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_create_enclosure_list_after_reset - free the existing enclosure
+ *	list and re-create it by scanning all Enclosure Page(0)s
+ * @ioc: per adapter object
+ */
+static void
+_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct _enclosure_node *enclosure_dev;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 enclosure_handle;
+	int rc;
+
+	/* Free existing enclosure list */
+	mpt3sas_free_enclosure_list(ioc);
+
+	/* Reconstruct the enclosure list after reset */
+	enclosure_handle = 0xFFFF;
+	do {
+		enclosure_dev =
+			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
+		if (!enclosure_dev) {
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
+			return;
+		}
+		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+		    &enclosure_dev->pg0,
+		    MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
+		    enclosure_handle);
+
+		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK)) {
+			kfree(enclosure_dev);
+			return;
+		}
+		list_add_tail(&enclosure_dev->list,
+		    &ioc->enclosure_list);
+		enclosure_handle =
+		    le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
+	} while (1);
+}
+
+/**
+ * _scsih_search_responding_sas_devices - search for responding sas devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 handle;
+	u32 device_info;
+
+	ioc_info(ioc, "search for end-devices: start\n");
+
+	if (list_empty(&ioc->sas_device_list))
+		goto out;
+
+	handle = 0xFFFF;
+	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+	    handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+			break;
+		handle = le16_to_cpu(sas_device_pg0.DevHandle);
+		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+		if (!(_scsih_is_end_device(device_info)))
+			continue;
+		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
+	}
+
+ out:
+	ioc_info(ioc, "search for end-devices: complete\n");
+}
+
+/**
+ * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
+ * @ioc: per adapter object
+ * @pcie_device_pg0: PCIe Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
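+ *
+ * Matching here is by WWID plus Slot; on a match tm_busy and deleted are
+ * cleared and, as with SAS devices, a changed DevHandle is refreshed in
+ * the cached pcie_device and starget private data.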
+ */
+static void
+_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
+{
+	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+	struct scsi_target *starget;
+	struct _pcie_device *pcie_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
+		    && (pcie_device->slot == le16_to_cpu(
+		    pcie_device_pg0->Slot))) {
+			pcie_device->access_status =
+			    pcie_device_pg0->AccessStatus;
+			pcie_device->responding = 1;
+			starget = pcie_device->starget;
+			if (starget && starget->hostdata) {
+				sas_target_priv_data = starget->hostdata;
+				sas_target_priv_data->tm_busy = 0;
+				sas_target_priv_data->deleted = 0;
+			} else
+				sas_target_priv_data = NULL;
+			if (starget) {
+				starget_printk(KERN_INFO, starget,
+				    "handle(0x%04x), wwid(0x%016llx) ",
+				    pcie_device->handle,
+				    (unsigned long long)pcie_device->wwid);
+				if (pcie_device->enclosure_handle != 0)
+					starget_printk(KERN_INFO, starget,
+					    "enclosure logical id(0x%016llx), "
+					    "slot(%d)\n",
+					    (unsigned long long)
+					    pcie_device->enclosure_logical_id,
+					    pcie_device->slot);
+			}
+
+			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
+			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
+			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
+				pcie_device->enclosure_level =
+				    pcie_device_pg0->EnclosureLevel;
+				memcpy(&pcie_device->connector_name[0],
+				    &pcie_device_pg0->ConnectorName[0], 4);
+			} else {
+				pcie_device->enclosure_level = 0;
+				pcie_device->connector_name[0] = '\0';
+			}
+
+			if (pcie_device->handle == le16_to_cpu(
+			    pcie_device_pg0->DevHandle))
+				goto out;
+			pr_info("\thandle changed from(0x%04x)!!!\n",
+			    pcie_device->handle);
+			pcie_device->handle = le16_to_cpu(
+			    pcie_device_pg0->DevHandle);
+			if (sas_target_priv_data)
+				sas_target_priv_data->handle =
+				    le16_to_cpu(pcie_device_pg0->DevHandle);
+			goto out;
+		}
+	}
+
+ out:
+	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_pcie_devices - search for responding pcie devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ */
+static void
+_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi26PCIeDevicePage0_t pcie_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 handle;
+	u32 device_info;
+
+	ioc_info(ioc, "search for PCIe end-devices: start\n");
+
+	if (list_empty(&ioc->pcie_device_list))
+		goto out;
+
+	handle = 0xFFFF;
+	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+	    handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 __func__, ioc_status,
+				 le32_to_cpu(mpi_reply.IOCLogInfo));
+			break;
+		}
+		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
+			continue;
+		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
+	}
+out:
+	ioc_info(ioc, "search for PCIe end-devices: complete\n");
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
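+ *
+ * Matching is by volume WWID; on a handle change the cached handle is
+ * updated and the warpdrive direct-I/O properties are re-initialized,
+ * since PD handles may change across the reset.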
+ * Used in _scsih_remove_unresponding_devices.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+	u16 handle)
+{
+	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+	struct scsi_target *starget;
+	struct _raid_device *raid_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+		if (raid_device->wwid == wwid && raid_device->starget) {
+			starget = raid_device->starget;
+			if (starget && starget->hostdata) {
+				sas_target_priv_data = starget->hostdata;
+				sas_target_priv_data->deleted = 0;
+			} else
+				sas_target_priv_data = NULL;
+			raid_device->responding = 1;
+			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+			starget_printk(KERN_INFO, raid_device->starget,
+			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
+			    (unsigned long long)raid_device->wwid);
+
+			/*
+			 * WARPDRIVE: The handles of the PDs might have changed
+			 * across the host reset so re-initialize the
+			 * required data for Direct IO
+			 */
+			mpt3sas_init_warpdrive_properties(ioc, raid_device);
+			spin_lock_irqsave(&ioc->raid_device_lock, flags);
+			if (raid_device->handle == handle) {
+				spin_unlock_irqrestore(&ioc->raid_device_lock,
+				    flags);
+				return;
+			}
+			pr_info("\thandle changed from(0x%04x)!!!\n",
+			    raid_device->handle);
+			raid_device->handle = handle;
+			if (sas_target_priv_data)
+				sas_target_priv_data->handle = handle;
+			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices - search for responding raid volumes
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2RaidVolPage1_t volume_pg1;
+	Mpi2RaidVolPage0_t volume_pg0;
+	Mpi2RaidPhysDiskPage0_t pd_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u16 handle;
+	u8 phys_disk_num;
+
+	if (!ioc->ir_firmware)
+		return;
+
+	ioc_info(ioc, "search for raid volumes: start\n");
+
+	if (list_empty(&ioc->raid_device_list))
+		goto out;
+
+	handle = 0xFFFF;
+	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+			break;
+		handle = le16_to_cpu(volume_pg1.DevHandle);
+
+		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+		    sizeof(Mpi2RaidVolPage0_t)))
+			continue;
+
+		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+			_scsih_mark_responding_raid_device(ioc,
+			    le64_to_cpu(volume_pg1.WWID), handle);
+	}
+
+	/* refresh the pd_handles */
+	if (!ioc->is_warpdrive) {
+		phys_disk_num = 0xFF;
+		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+		    phys_disk_num))) {
+			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+			    MPI2_IOCSTATUS_MASK;
+			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+				break;
+			phys_disk_num = pd_pg0.PhysDiskNum;
+			handle = le16_to_cpu(pd_pg0.DevHandle);
+			set_bit(handle, ioc->pd_handles);
+		}
+	}
+ out:
+	ioc_info(ioc, "search for responding raid volumes: complete\n");
+}
+
+/**
+ * _scsih_mark_responding_expander - mark an expander as responding
+ * @ioc: per adapter object
+ * @expander_pg0: SAS Expander Config Page0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ExpanderPage0_t *expander_pg0)
+{
+	struct _sas_node *sas_expander = NULL;
+	unsigned long flags;
+	int i;
+	struct _enclosure_node *enclosure_dev = NULL;
+	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
+	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
+	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+	struct hba_port *port = mpt3sas_get_port_by_id(
+	    ioc, expander_pg0->PhysicalPort, 0);
+
+	if (enclosure_handle)
+		enclosure_dev =
+			mpt3sas_scsih_enclosure_find_by_handle(ioc,
+			    enclosure_handle);
+
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->sas_address != sas_address)
+			continue;
+		if (sas_expander->port != port)
+			continue;
+		sas_expander->responding = 1;
+
+		if (enclosure_dev) {
+			sas_expander->enclosure_logical_id =
+			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+			sas_expander->enclosure_handle =
+			    le16_to_cpu(expander_pg0->EnclosureHandle);
+		}
+
+		if (sas_expander->handle == handle)
+			goto out;
+		pr_info("\texpander(0x%016llx): handle changed"
+		    " from(0x%04x) to (0x%04x)!!!\n",
+		    (unsigned long long)sas_expander->sas_address,
+		    sas_expander->handle, handle);
+		sas_expander->handle = handle;
+		for (i = 0 ; i < sas_expander->num_phys ; i++)
+			sas_expander->phy[i].handle = handle;
+		goto out;
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders - search for responding expanders
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2ExpanderPage0_t expander_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 ioc_status;
+	u64 sas_address;
+	u16 handle;
+	u8 port;
+
+	ioc_info(ioc, "search for expanders: start\n");
+
+	if (list_empty(&ioc->sas_expander_list))
+		goto out;
+
+	handle = 0xFFFF;
+	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+			break;
+
+		handle = le16_to_cpu(expander_pg0.DevHandle);
+		sas_address = le64_to_cpu(expander_pg0.SASAddress);
+		port = expander_pg0.PhysicalPort;
+		pr_info(
+		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+		    handle, (unsigned long long)sas_address,
+		    (ioc->multipath_on_hba ?
+		    port : MULTIPATH_DISABLED_PORT_ID));
+		_scsih_mark_responding_expander(ioc, &expander_pg0);
+	}
+
+ out:
+	ioc_info(ioc, "search for expanders: complete\n");
+}
+
+/**
+ * _scsih_remove_unresponding_devices - removing unresponding devices
+ * @ioc: per adapter object
+ */
+static void
+_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct _sas_device *sas_device, *sas_device_next;
+	struct _sas_node *sas_expander, *sas_expander_next;
+	struct _raid_device *raid_device, *raid_device_next;
+	struct _pcie_device *pcie_device, *pcie_device_next;
+	struct list_head tmp_list;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	ioc_info(ioc, "removing unresponding devices: start\n");
+
+	/* removing unresponding end devices */
+	ioc_info(ioc, "removing unresponding devices: end-devices\n");
+	/*
+	 * Iterate, pulling off devices marked as non-responding. We become the
+	 * owner for the reference the list had on any object we prune.
+	 */
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+	/*
+	 * Clean up the sas_device_init_list list as
+	 * driver goes for fresh scan as part of diag reset.
+	 */
+	list_for_each_entry_safe(sas_device, sas_device_next,
+	    &ioc->sas_device_init_list, list) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+
+	list_for_each_entry_safe(sas_device, sas_device_next,
+	    &ioc->sas_device_list, list) {
+		if (!sas_device->responding)
+			list_move_tail(&sas_device->list, &head);
+		else
+			sas_device->responding = 0;
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	/*
+	 * Now, uninitialize and remove the unresponding devices we pruned.
+	 */
+	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
+		_scsih_remove_device(ioc, sas_device);
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+
+	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
+	INIT_LIST_HEAD(&head);
+	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+	/*
+	 * Clean up the pcie_device_init_list list as
+	 * driver goes for fresh scan as part of diag reset.
+ */ + list_for_each_entry_safe(pcie_device, pcie_device_next, + &ioc->pcie_device_init_list, list) { + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + list_for_each_entry_safe(pcie_device, pcie_device_next, + &ioc->pcie_device_list, list) { + if (!pcie_device->responding) + list_move_tail(&pcie_device->list, &head); + else + pcie_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + /* removing unresponding volumes */ + if (ioc->ir_firmware) { + ioc_info(ioc, "removing unresponding devices: volumes\n"); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + _scsih_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + + /* removing unresponding expanders */ + ioc_info(ioc, "removing unresponding devices: expanders\n"); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, + list) { + _scsih_expander_node_remove(ioc, sas_expander); + } + + ioc_info(ioc, "removing unresponding devices: complete\n"); + + /* unblock devices */ + _scsih_ublock_io_all_device(ioc); +} + +static void +_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander, u16 handle) +{ + Mpi2ExpanderPage1_t expander_pg1; + Mpi2ConfigReply_t mpi_reply; + int i; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + mpt3sas_transport_update_links(ioc, sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), i, + expander_pg1.NegotiatedLinkRate >> 4, + sas_expander->port); + } +} + +/** + * _scsih_scan_for_devices_after_reset - scan for devices after host reset + * @ioc: per adapter object + */ +static void +_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi26PCIeDevicePage0_t pcie_device_pg0; + Mpi2RaidVolPage1_t *volume_pg1; + Mpi2RaidVolPage0_t *volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2EventIrConfigElement_t element; + Mpi2ConfigReply_t mpi_reply; + u8 phys_disk_num, port_id; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct _sas_device *sas_device; + struct _pcie_device *pcie_device; + struct _sas_node *expander_device; + static struct _raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); + if (!volume_pg0) + return; + + volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); + if (!volume_pg1) { + kfree(volume_pg0); + return; + } + + ioc_info(ioc, "scan devices: start\n"); + + _scsih_sas_host_refresh(ioc); + + ioc_info(ioc, "\tscan devices: expanders start\n"); + + /* expanders */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, 
&expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + port_id = expander_pg0.PhysicalPort; + expander_device = mpt3sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + _scsih_refresh_expander_links(ioc, expander_device, + handle); + else { + ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(expander_pg0.SASAddress)); + _scsih_expander_add(ioc, handle); + ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(expander_pg0.SASAddress)); + } + } + + ioc_info(ioc, "\tscan devices: expanders complete\n"); + + if (!ioc->ir_firmware) + goto skip_to_sas; + + ioc_info(ioc, "\tscan devices: phys disk start\n"); + + /* phys disk */ + phys_disk_num = 0xFF; + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, + &sas_address)) { + ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + port_id = sas_device_pg0.PhysicalPort; + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + /* This will retry adding the end device. 
+ * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 1)) { + ssleep(1); + } + ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + + ioc_info(ioc, "\tscan devices: phys disk complete\n"); + + ioc_info(ioc, "\tscan devices: volumes start\n"); + + /* volumes */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1->DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, + le64_to_cpu(volume_pg1->WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); + element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1->DevHandle; + ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", + volume_pg1->DevHandle); + _scsih_sas_volume_add(ioc, &element); + ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", + volume_pg1->DevHandle); + } + } + + ioc_info(ioc, "\tscan devices: volumes complete\n"); + + skip_to_sas: + + ioc_info(ioc, "\tscan devices: end devices start\n"); + + /* sas devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (!(_scsih_is_end_device( + le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + port_id = sas_device_pg0.PhysicalPort; + sas_device = mpt3sas_get_sdev_by_addr(ioc, + le64_to_cpu(sas_device_pg0.SASAddress), + mpt3sas_get_port_by_id(ioc, port_id, 0)); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, + mpt3sas_get_port_by_id(ioc, port_id, 0)); + retry_count = 0; + /* This 
will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 0)) { + ssleep(1); + } + ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", + handle, + (u64)le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + ioc_info(ioc, "\tscan devices: end devices complete\n"); + ioc_info(ioc, "\tscan devices: pcie end devices start\n"); + + /* pcie devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, + &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(pcie_device_pg0.DevHandle); + if (!(_scsih_is_nvme_pciescsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) + continue; + pcie_device = mpt3sas_get_pdev_by_wwid(ioc, + le64_to_cpu(pcie_device_pg0.WWID)); + if (pcie_device) { + pcie_device_put(pcie_device); + continue; + } + retry_count = 0; + parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); + _scsih_pcie_add_device(ioc, handle); + + ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", + handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); + } + + kfree(volume_pg0); + kfree(volume_pg1); + + ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); + ioc_info(ioc, "scan devices: complete\n"); +} + +/** + * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); +} + +/** + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding + * scsi & tm cmds. + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. + */ +void +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); + if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { + ioc->scsih_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { + ioc->tm_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); + memset(ioc->device_remove_in_progress, 0, + ioc->device_remove_in_progress_sz); + _scsih_fw_event_cleanup_queue(ioc); + _scsih_flush_running_cmds(ioc); +} + +/** + * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * + * The handler for doing any required cleanup or initialization. 
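+ * + * Illustrative ordering of the scsih reset callbacks around a host reset, + * inferred from the three handlers in this file (a sketch, not a verbatim + * trace of the mpt3sas_base reset path): + * + * mpt3sas_scsih_pre_reset_handler(ioc); + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + * (diag reset executes) + * mpt3sas_scsih_reset_done_handler(ioc);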
+ */ +void +mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) +{ + dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); + if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { + if (ioc->multipath_on_hba) { + _scsih_sas_port_refresh(ioc); + _scsih_update_vphys_after_reset(ioc); + } + _scsih_prep_device_scan(ioc); + _scsih_create_enclosure_list_after_reset(ioc); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_pcie_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); + } +} + +/** + * _mpt3sas_fw_work - delayed task for processing firmware events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + */ +static void +_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + ioc->current_event = fw_event; + _scsih_fw_event_del_from_list(ioc, fw_event); + + /* the queue is being flushed so ignore this event */ + if (ioc->remove_host || ioc->pci_error_recovery) { + fw_event_work_put(fw_event); + ioc->current_event = NULL; + return; + } + + switch (fw_event->event) { + case MPT3SAS_PROCESS_TRIGGER_DIAG: + mpt3sas_process_trigger_data(ioc, + (struct SL_WH_TRIGGERS_EVENT_DATA_T *) + fw_event->event_data); + break; + case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || + ioc->shost_recovery) { + /* + * If we're unloading or cancelling the work, bail. + * Otherwise, this can become an infinite loop. + */ + if (ioc->remove_host || ioc->fw_events_cleanup) + goto out; + ssleep(1); + } + _scsih_remove_unresponding_devices(ioc); + _scsih_del_dirty_vphy(ioc); + _scsih_del_dirty_port_entries(ioc); + if (ioc->is_gen35_ioc) + _scsih_update_device_qdepth(ioc); + _scsih_scan_for_devices_after_reset(ioc); + /* + * If diag reset has occurred during the driver load + * then driver has to complete the driver load operation + * by executing the following items: + *- Register the devices from sas_device_init_list to SML + *- clear is_driver_loading flag, + *- start the watchdog thread. + * In happy driver load path, above things are taken care of when + * driver executes scsih_scan_finished(). 
+ */ + if (ioc->is_driver_loading) + _scsih_complete_devices_scanning(ioc); + _scsih_set_nvme_max_shutdown_latency(ioc); + break; + case MPT3SAS_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + if (missing_delay[0] != -1 && missing_delay[1] != -1) + mpt3sas_base_update_missing_delay(ioc, missing_delay[0], + missing_delay[1]); + dewtprintk(ioc, + ioc_info(ioc, "port enable: complete from worker thread\n")); + break; + case MPT3SAS_TURN_ON_PFA_LED: + _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_sas_topology_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_device_status_change_event_debug(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + fw_event->event_data); + break; + case MPI2_EVENT_SAS_DISCOVERY: + _scsih_sas_discovery_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + _scsih_sas_device_discovery_error_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + _scsih_sas_broadcast_primitive_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + _scsih_sas_enclosure_dev_status_change_event(ioc, + fw_event); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_sas_ir_config_change_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_sas_ir_volume_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + _scsih_sas_ir_physical_disk_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + _scsih_sas_ir_operation_status_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + _scsih_pcie_device_status_change_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_ENUMERATION: + _scsih_pcie_enumeration_event(ioc, fw_event); + break; + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + _scsih_pcie_topology_change_event(ioc, fw_event); + ioc->current_event = NULL; + return; + } +out: + fw_event_work_put(fw_event); + ioc->current_event = NULL; +} + +/** + * _firmware_event_work + * @work: The fw_event_work object + * Context: user. + * + * wrappers for the work thread handling firmware events + */ + +static void +_firmware_event_work(struct work_struct *work) +{ + struct fw_event_work *fw_event = container_of(work, + struct fw_event_work, work); + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} + +/** + * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. + * The tasks are worked from _firmware_event_work in user context. + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
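+ * + * A minimal event callback of the same shape (an illustrative sketch with a + * hypothetical name, not driver code) queues no work and lets the caller + * free the frame: + * + * u8 example_event_cb(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) + * { + * return 1; (the mf is then freed by _base_interrupt) + * }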
+ */ +u8 +mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct fw_event_work *fw_event; + Mpi2EventNotificationReply_t *mpi_reply; + u16 event; + u16 sz; + Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; + + /* events turned off due to host reset */ + if (ioc->pci_error_recovery) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (unlikely(!mpi_reply)) { + ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + event = le16_to_cpu(mpi_reply->Event); + + if (event != MPI2_EVENT_LOG_ENTRY_ADDED) + mpt3sas_trigger_event(ioc, event, 0); + + switch (event) { + /* handle these */ + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + { + Mpi2EventDataSasBroadcastPrimitive_t *baen_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + mpi_reply->EventData; + + if (baen_data->Primitive != + MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 1; + + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } else + ioc->broadcast_aen_busy = 1; + break; + } + + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_check_topo_delete_events(ioc, + (Mpi2EventDataSasTopologyChangeList_t *) + mpi_reply->EventData); + /* + * No need to add the topology change list + * event to fw event work queue when + * diag reset is going on. Since during diag + * reset driver scan the devices by reading + * sas device page0's not by processing the + * events. + */ + if (ioc->shost_recovery) + return 1; + break; + case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: + _scsih_check_pcie_topo_remove_events(ioc, + (Mpi26EventDataPCIeTopologyChangeList_t *) + mpi_reply->EventData); + if (ioc->shost_recovery) + return 1; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_check_ir_config_unhide_events(ioc, + (Mpi2EventDataIrConfigChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_check_volume_delete_events(ioc, + (Mpi2EventDataIrVolume_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + { + Mpi2EventDataLogEntryAdded_t *log_entry; + u32 log_code; + + if (!ioc->is_warpdrive) + break; + + log_entry = (Mpi2EventDataLogEntryAdded_t *) + mpi_reply->EventData; + log_code = le32_to_cpu(*(__le32 *)log_entry->LogData); + + if (le16_to_cpu(log_entry->LogEntryQualifier) + != MPT2_WARPDRIVE_LOGENTRY) + break; + + switch (log_code) { + case MPT2_WARPDRIVE_LC_SSDT: + ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_SSDLW: + ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_SSDLF: + ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n"); + break; + case MPT2_WARPDRIVE_LC_BRMF: + ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. 
Check WarpDrive documentation for additional details.\n"); + break; + } + + break; + } + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + _scsih_sas_device_status_change_event(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + case MPI2_EVENT_SAS_DISCOVERY: + case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_PHYSICAL_DISK: + case MPI2_EVENT_PCIE_ENUMERATION: + case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: + break; + + case MPI2_EVENT_TEMP_THRESHOLD: + _scsih_temp_threshold_events(ioc, + (Mpi2EventDataTemperature_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + ActiveCableEventData = + (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; + switch (ActiveCableEventData->ReasonCode) { + case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: + ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n", + ActiveCableEventData->ReceptacleID); + pr_notice("cannot be powered and devices connected\n"); + pr_notice("to this active cable will not be seen\n"); + pr_notice("This active cable requires %d mW of power\n", + le32_to_cpu( + ActiveCableEventData->ActiveCablePowerRequirement)); + break; + + case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: + ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n", + ActiveCableEventData->ReceptacleID); + pr_notice( + "is not running at optimal speed(12 Gb/s rate)\n"); + break; + } + + break; + + default: /* ignore the rest */ + return 1; + } + + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event = alloc_fw_event_work(sz); + if (!fw_event) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return 1; + } + + memcpy(fw_event->event_data, mpi_reply->EventData, sz); + fw_event->ioc = ioc; + fw_event->VF_ID = mpi_reply->VF_ID; + fw_event->VP_ID = mpi_reply->VP_ID; + fw_event->event = event; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); + return 1; +} + +/** + * _scsih_expander_node_remove - removing expander device from list. + * @ioc: per adapter object + * @sas_expander: the sas_device object + * + * Removing object and freeing associated memory from the + * ioc->sas_expander_list. 
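+ * + * Note: the sibling-port walk below uses list_for_each_entry_safe() because + * entries are deleted while the list is being traversed; the _safe variant + * caches the next node first. A generic list.h sketch of the pattern, with + * hypothetical names: + * + * list_for_each_entry_safe(pos, tmp, &head, list) { + * list_del(&pos->list); + * kfree(pos); + * }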
+ */ +static void +_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port, *next; + unsigned long flags; + int port_id; + + /* remove sibling ports attached to this expander */ + list_for_each_entry_safe(mpt3sas_port, next, + &sas_expander->sas_port_list, port_list) { + if (ioc->shost_recovery) + return; + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + } + + port_id = sas_expander->port->port_id; + + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->sas_address_parent, sas_expander->port); + + ioc_info(ioc, + "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + sas_expander->handle, (unsigned long long) + sas_expander->sas_address, + port_id); + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_del(&sas_expander->list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + kfree(sas_expander->phy); + kfree(sas_expander); +} + +/** + * _scsih_nvme_shutdown - NVMe shutdown notification + * @ioc: per adapter object + * + * Sending IoUnitControl request with shutdown operation code to alert IOC that + * the host system is shutting down so that IOC can issue NVMe shutdown to + * NVMe drives attached to it. + */ +static void +_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + + /* are there any NVMe devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, + "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for max_shutdown_latency seconds */ + ioc_info(ioc, + "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", + ioc->max_shutdown_latency); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + ioc->max_shutdown_latency*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_info(ioc, "Io Unit Control shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + + +/** + * _scsih_ir_shutdown - IR shutdown notification + * @ioc: per adapter object + * + * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that + * the host system is shutting down. + */ +static void +_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + + /* is IR firmware build loaded ? */ + if (!ioc->ir_firmware) + return; + + /* are there any volumes ?
*/ + if (list_empty(&ioc->raid_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; + + if (!ioc->hide_ir_msg) + ioc_info(ioc, "IR shutdown (sending)\n"); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + if (!ioc->hide_ir_msg) + ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * _scsih_get_shost_and_ioc - get shost and ioc + * and verify whether they are NULL or not + * @pdev: PCI device struct + * @shost: address of scsi host pointer + * @ioc: address of HBA adapter pointer + * + * Return zero if *shost and *ioc are not NULL otherwise return error number. + */ +static int +_scsih_get_shost_and_ioc(struct pci_dev *pdev, + struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc) +{ + *shost = pci_get_drvdata(pdev); + if (*shost == NULL) { + dev_err(&pdev->dev, "pdev's driver data is null\n"); + return -ENXIO; + } + + *ioc = shost_priv(*shost); + if (*ioc == NULL) { + dev_err(&pdev->dev, "shost's private data is null\n"); + return -ENXIO; + } + + return 0; +} + +/** + * scsih_remove - detach and remove the scsi host + * @pdev: PCI device struct + * + * Routine called when unloading the driver. + */ +static void scsih_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct _sas_port *mpt3sas_port, *next_port; + struct _raid_device *raid_device, *next; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _pcie_device *pcie_device, *pcienext; + struct workqueue_struct *wq; + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + struct hba_port *port, *port_next; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc->remove_host = 1; + + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + } + + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + /* + * Copy back the unmodified ioc page1 so that on next driver load, + * current modified changes on ioc page1 won't take effect.
*/ + if (ioc->is_aero_ioc) + mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + /* release all the volumes */ + _scsih_ir_shutdown(ioc); + mpt3sas_destroy_debugfs(ioc); + sas_remove_host(shost); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", + raid_device->handle, (u64)raid_device->wwid); + _scsih_raid_device_remove(ioc, raid_device); + } + list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, + list) { + _scsih_pcie_device_remove_from_sml(ioc, pcie_device); + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + + /* free ports attached to the sas_host */ + list_for_each_entry_safe(mpt3sas_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + } + + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + list_del(&port->list); + kfree(port); + } + + /* free phys attached to the sas_host */ + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + + mpt3sas_base_detach(ioc); + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); +} + +/** + * scsih_shutdown - routine called during system shutdown + * @pdev: PCI device struct + */ +static void +scsih_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct workqueue_struct *wq; + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc->remove_host = 1; + + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + } + + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + /* + * Copy back the unmodified ioc page1 so that on next driver load, + * current modified changes on ioc page1 won't take effect. + */ + if (ioc->is_aero_ioc) + mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + + _scsih_ir_shutdown(ioc); + _scsih_nvme_shutdown(ioc); + mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_stop_watchdog(ioc); + ioc->shost_recovery = 1; + mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + mpt3sas_base_free_irq(ioc); + mpt3sas_base_disable_msix(ioc); +} + + +/** + * _scsih_probe_boot_devices - reports 1st device + * @ioc: per adapter object + * + * If specified in bios page 2, this routine reports the 1st + * device to scsi-ml or the sas transport for persistent boot device + * purposes.
Please refer to function _scsih_determine_boot_device() + */ +static void +_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u32 channel; + void *device; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + struct _pcie_device *pcie_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + int tid; + struct hba_port *port; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + channel = ioc->req_boot_device.channel; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + channel = ioc->req_alt_boot_device.channel; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + channel = ioc->current_boot_device.channel; + } + + if (!device) + return; + + if (channel == RAID_CHANNEL) { + raid_device = device; + /* + * If this boot vd is already registered with SML then + * no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (raid_device->starget) + return; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else if (channel == PCIE_CHANNEL) { + pcie_device = device; + /* + * If this boot NVMe device is already registered with SML then + * no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (pcie_device->starget) + return; + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + tid = pcie_device->id; + list_move_tail(&pcie_device->list, &ioc->pcie_device_list); + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0); + if (rc) + _scsih_pcie_device_remove(ioc, pcie_device); + } else { + sas_device = device; + /* + * If this boot sas/sata device is already registered with SML + * then no need to register it again as part of device scanning + * after diag reset during driver load operation. + */ + if (sas_device->starget) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + port = sas_device->port; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->hide_drives) + return; + + if (!port) + return; + + if (!mpt3sas_transport_port_add(ioc, handle, + sas_address_parent, port)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_address, + sas_address_parent, port); + _scsih_sas_device_remove(ioc, sas_device); + } + } + } +} + +/** + * _scsih_probe_raid - reporting raid volumes to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. 
+ */ +static void +_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) +{ + struct _raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } +} + +static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct _sas_device, list); + sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return sas_device; +} + +static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + + /* + * Since we dropped the lock during the call to port_add(), we need to + * be careful here that somebody else didn't move or delete this item + * while we were busy with other things. + * + * If it was on the list, we need a put() for the reference the list + * had. Either way, we need a get() for the destination list. + */ + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_probe_sas - reporting sas devices to sas transport + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device; + + if (ioc->hide_drives) + return; + + while ((sas_device = get_next_sas_device(ioc))) { + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, sas_device->port)) { + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } else if (!sas_device->starget) { + /* + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } + } + sas_device_make_active(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * get_next_pcie_device - Get the next pcie device + * @ioc: per adapter object + * + * Get the next pcie device from the pcie_device_init_list.
+ * + * Return: pcie device structure if the pcie_device_init_list is not empty, + * otherwise returns NULL + */ +static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + if (!list_empty(&ioc->pcie_device_init_list)) { + pcie_device = list_first_entry(&ioc->pcie_device_init_list, + struct _pcie_device, list); + pcie_device_get(pcie_device); + } + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + + return pcie_device; +} + +/** + * pcie_device_make_active - Add the pcie device to the pcie_device_list + * @ioc: per adapter object + * @pcie_device: pcie device object + * + * Add the pcie device which has been registered with the SCSI Transport Layer + * to the pcie_device_list + */ +static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, + struct _pcie_device *pcie_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + + if (!list_empty(&pcie_device->list)) { + list_del_init(&pcie_device->list); + pcie_device_put(pcie_device); + } + pcie_device_get(pcie_device); + list_add_tail(&pcie_device->list, &ioc->pcie_device_list); + + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** + * _scsih_probe_pcie - reporting PCIe devices to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + int rc; + + /* PCIe Device List */ + while ((pcie_device = get_next_pcie_device(ioc))) { + if (pcie_device->starget) { + pcie_device_put(pcie_device); + continue; + } + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + pcie_device_make_active(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } + rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, + pcie_device->id, 0); + if (rc) { + _scsih_pcie_device_remove(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } else if (!pcie_device->starget) { + /* + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + /* TODO-- Need to find out whether this condition will + * occur or not + */ + _scsih_pcie_device_remove(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } + } + pcie_device_make_active(ioc, pcie_device); + pcie_device_put(pcie_device); + } +} + +/** + * _scsih_probe_devices - probing for devices + * @ioc: per adapter object + * + * Called during initial loading of the driver.
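+ * + * For IR firmware, the IRVolumeMappingFlags field of IOC Page 8 decides the + * report order (this only summarises the body below, it is not an extra + * rule): with MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING the RAID volumes are + * registered before the bare SAS devices, otherwise after them.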
+ */ +static void +_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; /* return when IOC doesn't support initiator mode */ + + _scsih_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + _scsih_probe_raid(ioc); + _scsih_probe_sas(ioc); + } else { + _scsih_probe_sas(ioc); + _scsih_probe_raid(ioc); + } + } else { + _scsih_probe_sas(ioc); + _scsih_probe_pcie(ioc); + } +} + +/** + * scsih_scan_start - scsi lld callback for .scan_start + * @shost: SCSI host pointer + * + * The shost has the ability to discover targets on its own instead + * of scanning the entire bus. In our implementation, we will kick off + * firmware discovery. + */ +static void +scsih_scan_start(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) + mpt3sas_enable_diag_buffer(ioc, 1); + + if (disable_discovery > 0) + return; + + ioc->start_scan = 1; + rc = mpt3sas_port_enable(ioc); + + if (rc != 0) + ioc_info(ioc, "port enable: FAILED\n"); +} + +/** + * _scsih_complete_devices_scanning - add the devices to sml and + * complete ioc initialization. + * @ioc: per adapter object + * + * Return nothing. + */ +static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc) +{ + + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + _scsih_probe_devices(ioc); + } + + mpt3sas_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; +} + +/** + * scsih_scan_finished - scsi lld callback for .scan_finished + * @shost: SCSI host pointer + * @time: elapsed time of the scan in jiffies + * + * This function will be called periodically until it returns 1 with the + * scsi_host and the elapsed time of the scan in jiffies. In our + * implementation, we wait for firmware discovery to complete, then return 1.
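+ * + * Illustrative midlayer usage of the .scan_start/.scan_finished pair (the + * standard SCSI async scan loop, shown only for context): + * + * shost->hostt->scan_start(shost); + * while (!shost->hostt->scan_finished(shost, jiffies - start)) + * msleep(10); + * + * so this routine must eventually return 1 even on failure paths.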
+ */ +static int +scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 ioc_state; + int issue_hard_reset = 0; + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + return 1; + } + + if (time >= (300 * HZ)) { + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); + ioc->is_driver_loading = 0; + return 1; + } + + if (ioc->start_scan) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + issue_hard_reset = 1; + goto out; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_base_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + issue_hard_reset = 1; + goto out; + } + return 0; + } + + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) { + ioc_info(ioc, + "port enable: aborted due to diag reset\n"); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + if (ioc->start_scan_failed) { + ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + return 1; + } + + ioc_info(ioc, "port enable: SUCCESS\n"); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + _scsih_complete_devices_scanning(ioc); + +out: + if (issue_hard_reset) { + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET)) + ioc->is_driver_loading = 0; + } + return 1; +} + +/** + * scsih_map_queues - map reply queues with request queues + * @shost: SCSI host pointer + */ +static void scsih_map_queues(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; + + if (shost->nr_hw_queues == 1) + return; + + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + } +} + +/* shost template for SAS 2.0 HBA devices */ +static struct scsi_host_template mpt2sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT2SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + 
.eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT2SAS_SG_DEPTH, + .max_sectors = 32767, + .cmd_per_lun = 7, + .shost_groups = mpt3sas_host_groups, + .sdev_groups = mpt3sas_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct scsiio_tracker), +}; + +/* raid transport support for SAS 2.0 HBA devices */ +static struct raid_function_template mpt2sas_raid_functions = { + .cookie = &mpt2sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/* shost template for SAS 3.0 HBA devices */ +static struct scsi_host_template mpt3sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT3SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + .eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT3SAS_SG_DEPTH, + .max_sectors = 32767, + .max_segment_size = 0xffffffff, + .cmd_per_lun = 128, + .shost_groups = mpt3sas_host_groups, + .sdev_groups = mpt3sas_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct scsiio_tracker), + .map_queues = scsih_map_queues, + .mq_poll = mpt3sas_blk_mq_poll, +}; + +/* raid transport support for SAS 3.0 HBA devices */ +static struct raid_function_template mpt3sas_raid_functions = { + .cookie = &mpt3sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/** + * _scsih_determine_hba_mpi_version - determine in which MPI version class + * this device belongs to. 
+ * @pdev: PCI device struct + * + * return MPI2_VERSION for SAS 2.0 HBA devices, + * MPI25_VERSION for SAS 3.0 HBA devices, and + * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices + */ +static u16 +_scsih_determine_hba_mpi_version(struct pci_dev *pdev) +{ + + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SSS6200: + case MPI2_MFGPAGE_DEVID_SAS2004: + case MPI2_MFGPAGE_DEVID_SAS2008: + case MPI2_MFGPAGE_DEVID_SAS2108_1: + case MPI2_MFGPAGE_DEVID_SAS2108_2: + case MPI2_MFGPAGE_DEVID_SAS2108_3: + case MPI2_MFGPAGE_DEVID_SAS2116_1: + case MPI2_MFGPAGE_DEVID_SAS2116_2: + case MPI2_MFGPAGE_DEVID_SAS2208_1: + case MPI2_MFGPAGE_DEVID_SAS2208_2: + case MPI2_MFGPAGE_DEVID_SAS2208_3: + case MPI2_MFGPAGE_DEVID_SAS2208_4: + case MPI2_MFGPAGE_DEVID_SAS2208_5: + case MPI2_MFGPAGE_DEVID_SAS2208_6: + case MPI2_MFGPAGE_DEVID_SAS2308_1: + case MPI2_MFGPAGE_DEVID_SAS2308_2: + case MPI2_MFGPAGE_DEVID_SAS2308_3: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: + return MPI2_VERSION; + case MPI25_MFGPAGE_DEVID_SAS3004: + case MPI25_MFGPAGE_DEVID_SAS3008: + case MPI25_MFGPAGE_DEVID_SAS3108_1: + case MPI25_MFGPAGE_DEVID_SAS3108_2: + case MPI25_MFGPAGE_DEVID_SAS3108_5: + case MPI25_MFGPAGE_DEVID_SAS3108_6: + return MPI25_VERSION; + case MPI26_MFGPAGE_DEVID_SAS3216: + case MPI26_MFGPAGE_DEVID_SAS3224: + case MPI26_MFGPAGE_DEVID_SAS3316_1: + case MPI26_MFGPAGE_DEVID_SAS3316_2: + case MPI26_MFGPAGE_DEVID_SAS3316_3: + case MPI26_MFGPAGE_DEVID_SAS3316_4: + case MPI26_MFGPAGE_DEVID_SAS3324_1: + case MPI26_MFGPAGE_DEVID_SAS3324_2: + case MPI26_MFGPAGE_DEVID_SAS3324_3: + case MPI26_MFGPAGE_DEVID_SAS3324_4: + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: + case MPI26_MFGPAGE_DEVID_INVALID0_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + return MPI26_VERSION; + } + return 0; +} + +/** + * _scsih_probe - attach and add scsi host + * @pdev: PCI device struct + * @id: pci device id + * + * Return: 0 success, anything else error.
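+ * + * The hbas_to_enumerate module parameter is honoured first (a summary of the + * checks below): 1 probes only MPI2 (SAS 2.0) HBAs, 2 probes only + * MPI25/MPI26 (SAS 3.0 and newer) HBAs, and any other value probes all + * generations.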
+ */ +static int +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct MPT3SAS_ADAPTER *ioc; + struct Scsi_Host *shost = NULL; + int rv; + u16 hba_mpi_version; + int iopoll_q_count = 0; + + /* Determine in which MPI version class this pci device belongs */ + hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); + if (hba_mpi_version == 0) + return -ENODEV; + + /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) + return -ENODEV; + + /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION + || hba_mpi_version == MPI26_VERSION))) + return -ENODEV; + + switch (hba_mpi_version) { + case MPI2_VERSION: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + /* Use mpt2sas driver host template for SAS 2.0 HBA's */ + shost = scsi_host_alloc(&mpt2sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt2_ids++; + sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SSS6200: + ioc->is_warpdrive = 1; + ioc->hide_ir_msg = 1; + break; + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: + ioc->is_mcpu_endpoint = 1; + break; + default: + ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; + break; + } + + if (multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + + break; + case MPI25_VERSION: + case MPI26_VERSION: + /* Use mpt3sas driver host template for SAS 3.0 HBA's */ + shost = scsi_host_alloc(&mpt3sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt3_ids++; + sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); + switch (pdev->device) { + case MPI26_MFGPAGE_DEVID_SAS3508: + case MPI26_MFGPAGE_DEVID_SAS3508_1: + case MPI26_MFGPAGE_DEVID_SAS3408: + case MPI26_MFGPAGE_DEVID_SAS3516: + case MPI26_MFGPAGE_DEVID_SAS3516_1: + case MPI26_MFGPAGE_DEVID_SAS3416: + case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: + ioc->is_gen35_ioc = 1; + break; + case MPI26_MFGPAGE_DEVID_INVALID0_3816: + case MPI26_MFGPAGE_DEVID_INVALID0_3916: + dev_err(&pdev->dev, + "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", + pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + return 1; + case MPI26_MFGPAGE_DEVID_INVALID1_3816: + case MPI26_MFGPAGE_DEVID_INVALID1_3916: + dev_err(&pdev->dev, + "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", + pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + return 1; + case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: + case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: + dev_info(&pdev->dev, + "HBA is in Configurable Secure mode\n"); + fallthrough; + case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: + case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: + ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; + break; + default: + ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; + } + if 
((ioc->hba_mpi_version_belonged == MPI25_VERSION && + pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || + (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { + ioc->combined_reply_queue = 1; + if (ioc->is_gen35_ioc) + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; + else + ioc->combined_reply_index_count = + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; + } + + switch (ioc->is_gen35_ioc) { + case 0: + if (multipath_on_hba == -1 || multipath_on_hba == 0) + ioc->multipath_on_hba = 0; + else + ioc->multipath_on_hba = 1; + break; + case 1: + if (multipath_on_hba == -1 || multipath_on_hba > 0) + ioc->multipath_on_hba = 1; + else + ioc->multipath_on_hba = 0; + break; + default: + break; + } + + break; + default: + return -ENODEV; + } + + INIT_LIST_HEAD(&ioc->list); + spin_lock(&gioc_lock); + list_add_tail(&ioc->list, &mpt3sas_ioc_list); + spin_unlock(&gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* Host waits for minimum of six seconds */ + ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + /* + * Enable MEMORY MOVE support flag. + */ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; + /* Enable ADDITIONAL QUERY support flag. */ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; + + ioc->enable_sdev_max_qd = enable_sdev_max_qd; + + /* misc semaphores and spin locks */ + mutex_init(&ioc->reset_in_progress_mutex); + /* initializing pci_access_mutex lock */ + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->pcie_device_lock); + spin_lock_init(&ioc->diag_trigger_lock); + + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->enclosure_list); + INIT_LIST_HEAD(&ioc->pcie_device_list); + INIT_LIST_HEAD(&ioc->pcie_device_init_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); + INIT_LIST_HEAD(&ioc->port_table_list); + + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + /* init shost parameters */ + shost->max_cmd_len = 32; + shost->max_lun = max_lun; + shost->transportt = mpt3sas_transport_template; + shost->unique_id = ioc->id; + + if (ioc->is_mcpu_endpoint) { + /* mCPU MPI support 64K max IO */ + shost->max_sectors = 128; + ioc_info(ioc, "The max_sectors value is set to %d\n", + shost->max_sectors); + } else { + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + ioc_warn(ioc, 
"Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", + max_sectors); + } else if (max_sectors > 32767) { + shost->max_sectors = 32767; + ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + ioc_info(ioc, "The max_sectors value is set to %d\n", + shost->max_sectors); + } + } + } + /* register EEDP capabilities with SCSI layer */ + if (prot_mask >= 0) + scsi_host_set_prot(shost, (prot_mask & 0x07)); + else + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + /* event thread */ + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%d", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = alloc_ordered_workqueue( + ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + shost->host_tagset = 0; + + if (ioc->is_gen35_ioc && host_tagset_enable) + shost->host_tagset = 1; + + ioc->is_driver_loading = 1; + if ((mpt3sas_base_attach(ioc))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_attach_fail; + } + + if (ioc->is_warpdrive) { + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) + ioc->hide_drives = 0; + else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) + ioc->hide_drives = 1; + else { + if (mpt3sas_get_num_volumes(ioc)) + ioc->hide_drives = 1; + else + ioc->hide_drives = 0; + } + } else + ioc->hide_drives = 0; + + shost->nr_hw_queues = 1; + + if (shost->host_tagset) { + shost->nr_hw_queues = + ioc->reply_queue_count - ioc->high_iops_queues; + + iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + + shost->nr_maps = iopoll_q_count ? 3 : 1; + + dev_info(&ioc->pdev->dev, + "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", + shost->can_queue, shost->nr_hw_queues); + } + + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + mpt3sas_setup_debugfs(ioc); + return 0; +out_add_shost_fail: + mpt3sas_base_detach(ioc); + out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); + out_thread_fail: + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); + return rv; +} + +/** + * scsih_suspend - power management suspend main entry point + * @dev: Device struct + * + * Return: 0 success, anything else error. + */ +static int __maybe_unused +scsih_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + int rc; + + rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) + return rc; + + mpt3sas_base_stop_watchdog(ioc); + scsi_block_requests(shost); + _scsih_nvme_shutdown(ioc); + ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", + pdev, pci_name(pdev)); + + mpt3sas_base_free_resources(ioc); + return 0; +} + +/** + * scsih_resume - power management resume main entry point + * @dev: Device struct + * + * Return: 0 success, anything else error. 
+ */ +static int __maybe_unused +scsih_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + pci_power_t device_state = pdev->current_state; + int r; + + r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) + return r; + + ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + pdev, pci_name(pdev), device_state); + + ioc->pdev = pdev; + r = mpt3sas_base_map_resources(ioc); + if (r) + return r; + ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); + mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); + scsi_unblock_requests(shost); + mpt3sas_base_start_watchdog(ioc); + return 0; +} + +/** + * scsih_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or + * PCI_ERS_RESULT_DISCONNECT. + */ +static pci_ers_result_t +scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent error, prepare for device removal */ + ioc->pci_error_recovery = 1; + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_pause_mq_polling(ioc); + _scsih_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * scsih_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +static pci_ers_result_t +scsih_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + int rc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: slot reset callback!!\n"); + + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = mpt3sas_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + + ioc_warn(ioc, "hard reset: %s\n", + (rc == 0) ? "success" : "failed"); + + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * scsih_pci_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that it's + * OK to resume normal operation. Use completion to allow + * halted scsi ops to resume.
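+ *
+ * As a rough sketch, the AER recovery flow through these callbacks is:
+ *
+ *   scsih_pci_error_detected()  block I/O, stop watchdog, free resources
+ *   scsih_pci_slot_reset()      remap resources, hard reset the IOC
+ *   scsih_pci_resume()          restart watchdog, unblock I/O
+ *
+ * scsih_pci_mmio_enabled() is only visited when error_detected() returns
+ * PCI_ERS_RESULT_CAN_RECOVER (the pci_channel_io_normal case above).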
+ */ +static void +scsih_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return; + + ioc_info(ioc, "PCI error: resume callback!!\n"); + + mpt3sas_base_start_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +/** + * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +static pci_ers_result_t +scsih_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + + if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) + return PCI_ERS_RESULT_DISCONNECT; + + ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); + + /* TODO - dump whatever for debugging purposes */ + + /* This is called only if scsih_pci_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still + * works, no need to reset slot. + */ + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * scsih_ncq_prio_supp - Check for NCQ command priority support + * @sdev: scsi device struct + * + * This is called when a user indicates they would like to enable + * ncq command priorities. This works only on SATA devices. + */ +bool scsih_ncq_prio_supp(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supp = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (!vpd || vpd->len < 214) + goto out; + + ncq_prio_supp = (vpd->data[213] >> 4) & 1; +out: + rcu_read_unlock(); + + return ncq_prio_supp; +} +/* + * The pci device ids are defined in mpi/mpi2_cnfg.h. + */ +static const struct pci_device_id mpt3sas_pci_table[] = { + /* Spitfire ~ 2004 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, + PCI_ANY_ID, PCI_ANY_ID }, + /* Falcon ~ 2008 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Liberator ~ 2108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, + PCI_ANY_ID, PCI_ANY_ID }, + /* Meteor ~ 2116 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, + PCI_ANY_ID, PCI_ANY_ID }, + /* Thunderbolt ~ 2208 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, + PCI_ANY_ID, PCI_ANY_ID }, + /* Mustang ~ 2308 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, + PCI_ANY_ID, PCI_ANY_ID }, + /* SSS6200 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, + PCI_ANY_ID, PCI_ANY_ID }, + /* Fury ~ 3004 and 3008 */ + { MPI2_MFGPAGE_VENDORID_LSI,
MPI25_MFGPAGE_DEVID_SAS3004, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Invader ~ 3108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, + PCI_ANY_ID, PCI_ANY_ID }, + /* Cutlass ~ 3216 and 3224 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, + PCI_ANY_ID, PCI_ANY_ID }, + /* Intruder ~ 3316 and 3324 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4, + PCI_ANY_ID, PCI_ANY_ID }, + /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, + PCI_ANY_ID, PCI_ANY_ID }, + /* Mercator ~ 3616*/ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Aero SI 0x00E1 Configurable Secure + * 0x00E2 Hard Secure + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Atlas PCIe Switch Management Port */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, + PCI_ANY_ID, PCI_ANY_ID }, + + /* Sea SI 0x00E5 Configurable Secure + * 0x00E6 Hard Secure + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * ATTO Branded ExpressSAS H12xx GT + */ + { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + /* + * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered + */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816, + PCI_ANY_ID, PCI_ANY_ID }, + + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci,
mpt3sas_pci_table); + +static struct pci_error_handlers _mpt3sas_err_handler = { + .error_detected = scsih_pci_error_detected, + .mmio_enabled = scsih_pci_mmio_enabled, + .slot_reset = scsih_pci_slot_reset, + .resume = scsih_pci_resume, +}; + +static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume); + +static struct pci_driver mpt3sas_driver = { + .name = MPT3SAS_DRIVER_NAME, + .id_table = mpt3sas_pci_table, + .probe = _scsih_probe, + .remove = scsih_remove, + .shutdown = scsih_shutdown, + .err_handler = &_mpt3sas_err_handler, + .driver.pm = &scsih_pm_ops, +}; + +/** + * scsih_init - main entry point for this driver. + * + * Return: 0 success, anything else error. + */ +static int +scsih_init(void) +{ + mpt2_ids = 0; + mpt3_ids = 0; + + mpt3sas_base_initialize_callback_handler(); + + /* queuecommand callback handler */ + scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); + + /* task management callback handler */ + tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); + + /* base internal commands callback handler */ + base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); + port_enable_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_port_enable_done); + + /* transport internal commands callback handler */ + transport_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_transport_done); + + /* scsih internal commands callback handler */ + scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); + + /* configuration page API internal commands callback handler */ + config_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_config_done); + + /* ctl module callback handler */ + ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); + + tm_tr_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_tr_complete); + + tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_volume_tr_complete); + + tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_sas_control_complete); + + mpt3sas_init_debugfs(); + return 0; +} + +/** + * scsih_exit - exit point for this driver (when it is a module). + */ +static void +scsih_exit(void) +{ + + mpt3sas_base_release_callback_handler(scsi_io_cb_idx); + mpt3sas_base_release_callback_handler(tm_cb_idx); + mpt3sas_base_release_callback_handler(base_cb_idx); + mpt3sas_base_release_callback_handler(port_enable_cb_idx); + mpt3sas_base_release_callback_handler(transport_cb_idx); + mpt3sas_base_release_callback_handler(scsih_cb_idx); + mpt3sas_base_release_callback_handler(config_cb_idx); + mpt3sas_base_release_callback_handler(ctl_cb_idx); + + mpt3sas_base_release_callback_handler(tm_tr_cb_idx); + mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); + mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); + +/* raid transport support */ + if (hbas_to_enumerate != 1) + raid_class_release(mpt3sas_raid_template); + if (hbas_to_enumerate != 2) + raid_class_release(mpt2sas_raid_template); + sas_release_transport(mpt3sas_transport_template); + mpt3sas_exit_debugfs(); +} + +/** + * _mpt3sas_init - main entry point for this driver. + * + * Return: 0 success, anything else error.
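+ *
+ * Illustrative usage: hbas_to_enumerate is a module parameter, so which
+ * raid templates get attached below can be chosen at load time, e.g.:
+ *
+ *   modprobe mpt3sas hbas_to_enumerate=1   (SAS 2.0 HBAs only)
+ *   modprobe mpt3sas hbas_to_enumerate=2   (SAS 3.0 and above only)
+ *
+ * The default of 0 enumerates both generations.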
+ */ +static int __init +_mpt3sas_init(void) +{ + int error; + + pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, + MPT3SAS_DRIVER_VERSION); + + mpt3sas_transport_template = + sas_attach_transport(&mpt3sas_transport_functions); + if (!mpt3sas_transport_template) + return -ENODEV; + + /* No need to attach the mpt3sas raid functions template + * if the hbas_to_enumerate value is one. + */ + if (hbas_to_enumerate != 1) { + mpt3sas_raid_template = + raid_class_attach(&mpt3sas_raid_functions); + if (!mpt3sas_raid_template) { + sas_release_transport(mpt3sas_transport_template); + return -ENODEV; + } + } + + /* No need to attach the mpt2sas raid functions template + * if the hbas_to_enumerate value is two. + */ + if (hbas_to_enumerate != 2) { + mpt2sas_raid_template = + raid_class_attach(&mpt2sas_raid_functions); + if (!mpt2sas_raid_template) { + sas_release_transport(mpt3sas_transport_template); + return -ENODEV; + } + } + + error = scsih_init(); + if (error) { + scsih_exit(); + return error; + } + + mpt3sas_ctl_init(hbas_to_enumerate); + + error = pci_register_driver(&mpt3sas_driver); + if (error) { + mpt3sas_ctl_exit(hbas_to_enumerate); + scsih_exit(); + } + + return error; +} + +/** + * _mpt3sas_exit - exit point for this driver (when it is a module). + * + */ +static void __exit +_mpt3sas_exit(void) +{ + pr_info("mpt3sas version %s unloading\n", + MPT3SAS_DRIVER_VERSION); + + mpt3sas_ctl_exit(hbas_to_enumerate); + + pci_unregister_driver(&mpt3sas_driver); + + scsih_exit(); +} + +module_init(_mpt3sas_init); +module_exit(_mpt3sas_exit); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c new file mode 100644 index 000000000..e8a4750f6 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -0,0 +1,2200 @@ +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations.
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_sas.h> +#include <scsi/scsi_dbg.h> + +#include "mpt3sas_base.h" + +/** + * _transport_get_port_id_by_sas_phy - get the zone's port id that a phy belongs to + * @phy: sas_phy object + * + * Return: Port number. + */ +static inline u8 +_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct hba_port *port = phy->hostdata; + + if (port) + port_id = port->port_id; + + return port_id; +} + +/** + * _transport_sas_node_find_by_sas_address - sas node search + * @ioc: per adapter object + * @sas_address: sas address of expander or sas host + * @port: hba port entry + * Context: Calling function should acquire ioc->sas_node_lock. + * + * Search for either hba phys or expander device based on sas address, then + * returns the sas_node object. + */ +static struct _sas_node * +_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct hba_port *port) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address, port); +} + +/** + * _transport_get_port_id_by_rphy - Get Port number from rphy object + * @ioc: per adapter object + * @rphy: sas_rphy object + * + * Return: Port number. + */ +static u8 +_transport_get_port_id_by_rphy(struct MPT3SAS_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct _sas_node *sas_expander; + struct _sas_device *sas_device; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, + &ioc->sas_expander_list, list) { + if (sas_expander->rphy == rphy) { + port_id = sas_expander->port->port_id; + break; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + port_id = sas_device->port->port_id; + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return port_id; +} + +/** + * _transport_convert_phy_link_rate - convert link_rate into sas_linkrate form + * @link_rate: link rate returned from mpt firmware + * + * Convert link_rate from mpi fusion into sas_transport form.
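+ *
+ * For example, a phy that negotiated 12Gb/s reports
+ * MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B), which this helper maps to
+ * SAS_LINK_RATE_12_0_GBPS for the transport layer's negotiated_linkrate
+ * sysfs attribute.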
+ */ +static enum sas_linkrate +_transport_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case MPI2_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case MPI25_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + rc = SAS_PHY_RESET_IN_PROGRESS; + break; + + default: + case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +/** + * _transport_set_identify - set identify for phys and end devices + * @ioc: per adapter object + * @handle: device handle + * @identify: sas identify info + * + * Populates sas identify info. + * + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n", + handle, ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + /* sas_address */ + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* phy number of the parent device this device is linked to */ + identify->phy_identifier = sas_device_pg0.PhyNum; + + /* device_type */ + switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + + /* initiator_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + + /* target_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + 
identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + + return 0; +} + +/** + * mpt3sas_transport_done - internal transport layer callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated transport cmds. + * The callback index passed is `ioc->transport_cb_idx` + * + * Return: 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +/* report manufacture request structure */ +struct rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +/* report manufacture reply structure */ +struct rep_manu_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x01 */ + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +/** + * _transport_expander_report_manufacture - obtain SMP report_manufacture + * @ioc: per adapter object + * @sas_address: expander sas address + * @edev: the sas_expander_device object + * @port_id: Port ID number + * + * Fills in the sas_expander_device object when SMP port is created. + * + * Return: 0 for success, non-zero for failure. 
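+ *
+ * The request built below is a plain 4-byte SMP REPORT MANUFACTURER
+ * INFORMATION frame, laid out as in struct rep_manu_request:
+ *
+ *   0x40  smp_frame_type (SMP REQUEST)
+ *   0x01  function (REPORT MANUFACTURER INFORMATION)
+ *   0x00  reserved
+ *   0x00  request_length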
+ */ +static int +_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct sas_expander_device *edev, u8 port_id) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct rep_manu_reply *manufacture_reply; + struct rep_manu_request *manufacture_request; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + data_out_sz = sizeof(struct rep_manu_request); + data_in_sz = sizeof(struct rep_manu_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + &data_out_dma, GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + data_in_dma = data_out_dma + sizeof(struct rep_manu_request); + + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = port_id; + mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n", + (u64)sas_address)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n")); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct rep_manu_reply)) + goto out; + + manufacture_reply = data_out + sizeof(struct rep_manu_request); + strncpy(edev->vendor_id, manufacture_reply->vendor_id, + SAS_EXPANDER_VENDOR_ID_LEN); + strncpy(edev->product_id, manufacture_reply->product_id, + SAS_EXPANDER_PRODUCT_ID_LEN); + strncpy(edev->product_rev, manufacture_reply->product_rev, + 
SAS_EXPANDER_PRODUCT_REV_LEN); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strncpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + tmp = (u8 *)&manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + } + } else + dtransportprintk(ioc, + ioc_info(ioc, "report_manufacture - no reply\n")); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + + +/** + * _transport_delete_port - helper function to remove a port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + */ +static void +_transport_delete_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + struct hba_port *port = mpt3sas_port->hba_port; + enum sas_device_type device_type = + mpt3sas_port->remote_identify.device_type; + + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long) sas_address); + + ioc->logging_level |= MPT_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + sas_address, port); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, sas_address, port); + ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; +} + +/** + * _transport_delete_phy - helper function to remove a single phy from a port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + */ +static void +_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) sas_address, mpt3sas_phy->phy_id); + + list_del(&mpt3sas_phy->port_siblings); + mpt3sas_port->num_phys--; + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 0; +} + +/** + * _transport_add_phy - helper function to add a single phy to a port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + */ +static void +_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, + struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, mpt3sas_phy->phy_id); + + list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; +} + +/** + * mpt3sas_transport_add_phy_to_an_existing_port - add a new phy to an existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + * @sas_address: sas address of device/expander where the phy needs to be added + * @port: hba port entry + */ +void
+mpt3sas_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, + u64 sas_address, struct hba_port *port) +{ + struct _sas_port *mpt3sas_port; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 1) + return; + + if (!port) + return; + + list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != + sas_address) + continue; + if (mpt3sas_port->hba_port != port) + continue; + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch == mpt3sas_phy) + return; + } + _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); + return; + } + +} + +/** + * mpt3sas_transport_del_phy_from_an_existing_port - delete phy from existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + */ +void +mpt3sas_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy) +{ + struct _sas_port *mpt3sas_port, *next; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 0) + return; + + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch != mpt3sas_phy) + continue; + + /* + * Don't delete port during host reset, + * just delete phy. + */ + if (mpt3sas_port->num_phys == 1 && !ioc->shost_recovery) + _transport_delete_port(ioc, mpt3sas_port); + else + _transport_delete_phy(ioc, mpt3sas_port, + mpt3sas_phy); + return; + } + } +} + +/** + * _transport_sanity_check - sanity check when adding a new port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @sas_address: sas address of device being added + * @port: hba port entry + * + * If any of the node's phys are already claimed by an existing port for + * this sas_address/port pair, detach them from that port before the new + * port object is created. + */ +static void +_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, + u64 sas_address, struct hba_port *port) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address) + continue; + if (sas_node->phy[i].port != port) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + mpt3sas_transport_del_phy_from_an_existing_port(ioc, + sas_node, &sas_node->phy[i]); + } +} + +/** + * mpt3sas_transport_port_add - insert port to the list + * @ioc: per adapter object + * @handle: handle of attached device + * @sas_address: sas address of parent expander or sas host + * @hba_port: hba port entry + * Context: This function will acquire ioc->sas_node_lock. + * + * Add a new port object to the sas_node->sas_port_list. + * + * Return: mpt3sas_port.
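+ *
+ * The returned object is torn down again by mpt3sas_transport_port_remove();
+ * callers must not hold ioc->sas_node_lock when invoking either routine,
+ * since both acquire it internally.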
+ */ +struct _sas_port * +mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 sas_address, struct hba_port *hba_port) +{ + struct _sas_phy *mpt3sas_phy, *next; + struct _sas_port *mpt3sas_port; + unsigned long flags; + struct _sas_node *sas_node; + struct sas_rphy *rphy; + struct _sas_device *sas_device = NULL; + int i; + struct sas_port *port; + struct virtual_phy *vphy = NULL; + + if (!hba_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return NULL; + } + + mpt3sas_port = kzalloc(sizeof(struct _sas_port), + GFP_KERNEL); + if (!mpt3sas_port) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return NULL; + } + + INIT_LIST_HEAD(&mpt3sas_port->port_list); + INIT_LIST_HEAD(&mpt3sas_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, hba_port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!sas_node) { + ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n", + __func__, (u64)sas_address); + goto out_fail; + } + + if ((_transport_set_identify(ioc, handle, + &mpt3sas_port->remote_identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + mpt3sas_port->hba_port = hba_port; + _transport_sanity_check(ioc, sas_node, + mpt3sas_port->remote_identify.sas_address, hba_port); + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + mpt3sas_port->remote_identify.sas_address) + continue; + if (sas_node->phy[i].port != hba_port) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + + vphy = mpt3sas_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + } + } + + if (!mpt3sas_port->num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address, + mpt3sas_port->hba_port); + if (!sas_device) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } + + if (!sas_node->parent_dev) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + port = sas_port_alloc_num(sas_node->parent_dev); + if (!port || (sas_port_add(port))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_fail; + } + + list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list, + port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + sas_port_add_phy(port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; + mpt3sas_phy->port = hba_port; + } + + mpt3sas_port->port = port; + if 
(mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + sas_device->rphy = rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = + sas_device->sas_address; + else + vphy->sas_address = + sas_device->sas_address; + } + } else { + rphy = sas_expander_alloc(port, + mpt3sas_port->remote_identify.device_type); + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + mpt3sas_port->remote_identify.sas_address; + } + + if (!rphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_delete_port; + } + + rphy->identify = mpt3sas_port->remote_identify; + + if ((sas_rphy_add(rphy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; + goto out_delete_port; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + sas_device_put(sas_device); + } + + dev_info(&rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle, + (unsigned long long)mpt3sas_port->remote_identify.sas_address); + + mpt3sas_port->rphy = rphy; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* fill in report manufacture */ + if (mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || + mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) + _transport_expander_report_manufacture(ioc, + mpt3sas_port->remote_identify.sas_address, + rphy_to_expander_device(rphy), hba_port->port_id); + return mpt3sas_port; + +out_delete_port: + sas_port_delete(port); + +out_fail: + list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, + port_siblings) + list_del(&mpt3sas_phy->port_siblings); + kfree(mpt3sas_port); + return NULL; +} + +/** + * mpt3sas_transport_port_remove - remove port from the list + * @ioc: per adapter object + * @sas_address: sas address of attached device + * @sas_address_parent: sas address of parent expander or sas host + * @port: hba port entry + * Context: This function will acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_port_list. 
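+ *
+ * This is the inverse of mpt3sas_transport_port_add(): the port is unlinked
+ * from the parent sas_node's sas_port_list under the same sas_node_lock
+ * that protected its insertion, and then its phys and the underlying
+ * sas_port are released.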
+ */ +void +mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent, struct hba_port *port) +{ + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port, *next; + struct _sas_node *sas_node; + u8 found = 0; + struct _sas_phy *mpt3sas_phy, *next_phy; + struct hba_port *hba_port_next, *hba_port = NULL; + struct virtual_phy *vphy, *vphy_next = NULL; + + if (!port) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address_parent, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != sas_address) + continue; + if (mpt3sas_port->hba_port != port) + continue; + found = 1; + list_del(&mpt3sas_port->port_list); + goto out; + } + out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + if (sas_node->handle <= ioc->sas_hba.num_phys && + (ioc->multipath_on_hba)) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + ioc_info(ioc, + "remove vphy entry: %p of port:%p, from %d port's vphys list\n", + vphy, port, port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + /* + * Delete hba_port object if + * - hba_port object's sas address matches with current + * removed device's sas address and no vphy's + * associated with it. + * - Current removed device is a vSES device and + * none of the other direct attached devices have + * this vSES device's port number (hence hba_port + * object sas_address field will be zero). + */ + if ((hba_port->sas_address == sas_address || + !hba_port->sas_address) && !hba_port->vphys_mask) { + ioc_info(ioc, + "remove hba_port entry: %p port: %d from hba_port list\n", + hba_port, hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else if (hba_port->sas_address == sas_address && + hba_port->vphys_mask) { + /* + * Current removed device is a non vSES device + * and a vSES device has the same port number + * as of current device's port number. Hence + * only clear the sas_address field, don't + * delete the hba_port object.
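+ *
+ * Example: if a vSES shares this port with the removed end
+ * device, hba_port->vphys_mask is non-zero here, so only
+ * sas_address is cleared and the hba_port entry survives
+ * for the vSES that still uses it.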
+ */ + ioc_info(ioc, + "clearing sas_address from hba_port entry: %p port: %d from hba_port list\n", + hba_port, hba_port->port_id); + port->sas_address = 0; + } + break; + } + } + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) + memset(&sas_node->phy[i].remote_identify, 0 , + sizeof(struct sas_identify)); + } + + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + list_for_each_entry_safe(mpt3sas_phy, next_phy, + &mpt3sas_port->phy_list, port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + mpt3sas_phy->phy_belongs_to_port = 0; + if (!ioc->remove_host) + sas_port_delete_phy(mpt3sas_port->port, + mpt3sas_phy->phy); + list_del(&mpt3sas_phy->port_siblings); + } + if (!ioc->remove_host) + sas_port_delete(mpt3sas_port->port); + ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n", + __func__, (unsigned long long)sas_address); + kfree(mpt3sas_port); +} + +/** + * mpt3sas_transport_add_host_phy - report sas_host phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @phy_pg0: sas phy page 0 + * @parent_dev: parent device class object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->hostdata = mpt3sas_phy->port; + + if ((sas_phy_add(phy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + + +/** + * mpt3sas_transport_add_expander_phy - report expander phy to 
transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @expander_pg1: expander page 1 + * @parent_dev: parent device class object + * + * Return: 0 for success, non-zero for failure. + */ +int +mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + phy->hostdata = mpt3sas_phy->port; + + if ((sas_phy_add(phy))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + +/** + * mpt3sas_transport_update_links - refreshing phy link changes + * @ioc: per adapter object + * @sas_address: sas address of parent expander or sas host + * @handle: attached device handle + * @phy_number: phy number + * @link_rate: new link rate + * @port: hba port entry + * + * Return nothing. 
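+ *
+ * This typically runs from firmware event context (e.g. while handling an
+ * MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST event) to keep the transport layer's
+ * negotiated_linkrate and attached identify data in step with what the IOC
+ * reported.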
+ */ +void +mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate, + struct hba_port *port) +{ + unsigned long flags; + struct _sas_node *sas_node; + struct _sas_phy *mpt3sas_phy; + struct hba_port *hba_port = NULL; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + mpt3sas_phy = &sas_node->phy[phy_number]; + mpt3sas_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + _transport_set_identify(ioc, handle, + &mpt3sas_phy->remote_identify); + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << mpt3sas_phy->phy_id); + } + } + mpt3sas_transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address, + port); + } else + memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct + sas_identify)); + + if (mpt3sas_phy->phy) + mpt3sas_phy->phy->negotiated_linkrate = + _transport_convert_phy_link_rate(link_rate); + + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); +} + +static inline void * +phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + return shost_priv(shost); +} + +static inline void * +rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + return shost_priv(shost); +} + +/* report phy error log structure */ +struct phy_error_log_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x11 */ + u8 allocated_response_length; + u8 request_length; /* 02 */ + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +/* report phy error log reply structure */ +struct phy_error_log_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +/** + * _transport_get_expander_phy_error_log - return expander counters + * @ioc: per adapter object + * @phy: The sas phy object + * + * Return: 0 for success, non-zero for failure. 
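+ *
+ * The counters fetched here (and by _transport_get_linkerrors() for HBA
+ * phys) back the standard sas_phy sysfs files, e.g. (phy names below are
+ * illustrative):
+ *
+ *   /sys/class/sas_phy/phy-0:0/invalid_dword_count
+ *   /sys/class/sas_phy/phy-0:0/running_disparity_error_count
+ *   /sys/class/sas_phy/phy-0:0/loss_of_dword_sync_count
+ *   /sys/class/sas_phy/phy-0:0/phy_reset_problem_count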
+ * + */ +static int +_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_error_log_request *phy_error_log_request; + struct phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_error_log_request) + + sizeof(struct phy_error_log_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_error_log_request), + data_out_dma + sizeof(struct phy_error_log_request), + sizeof(struct phy_error_log_reply)); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + (u64)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n")); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n", + le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct phy_error_log_reply)) + goto out; + + phy_error_log_reply = data_out + + sizeof(struct phy_error_log_request); + + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - 
function_result(%d)\n", + phy_error_log_reply->function_result)); + + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, + ioc_info(ioc, "phy_error_log - no reply\n")); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +/** + * _transport_get_linkerrors - return phy counters for both hba and expanders + * @phy: The sas phy object + * + * Return: 0 for success, non-zero for failure. + * + */ +static int +_transport_get_linkerrors(struct sas_phy *phy) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage1_t phy_pg1; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_get_expander_phy_error_log(ioc, phy); + + /* get hba phy error logs */ + if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n", + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +/** + * _transport_get_enclosure_identifier - + * @rphy: The sas phy object + * @identifier: ? + * + * Obtain the enclosure logical id for an expander. + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _transport_get_bay_identifier - + * @rphy: The sas phy object + * + * Return: the slot id for a device that resides inside an enclosure. 
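The counters returned by the expander are big-endian on the wire, which is why the code above converts each one with be32_to_cpu() before storing it in the sas_phy. A minimal userspace sketch of the same decode, assuming a hypothetical be32_load() helper and the 12-byte header layout of struct phy_error_log_reply:

#include <stdint.h>
#include <stdio.h>

/* be32_load() is a hypothetical stand-in for the kernel's be32_to_cpu()
 * applied to a raw byte stream. */
static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* Bytes 0-11 mirror the header of struct phy_error_log_reply
	 * (frame type 0x41, function 0x11, result, length, change count,
	 * reserved, phy id, reserved); the 32-bit counters follow. */
	uint8_t resp[28] = { 0x41, 0x11, 0x00, 0x06 };

	resp[14] = 0x01;	/* invalid dword count = 0x0000012c */
	resp[15] = 0x2c;

	printf("invalid dwords: %u\n", be32_load(&resp[12]));
	printf("running disparity errors: %u\n", be32_load(&resp[16]));
	return 0;
}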
+ */ +static int +_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy); + if (sas_device) { + rc = sas_device->slot; + sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/* phy control request structure */ +struct phy_control_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x91 */ + u8 allocated_response_length; + u8 request_length; /* 0x09 */ + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +/* phy control reply structure */ +struct phy_control_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; +}; + +#define SMP_PHY_CONTROL_LINK_RESET (0x01) +#define SMP_PHY_CONTROL_HARD_RESET (0x02) +#define SMP_PHY_CONTROL_DISABLE (0x03) + +/** + * _transport_expander_phy_control - expander phy control + * @ioc: per adapter object + * @phy: The sas phy object + * @phy_operation: ? + * + * Return: 0 for success, non-zero for failure. + * + */ +static int +_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_control_request *phy_control_request; + struct phy_control_reply *phy_control_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto out; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_control_request) + + sizeof(struct phy_control_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_KERNEL); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = 
MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = _transport_get_port_id_by_sas_phy(phy);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_control_request));
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_control_request),
+ data_out_dma + sizeof(struct phy_control_request),
+ sizeof(struct phy_control_reply));
+
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ ioc_err(ioc, "%s: timeout\n", __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - function_result(%d)\n",
+ phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - no reply\n"));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset - reset a hba or expander phy
+ * @phy: The sas phy object
+ * @hard_reset: non-zero to issue a hard reset, zero for a link reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only support sas_host direct attached phys.
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+ struct hba_port *port = phy->hostdata;
+ int port_id = port->port_id;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address,
+ mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure
at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + /* copy Port/PortFlags/PhyFlags from page 0 */ + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + + mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz); + + /* link reset */ + if (enable) + _transport_phy_reset(phy, 0); + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +/** + * _transport_phy_speed - set phy min/max link rates + * @phy: The sas phy object + * @rates: rates defined in sas_phy_linkrates + * + * Only support sas_host direct attached phys. + * + * Return: 0 for success, non-zero for failure. + */ +static int +_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + unsigned long flags; + struct hba_port *port = phy->hostdata; + int port_id = port->port_id; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + mpt3sas_get_port_by_id(ioc, port_id, 0)) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return _transport_expander_phy_control(ioc, phy, + SMP_PHY_CONTROL_LINK_RESET); + } + + /* handle hba phys */ + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + 
(ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + + if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* link reset */ + _transport_phy_reset(phy, 0); + + /* read phy page 0, then update the rates in the sas transport phy */ + if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } + + out: + kfree(sas_iounit_pg1); + return rc; +} + +static int +_transport_map_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + /* Check if the request is split across multiple segments */ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + + return 0; +} + +static void +_transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +/** + * _transport_smp_handler - transport portal for smp passthru + * @job: ? + * @shost: shost object + * @rphy: sas transport rphy object + * + * This used primarily for smp_utils. 
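The two helpers above hide a bounce-buffer decision: a single-segment payload is DMA-mapped in place, while a multi-segment payload is linearized into one coherent allocation. A minimal userspace analogue, with the hypothetical seg and linearize() names standing in for the scatterlist and dma_alloc_coherent() machinery:

#include <stdlib.h>
#include <string.h>

struct seg {
	void *addr;
	size_t len;
};

/* One segment is used in place (the kernel maps it with dma_map_sg());
 * more than one is copied into a single contiguous bounce buffer.
 * Returns NULL on allocation failure. */
static void *linearize(const struct seg *segs, int cnt, size_t total,
		       int *bounced)
{
	char *bounce;
	size_t off = 0;
	int i;

	if (cnt == 1) {
		*bounced = 0;
		return segs[0].addr;
	}
	bounce = malloc(total);
	if (!bounce)
		return NULL;
	for (i = 0; i < cnt; i++) {
		memcpy(bounce + off, segs[i].addr, segs[i].len);
		off += segs[i].len;
	}
	*bounced = 1;
	return bounce;
}

The bounced flag plays the role of the addr_out/addr_in pointers in the handler below: it tells the caller whether there is a separate copy to free and, for the reply direction, to copy back.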
+ * Example: + * smp_rep_general /sys/class/bsg/expander-5:0 + */ +static void +_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + int rc; + u16 smid; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + unsigned int reslen = 0; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + ioc_info(ioc, "%s: host reset in progress!\n", __func__); + rc = -EFAULT; + goto job_done; + } + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + goto job_done; + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: transport_cmds in use\n", + __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + } + + rc = _transport_map_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + + rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); + if (rc) + goto unmap_in; + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); + rc = -EAGAIN; + goto unmap_in; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = _transport_get_port_id_by_rphy(ioc, rphy); + mpi_request->SASAddress = (rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); + + dtransportprintk(ioc, + ioc_info(ioc, "%s: sending smp request\n", __func__)); + + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) { + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } + } + + dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__)); + + if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) { + dtransportprintk(ioc, + ioc_info(ioc, "%s: no reply\n", __func__)); + rc = -ENXIO; + goto unmap_in; + } + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, + ioc_info(ioc, "%s: reply data transfer size(%d)\n", + __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + + if (addr_in) { + sg_copy_to_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + } + + rc = 0; + unmap_in: + _transport_unmap_smp_buffer(&ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); + unmap_out: + _transport_unmap_smp_buffer(&ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); +job_done: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template mpt3sas_transport_functions = { + .get_linkerrors = _transport_get_linkerrors, + .get_enclosure_identifier = _transport_get_enclosure_identifier, + .get_bay_identifier = _transport_get_bay_identifier, + .phy_reset = _transport_phy_reset, + .phy_enable = _transport_phy_enable, + .set_phy_speed = _transport_phy_speed, + .smp_handler = _transport_smp_handler, +}; + +struct scsi_transport_template *mpt3sas_transport_template; diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c new file mode 100644 index 000000000..d9b7d0ee2 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -0,0 +1,473 @@ +/* + * This module provides common API to set Diagnostic trigger for MPT + * (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
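For context, this is roughly how a template like mpt3sas_transport_functions gets wired into the SAS transport class; the driver's real registration lives in its module init path elsewhere, so example_transport_init()/example_transport_exit() below are illustrative names only, a sketch rather than driver code:

#include <scsi/scsi_transport_sas.h>

/* sas_attach_transport() turns the sas_function_template into a
 * scsi_transport_template, which is later plugged into the Scsi_Host
 * template so sysfs phy/rphy operations reach the callbacks above. */
static int __init example_transport_init(void)
{
	mpt3sas_transport_template =
	    sas_attach_transport(&mpt3sas_transport_functions);
	return mpt3sas_transport_template ? 0 : -ENODEV;
}

static void __exit example_transport_exit(void)
{
	sas_release_transport(mpt3sas_transport_template);
}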
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notify app
+ * @ioc: per adapter object
+ * @event_data: trigger event data to hand up to the application
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 sz, event_data_sz;
+ unsigned long flags;
+
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
+
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+ mpi_reply = kzalloc(sz, GFP_KERNEL);
+ if (!mpi_reply)
+ goto out;
+ mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+ event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+ mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+ memcpy(&mpi_reply->EventData, event_data,
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: add to driver event log\n",
+ __func__));
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ kfree(mpi_reply);
+ out:
+
+ /* clearing the diag_trigger_active flag */
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+ __func__));
+ ioc->diag_trigger_active = 0;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data: trigger event data
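Note that EventDataLength in the faked reply above is expressed in 32-bit dwords rather than bytes, and the sizing leaves one extra dword of slack. A small standalone sketch of that arithmetic (event_data_dwords() is a hypothetical helper name):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Restates the sizing used above: payload bytes plus one dword of
 * slack, expressed as a count of 32-bit dwords. */
static uint16_t event_data_dwords(size_t payload_bytes)
{
	return (uint16_t)((payload_bytes + 4) / 4);
}

int main(void)
{
	printf("%u dwords\n", event_data_dwords(12));	/* -> 4 */
	return 0;
}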
+ */ +void +mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + u8 issue_reset = 0; + u32 *trig_data = (u32 *)&event_data->u.master; + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); + + /* release the diag buffer trace */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + /* + * add a log message so that user knows which event caused + * the release + */ + ioc_info(ioc, + "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", + __func__, event_data->trigger_type, + trig_data[0], trig_data[1]); + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_TRIGGER; + if (event_data) { + ioc->htb_rel.trigger_type = event_data->trigger_type; + switch (event_data->trigger_type) { + case MPT3SAS_TRIGGER_SCSI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.scsi, + sizeof(struct SL_WH_SCSI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MPI: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.mpi, + sizeof(struct SL_WH_MPI_TRIGGER_T)); + break; + case MPT3SAS_TRIGGER_MASTER: + ioc->htb_rel.trigger_info_dwords[0] = + event_data->u.master.MasterData; + break; + case MPT3SAS_TRIGGER_EVENT: + memcpy(&ioc->htb_rel.trigger_info_dwords, + &event_data->u.event, + sizeof(struct SL_WH_EVENT_TRIGGER_T)); + break; + default: + ioc_err(ioc, "%d - Is not a valid Trigger type\n", + event_data->trigger_type); + break; + } + } + _mpt3sas_raise_sigio(ioc, event_data); + + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_master - Master trigger handler + * @ioc: per adapter object + * @trigger_bitmask: + * + */ +void +mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + unsigned long flags; + u8 found_match = 0; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) + goto by_pass_checks; + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + by_pass_checks: + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n", + __func__, trigger_bitmask)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MASTER; + event_data.u.master.MasterData = trigger_bitmask; + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & 
MASTER_TRIGGER_ADAPTER_RESET) { + ioc->htb_rel.trigger_type = MPT3SAS_TRIGGER_MASTER; + ioc->htb_rel.trigger_info_dwords[0] = trigger_bitmask; + if (ioc->reset_from_user) + ioc->htb_rel.trigger_info_dwords[1] = + MPT_DIAG_RESET_ISSUED_BY_USER; + _mpt3sas_raise_sigio(ioc, &event_data); + } else + mpt3sas_send_trigger_data_event(ioc, &event_data); + + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_event - Event trigger handler + * @ioc: per adapter object + * @event: ? + * @log_entry_qualifier: ? + * + */ +void +mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_EVENT_TRIGGER_T *event_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n", + __func__, event, log_entry_qualifier)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + event_trigger = ioc->diag_trigger_event.EventTriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries + && !found_match; i++, event_trigger++) { + if (event_trigger->EventValue != event) + continue; + if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { + if (event_trigger->LogEntryQualifier == + log_entry_qualifier) + found_match = 1; + continue; + } + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; + event_data.u.event.EventValue = event; + event_data.u.event.LogEntryQualifier = log_entry_qualifier; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_scsi - SCSI trigger handler + * @ioc: per adapter object + * @sense_key: ? + * @asc: ? + * @ascq: ? 
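The matching loop in mpt3sas_trigger_event() above reduces to a simple predicate: match on the event code alone, except for log-entry-added events, which must also match the qualifier. A standalone restatement (ev_trig and ev_matches() are illustrative names; 0x0021 assumes the MPI2_EVENT_LOG_ENTRY_ADDED value from the MPI2 headers):

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_EVENT_LOG_ENTRY_ADDED 0x0021

struct ev_trig {
	uint16_t event_value;
	uint16_t log_entry_qualifier;
};

/* Match on event code; LOG_ENTRY_ADDED additionally requires the
 * qualifier to match, mirroring the loop above. */
static int ev_matches(const struct ev_trig *t, uint16_t event, uint16_t qual)
{
	if (t->event_value != event)
		return 0;
	if (event == EXAMPLE_EVENT_LOG_ENTRY_ADDED)
		return t->log_entry_qualifier == qual;
	return 1;
}

int main(void)
{
	struct ev_trig t = { EXAMPLE_EVENT_LOG_ENTRY_ADDED, 0x0002 };

	assert(!ev_matches(&t, EXAMPLE_EVENT_LOG_ENTRY_ADDED, 0x0001));
	assert(ev_matches(&t, EXAMPLE_EVENT_LOG_ENTRY_ADDED, 0x0002));
	return 0;
}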
+ * + */ +void +mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, + u8 ascq) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_SCSI_TRIGGER_T *scsi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", + __func__, sense_key, asc, ascq)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries + && !found_match; i++, scsi_trigger++) { + if (scsi_trigger->SenseKey != sense_key) + continue; + if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc)) + continue; + if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; + event_data.u.scsi.SenseKey = sense_key; + event_data.u.scsi.ASC = asc; + event_data.u.scsi.ASCQ = ascq; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} + +/** + * mpt3sas_trigger_mpi - MPI trigger handler + * @ioc: per adapter object + * @ioc_status: ? + * @loginfo: ? 
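The SCSI trigger scan in mpt3sas_trigger_scsi() above treats 0xFF in ASC or ASCQ as a wildcard, while the sense key must match exactly. The same predicate as a self-contained sketch (hypothetical names):

#include <assert.h>
#include <stdint.h>

struct scsi_trig {
	uint8_t ascq;
	uint8_t asc;
	uint8_t sense_key;
};

/* Exact sense key; 0xFF wildcards ASC and ASCQ, as in the loop above. */
static int sense_matches(const struct scsi_trig *t,
			 uint8_t key, uint8_t asc, uint8_t ascq)
{
	return t->sense_key == key &&
	       (t->asc == 0xFF || t->asc == asc) &&
	       (t->ascq == 0xFF || t->ascq == ascq);
}

int main(void)
{
	/* unit attention (0x06), ASC 0x3F, any ASCQ */
	struct scsi_trig t = { 0xFF, 0x3F, 0x06 };

	assert(sense_matches(&t, 0x06, 0x3F, 0x0E));
	assert(!sense_matches(&t, 0x02, 0x3F, 0x0E));
	return 0;
}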
+ * + */ +void +mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_MPI_TRIGGER_T *mpi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n", + __func__, ioc_status, loginfo)); + + /* don't send trigger if an trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries + && !found_match; i++, mpi_trigger++) { + if (mpi_trigger->IOCStatus != ioc_status) + continue; + if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF || + mpi_trigger->IocLogInfo == loginfo)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, + ioc_info(ioc, "%s: setting diag_trigger_active flag\n", + __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MPI; + event_data.u.mpi.IOCStatus = ioc_status; + event_data.u.mpi.IocLogInfo = loginfo; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n", + __func__)); +} diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h new file mode 100644 index 000000000..405eada26 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -0,0 +1,194 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * to set Diagnostic triggers for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + /* Diagnostic Trigger Configuration Data Structures */ + +#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED +#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED + +/* limitation on number of entries */ +#define NUM_VALID_ENTRIES (20) + +/* trigger types */ +#define MPT3SAS_TRIGGER_MASTER (1) +#define MPT3SAS_TRIGGER_EVENT (2) +#define MPT3SAS_TRIGGER_SCSI (3) +#define MPT3SAS_TRIGGER_MPI (4) + +/* trigger names */ +#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master" +#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event" +#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi" +#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi" + +/* master trigger bitmask */ +#define MASTER_TRIGGER_FW_FAULT (0x00000001) +#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002) +#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004) +#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008) + +/* fake firmware event for trigger */ +#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E) + +/** + * MasterTrigger is a single U32 passed to/from sysfs. + * + * Bit Flags (enables) include: + * 1. FW Faults + * 2. Adapter Reset issued by driver + * 3. TMs + * 4. Device Remove Event sent by FW + */ + +struct SL_WH_MASTER_TRIGGER_T { + uint32_t MasterData; +}; + +/** + * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element + * @EventValue: Event Code to trigger on + * @LogEntryQualifier: Type of FW event that logged (Log Entry Added Event only) + * + * Defines an event that should induce a DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_EVENT_TRIGGER_T { + uint16_t EventValue; + uint16_t LogEntryQualifier; +}; + +/** + * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of Event Triggers to be monitored for. + * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this + * structure. + * @EventTriggerEntry: List of Event trigger elements. + * + * This binary structure is transferred via sysfs to get/set Event Triggers + * in the Linux Driver. + */ + +struct SL_WH_EVENT_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element + * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for + * wildcard. + * @ASC: Additional Sense Code. 
Can be specific or 0xFF for wildcard + * @SenseKey: SCSI Sense Key + * + * Defines a sense key (single or many variants) that should induce a + * DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_SCSI_TRIGGER_T { + U8 ASCQ; + U8 ASC; + U8 SenseKey; + U8 Reserved; +}; + +/** + * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of SCSI sense codes that should trigger a DIAG_SERVICE event when + * observed. + * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this + * structure. + * @SCSITriggerEntry: List of SCSI Sense Code trigger elements. + * + * This binary structure is transferred via sysfs to get/set SCSI Sense Code + * Triggers in the Linux Driver. + */ +struct SL_WH_SCSI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element + * @IOCStatus: MPI IOCStatus + * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard + * + * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER + * driver event if observed. + */ +struct SL_WH_MPI_TRIGGER_T { + uint16_t IOCStatus; + uint16_t Reserved; + uint32_t IocLogInfo; +}; + +/** + * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE + * event when observed. + * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this + * structure. + * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements. + * + * This binary structure is transferred via sysfs to get/set MPI Error Triggers + * in the Linux Driver. + */ +struct SL_WH_MPI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger + * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX) + * @u: trigger condition that caused trigger to be sent + */ +struct SL_WH_TRIGGERS_EVENT_DATA_T { + uint32_t trigger_type; + union { + struct SL_WH_MASTER_TRIGGER_T master; + struct SL_WH_EVENT_TRIGGER_T event; + struct SL_WH_SCSI_TRIGGER_T scsi; + struct SL_WH_MPI_TRIGGER_T mpi; + } u; +}; +#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h new file mode 100644 index 000000000..5f3328f01 --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This is the Fusion MPT base driver providing common API layer interface + * to store diag trigger values into persistent driver triggers pages + * for MPT (Message Passing Technology) based controllers. + * + * Copyright (C) 2020 Broadcom Inc. + * + * Authors: Broadcom Inc. 
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com> + * + * Send feedback to : MPT-FusionLinux.pdl@broadcom.com) + */ + +#include "mpi/mpi2_cnfg.h" + +#ifndef MPI2_TRIGGER_PAGES_H +#define MPI2_TRIGGER_PAGES_H + +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0) +#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 TriggerFlags; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + U32 Reserved0xC[61]; /* 0x0C */ +} _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, Mpi26DriverTriggerPage0_t; + +/* Trigger Flags */ +#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001) +#define MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID (0x0002) +#define MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID (0x0004) +#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008) + +#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY { + U32 MasterTriggerFlags; +} MPI26_DRIVER_MASTER_TIGGER_ENTRY; + +#define MPI26_MAX_MASTER_TRIGGERS (1) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMasterTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, Mpi26DriverTriggerPage1_t; + +#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY { + U16 MPIEventCode; /* 0x00 */ + U16 MPIEventCodeSpecific; /* 0x02 */ +} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY; + +#define MPI26_MAX_MPI_EVENT_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumMPIEventTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, Mpi26DriverTriggerPage2_t; + +#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY { + U8 ASCQ; /* 0x00 */ + U8 ASC; /* 0x01 */ + U8 SenseKey; /* 0x02 */ + U8 Reserved; /* 0x03 */ +} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY; + +#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumSCSISenseTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, Mpi26DriverTriggerPage3_t; + +#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01) +typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY { + U16 IOCStatus; /* 0x00 */ + U16 Reserved; /* 0x02 */ + U32 LogInfo; /* 0x04 */ +} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY; + +#define MPI26_MAX_LOGINFO_TRIGGERS (20) +typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */ + U16 NumIOCStatusLogInfoTrigger; /* 0x08 */ + U16 Reserved0xA; /* 0x0A */ + MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */ +} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, Mpi26DriverTriggerPage4_t; + +#endif diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c new file mode 100644 index 000000000..cc07ba41f --- /dev/null +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c @@ -0,0 +1,299 
@@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2015 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <asm/unaligned.h> + +#include "mpt3sas_base.h" + +/** + * _warpdrive_disable_ddio - Disable direct I/O for all the volumes + * @ioc: per adapter object + */ +static void +_warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + struct _raid_device *raid_device; + u16 handle; + u16 ioc_status; + unsigned long flags; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(vol_pg1.DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->direct_io_enabled = 0; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + return; +} + + +/** + * mpt3sas_get_num_volumes - Get number of volumes in the ioc + * @ioc: per adapter object + */ +u8 +mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + u16 handle; + u8 vol_cnt = 0; + u16 ioc_status; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + vol_cnt++; + handle = le16_to_cpu(vol_pg1.DevHandle); + } + return vol_cnt; +} + + +/** + * mpt3sas_init_warpdrive_properties - Set properties for warpdrive direct I/O. 
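Both functions above walk the firmware's volume list with the GET_NEXT_HANDLE form of the config request, seeding the handle with 0xFFFF and stopping when the firmware answers CONFIG_INVALID_PAGE. A userspace mock of that iteration pattern (next_volume_handle() and its handle table are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Stands in for mpt3sas_config_get_raid_volume_pg1(...,
 * MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, prev): returns the handle
 * after 'prev', or -1 where the firmware would report an invalid page. */
static int next_volume_handle(uint16_t prev)
{
	static const uint16_t handles[] = { 0x011a, 0x011b };
	size_t i, n = sizeof(handles) / sizeof(handles[0]);

	if (prev == 0xFFFF)	/* 0xFFFF asks for the first handle */
		return handles[0];
	for (i = 0; i + 1 < n; i++)
		if (handles[i] == prev)
			return handles[i + 1];
	return -1;
}

int main(void)
{
	uint16_t prev = 0xFFFF;
	unsigned count = 0;
	int h;

	while ((h = next_volume_handle(prev)) >= 0) {
		count++;
		prev = (uint16_t)h;
	}
	printf("volumes: %u\n", count);
	return 0;
}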
+ * @ioc: per adapter object + * @raid_device: the raid_device object + */ +void +mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds, count; + unsigned long stripe_sz, block_sz; + u8 stripe_exp, block_exp; + u64 dev_max_lba; + + if (!ioc->is_warpdrive) + return; + + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n"); + return; + } + if (mpt3sas_get_num_volumes(ioc) > 1) { + _warpdrive_disable_ddio(ioc); + ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n"); + return; + } + if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n"); + return; + } + + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n"); + return; + } + + if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n"); + kfree(vol_pg0); + return; + } + + /* + * WARPDRIVE:If number of physical disks in a volume exceeds the max pds + * assumed for WARPDRIVE, disable direct I/O + */ + if (num_pds > MPT_MAX_WARPDRIVE_PDS) { + ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n", + raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS); + kfree(vol_pg0); + return; + } + for (count = 0; count < num_pds; count++) { + if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[count].PhysDiskNum) || + le16_to_cpu(pd_pg0.DevHandle) == + MPT3SAS_INVALID_DEVICE_HANDLE) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n", + raid_device->handle, + vol_pg0->PhysDisk[count].PhysDiskNum); + goto out_error; + } + /* Disable direct I/O if member drive lba exceeds 4 bytes */ + dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA); + if (dev_max_lba >> 32) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n", + raid_device->handle, + le16_to_cpu(pd_pg0.DevHandle), + (u64)dev_max_lba); + goto out_error; + } + + raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle); + } + + /* + * Assumption for WD: Direct I/O is not supported if the volume is + * not RAID0 + */ + if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n", + raid_device->handle, raid_device->volume_type, + (le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024, + le16_to_cpu(vol_pg0->BlockSize)); + goto out_error; + } + + stripe_sz = le32_to_cpu(vol_pg0->StripeSize); + stripe_exp = find_first_bit(&stripe_sz, 32); + if (stripe_exp == 32) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n", + raid_device->handle, + 
(le32_to_cpu(vol_pg0->StripeSize) * + le16_to_cpu(vol_pg0->BlockSize)) / 1024); + goto out_error; + } + raid_device->stripe_exponent = stripe_exp; + block_sz = le16_to_cpu(vol_pg0->BlockSize); + block_exp = find_first_bit(&block_sz, 16); + if (block_exp == 16) { + ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n", + raid_device->handle, le16_to_cpu(vol_pg0->BlockSize)); + goto out_error; + } + raid_device->block_exponent = block_exp; + raid_device->direct_io_enabled = 1; + + ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n", + raid_device->handle); + /* + * WARPDRIVE: Though the following fields are not used for direct IO, + * stored for future purpose: + */ + raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA); + raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize); + raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize); + + + kfree(vol_pg0); + return; + +out_error: + raid_device->direct_io_enabled = 0; + for (count = 0; count < num_pds; count++) + raid_device->pd_handle[count] = 0; + kfree(vol_pg0); + return; +} + +/** + * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @raid_device: pointer to raid device data structure + * @mpi_request: pointer to the SCSI_IO reqest message frame + */ +void +mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request) +{ + sector_t v_lba, p_lba, stripe_off, column, io_size; + u32 stripe_sz, stripe_exp; + u8 num_pds, cmd = scmd->cmnd[0]; + struct scsiio_tracker *st = scsi_cmd_priv(scmd); + + if (cmd != READ_10 && cmd != WRITE_10 && + cmd != READ_16 && cmd != WRITE_16) + return; + + if (cmd == READ_10 || cmd == WRITE_10) + v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]); + else + v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]); + + io_size = scsi_bufflen(scmd) >> raid_device->block_exponent; + + if (v_lba + io_size - 1 > raid_device->max_lba) + return; + + stripe_sz = raid_device->stripe_sz; + stripe_exp = raid_device->stripe_exponent; + stripe_off = v_lba & (stripe_sz - 1); + + /* Return unless IO falls within a stripe */ + if (stripe_off + io_size > stripe_sz) + return; + + num_pds = raid_device->num_pds; + p_lba = v_lba >> stripe_exp; + column = sector_div(p_lba, num_pds); + p_lba = (p_lba << stripe_exp) + stripe_off; + mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]); + + if (cmd == READ_10 || cmd == WRITE_10) + put_unaligned_be32(lower_32_bits(p_lba), + &mpi_request->CDB.CDB32[2]); + else + put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]); + + st->direct_io = 1; +} |
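The CDB rewrite in mpt3sas_setup_direct_io() above is plain RAID0 arithmetic: because the stripe size is a power of two (enforced in mpt3sas_init_warpdrive_properties()), the stripe offset and stripe number come from a mask and a shift, and the stripe number then splits into a member-disk column plus a per-disk LBA. A standalone sketch of the same mapping, with hypothetical volume parameters:

#include <stdint.h>
#include <stdio.h>

/* map_lba() restates the v_lba -> (column, p_lba) translation above;
 * the kernel version uses sector_div() for the quotient/remainder. */
static void map_lba(uint64_t v_lba, unsigned stripe_exp, unsigned num_pds,
		    unsigned *column, uint64_t *p_lba)
{
	uint64_t stripe_off = v_lba & ((1ULL << stripe_exp) - 1);
	uint64_t stripe_no = v_lba >> stripe_exp;

	*column = (unsigned)(stripe_no % num_pds);
	*p_lba = ((stripe_no / num_pds) << stripe_exp) + stripe_off;
}

int main(void)
{
	unsigned col;
	uint64_t plba;

	/* hypothetical volume: 64-block stripes (exponent 6), 4 members */
	map_lba(1000, 6, 4, &col, &plba);
	printf("column=%u p_lba=%llu\n", col, (unsigned long long)plba);
	return 0;
}

The driver only takes this path when the I/O lies entirely within one stripe (the stripe_off + io_size check above); anything that crosses a stripe boundary would span two member disks and is left for the firmware to handle.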