author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/scsi/mpi3mr
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi/mpi3mr')
-rw-r--r--  drivers/scsi/mpi3mr/Kconfig                       9
-rw-r--r--  drivers/scsi/mpi3mr/Makefile                      6
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h           2414
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_image.h           275
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_init.h            117
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h            1063
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_pci.h              16
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_sas.h              46
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h       470
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h                   1399
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c               1868
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_debug.h              197
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c                5816
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c                5360
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c         3291
15 files changed, 22347 insertions, 0 deletions
diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig
new file mode 100644
index 000000000..f48740cd5
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_MPI3MR
+ tristate "Broadcom MPI3 Storage Controller Device Driver"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ select SCSI_SAS_ATTRS
+ help
+ MPI3 based Storage & RAID Controllers Driver.
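
For orientation, enabling this driver is a single kernel configuration option; a hypothetical .config fragment for a 6.1-series tree (the two selected symbols are pulled in automatically by the entry above):

    CONFIG_SCSI_MPI3MR=m
    # selected automatically by SCSI_MPI3MR:
    CONFIG_BLK_DEV_BSGLIB=y
    CONFIG_SCSI_SAS_ATTRS=m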
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
new file mode 100644
index 000000000..3bf8cf34e
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -0,0 +1,6 @@
+# mpi3mr makefile
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
+mpi3mr-y += mpi3mr_os.o \
+ mpi3mr_fw.o \
+ mpi3mr_app.o \
+ mpi3mr_transport.o
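
The Makefile links all four objects into a single mpi3mr.ko. Against an already-configured kernel tree, something like the following should build just this directory (the exact invocation varies by tree):

    make M=drivers/scsi/mpi3mr modules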
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
new file mode 100644
index 000000000..0a2af4891
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -0,0 +1,2414 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2017-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_CNFG_H
+#define MPI30_CNFG_H 1
+#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
+#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
+#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
+#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
+#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
+#define MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT (0x20)
+#define MPI3_CONFIG_PAGETYPE_SAS_EXPANDER (0x21)
+#define MPI3_CONFIG_PAGETYPE_SAS_PHY (0x23)
+#define MPI3_CONFIG_PAGETYPE_SAS_PORT (0x24)
+#define MPI3_CONFIG_PAGETYPE_PCIE_IO_UNIT (0x30)
+#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
+#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
+#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI3_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI3_CONFIG_ACTION_READ_DEFAULT (0x01)
+#define MPI3_CONFIG_ACTION_READ_CURRENT (0x02)
+#define MPI3_CONFIG_ACTION_WRITE_CURRENT (0x03)
+#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
+#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
+#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00ff0000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000ff)
+#define MPI3_SASPORT_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000ff)
+#define MPI3_ENCLOS_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00ff0000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff)
+#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
+#define MPI3_SECURITY_PGAD_FORM_SLOT_NUM (0x10000000)
+#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
+#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+struct mpi3_config_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 page_version;
+ u8 page_number;
+ u8 page_type;
+ u8 action;
+ __le32 page_address;
+ __le16 page_length;
+ __le16 reserved16;
+ __le32 reserved18[2];
+ union mpi3_sge_union sgl;
+};
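
The page_address field carries one of the PGAD forms defined above. A minimal sketch (not part of the patch; the helper name is hypothetical) composing the Device-page form addressed by device handle — a real request would store the result with cpu_to_le32():

    /* Compose page_address for a Device page addressed by handle. */
    static u32 mpi3_device_pgad_by_handle(u16 dev_handle)
    {
            return MPI3_DEVICE_PGAD_FORM_HANDLE |
                   (dev_handle & MPI3_DEVICE_PGAD_HANDLE_MASK);
    }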
+
+struct mpi3_config_page_header {
+ u8 page_version;
+ u8 reserved01;
+ u8 page_number;
+ u8 page_attribute;
+ __le16 page_length;
+ u8 page_type;
+ u8 reserved07;
+};
+
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
+#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI3_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI3_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI3_SAS_NEG_LINK_RATE_6_0 (0x0a)
+#define MPI3_SAS_NEG_LINK_RATE_12_0 (0x0b)
+#define MPI3_SAS_NEG_LINK_RATE_22_5 (0x0c)
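
The negotiated link rate byte packs a logical rate in the high nibble and a physical rate in the low nibble (see negotiated_link_rate in struct mpi3_sas_io_unit0_phy_data later in this file). A hedged decode sketch, assuming only the masks above:

    static u8 mpi3_sas_logical_rate(u8 neg)
    {
            return (neg & MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
                   MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
    }

    static u8 mpi3_sas_physical_rate(u8 neg)
    {
            return neg & MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK;
    }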
+#define MPI3_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI3_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000f)
+#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI3_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI3_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI3_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI3_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI3_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI3_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+#define MPI3_SAS_APHYINFO_REASON_EXP_REDUCED_FUNC (0x00000009)
+#define MPI3_SAS_PHYINFO_STATUS_MASK (0xc0000000)
+#define MPI3_SAS_PHYINFO_STATUS_SHIFT (30)
+#define MPI3_SAS_PHYINFO_STATUS_ACCESSIBLE (0x00000000)
+#define MPI3_SAS_PHYINFO_STATUS_NOT_EXIST (0x40000000)
+#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24)
+#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000)
+#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
+#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI3_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI3_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI3_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI3_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI3_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI3_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+#define MPI3_SAS_PHYINFO_REASON_EXP_REDUCED_FUNC (0x00090000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI3_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_MASK (0x00000f00)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000f0)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020)
+#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_PRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_PRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_PRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_PRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
+#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
+#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
+#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
+#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
+#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
+struct mpi3_man_page0 {
+ struct mpi3_config_page_header header;
+ u8 chip_revision[8];
+ u8 chip_name[32];
+ u8 board_name[32];
+ u8 board_assembly[32];
+ u8 board_tracer_number[32];
+ __le32 board_power;
+ __le32 reserved94;
+ __le32 reserved98;
+ u8 oem;
+ u8 profile_identifier;
+ __le16 flags;
+ u8 board_mfg_day;
+ u8 board_mfg_month;
+ __le16 board_mfg_year;
+ u8 board_rework_day;
+ u8 board_rework_month;
+ __le16 board_rework_year;
+ u8 board_revision[8];
+ u8 e_pack_fru[16];
+ u8 product_name[256];
+};
+
+#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
+#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
+#define MPI3_MAN1_VPD_SIZE (512)
+struct mpi3_man_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ u8 vpd[MPI3_MAN1_VPD_SIZE];
+};
+
+#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man_page2 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c[3];
+ u8 oem_board_tracer_number[32];
+};
+#define MPI3_MAN2_PAGEVERSION (0x00)
+#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01)
+struct mpi3_man5_phy_entry {
+ __le64 ioc_wwid;
+ __le64 device_name;
+ __le64 sata_wwid;
+};
+
+#ifndef MPI3_MAN5_PHY_MAX
+#define MPI3_MAN5_PHY_MAX (1)
+#endif
+struct mpi3_man_page5 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man5_phy_entry phy[MPI3_MAN5_PHY_MAX];
+};
+
+#define MPI3_MAN5_PAGEVERSION (0x00)
+struct mpi3_man6_gpio_entry {
+ u8 function_code;
+ u8 function_flags;
+ __le16 flags;
+ u8 param1;
+ u8 param2;
+ __le16 reserved06;
+ __le32 param3;
+};
+
+#define MPI3_MAN6_GPIO_FUNCTION_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_FUNCTION_ALTERNATE (0x01)
+#define MPI3_MAN6_GPIO_FUNCTION_EXT_INTERRUPT (0x02)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_ACTIVITY (0x03)
+#define MPI3_MAN6_GPIO_FUNCTION_OVER_TEMPERATURE (0x04)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_GREEN (0x05)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
+#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
+#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
+#define MPI3_MAN6_GPIO_FUNCTION_PBLP_STATUS_CHANGE (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
+#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
+#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
+#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_FUNCTION_AUXILIARY_POWER (0x14)
+#define MPI3_MAN6_GPIO_FUNCTION_RAID_DATA_CACHE_DIRTY (0x15)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_CONTROL (0x16)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_FAULT (0x17)
+#define MPI3_MAN6_GPIO_FUNCTION_POWER_BRAKE (0x18)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
+#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_33OHM (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_MASK (0x0030)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_SHIFT (4)
+#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008)
+#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_PUSH_PULL_OUTPUT (0x0003)
+#ifndef MPI3_MAN6_GPIO_MAX
+#define MPI3_MAN6_GPIO_MAX (1)
+#endif
+struct mpi3_man_page6 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_gpio;
+ u8 reserved0d[3];
+ struct mpi3_man6_gpio_entry gpio[MPI3_MAN6_GPIO_MAX];
+};
+
+#define MPI3_MAN6_PAGEVERSION (0x00)
+#define MPI3_MAN6_FLAGS_HEARTBEAT_LED_DISABLED (0x0001)
+struct mpi3_man7_receptacle_info {
+ __le32 name[4];
+ u8 location;
+ u8 connector_type;
+ u8 ped_clk;
+ u8 connector_id;
+ __le32 reserved14;
+};
+
+#define MPI3_MAN7_LOCATION_UNKNOWN (0x00)
+#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
+#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
+#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_LOCATION_HOST (0x04)
+#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
+#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
+#define MPI3_MAN7_PEDCLK_ID_MASK (0x0f)
+#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX
+#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1)
+#endif
+struct mpi3_man_page7 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 num_receptacles;
+ u8 reserved0d[3];
+ __le32 enclosure_name[4];
+ struct mpi3_man7_receptacle_info receptacle_info[MPI3_MAN7_RECEPTACLE_INFO_MAX];
+};
+
+#define MPI3_MAN7_PAGEVERSION (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01)
+struct mpi3_man8_phy_info {
+ u8 receptacle_id;
+ u8 connector_lane;
+ __le16 reserved02;
+ __le16 slotx1;
+ __le16 slotx2;
+ __le16 slotx4;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff)
+#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff)
+#ifndef MPI3_MAN8_PHY_INFO_MAX
+#define MPI3_MAN8_PHY_INFO_MAX (1)
+#endif
+struct mpi3_man_page8 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_man8_phy_info phy_info[MPI3_MAN8_PHY_INFO_MAX];
+};
+
+#define MPI3_MAN8_PAGEVERSION (0x00)
+struct mpi3_man9_rsrc_entry {
+ __le32 maximum;
+ __le32 decrement;
+ __le32 minimum;
+ __le32 actual;
+};
+
+enum mpi3_man9_resources {
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_RESERVED02 = 2,
+ MPI3_MAN9_RSRC_NVME = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_RESERVED10 = 10,
+ MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+ MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+ MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+ MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
+ MPI3_MAN9_RSRC_NUM_RESOURCES
+};
+
+#define MPI3_MAN9_MIN_OUTSTANDING_REQS (1)
+#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
+#define MPI3_MAN9_MIN_TARGET_CMDS (0)
+#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
+#define MPI3_MAN9_MIN_NVME_TARGETS (0)
+#define MPI3_MAN9_MIN_INITIATORS (0)
+#define MPI3_MAN9_MIN_VDS (0)
+#define MPI3_MAN9_MIN_ENCLOSURES (1)
+#define MPI3_MAN9_MAX_ENCLOSURES (65535)
+#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_EXPANDERS (0)
+#define MPI3_MAN9_MAX_EXPANDERS (65535)
+#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+#define MPI3_MAN9_MIN_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_ADV_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_RAID_PD_DRIVES (0)
+#define MPI3_MAN9_DRIVER_DIAG_BUFFER (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
+struct mpi3_man_page9 {
+ struct mpi3_config_page_header header;
+ u8 num_resources;
+ u8 reserved09;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+ struct mpi3_man9_rsrc_entry resource[MPI3_MAN9_RSRC_NUM_RESOURCES];
+};
+
+#define MPI3_MAN9_PAGEVERSION (0x00)
+struct mpi3_man10_istwi_ctrlr_entry {
+ __le16 slave_address;
+ __le16 flags;
+ u8 scl_low_override;
+ u8 scl_high_override;
+ __le16 reserved06;
+};
+
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
+#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
+#define MPI3_MAN10_ISTWI_CTRLR_MAX (1)
+#endif
+struct mpi3_man_page10 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_ctrl;
+ u8 reserved0d[3];
+ struct mpi3_man10_istwi_ctrlr_entry istwi_controller[MPI3_MAN10_ISTWI_CTRLR_MAX];
+};
+
+#define MPI3_MAN10_PAGEVERSION (0x00)
+struct mpi3_man11_mux_device_format {
+ u8 max_channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_temp_sensor_device_format {
+ u8 type;
+ u8 reserved01[3];
+ u8 temp_channel[4];
+};
+
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+struct mpi3_man11_seeprom_device_format {
+ u8 size;
+ u8 page_write_size;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_SEEPROM_SIZE_1KBITS (0x01)
+#define MPI3_MAN11_SEEPROM_SIZE_2KBITS (0x02)
+#define MPI3_MAN11_SEEPROM_SIZE_4KBITS (0x03)
+#define MPI3_MAN11_SEEPROM_SIZE_8KBITS (0x04)
+#define MPI3_MAN11_SEEPROM_SIZE_16KBITS (0x05)
+#define MPI3_MAN11_SEEPROM_SIZE_32KBITS (0x06)
+#define MPI3_MAN11_SEEPROM_SIZE_64KBITS (0x07)
+#define MPI3_MAN11_SEEPROM_SIZE_128KBITS (0x08)
+struct mpi3_man11_ddr_spd_device_format {
+ u8 channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_cable_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_CABLE_MGMT_TYPE_SFF_8636 (0x00)
+struct mpi3_man11_bkplane_spec_ubm_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_FORCE_POLLING (0x0100)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_MASK (0x00f0)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+struct mpi3_man11_bkplane_spec_non_ubm_format {
+ __le16 flags;
+ u8 reserved02;
+ u8 type;
+};
+
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
+union mpi3_man11_bkplane_spec_format {
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
+};
+
+struct mpi3_man11_bkplane_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ u8 reset_info;
+ u8 reserved03;
+ union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
+};
+
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
+struct mpi3_man11_gas_gauge_device_format {
+ u8 type;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+struct mpi3_man11_mgmt_ctrlr_device_format {
+ __le32 reserved00;
+ __le32 reserved04;
+};
+struct mpi3_man11_board_fan_device_format {
+ u8 flags;
+ u8 reserved01;
+ u8 min_fan_speed;
+ u8 max_fan_speed;
+ __le32 reserved04;
+};
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07)
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00)
+union mpi3_man11_device_specific_format {
+ struct mpi3_man11_mux_device_format mux;
+ struct mpi3_man11_temp_sensor_device_format temp_sensor;
+ struct mpi3_man11_seeprom_device_format seeprom;
+ struct mpi3_man11_ddr_spd_device_format ddr_spd;
+ struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
+ struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
+ struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
+ struct mpi3_man11_board_fan_device_format board_fan;
+ __le32 words[2];
+};
+struct mpi3_man11_istwi_device_format {
+ u8 device_type;
+ u8 controller;
+ u8 reserved02;
+ u8 flags;
+ __le16 device_address;
+ u8 mux_channel;
+ u8 mux_index;
+ union mpi3_man11_device_specific_format device_specific;
+};
+
+#define MPI3_MAN11_ISTWI_DEVTYPE_MUX (0x00)
+#define MPI3_MAN11_ISTWI_DEVTYPE_TEMP_SENSOR (0x01)
+#define MPI3_MAN11_ISTWI_DEVTYPE_SEEPROM (0x02)
+#define MPI3_MAN11_ISTWI_DEVTYPE_DDR_SPD (0x03)
+#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
+#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BOARD_FAN (0x08)
+#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
+#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
+#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
+#endif
+struct mpi3_man_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_dev;
+ u8 reserved0d[3];
+ struct mpi3_man11_istwi_device_format istwi_device[MPI3_MAN11_ISTWI_DEVICE_MAX];
+};
+
+#define MPI3_MAN11_PAGEVERSION (0x00)
+#ifndef MPI3_MAN12_NUM_SGPIO_MAX
+#define MPI3_MAN12_NUM_SGPIO_MAX (1)
+#endif
+struct mpi3_man12_sgpio_info {
+ u8 slot_count;
+ u8 reserved01[3];
+ __le32 reserved04;
+ u8 phy_order[32];
+};
+
+struct mpi3_man_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 s_clock_freq;
+ __le32 activity_modulation;
+ u8 num_sgpio;
+ u8 reserved15[3];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 pattern[8];
+ struct mpi3_man12_sgpio_info sgpio_info[MPI3_MAN12_NUM_SGPIO_MAX];
+};
+
+#define MPI3_MAN12_PAGEVERSION (0x00)
+#define MPI3_MAN12_FLAGS_ERROR_PRESENCE_ENABLED (0x0400)
+#define MPI3_MAN12_FLAGS_ACTIVITY_INVERT_ENABLED (0x0200)
+#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100)
+#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001)
+#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32)
+#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_MASK (0x0000f000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_SHIFT (12)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_MASK (0x00000f00)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_SHIFT (8)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_MASK (0x000000f0)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_SHIFT (4)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_MASK (0x0000000f)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_SHIFT (0)
+#define MPI3_MAN12_PATTERN_RATE_MASK (0xe0000000)
+#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000)
+#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000)
+#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000)
+#define MPI3_MAN12_PATTERN_RATE_16_HZ (0x60000000)
+#define MPI3_MAN12_PATTERN_RATE_10_HZ (0x80000000)
+#define MPI3_MAN12_PATTERN_RATE_20_HZ (0xa0000000)
+#define MPI3_MAN12_PATTERN_RATE_40_HZ (0xc0000000)
+#define MPI3_MAN12_PATTERN_LENGTH_MASK (0x1f000000)
+#define MPI3_MAN12_PATTERN_LENGTH_SHIFT (24)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_MASK (0x00ffffff)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_SHIFT (0)
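
Each pattern[] dword in mpi3_man_page12 packs a blink rate, a pattern length, and the pattern bits themselves. A hypothetical composer, assuming only the masks above:

    static u32 mpi3_man12_pattern(u32 rate, u8 length, u32 bits)
    {
            return (rate & MPI3_MAN12_PATTERN_RATE_MASK) |
                   (((u32)length << MPI3_MAN12_PATTERN_LENGTH_SHIFT) &
                    MPI3_MAN12_PATTERN_LENGTH_MASK) |
                   (bits & MPI3_MAN12_PATTERN_BIT_PATTERN_MASK);
    }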
+#ifndef MPI3_MAN13_NUM_TRANSLATION_MAX
+#define MPI3_MAN13_NUM_TRANSLATION_MAX (1)
+#endif
+struct mpi3_man13_translation_info {
+ __le32 slot_status;
+ __le32 mask;
+ u8 activity;
+ u8 locate;
+ u8 error;
+ u8 reserved0b;
+};
+
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_FAULT (0x20000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_OFF (0x10000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_ACTIVITY (0x00800000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_MISSING (0x00100000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_INSERT (0x00080000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REMOVAL (0x00040000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IDENTIFY (0x00020000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_OK (0x00008000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_HOT_SPARE (0x00002000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000800)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_FAILED_ARRAY (0x00000400)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP (0x00000200)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP_ABORT (0x00000100)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_PREDICTED_FAILURE (0x00000040)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_OFF (0x00)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_ON (0x01)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_0 (0x02)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_1 (0x03)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_2 (0x04)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_3 (0x05)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_4 (0x06)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_5 (0x07)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_6 (0x08)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_7 (0x09)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY (0x0a)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY_TRAIL (0x0b)
+struct mpi3_man_page13 {
+ struct mpi3_config_page_header header;
+ u8 num_trans;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man13_translation_info translation[MPI3_MAN13_NUM_TRANSLATION_MAX];
+};
+
+#define MPI3_MAN13_PAGEVERSION (0x00)
+struct mpi3_man_page14 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_slot_groups;
+ u8 num_slots;
+ __le16 max_cert_chain_length;
+ __le32 sealed_slots;
+ __le32 populated_slots;
+ __le32 mgmt_pt_updatable_slots;
+};
+#define MPI3_MAN14_PAGEVERSION (0x00)
+#define MPI3_MAN14_NUMSLOTS_MAX (32)
+#ifndef MPI3_MAN15_VERSION_RECORD_MAX
+#define MPI3_MAN15_VERSION_RECORD_MAX 1
+#endif
+struct mpi3_man15_version_record {
+ __le16 spdm_version;
+ __le16 reserved02;
+};
+
+struct mpi3_man_page15 {
+ struct mpi3_config_page_header header;
+ u8 num_version_records;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man15_version_record version_record[MPI3_MAN15_VERSION_RECORD_MAX];
+};
+
+#define MPI3_MAN15_PAGEVERSION (0x00)
+#ifndef MPI3_MAN16_CERT_ALGO_MAX
+#define MPI3_MAN16_CERT_ALGO_MAX 1
+#endif
+struct mpi3_man16_certificate_algorithm {
+ u8 slot_group;
+ u8 reserved01[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved0c[3];
+};
+
+struct mpi3_man_page16 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_cert_algos;
+ u8 reserved0d[3];
+ struct mpi3_man16_certificate_algorithm certificate_algorithm[MPI3_MAN16_CERT_ALGO_MAX];
+};
+
+#define MPI3_MAN16_PAGEVERSION (0x00)
+#ifndef MPI3_MAN17_HASH_ALGORITHM_MAX
+#define MPI3_MAN17_HASH_ALGORITHM_MAX 1
+#endif
+struct mpi3_man17_hash_algorithm {
+ u8 meas_specification;
+ u8 reserved01[3];
+ __le32 measurement_hash_algo;
+ __le32 reserved08[2];
+};
+
+struct mpi3_man_page17 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_hash_algos;
+ u8 reserved0d[3];
+ struct mpi3_man17_hash_algorithm hash_algorithm[MPI3_MAN17_HASH_ALGORITHM_MAX];
+};
+
+#define MPI3_MAN17_PAGEVERSION (0x00)
+struct mpi3_man_page20 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 nonpremium_features;
+ u8 allowed_personalities;
+ u8 reserved11[3];
+};
+
+#define MPI3_MAN20_PAGEVERSION (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_NONPREMIUM_DISABLE_PD_DEGRADED_MASK (0x01)
+#define MPI3_MAN20_NONPREMIUM_DISABLE_PD_DEGRADED_ENABLED (0x00)
+#define MPI3_MAN20_NONPREMIUM_DISABLE_PD_DEGRADED_DISABLED (0x01)
+struct mpi3_man_page21 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 flags;
+};
+
+#define MPI3_MAN21_PAGEVERSION (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
+#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
+#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
+#endif
+struct mpi3_man_page_product_specific {
+ struct mpi3_config_page_header header;
+ __le32 product_specific_info[MPI3_MAN_PROD_SPECIFIC_MAX];
+};
+
+struct mpi3_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le64 unique_value;
+ __le32 nvdata_version_default;
+ __le32 nvdata_version_persistent;
+};
+
+#define MPI3_IOUNIT0_PAGEVERSION (0x00)
+struct mpi3_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 dmd_io_delay;
+ u8 dmd_report_pcie;
+ u8 dmd_report_sata;
+ u8 dmd_report_sas;
+};
+
+#define MPI3_IOUNIT1_PAGEVERSION (0x00)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020)
+#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008)
+#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002)
+#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7f)
+#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80)
+#ifndef MPI3_IO_UNIT2_GPIO_VAL_MAX
+#define MPI3_IO_UNIT2_GPIO_VAL_MAX (1)
+#endif
+struct mpi3_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 gpio_count;
+ u8 reserved09[3];
+ __le16 gpio_val[MPI3_IO_UNIT2_GPIO_VAL_MAX];
+};
+
+#define MPI3_IOUNIT2_PAGEVERSION (0x00)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xfffc)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2)
+#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001)
+#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000)
+#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
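
Each gpio_val word in mpi3_io_unit_page2 splits into a function number and an on/off setting. An illustrative decode, assuming the masks above (helper names hypothetical):

    static u16 mpi3_iounit2_gpio_function(u16 val)
    {
            return (val & MPI3_IOUNIT2_GPIO_FUNCTION_MASK) >>
                   MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT;
    }

    static bool mpi3_iounit2_gpio_is_on(u16 val)
    {
            return (val & MPI3_IOUNIT2_GPIO_SETTING_MASK) ==
                   MPI3_IOUNIT2_GPIO_SETTING_ON;
    }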
+struct mpi3_io_unit3_sensor {
+ __le16 flags;
+ u8 threshold_margin;
+ u8 reserved03;
+ __le16 threshold[3];
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+};
+
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
+#ifndef MPI3_IO_UNIT3_SENSOR_MAX
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 nominal_poll_interval;
+ u8 warning_poll_interval;
+ u8 reserved0f;
+ struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT3_PAGEVERSION (0x00)
+struct mpi3_io_unit4_sensor {
+ __le16 current_temperature;
+ __le16 reserved02;
+ u8 flags;
+ u8 reserved05[3];
+ __le16 istwi_index;
+ u8 channel;
+ u8 reserved0b;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
+#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
+#ifndef MPI3_IO_UNIT4_SENSOR_MAX
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page4 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 reserved0d[3];
+ struct mpi3_io_unit4_sensor sensor[MPI3_IO_UNIT4_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT4_PAGEVERSION (0x00)
+struct mpi3_io_unit5_spinup_group {
+ u8 max_target_spinup;
+ u8 spinup_delay;
+ u8 spinup_flags;
+ u8 reserved03;
+};
+
+#define MPI3_IOUNIT5_SPINUP_FLAGS_DISABLE (0x01)
+#ifndef MPI3_IO_UNIT5_PHY_MAX
+#define MPI3_IO_UNIT5_PHY_MAX (4)
+#endif
+struct mpi3_io_unit_page5 {
+ struct mpi3_config_page_header header;
+ struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le16 device_shutdown;
+ __le16 reserved22;
+ u8 pcie_device_wait_time;
+ u8 sata_device_wait_time;
+ u8 spinup_encl_drive_count;
+ u8 spinup_encl_delay;
+ u8 num_phys;
+ u8 pe_initial_spinup_delay;
+ u8 topology_stable_time;
+ u8 flags;
+ u8 phy[MPI3_IO_UNIT5_PHY_MAX];
+};
+
+#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c)
+#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
+#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
+#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
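
Each byte of the phy[] array in mpi3_io_unit_page5 assigns that phy to one of the four spinup groups via its low two bits. A one-line hypothetical accessor:

    static u8 mpi3_iounit5_phy_spinup_group(u8 phy_entry)
    {
            return phy_entry & MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK;
    }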
+struct mpi3_io_unit_page6 {
+ struct mpi3_config_page_header header;
+ __le32 board_power_requirement;
+ __le32 pci_slot_power_allocation;
+ u8 flags;
+ u8 reserved11[3];
+};
+
+#define MPI3_IOUNIT6_PAGEVERSION (0x00)
+#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
+#ifndef MPI3_IOUNIT8_DIGEST_MAX
+#define MPI3_IOUNIT8_DIGEST_MAX (1)
+#endif
+union mpi3_iounit8_digest {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+struct mpi3_io_unit_page8 {
+ struct mpi3_config_page_header header;
+ u8 sb_mode;
+ u8 sb_state;
+ __le16 reserved0a;
+ u8 num_slots;
+ u8 slots_available;
+ u8 current_key_encryption_algo;
+ u8 key_digest_hash_algo;
+ union mpi3_version_union current_svn;
+ __le32 reserved14;
+ __le32 current_key[128];
+ union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
+};
+
+#define MPI3_IOUNIT8_PAGEVERSION (0x00)
+#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
+#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
+#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
+#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+struct mpi3_io_unit_page9 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le16 first_device;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_io_unit_page10 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 silicon_id;
+ u8 fw_version_minor;
+ u8 fw_version_major;
+ u8 hw_version_minor;
+ u8 hw_version_major;
+ u8 part_number[16];
+};
+#define MPI3_IOUNIT10_PAGEVERSION (0x00)
+#define MPI3_IOUNIT10_FLAGS_VALID (0x01)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02)
+#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80)
+#ifndef MPI3_IOUNIT11_PROFILE_MAX
+#define MPI3_IOUNIT11_PROFILE_MAX (1)
+#endif
+struct mpi3_iounit11_profile {
+ u8 profile_identifier;
+ u8 reserved01[3];
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_nvme;
+ __le16 max_outstanding_requests;
+ __le16 subsystem_id;
+ __le16 reserved12;
+ __le32 reserved14[2];
+};
+struct mpi3_io_unit_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_profiles;
+ u8 current_profile_identifier;
+ __le16 reserved0e;
+ struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
+};
+#define MPI3_IOUNIT11_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT12_BUCKET_MAX
+#define MPI3_IOUNIT12_BUCKET_MAX (1)
+#endif
+struct mpi3_iounit12_bucket {
+ u8 coalescing_depth;
+ u8 coalescing_timeout;
+ __le16 io_count_low_boundary;
+ __le32 reserved04;
+};
+struct mpi3_io_unit_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c[4];
+ u8 num_buckets;
+ u8 reserved1d[3];
+ struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX];
+};
+#define MPI3_IOUNIT12_PAGEVERSION (0x00)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003)
+#ifndef MPI3_IOUNIT13_FUNC_MAX
+#define MPI3_IOUNIT13_FUNC_MAX (1)
+#endif
+struct mpi3_iounit13_allowed_function {
+ __le16 sub_function;
+ u8 function_code;
+ u8 function_flags;
+};
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01)
+struct mpi3_io_unit_page13 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_functions;
+ u8 reserved0d[3];
+ struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX];
+};
+#define MPI3_IOUNIT13_PAGEVERSION (0x00)
+#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
+#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
+struct mpi3_ioc_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 vendor_id;
+ __le16 device_id;
+ u8 revision_id;
+ u8 reserved11[3];
+ __le32 class_code;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+};
+
+#define MPI3_IOC0_PAGEVERSION (0x00)
+struct mpi3_ioc_page1 {
+ struct mpi3_config_page_header header;
+ __le32 coalescing_timeout;
+ u8 coalescing_depth;
+ u8 obsolete;
+ __le16 reserved0e;
+};
+#define MPI3_IOC1_PAGEVERSION (0x00)
+#ifndef MPI3_IOC2_EVENTMASK_WORDS
+#define MPI3_IOC2_EVENTMASK_WORDS (4)
+#endif
+struct mpi3_ioc_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_IOC2_EVENTMASK_WORDS];
+};
+
+#define MPI3_IOC2_PAGEVERSION (0x00)
+#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
+#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
+#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
+#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
+#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
+struct mpi3_allowed_cmd_scsi {
+ __le16 service_action;
+ u8 operation_code;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_ata {
+ u8 subcommand;
+ u8 reserved01;
+ u8 command;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_nvme {
+ u8 reserved00;
+ u8 nvme_cmd_flags;
+ u8 op_code;
+ u8 command_flags;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
+union mpi3_allowed_cmd {
+ struct mpi3_allowed_cmd_scsi scsi;
+ struct mpi3_allowed_cmd_ata ata;
+ struct mpi3_allowed_cmd_nvme nvme;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
+#ifndef MPI3_ALLOWED_CMDS_MAX
+#define MPI3_ALLOWED_CMDS_MAX (1)
+#endif
+struct mpi3_driver_page0 {
+ struct mpi3_config_page_header header;
+ __le32 bsd_options;
+ u8 ssu_timeout;
+ u8 io_timeout;
+ u8 tur_retries;
+ u8 tur_interval;
+ u8 reserved10;
+ u8 security_key_timeout;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+};
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+struct mpi3_driver_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c;
+ __le16 host_diag_trace_max_size;
+ __le16 host_diag_trace_min_size;
+ __le16 host_diag_trace_decrement_size;
+ __le16 reserved16;
+ __le16 host_diag_fw_max_size;
+ __le16 host_diag_fw_min_size;
+ __le16 host_diag_fw_decrement_size;
+ __le16 reserved1e;
+ __le16 host_diag_driver_max_size;
+ __le16 host_diag_driver_min_size;
+ __le16 host_diag_driver_decrement_size;
+ __le16 reserved26;
+};
+
+#define MPI3_DRIVER1_PAGEVERSION (0x00)
+#ifndef MPI3_DRIVER2_TRIGGER_MAX
+#define MPI3_DRIVER2_TRIGGER_MAX (1)
+#endif
+struct mpi3_driver2_trigger_event {
+ u8 type;
+ u8 flags;
+ u8 reserved02;
+ u8 event;
+ __le32 reserved04[3];
+};
+
+struct mpi3_driver2_trigger_scsi_sense {
+ u8 type;
+ u8 flags;
+ __le16 reserved02;
+ u8 ascq;
+ u8 asc;
+ u8 sense_key;
+ u8 reserved07;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
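
The MATCH_ALL values above act as wildcards when a sense trigger is compared against returned sense data. A hedged matcher sketch (helper name hypothetical, not the driver's actual routine):

    static bool mpi3_sense_trigger_matches(
            const struct mpi3_driver2_trigger_scsi_sense *t,
            u8 sense_key, u8 asc, u8 ascq)
    {
            return (t->sense_key == MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL ||
                    t->sense_key == sense_key) &&
                   (t->asc == MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
                    t->asc == asc) &&
                   (t->ascq == MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
                    t->ascq == ascq);
    }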
+struct mpi3_driver2_trigger_reply {
+ u8 type;
+ u8 flags;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 ioc_log_info_mask;
+ __le32 reserved0c;
+};
+
+#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
+union mpi3_driver2_trigger_element {
+ struct mpi3_driver2_trigger_event event;
+ struct mpi3_driver2_trigger_scsi_sense scsi_sense;
+ struct mpi3_driver2_trigger_reply reply;
+};
+
+#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
+#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
+#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
+struct mpi3_driver_page2 {
+ struct mpi3_config_page_header header;
+ __le64 master_trigger;
+ __le32 reserved10[3];
+ u8 num_triggers;
+ u8 reserved1d[3];
+ union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
+};
+
+#define MPI3_DRIVER2_PAGEVERSION (0x00)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
+struct mpi3_driver_page10 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER10_PAGEVERSION (0x00)
+struct mpi3_driver_page20 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER20_PAGEVERSION (0x00)
+struct mpi3_driver_page30 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER30_PAGEVERSION (0x00)
+union mpi3_security_mac {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security_nonce {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security0_cert_chain {
+ __le32 dword[1024];
+ __le16 word[2048];
+ u8 byte[4096];
+};
+
+struct mpi3_security_page0 {
+ struct mpi3_config_page_header header;
+ u8 slot_num_group;
+ u8 slot_num;
+ __le16 cert_chain_length;
+ u8 cert_chain_flags;
+ u8 reserved0d[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved18[4];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ union mpi3_security0_cert_chain certificate_chain;
+};
+
+#define MPI3_SECURITY0_PAGEVERSION (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_SEALED (0x01)
+#ifndef MPI3_SECURITY1_KEY_RECORD_MAX
+#define MPI3_SECURITY1_KEY_RECORD_MAX 1
+#endif
+#ifndef MPI3_SECURITY1_PAD_MAX
+#define MPI3_SECURITY1_PAD_MAX 1
+#endif
+union mpi3_security1_key_data {
+ __le32 dword[128];
+ __le16 word[256];
+ u8 byte[512];
+};
+
+struct mpi3_security1_key_record {
+ u8 flags;
+ u8 consumer;
+ __le16 key_data_size;
+ __le32 additional_key_data;
+ __le32 reserved08[2];
+ union mpi3_security1_key_data key_data;
+};
+
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1f)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PRIVATE (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PUBLIC (0x04)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
+struct mpi3_security_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ u8 num_keys;
+ u8 reserved91[3];
+ __le32 reserved94[3];
+ struct mpi3_security1_key_record key_record[MPI3_SECURITY1_KEY_RECORD_MAX];
+ u8 pad[MPI3_SECURITY1_PAD_MAX];
+};
+
+#define MPI3_SECURITY1_PAGEVERSION (0x00)
+struct mpi3_sas_io_unit0_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 reserved06;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 discovery_status;
+ __le32 reserved10;
+};
+
+#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
+#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ __le16 reserved0e;
+ struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+struct mpi3_sas_io_unit1_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 max_target_port_connect_time;
+ __le32 reserved08;
+};
+
+#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
+#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le16 control_flags;
+ __le16 sas_narrow_max_queue_depth;
+ __le16 additional_control_flags;
+ __le16 sas_wide_max_queue_depth;
+ u8 num_phys;
+ u8 sata_max_q_depth;
+ __le16 reserved12;
+ struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT1_CONTROL_CONTROLLER_DEVICE_SELF_TEST (0x8000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI3_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001)
+#define MPI3_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
+#define MPI3_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI3_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI3_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI3_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI3_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI3_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI3_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI3_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+#define MPI3_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_6_0 (0xa0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xb0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xc0)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0f)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0a)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0b)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0c)
+struct mpi3_sas_io_unit2_phy_pm_settings {
+ u8 control_flags;
+ u8 reserved01;
+ __le16 inactivity_timer_exponent;
+ u8 sata_partial_timeout;
+ u8 reserved05;
+ u8 sata_slumber_timeout;
+ u8 reserved07;
+ u8 sas_partial_timeout;
+ u8 reserved09;
+ u8 sas_slumber_timeout;
+ u8 reserved0b;
+};
+
+#ifndef MPI3_SAS_IO_UNIT2_PHY_MAX
+#define MPI3_SAS_IO_UNIT2_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_sas_io_unit2_phy_pm_settings sas_phy_power_management_settings[MPI3_SAS_IO_UNIT2_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK (0x7000)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT (12)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_MASK (0x0700)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_SHIFT (8)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_MASK (0x0070)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_SHIFT (4)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK (0x0007)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT (0)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_SECONDS (7)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_SECOND (6)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MILLISECONDS (5)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MILLISECONDS (4)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MILLISECOND (3)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MICROSECONDS (2)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MICROSECONDS (1)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND (0)
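(editor's note, not part of the diff) A minimal decode sketch for the power-management timers above. The MPI3_SASIOUNIT2_ITE_EXP_* names map exponent n to a 10^n microsecond unit (0 = 1 us ... 7 = 10 s); that the per-phy timeout count multiplies this unit is an assumption, and the function name is hypothetical. The snippet presumes a .c file that includes this header.

static u64 sata_partial_timeout_us(const struct mpi3_sas_io_unit2_phy_pm_settings *pm)
{
	unsigned int exp = (le16_to_cpu(pm->inactivity_timer_exponent) &
			    MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK) >>
			   MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT;
	u64 unit_us = 1;

	while (exp--)			/* unit is 10^exp microseconds */
		unit_us *= 10;
	return pm->sata_partial_timeout * unit_us;
}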
+struct mpi3_sas_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 power_management_capabilities;
+};
+
+#define MPI3_SASIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+struct mpi3_sas_expander_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 report_gen_length;
+ __le16 enclosure_handle;
+ __le32 reserved0c;
+ __le64 sas_address;
+ __le32 discovery_status;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 expander_change_count;
+ __le16 expander_route_indexes;
+ u8 num_phys;
+ u8 sas_level;
+ __le16 flags;
+ __le16 stp_bus_inactivity_time_limit;
+ __le16 stp_max_connect_time_limit;
+ __le16 stp_smp_nexus_loss_time;
+ __le16 max_num_routed_sas_addresses;
+ __le64 active_zone_manager_sas_address;
+ __le16 zone_lock_inactivity_limit;
+ __le16 reserved3a;
+ u8 time_to_reduced_func;
+ u8 initial_time_to_reduced_func;
+ u8 max_reduced_func_time;
+ u8 exp_status;
+};
+
+#define MPI3_SASEXPANDER0_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI3_SASEXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI3_SASEXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI3_SASEXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI3_SASEXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI3_SASEXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI3_SASEXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI3_SASEXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI3_SASEXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI3_SASEXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI3_SASEXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+#define MPI3_SASEXPANDER0_ES_NOT_RESPONDING (0x02)
+#define MPI3_SASEXPANDER0_ES_RESPONDING (0x03)
+#define MPI3_SASEXPANDER0_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_sas_expander_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_phys;
+ u8 phy;
+ __le16 num_table_entries_programmed;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_handle;
+ __le32 phy_info;
+ __le16 attached_device_info;
+ __le16 reserved1a;
+ __le16 expander_dev_handle;
+ u8 change_count;
+ u8 negotiated_link_rate;
+ u8 phy_identifier;
+ u8 attached_phy_identifier;
+ u8 reserved22;
+ u8 discovery_info;
+ __le32 attached_phy_info;
+ u8 zone_group;
+ u8 self_config_status;
+ __le16 reserved2a;
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASEXPANDER1_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
+#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
+#endif
+struct mpi3_sasexpander2_phy_element {
+ u8 link_change_count;
+ u8 reserved01;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_sas_expander_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
+};
+
+#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
+struct mpi3_sas_port_page0 {
+ struct mpi3_config_page_header header;
+ u8 port_number;
+ u8 reserved09;
+ u8 port_width;
+ u8 reserved0b;
+ u8 zone_group;
+ u8 reserved0d[3];
+ __le64 sas_address;
+ __le16 device_info;
+ __le16 reserved1a;
+ __le32 reserved1c;
+};
+
+#define MPI3_SASPORT0_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page0 {
+ struct mpi3_config_page_header header;
+ __le16 owner_dev_handle;
+ __le16 reserved0a;
+ __le16 attached_dev_handle;
+ u8 attached_phy_identifier;
+ u8 reserved0f;
+ __le32 attached_phy_info;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ u8 change_count;
+ u8 flags;
+ __le32 phy_info;
+ u8 negotiated_link_rate;
+ u8 reserved1d[3];
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASPHY0_PAGEVERSION (0x00)
+#define MPI3_SASPHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+struct mpi3_sas_phy_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 invalid_dword_count;
+ __le32 running_disparity_error_count;
+ __le32 loss_dword_synch_count;
+ __le32 phy_reset_problem_count;
+};
+
+#define MPI3_SASPHY1_PAGEVERSION (0x00)
+struct mpi3_sas_phy2_phy_event {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ __le32 phy_event_info;
+};
+
+#ifndef MPI3_SAS_PHY2_PHY_EVENT_MAX
+#define MPI3_SAS_PHY2_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy2_phy_event phy_event[MPI3_SAS_PHY2_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY2_PAGEVERSION (0x00)
+struct mpi3_sas_phy3_phy_event_config {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved07;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved0e;
+};
+
+#define MPI3_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI3_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI3_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI3_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI3_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI3_SASPHY3_EVENT_CODE_INV_SPL_PACKETS (0x07)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_SPL_PACKET_SYNC (0x08)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI3_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI3_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI3_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI3_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI3_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI3_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI3_SASPHY3_EVENT_CODE_CONNECTION (0x2a)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2b)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2c)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2d)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2e)
+#define MPI3_SASPHY3_EVENT_CODE_PERSIST_CONN (0x2f)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI3_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI3_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI3_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xd0)
+#define MPI3_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xd1)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP (0xd2)
+#define MPI3_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xd3)
+#define MPI3_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xd4)
+#define MPI3_SASPHY3_EVENT_CODE_LCCONN_TIME (0xd5)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xd6)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_TX_START (0xd7)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xd8)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xd9)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xda)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xdb)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xdc)
+#define MPI3_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI3_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI3_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI3_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI3_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+#define MPI3_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI3_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+#ifndef MPI3_SAS_PHY3_PHY_EVENT_MAX
+#define MPI3_SAS_PHY3_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy3_phy_event_config phy_event_config[MPI3_SAS_PHY3_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY3_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page4 {
+ struct mpi3_config_page_header header;
+ u8 reserved08[3];
+ u8 flags;
+ u8 initial_frame[28];
+};
+
+#define MPI3_SASPHY4_PAGEVERSION (0x00)
+#define MPI3_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI3_SASPHY4_FLAGS_SATA_FRAME (0x01)
+#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30)
+#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4)
+#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0f)
+#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI3_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
+#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
+#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
+#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
+#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
+#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
+struct mpi3_pcie_io_unit0_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 enumeration_status;
+ u8 io_unit_port;
+ u8 reserved0d[3];
+};
+
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCH_DEPTH_EXCEEDED (0x80000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_ENDPOINTS_EXCEEDED (0x20000000)
+#define MPI3_PCIEIOUNIT0_ES_INSUFFICIENT_RESOURCES (0x10000000)
+#ifndef MPI3_PCIE_IO_UNIT0_PHY_MAX
+#define MPI3_PCIE_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ u8 aspm;
+ u8 reserved0f;
+ struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_RESOURCE_ALLOC_FAILED (0x03)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_HOST_PORT_MISMATCH (0x06)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PHYS_NOT_CONSECUTIVE (0x07)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
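(editor's note, not part of the diff) The aspm byte of PCIe IO unit page 0 packs four two-bit fields, each holding one of the MPI3_PCIE_ASPM_ENABLE_* or MPI3_PCIE_ASPM_SUPPORT_* encodings defined earlier; extraction is a mechanical mask and shift, sketched here for one field with a hypothetical function name.

static u8 iounit0_aspm_direct_support(const struct mpi3_pcie_io_unit_page0 *p0)
{
	/* the other three fields follow the same mask/shift pattern */
	return (p0->aspm & MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK) >>
	       MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT;
}

A return value of MPI3_PCIE_ASPM_SUPPORT_L0S_L1, for example, would indicate that both low-power states are supported on directly attached links.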
+struct mpi3_pcie_io_unit1_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le32 reserved04;
+ __le32 reserved08;
+};
+
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 control_flags;
+ __le32 reserved0c;
+ u8 num_phys;
+ u8 reserved11;
+ u8 aspm;
+ u8 reserved13;
+ struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
+struct mpi3_pcie_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ __le16 nvme_max_q_dx1;
+ __le16 nvme_max_q_dx2;
+ u8 nvme_abort_to;
+ u8 reserved0d;
+ __le16 nvme_max_q_dx4;
+};
+
+#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
+#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
+#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
+#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
+struct mpi3_pcie_io_unit3_error {
+ __le16 threshold_count;
+ __le16 reserved02;
+};
+
+struct mpi3_pcie_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ u8 threshold_window;
+ u8 threshold_action;
+ u8 escalation_count;
+ u8 escalation_action;
+ u8 num_errors;
+ u8 reserved0d[3];
+ struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
+};
+
+#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
+struct mpi3_pcie_switch_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 switch_status;
+ u8 reserved0a[2];
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ u8 num_ports;
+ u8 pcie_level;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+};
+
+#define MPI3_PCIESWITCH0_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH0_SS_NOT_RESPONDING (0x02)
+#define MPI3_PCIESWITCH0_SS_RESPONDING (0x03)
+#define MPI3_PCIESWITCH0_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_pcie_switch_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 flags;
+ __le16 reserved0a;
+ u8 num_ports;
+ u8 port_num;
+ __le16 attached_dev_handle;
+ __le16 switch_dev_handle;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ __le16 slot;
+ __le16 slot_index;
+ __le32 reserved18;
+};
+
+#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
+#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
+#endif
+struct mpi3_pcieswitch2_port_element {
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_pcie_switch_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_ports;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
+};
+
+#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
+struct mpi3_pcie_link_page0 {
+ struct mpi3_config_page_header header;
+ u8 link;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ __le32 receiver_error_count;
+ __le32 recovery_count;
+ __le32 corr_error_msg_count;
+ __le32 non_fatal_error_msg_count;
+ __le32 fatal_error_msg_count;
+ __le32 non_fatal_error_count;
+ __le32 fatal_error_count;
+ __le32 bad_dllp_count;
+ __le32 bad_tlp_count;
+};
+
+#define MPI3_PCIELINK0_PAGEVERSION (0x00)
+struct mpi3_enclosure_page0 {
+ struct mpi3_config_page_header header;
+ __le64 enclosure_logical_id;
+ __le16 flags;
+ __le16 enclosure_handle;
+ __le16 num_slots;
+ __le16 reserved16;
+ u8 io_unit_port;
+ u8 enclosure_level;
+ __le16 sep_dev_handle;
+ u8 chassis_slot;
+ u8 reserved1d[3];
+};
+
+#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xc000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
+#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000f)
+#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002)
+#define MPI3_DEVICE_DEVFORM_SAS_SATA (0x00)
+#define MPI3_DEVICE_DEVFORM_PCIE (0x01)
+#define MPI3_DEVICE_DEVFORM_VD (0x02)
+struct mpi3_device0_sas_sata_format {
+ __le64 sas_address;
+ __le16 flags;
+ __le16 device_info;
+ u8 phy_num;
+ u8 attached_phy_identifier;
+ u8 max_port_connections;
+ u8 zone_group;
+};
+
+#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
+#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SW_PRESERVE (0x0040)
+#define MPI3_DEVICE0_SASSATA_FLAGS_UNSUPP_DEV (0x0020)
+#define MPI3_DEVICE0_SASSATA_FLAGS_48BIT_LBA (0x0010)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SMART_SUPP (0x0008)
+#define MPI3_DEVICE0_SASSATA_FLAGS_NCQ_SUPP (0x0004)
+#define MPI3_DEVICE0_SASSATA_FLAGS_FUA_SUPP (0x0002)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PERSIST_CAP (0x0001)
+struct mpi3_device0_pcie_format {
+ u8 supported_link_rates;
+ u8 max_port_width;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ u8 port_num;
+ u8 controller_reset_to;
+ __le16 device_info;
+ __le32 maximum_data_transfer_size;
+ __le32 capabilities;
+ __le16 noiob;
+ u8 nvme_abort_to;
+ u8 page_size;
+ __le16 shutdown_latency;
+ u8 recovery_info;
+ u8 reserved17;
+};
+
+#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_16_0_SUPP (0x08)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
+#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
+#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
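(editor's note, not part of the diff) A sketch separating the recovery_info byte of the PCIe device format into its method and reason fields using the masks just defined; the function name and the example policy are hypothetical.

static bool pcie_dev_recoverable_by_format(const struct mpi3_device0_pcie_format *pcie)
{
	u8 method = pcie->recovery_info & MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK;
	u8 reason = pcie->recovery_info & MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK;

	/* e.g. an unsupported LBA data size that a namespace format could fix */
	return method == MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT &&
	       reason == MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ;
}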
+struct mpi3_device0_vd_format {
+ u8 vd_state;
+ u8 raid_level;
+ __le16 device_info;
+ __le16 flags;
+ __le16 io_throttle_group;
+ __le16 io_throttle_group_low;
+ __le16 io_throttle_group_high;
+ __le32 reserved0c;
+};
+#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
+#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
+#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
+#define MPI3_DEVICE0_VD_STATE_OPTIMAL (0x03)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_0 (0)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_1 (1)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_5 (5)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_6 (6)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_10 (10)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_50 (50)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_60 (60)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_HDD (0x0010)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SSD (0x0008)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+union mpi3_device0_dev_spec_format {
+ struct mpi3_device0_sas_sata_format sas_sata_format;
+ struct mpi3_device0_pcie_format pcie_format;
+ struct mpi3_device0_vd_format vd_format;
+};
+
+struct mpi3_device_page0 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 slot;
+ __le16 enclosure_handle;
+ __le64 wwid;
+ __le16 persistent_id;
+ u8 io_unit_port;
+ u8 access_status;
+ __le16 flags;
+ __le16 reserved1e;
+ __le16 slot_index;
+ __le16 queue_depth;
+ u8 reserved24[3];
+ u8 device_form;
+ union mpi3_device0_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
+#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
+#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
+#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
+#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
+#define MPI3_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_DEVICE0_ASTATUS_CAP_UNSUPPORTED (0x02)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
+#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
+#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
+#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
+#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
+#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
+#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
+#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
+#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
+#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
+#define MPI3_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x23)
+#define MPI3_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x24)
+#define MPI3_DEVICE0_ASTATUS_SIF_PIO_SN (0x25)
+#define MPI3_DEVICE0_ASTATUS_SIF_MDMA_SN (0x26)
+#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27)
+#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28)
+#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29)
+#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2f)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
+#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
+#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
+#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDENTIFY_FAILED (0x43)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCONFIG_FAILED (0x44)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCREATION_FAILED (0x45)
+#define MPI3_DEVICE0_ASTATUS_NVME_EVENTCFG_FAILED (0x46)
+#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
+#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
+#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
+#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
+#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
+#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
+#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_TOO_MANY_ERRORS (0x52)
+#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
+#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
+#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010)
+#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
+#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
+struct mpi3_device1_sas_sata_format {
+ __le32 reserved00;
+};
+struct mpi3_device1_pcie_format {
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved08;
+ u8 revision_id;
+ u8 reserved0d;
+ __le16 pci_parameters;
+};
+
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_128B (0x0)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_256B (0x1)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_512B (0x2)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_1024B (0x3)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_2048B (0x4)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_4096B (0x5)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_MASK (0x01c0)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_SHIFT (6)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK (0x0038)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT (3)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_MASK (0x0007)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_SHIFT (0)
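(editor's note, not part of the diff) The DATA_SIZE encodings above step in powers of two from 128 bytes, so a size in bytes is simply 128 << encoding; a sketch for the current-max-payload field, with a hypothetical function name.

static u32 pcie_curr_max_payload_bytes(const struct mpi3_device1_pcie_format *p)
{
	u16 enc = (le16_to_cpu(p->pci_parameters) &
		   MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK) >>
		  MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT;

	return 128U << enc;	/* 0 -> 128B ... 5 -> 4096B */
}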
+struct mpi3_device1_vd_format {
+ __le32 reserved00;
+};
+
+union mpi3_device1_dev_spec_format {
+ struct mpi3_device1_sas_sata_format sas_sata_format;
+ struct mpi3_device1_pcie_format pcie_format;
+ struct mpi3_device1_vd_format vd_format;
+};
+
+struct mpi3_device_page1 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 reserved0a;
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le16 tm_count;
+ __le16 reserved12;
+ __le32 reserved14[10];
+ u8 reserved3c[3];
+ u8 device_form;
+ union mpi3_device1_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
+#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
+#endif
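(editor's note, not part of the diff) Device pages 0 and 1 close out the configuration-page definitions; a consumer selects the live arm of the device_specific union by the device_form byte, roughly as sketched here with a hypothetical function assumed to live in a .c file that includes this header.

static void handle_device_page0(const struct mpi3_device_page0 *dev_pg0)
{
	switch (dev_pg0->device_form) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
		/* device_specific.sas_sata_format is the live arm */
		break;
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* device_specific.pcie_format is the live arm */
		break;
	case MPI3_DEVICE_DEVFORM_VD:
		/* device_specific.vd_format is the live arm */
		break;
	}
}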
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
new file mode 100644
index 000000000..64c588159
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2018-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_IMAGE_H
+#define MPI30_IMAGE_H 1
+struct mpi3_comp_image_version {
+ __le16 build_num;
+ __le16 customer_id;
+ u8 phase_minor;
+ u8 phase_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+struct mpi3_hash_exclusion_format {
+ __le32 offset;
+ __le32 size;
+};
+
+#define MPI3_IMAGE_HASH_EXCUSION_NUM (4)
+struct mpi3_component_image_header {
+ __le32 signature0;
+ __le32 load_address;
+ __le32 data_size;
+ __le32 start_offset;
+ __le32 signature1;
+ __le32 flash_offset;
+ __le32 image_size;
+ __le32 version_string_offset;
+ __le32 build_date_string_offset;
+ __le32 build_time_string_offset;
+ __le32 environment_variable_offset;
+ __le32 application_specific;
+ __le32 signature2;
+ __le32 header_size;
+ __le32 crc;
+ __le32 flags;
+ __le32 secondary_flash_offset;
+ __le32 etp_offset;
+ __le32 etp_size;
+ union mpi3_version_union rmc_interface_version;
+ union mpi3_version_union etp_interface_version;
+ struct mpi3_comp_image_version component_image_version;
+ struct mpi3_hash_exclusion_format hash_exclusion[MPI3_IMAGE_HASH_EXCUSION_NUM];
+ __le32 next_image_header_offset;
+ union mpi3_version_union security_version;
+ __le32 reserved84[31];
+};
+
+#define MPI3_IMAGE_HEADER_SIGNATURE0_MPI3 (0xeb00003e)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_INVALID (0x00000000)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_FIRST_MUTABLE (0x20434d46)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_BSP (0x20505342)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_ROM_BIOS (0x534f4942)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_X64 (0x4d494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_ARM (0x41494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_CPLD (0x444c5043)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
+#define MPI3_IMAGE_HEADER_FLAGS_REQUIRES_ACTIVATION (0x00000004)
+#define MPI3_IMAGE_HEADER_FLAGS_COMPRESSED (0x00000002)
+#define MPI3_IMAGE_HEADER_FLAGS_FLASH (0x00000001)
+#define MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
+#define MPI3_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
+#define MPI3_IMAGE_HEADER_START_OFFSET_OFFSET (0x0c)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
+#define MPI3_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
+#define MPI3_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
+#define MPI3_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1c)
+#define MPI3_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
+#define MPI3_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
+#define MPI3_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28)
+#define MPI3_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2c)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
+#define MPI3_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
+#define MPI3_IMAGE_HEADER_CRC_OFFSET (0x38)
+#define MPI3_IMAGE_HEADER_FLAGS_OFFSET (0x3c)
+#define MPI3_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
+#define MPI3_IMAGE_HEADER_ETP_OFFSET_OFFSET (0x44)
+#define MPI3_IMAGE_HEADER_ETP_SIZE_OFFSET (0x48)
+#define MPI3_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4c)
+#define MPI3_IMAGE_HEADER_ETP_INTERFACE_VER_OFFSET (0x50)
+#define MPI3_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
+#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
+#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
+#define MPI3_IMAGE_HEADER_SIZE (0x100)
+#ifndef MPI3_CI_MANIFEST_MPI_MAX
+#define MPI3_CI_MANIFEST_MPI_MAX (1)
+#endif
+struct mpi3_ci_manifest_mpi_comp_image_ref {
+ __le32 signature1;
+ __le32 reserved04[3];
+ struct mpi3_comp_image_version component_image_version;
+ __le32 component_image_version_string_offset;
+ __le32 crc;
+};
+
+struct mpi3_ci_manifest_mpi {
+ u8 manifest_type;
+ u8 reserved01[3];
+ __le32 reserved04[3];
+ u8 num_image_references;
+ u8 release_level;
+ __le16 reserved12;
+ __le16 reserved14;
+ __le16 flags;
+ __le32 reserved18[2];
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved28[2];
+ union mpi3_version_union package_security_version;
+ __le32 reserved34;
+ struct mpi3_comp_image_version package_version;
+ __le32 package_version_string_offset;
+ __le32 package_build_date_string_offset;
+ __le32 package_build_time_string_offset;
+ __le32 reserved4c;
+ __le32 diag_authorization_identifier[16];
+ struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
+};
+
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
+#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
+#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
+union mpi3_ci_manifest {
+ struct mpi3_ci_manifest_mpi mpi;
+ __le32 dword[1];
+};
+
+#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
+struct mpi3_extended_image_header {
+ u8 image_type;
+ u8 reserved01[3];
+ __le32 checksum;
+ __le32 image_size;
+ __le32 next_image_header_offset;
+ __le32 reserved10[4];
+ __le32 identify_string[8];
+};
+
+#define MPI3_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI3_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI3_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0c)
+#define MPI3_EXT_IMAGE_HEADER_SIZE (0x40)
+#define MPI3_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI3_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI3_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI3_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI3_EXT_IMAGE_TYPE_RDE (0x0a)
+#define MPI3_EXT_IMAGE_TYPE_AUXILIARY_PROCESSOR (0x0b)
+#define MPI3_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI3_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xff)
+struct mpi3_supported_device {
+ __le16 device_id;
+ __le16 vendor_id;
+ __le16 device_id_mask;
+ __le16 reserved06;
+ u8 low_pci_rev;
+ u8 high_pci_rev;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_SUPPORTED_DEVICE_MAX
+#define MPI3_SUPPORTED_DEVICE_MAX (1)
+#endif
+struct mpi3_supported_devices_data {
+ u8 image_version;
+ u8 reserved01;
+ u8 num_devices;
+ u8 reserved03;
+ __le32 reserved04;
+ struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_MAX
+#define MPI3_ENCRYPTED_HASH_MAX (1)
+#endif
+struct mpi3_encrypted_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
+#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA1024 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
+#endif
+struct mpi3_encrypted_key_with_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 public_key[MPI3_PUBLIC_KEY_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
+#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
+#endif
+struct mpi3_encrypted_hash_data {
+ u8 image_version;
+ u8 num_hash;
+ __le16 reserved02;
+ __le32 reserved04;
+ struct mpi3_encrypted_hash_entry encrypted_hash_entry[MPI3_ENCRYPTED_HASH_ENTRY_MAX];
+};
+
+#ifndef MPI3_AUX_PROC_DATA_MAX
+#define MPI3_AUX_PROC_DATA_MAX (1)
+#endif
+struct mpi3_aux_processor_data {
+ u8 boot_method;
+ u8 num_load_addr;
+ u8 reserved02;
+ u8 type;
+ __le32 version;
+ __le32 load_address[8];
+ __le32 reserved28[22];
+ __le32 aux_processor_data[MPI3_AUX_PROC_DATA_MAX];
+};
+
+#define MPI3_AUX_PROC_DATA_OFFSET (0x80)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_MSG (0x00)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_DOORBELL (0x01)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_COMPONENT (0x02)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_A15 (0x00)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_M0 (0x01)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_R4 (0x02)
+#endif
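(editor's note, not part of the diff) Component image headers carry a next_image_header_offset, suggesting they chain through a firmware image. A walk might look like the sketch below; that the offset is relative to the start of the image buffer and that zero terminates the chain are assumptions for illustration, not taken from the diff, and the function name is hypothetical.

static void walk_component_images(const u8 *buf, u32 buf_size)
{
	u32 offset = 0;

	while (offset + sizeof(struct mpi3_component_image_header) <= buf_size) {
		const struct mpi3_component_image_header *hdr =
			(const void *)(buf + offset);

		if (le32_to_cpu(hdr->signature0) != MPI3_IMAGE_HEADER_SIGNATURE0_MPI3)
			break;	/* not a valid MPI3 component image header */
		offset = le32_to_cpu(hdr->next_image_header_offset);
		if (!offset)
			break;
	}
}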
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
new file mode 100644
index 000000000..3c03610ec
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_INIT_H
+#define MPI30_INIT_H 1
+struct mpi3_scsi_io_cdb_eedp32 {
+ u8 cdb[20];
+ __be32 primary_reference_tag;
+ __le16 primary_application_tag;
+ __le16 primary_application_tag_mask;
+ __le32 transfer_length;
+};
+
+union mpi3_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct mpi3_scsi_io_cdb_eedp32 eedp32;
+ struct mpi3_sge_common sge;
+};
+
+struct mpi3_scsi_io_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le32 flags;
+ __le32 skip_count;
+ __le32 data_length;
+ u8 lun[8];
+ union mpi3_scsi_io_cdb_union cdb;
+ union mpi3_sge_union sgl[4];
+};
+
+#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
+#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
+#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
+#define MPI3_SCSIIO_METASGL_INDEX (3)
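(editor's note, not part of the diff) A sketch composing the flags dword of a SCSI IO request for a simple-queue-tagged transfer with a CDB that fits the inline 32-byte union (larger CDBs would use the separate-buffer encoding); the function name is hypothetical.

static void fill_scsiio_flags(struct mpi3_scsi_io_request *req, bool is_read,
			      u8 cdb_len)
{
	u32 flags = MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	flags |= is_read ? MPI3_SCSIIO_FLAGS_DATADIRECTION_READ :
			   MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	flags |= (cdb_len <= 16) ? MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS :
				   MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
	req->flags = cpu_to_le32(flags);
}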
+struct mpi3_scsi_io_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 dev_handle;
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 response_data;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 eedp_error_offset;
+ __le16 eedp_observed_app_tag;
+ __le16 eedp_observed_guard;
+ __le32 eedp_observed_ref_tag;
+ __le64 sense_data_buffer_address;
+};
+
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x02)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x04)
+#define MPI3_SCSI_STATUS_GOOD (0x00)
+#define MPI3_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI3_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI3_SCSI_STATUS_BUSY (0x08)
+#define MPI3_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI3_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI3_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI3_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI3_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
+#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
+#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
+#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
+#define MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE (0x03)
+#define MPI3_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI3_SCSI_STATE_TERMINATED (0x08)
+#define MPI3_SCSI_STATE_RESPONSE_DATA_VALID (0x10)
+#define MPI3_SCSI_RSP_RESPONSECODE_MASK (0x000000ff)
+#define MPI3_SCSI_RSP_RESPONSECODE_SHIFT (0)
+#define MPI3_SCSI_RSP_ARI2_MASK (0x0000ff00)
+#define MPI3_SCSI_RSP_ARI2_SHIFT (8)
+#define MPI3_SCSI_RSP_ARI1_MASK (0x00ff0000)
+#define MPI3_SCSI_RSP_ARI1_SHIFT (16)
+#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
+#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
+#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
new file mode 100644
index 000000000..1c6c6730d
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -0,0 +1,1063 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_IOC_H
+#define MPI30_IOC_H 1
+struct mpi3_ioc_init_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ union mpi3_version_union mpi_version;
+ __le64 time_stamp;
+ u8 reserved18;
+ u8 who_init;
+ __le16 reserved1a;
+ __le16 reply_free_queue_depth;
+ __le16 reserved1e;
+ __le64 reply_free_queue_address;
+ __le32 reserved28;
+ __le16 sense_buffer_free_queue_depth;
+ __le16 sense_buffer_length;
+ __le64 sense_buffer_free_queue_address;
+ __le64 driver_information_address;
+};
+
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
+
+struct mpi3_ioc_facts_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_ioc_facts_data {
+ __le16 ioc_facts_data_length;
+ __le16 reserved02;
+ union mpi3_version_union mpi_version;
+ struct mpi3_comp_image_version fw_version;
+ __le32 ioc_capabilities;
+ u8 ioc_number;
+ u8 who_init;
+ __le16 max_msix_vectors;
+ __le16 max_outstanding_requests;
+ __le16 product_id;
+ __le16 ioc_request_frame_size;
+ __le16 reply_frame_size;
+ __le16 ioc_exceptions;
+ __le16 max_persistent_id;
+ u8 sge_modifier_mask;
+ u8 sge_modifier_value;
+ u8 sge_modifier_shift;
+ u8 protocol_flags;
+ __le16 max_sas_initiators;
+ __le16 max_data_length;
+ __le16 max_sas_expanders;
+ __le16 max_enclosures;
+ __le16 min_dev_handle;
+ __le16 max_dev_handle;
+ __le16 max_pcie_switches;
+ __le16 max_nvme;
+ __le16 reserved38;
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_posted_cmd_buffers;
+ __le32 flags;
+ __le16 max_operational_request_queues;
+ __le16 max_operational_reply_queues;
+ __le16 shutdown_timeout;
+ __le16 reserved4e;
+ __le32 diag_trace_size;
+ __le32 diag_fw_size;
+ __le32 diag_driver_size;
+ u8 max_host_pd_ns_count;
+ u8 max_adv_host_pd_ns_count;
+ u8 max_raidpd_ns_count;
+ u8 max_devices_per_throttle_group;
+ __le16 io_throttle_data_length;
+ __le16 max_io_throttle_group;
+ __le16 io_throttle_low;
+ __le16 io_throttle_high;
+};
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
+#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
+#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
+#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
+#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
+#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
+#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
+#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
+#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
+#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
+#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
+#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
+#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
+#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
+#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
+#define MPI3_IOCFACTS_PROTOCOL_SATA (0x0008)
+#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000)
+#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
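(editor's note, not part of the diff) A sketch pulling the DMA address width, in bits, out of the IOC facts flags word via the mask and shift defined above; the function name is hypothetical.

static u8 iocfacts_dma_width(const struct mpi3_ioc_facts_data *facts)
{
	u32 flags = le32_to_cpu(facts->flags);

	return (flags & MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	       MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
}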
+struct mpi3_mgmt_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c[5];
+ union mpi3_sge_union command_sgl;
+ union mpi3_sge_union response_sgl;
+};
+
+struct mpi3_create_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 burst;
+ __le16 size;
+ __le16 queue_id;
+ __le16 reply_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
+struct mpi3_delete_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_create_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 reserved0b;
+ __le16 size;
+ __le16 queue_id;
+ __le16 msix_index;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
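As a worked example of this interface, here is a minimal sketch of how a host driver might fill the request above for a contiguous, interrupt-enabled reply queue. The function name is illustrative; queue memory allocation, host tag management, and submission are assumed to happen elsewhere, and the in-tree driver additionally supports segmented queues:

/* Sketch: populate a create-reply-queue request for a contiguous queue. */
static void fill_create_reply_q(struct mpi3_create_reply_queue_request *req,
				u16 qid, u16 num_entries, u16 msix_index,
				dma_addr_t dma_addr)
{
	memset(req, 0, sizeof(*req));
	req->function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	req->flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS |
		     MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	req->size = cpu_to_le16(num_entries);	/* at least SIZE_MINIMUM */
	req->queue_id = cpu_to_le16(qid);
	req->msix_index = cpu_to_le16(msix_index);
	req->base_address = cpu_to_le64(dma_addr);
}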
+struct mpi3_delete_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_port_enable_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+};
+
+#define MPI3_EVENT_LOG_DATA (0x01)
+#define MPI3_EVENT_CHANGE (0x02)
+#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
+#define MPI3_EVENT_CABLE_MGMT (0x06)
+#define MPI3_EVENT_DEVICE_ADDED (0x07)
+#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
+#define MPI3_EVENT_PREPARE_FOR_RESET (0x09)
+#define MPI3_EVENT_COMP_IMAGE_ACT_START (0x0a)
+#define MPI3_EVENT_ENCL_DEVICE_ADDED (0x0b)
+#define MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x0c)
+#define MPI3_EVENT_DEVICE_STATUS_CHANGE (0x0d)
+#define MPI3_EVENT_ENERGY_PACK_CHANGE (0x0e)
+#define MPI3_EVENT_SAS_DISCOVERY (0x11)
+#define MPI3_EVENT_SAS_BROADCAST_PRIMITIVE (0x12)
+#define MPI3_EVENT_SAS_NOTIFY_PRIMITIVE (0x13)
+#define MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x14)
+#define MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW (0x15)
+#define MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x16)
+#define MPI3_EVENT_SAS_PHY_COUNTER (0x18)
+#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
+#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
+#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
+#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
+#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
+#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
+#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+struct mpi3_event_notification_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
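Each bit in event_masks corresponds to one event code, 32 per word, and a set bit suppresses delivery of that event. A short sketch of the usual bit arithmetic; the in-tree driver keeps a host-endian u32 copy like this (initialized to all ones) and converts with cpu_to_le32() when building the request:

/* Sketch: clear the mask bit for a given MPI3_EVENT_* code so the
 * controller delivers that event. */
static void unmask_event(u32 *event_masks, u8 event)
{
	u8 word = event / 32;		/* which mask word */
	u32 bit = 1U << (event % 32);	/* bit within the word */

	if (word < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS)
		event_masks[word] &= ~bit;
}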
+struct mpi3_event_notification_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 event_data_length;
+ u8 event;
+ __le16 ioc_change_count;
+ __le32 event_context;
+ __le32 event_data[1];
+};
+
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
+struct mpi3_event_data_gpio_interrupt {
+ u8 gpio_num;
+ u8 reserved01[3];
+};
+struct mpi3_event_data_cable_management {
+ __le32 active_cable_power_requirement;
+ u8 status;
+ u8 receptacle_id;
+ __le16 reserved06;
+};
+
+#define MPI3_EVENT_CABLE_MGMT_ACT_CABLE_PWR_INVALID (0xffffffff)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER (0x00)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_PRESENT (0x01)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED (0x02)
+struct mpi3_event_ack_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 event;
+ u8 reserved0d[3];
+ __le32 event_context;
+};
+
+struct mpi3_event_data_prepare_for_reset {
+ u8 reason_code;
+ u8 reserved01;
+ __le16 reserved02;
+};
+
+#define MPI3_EVENT_PREPARE_RESET_RC_START (0x01)
+#define MPI3_EVENT_PREPARE_RESET_RC_ABORT (0x02)
+struct mpi3_event_data_comp_image_activation {
+ __le32 reserved00;
+};
+
+struct mpi3_event_data_device_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 parent_dev_handle;
+ __le16 dev_handle;
+ __le64 wwid;
+ u8 lun[8];
+};
+
+#define MPI3_EVENT_DEV_STAT_RC_MOVED (0x01)
+#define MPI3_EVENT_DEV_STAT_RC_HIDDEN (0x02)
+#define MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN (0x03)
+#define MPI3_EVENT_DEV_STAT_RC_ASYNC_NOTIFICATION (0x04)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT (0x20)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP (0x21)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_STRT (0x22)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_CMP (0x23)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT (0x24)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP (0x25)
+#define MPI3_EVENT_DEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x30)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_STRT (0x40)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_CMP (0x41)
+#define MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING (0x50)
+struct mpi3_event_data_energy_pack_change {
+ __le32 reserved00;
+ __le16 shutdown_timeout;
+ __le16 reserved06;
+};
+
+struct mpi3_event_data_sas_discovery {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 discovery_status;
+};
+
+#define MPI3_EVENT_SAS_DISC_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_SAS_DISC_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+#define MPI3_SAS_DISC_STATUS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
+#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
+#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
+#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
+#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
+#define MPI3_SAS_DISC_STATUS_TABLE_TO_SUBTRACTIVE_LINK (0x00000200)
+#define MPI3_SAS_DISC_STATUS_UNSUPPORTED_DEVICE (0x00000100)
+#define MPI3_SAS_DISC_STATUS_TABLE_LINK (0x00000080)
+#define MPI3_SAS_DISC_STATUS_SUBTRACTIVE_LINK (0x00000040)
+#define MPI3_SAS_DISC_STATUS_SMP_CRC_ERROR (0x00000020)
+#define MPI3_SAS_DISC_STATUS_SMP_FUNCTION_FAILED (0x00000010)
+#define MPI3_SAS_DISC_STATUS_SMP_TIMEOUT (0x00000008)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_PORTS (0x00000004)
+#define MPI3_SAS_DISC_STATUS_INVALID_SAS_ADDRESS (0x00000002)
+#define MPI3_SAS_DISC_STATUS_LOOP_DETECTED (0x00000001)
+struct mpi3_event_data_sas_broadcast_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 port_width;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE (0x01)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_SES (0x02)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_EXPANDER (0x03)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED3 (0x05)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED4 (0x06)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE1_RESERVED (0x08)
+struct mpi3_event_data_sas_notify_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 reserved02;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_ENABLE_SPINUP (0x01)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
+#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+struct mpi3_event_sas_topo_phy_entry {
+ __le16 attached_dev_handle;
+ u8 link_rate;
+ u8 status;
+};
+
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xf0)
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+#define MPI3_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI3_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI3_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI3_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI3_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0a)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0b)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0c)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_MASK (0xc0)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_SHIFT (6)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_ACCESSIBLE (0x00)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING (0x06)
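The link_rate byte of struct mpi3_event_sas_topo_phy_entry packs the current rate into the high nibble and the previous rate into the low nibble; a sketch of the decode using the masks above (helper names are illustrative):

/* Sketch: extract the current and previous link rates from the packed
 * link_rate byte of a topology-change phy entry. */
static inline u8 topo_current_link_rate(u8 link_rate)
{
	return (link_rate & MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
	       MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
}

static inline u8 topo_prev_link_rate(u8 link_rate)
{
	return (link_rate & MPI3_EVENT_SAS_TOPO_LR_PREV_MASK) >>
	       MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT;
}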
+struct mpi3_event_data_sas_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 expander_dev_handle;
+ u8 num_phys;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 io_unit_port;
+ struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+};
+
+#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_phy_counter {
+ __le64 time_stamp;
+ __le32 reserved08;
+ u8 phy_event_code;
+ u8 phy_num;
+ __le16 reserved0e;
+ __le32 phy_event_info;
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved17;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved1e;
+};
+
+struct mpi3_event_data_sas_device_disc_err {
+ __le16 dev_handle;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_FAILED (0x01)
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_TIMEOUT (0x02)
+struct mpi3_event_data_pcie_enumeration {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 enumeration_status;
+};
+
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCH_DEPTH_EXCEED (0x80000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+struct mpi3_event_pcie_topo_port_entry {
+ __le16 attached_dev_handle;
+ u8 port_status;
+ u8 reserved03;
+ u8 current_port_info;
+ u8 reserved05;
+ u8 previous_port_info;
+ u8 reserved07;
+};
+
+#define MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_4 (0x30)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_32_0 (0x06)
+struct mpi3_event_data_pcie_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 switch_dev_handle;
+ u8 num_ports;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_port_num;
+ u8 switch_status;
+ u8 io_unit_port;
+ __le32 reserved0c;
+ struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+};
+
+#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_pcie_error_threshold {
+ __le64 timestamp;
+ u8 reason_code;
+ u8 port;
+ __le16 switch_dev_handle;
+ u8 error;
+ u8 action;
+ __le16 threshold_count;
+ __le16 attached_dev_handle;
+ __le16 reserved12;
+};
+
+#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
+#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
+struct mpi3_event_data_sas_init_dev_status_change {
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 dev_handle;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI3_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+struct mpi3_event_data_sas_init_table_overflow {
+ __le16 max_init;
+ __le16 current_init;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+struct mpi3_event_data_hard_reset_received {
+ u8 reserved00;
+ u8 io_unit_port;
+ __le16 reserved02;
+};
+
+struct mpi3_event_data_diag_buffer_status_change {
+ u8 type;
+ u8 reason_code;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
+#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
+#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
+#define MPI3_PEL_ACTION_MARK_CLEAR (0x02)
+#define MPI3_PEL_ACTION_GET_LOG (0x03)
+#define MPI3_PEL_ACTION_GET_COUNT (0x04)
+#define MPI3_PEL_ACTION_WAIT (0x05)
+#define MPI3_PEL_ACTION_ABORT (0x06)
+#define MPI3_PEL_ACTION_GET_PRINT_STRINGS (0x07)
+#define MPI3_PEL_ACTION_ACKNOWLEDGE (0x08)
+#define MPI3_PEL_STATUS_SUCCESS (0x00)
+#define MPI3_PEL_STATUS_NOT_FOUND (0x01)
+#define MPI3_PEL_STATUS_ABORTED (0x02)
+#define MPI3_PEL_STATUS_NOT_READY (0x03)
+struct mpi3_pel_seq {
+ __le32 newest;
+ __le32 oldest;
+ __le32 clear;
+ __le32 shutdown;
+ __le32 boot;
+ __le32 last_acknowledged;
+};
+
+struct mpi3_pel_entry {
+ __le64 time_stamp;
+ __le32 sequence_number;
+ __le16 log_code;
+ __le16 arg_type;
+ __le16 locale;
+ u8 class;
+ u8 flags;
+ u8 ext_num;
+ u8 num_exts;
+ u8 arg_data_size;
+ u8 fixed_format_strings_size;
+ __le32 reserved18[2];
+ __le32 pel_info[24];
+};
+
+#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
+#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
+struct mpi3_pel_list {
+ __le32 log_count;
+ __le32 reserved04;
+ struct mpi3_pel_entry entry[1];
+};
+
+struct mpi3_pel_arg_map {
+ u8 arg_type;
+ u8 length;
+ __le16 start_location;
+};
+
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_APPEND_STRING (0x00)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_INTEGER (0x01)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_STRING (0x02)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_BIT_FIELD (0x03)
+struct mpi3_pel_print_string {
+ __le16 log_code;
+ __le16 string_length;
+ u8 num_arg_map;
+ u8 reserved05[3];
+ struct mpi3_pel_arg_map arg_map[1];
+};
+
+struct mpi3_pel_print_string_list {
+ __le32 num_print_strings;
+ __le32 residual_bytes_remain;
+ __le32 reserved08[2];
+ struct mpi3_pel_print_string print_string[1];
+};
+
+#ifndef MPI3_PEL_ACTION_SPECIFIC_MAX
+#define MPI3_PEL_ACTION_SPECIFIC_MAX (1)
+#endif
+struct mpi3_pel_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 action_specific[MPI3_PEL_ACTION_SPECIFIC_MAX];
+};
+
+struct mpi3_pel_req_action_get_sequence_numbers {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c[5];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_clear_log_marker {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ u8 clear_type;
+ u8 reserved0d[3];
+};
+
+struct mpi3_pel_req_action_get_log {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_get_count {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_wait {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le16 wait_time;
+ __le16 reserved16;
+ __le32 reserved18[2];
+};
+
+struct mpi3_pel_req_action_abort {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 abort_host_tag;
+ __le16 reserved12;
+ __le32 reserved14;
+};
+
+struct mpi3_pel_req_action_get_print_strings {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 start_log_code;
+ __le16 reserved12;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_acknowledge {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 sequence_number;
+ __le32 reserved10;
+};
+
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
+struct mpi3_pel_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 action;
+ u8 reserved11;
+ __le16 reserved12;
+ __le16 pe_log_status;
+ __le16 reserved16;
+ __le32 transfer_length;
+};
+
+struct mpi3_ci_download_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 signature1;
+ __le32 total_image_size;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT (0x80)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_DOWNLOAD (0x01)
+#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
+#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
+struct mpi3_ci_download_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 flags;
+ u8 cache_dirty;
+ u8 pending_count;
+ u8 reserved13;
+};
+
+#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40)
+#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
+#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_OFFLINE_PENDING (0x06)
+#define MPI3_CI_DOWNLOAD_FLAGS_COMPATIBLE (0x01)
+struct mpi3_ci_upload_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 signature1;
+ __le32 reserved10;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
+#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
+#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_GET_IOC_CHANGE_COUNT (0x06)
+#define MPI3_CTRL_OP_CHANGE_PROFILE (0x07)
+#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
+#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
+#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
+#define MPI3_CTRL_OP_SEND_SAS_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_CHANGE_PROFILE_PARAM8_PROFILE_ID_INDEX (0x00)
+#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTENT_ID (0x04)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM64_WWID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM16_SLOTNUM_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM64_ENCLOSURELID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM64_DEVNAME_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
+#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
+#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_GET_IOC_CHANGE_COUNT_VALUE16_CHANGECOUNT_INDEX (0)
+#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
+#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
+#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
+#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+#define MPI3_CTRL_ACTION_NOP (0x00)
+#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
+#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
+#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
+struct mpi3_iounit_control_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 operation;
+ __le32 reserved0c;
+ __le64 param64[2];
+ __le32 param32[4];
+ __le16 param16[4];
+ u8 param8[8];
+};
+
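The request carries its operation-specific arguments in the generic param8/param16/param32/param64 arrays, indexed by the *_PARAM*_INDEX constants above. A minimal sketch of a remove-device operation (illustrative function name; host tag handling and submission are assumed to happen elsewhere):

/* Sketch: build an IO unit control request that removes a device
 * by its device handle. */
static void fill_remove_device(struct mpi3_iounit_control_request *req,
			       u16 dev_handle)
{
	memset(req, 0, sizeof(*req));
	req->function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	req->operation = MPI3_CTRL_OP_REMOVE_DEVICE;
	req->param16[MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX] =
		cpu_to_le16(dev_handle);
}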
+struct mpi3_iounit_control_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le64 value64[2];
+ __le32 value32[4];
+ __le16 value16[4];
+ u8 value8[8];
+};
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
new file mode 100644
index 000000000..b7a5df011
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_PCI_H
+#define MPI30_PCI_H 1
+
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
+
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
new file mode 100644
index 000000000..e587f77cc
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_SAS_H
+#define MPI30_SAS_H 1
+#define MPI3_SAS_DEVICE_INFO_SSP_TARGET (0x00000100)
+#define MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET (0x00000080)
+#define MPI3_SAS_DEVICE_INFO_SMP_TARGET (0x00000040)
+#define MPI3_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000020)
+#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
+#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
+struct mpi3_smp_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 io_unit_port;
+ __le32 reserved0c[3];
+ __le64 sas_address;
+ struct mpi3_sge_common request_sge;
+ struct mpi3_sge_common response_sge;
+};
+
+struct mpi3_smp_passthrough_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le16 response_data_length;
+ __le16 reserved12;
+};
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
new file mode 100644
index 000000000..9b76b9632
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ */
+#ifndef MPI30_TRANSPORT_H
+#define MPI30_TRANSPORT_H 1
+struct mpi3_version_struct {
+ u8 dev;
+ u8 unit;
+ u8 minor;
+ u8 major;
+};
+
+union mpi3_version_union {
+ struct mpi3_version_struct mpi3_version;
+ __le32 word;
+};
+
+#define MPI3_VERSION_MAJOR (3)
+#define MPI3_VERSION_MINOR (0)
+#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_DEV (0)
+#define MPI3_DEVHANDLE_INVALID (0xffff)
+struct mpi3_sysif_oper_queue_indexes {
+ __le16 producer_index;
+ __le16 reserved02;
+ __le16 consumer_index;
+ __le16 reserved06;
+};
+
+struct mpi3_sysif_registers {
+ __le64 ioc_information;
+ union mpi3_version_union version;
+ __le32 reserved0c[2];
+ __le32 ioc_configuration;
+ __le32 reserved18;
+ __le32 ioc_status;
+ __le32 reserved20;
+ __le32 admin_queue_num_entries;
+ __le64 admin_request_queue_address;
+ __le64 admin_reply_queue_address;
+ __le32 reserved38[2];
+ __le32 coalesce_control;
+ __le32 reserved44[1007];
+ __le16 admin_request_queue_pi;
+ __le16 reserved1002;
+ __le16 admin_reply_queue_ci;
+ __le16 reserved1006;
+ struct mpi3_sysif_oper_queue_indexes oper_queue_indexes[383];
+ __le32 reserved1c00;
+ __le32 write_sequence;
+ __le32 host_diagnostic;
+ __le32 reserved1c0c;
+ __le32 fault;
+ __le32 fault_info[3];
+ __le32 reserved1c20[4];
+ __le64 hcb_address;
+ __le32 hcb_size;
+ __le32 reserved1c3c;
+ __le32 reply_free_host_index;
+ __le32 sense_buffer_free_host_index;
+ __le32 reserved1c48[2];
+ __le64 diag_rw_data;
+ __le64 diag_rw_address;
+ __le16 diag_rw_control;
+ __le16 diag_rw_status;
+ __le32 reserved1c64[35];
+ __le32 scratchpad[4];
+ __le32 reserved1d00[192];
+ __le32 device_assigned_registers[2048];
+};
+
+#define MPI3_SYSIF_IOC_INFO_LOW_OFFSET (0x00000000)
+#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
+#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
+#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
+#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
+#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
+#define MPI3_SYSIF_IOC_STATUS_FAULT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET (0x00000028)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_HIGH_OFFSET (0x0000002c)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET (0x00000030)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
+#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_SHIFT (0)
+#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
+#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
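For example, operational request queue 1 posts its producer index at offset 0x1008 and queue 2 at 0x1010; the reply queue consumer indexes follow the same 8-byte stride starting at 0x100c, matching the oper_queue_indexes layout in struct mpi3_sysif_registers above.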
+#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD (0xb)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH (0x2)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH (0x7)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
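To gain write access to the host diagnostic register, the host writes these six key values to the write_sequence register in order (FLUSH resets the sequence state machine). A minimal sketch, assuming an ioremapped register block and omitting the retry/verification loop the in-tree driver wraps around it:

/* Sketch: write the unlock key sequence; on success the controller
 * sets the diagnostic-write-enable bit in host_diagnostic. */
static void unlock_host_diag(struct mpi3_sysif_registers __iomem *regs)
{
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH, &regs->write_sequence);
	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH, &regs->write_sequence);
}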
+#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
+#define MPI3_SYSIF_HOST_DIAG_SECURE_BOOT (0x00000040)
+#define MPI3_SYSIF_HOST_DIAG_CLEAR_INVALID_FW_IMAGE (0x00000020)
+#define MPI3_SYSIF_HOST_DIAG_INVALID_FW_IMAGE (0x00000010)
+#define MPI3_SYSIF_HOST_DIAG_HCBENABLE (0x00000008)
+#define MPI3_SYSIF_HOST_DIAG_HCBMODE (0x00000004)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_RW_ENABLE (0x00000002)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE (0x00000001)
+#define MPI3_SYSIF_FAULT_OFFSET (0x00001c10)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MASK (0xff000000)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
+#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
+#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
+#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
+#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
+#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
+#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
+#define MPI3_SYSIF_HCB_ADDRESS_LOW_OFFSET (0x00001c30)
+#define MPI3_SYSIF_HCB_ADDRESS_HIGH_OFFSET (0x00001c34)
+#define MPI3_SYSIF_HCB_SIZE_OFFSET (0x00001c38)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_MASK (0xfffff000)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_SHIFT (12)
+#define MPI3_SYSIF_HCB_SIZE_HCDW_ENABLE (0x00000001)
+#define MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET (0x00001c40)
+#define MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET (0x00001c44)
+#define MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET (0x00001c50)
+#define MPI3_SYSIF_DIAG_RW_DATA_HIGH_OFFSET (0x00001c54)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET (0x00001c58)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
+#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_PAR_ERR (0x00000006)
+#define MPI3_SYSIF_DIAG_RW_STATUS_BUSY (0x00000001)
+#define MPI3_SYSIF_SCRATCHPAD0_OFFSET (0x00001cf0)
+#define MPI3_SYSIF_SCRATCHPAD1_OFFSET (0x00001cf4)
+#define MPI3_SYSIF_SCRATCHPAD2_OFFSET (0x00001cf8)
+#define MPI3_SYSIF_SCRATCHPAD3_OFFSET (0x00001cfc)
+#define MPI3_SYSIF_DEVICE_ASSIGNED_REGS_OFFSET (0x00002000)
+#define MPI3_SYSIF_DIAG_SAVE_TIMEOUT (60)
+struct mpi3_default_reply_descriptor {
+ __le32 descriptor_type_dependent1[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 descriptor_type_dependent2;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff)
+struct mpi3_address_reply_descriptor {
+ __le64 reply_frame_address;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 reserved0c;
+ __le16 reply_flags;
+};
+
+struct mpi3_success_reply_descriptor {
+ __le32 reserved00[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+struct mpi3_target_command_buffer_reply_descriptor {
+ __le32 reserved00;
+ __le16 initiator_dev_handle;
+ u8 phy_num;
+ u8 reserved07;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 io_index;
+ __le16 reply_flags;
+};
+
+struct mpi3_status_reply_descriptor {
+ __le16 ioc_status;
+ __le16 reserved02;
+ __le32 ioc_log_info;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0fffffff)
+union mpi3_reply_descriptors_union {
+ struct mpi3_default_reply_descriptor default_reply;
+ struct mpi3_address_reply_descriptor address_reply;
+ struct mpi3_success_reply_descriptor success;
+ struct mpi3_target_command_buffer_reply_descriptor target_command_buffer;
+ struct mpi3_status_reply_descriptor status;
+ __le32 words[4];
+};
+
+struct mpi3_sge_common {
+ __le64 address;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_bit_bucket {
+ __le64 reserved00;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_extended_eedp {
+ u8 user_data_size;
+ u8 reserved01;
+ __le16 eedp_flags;
+ __le32 secondary_reference_tag;
+ __le16 secondary_application_tag;
+ __le16 application_tag_translation_mask;
+ __le16 reserved0c;
+ u8 extended_operation;
+ u8 flags;
+};
+
+union mpi3_sge_union {
+ struct mpi3_sge_common simple;
+ struct mpi3_sge_common chain;
+ struct mpi3_sge_common last_chain;
+ struct mpi3_sge_bit_bucket bit_bucket;
+ struct mpi3_sge_extended_eedp eedp;
+ __le32 words[4];
+};
+
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN (0x30)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED (0xf0)
+#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
+#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
+#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
+#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
+#define MPI3_SGE_EXT_OPER_EEDP (0x00)
+#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
+#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
+#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
+#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
+#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
+#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
+#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
+#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
+#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
+#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
+#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
+#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
+#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
+#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
+#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
+#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
+#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
+#define MPI3_EEDP_UDS_512 (0x01)
+#define MPI3_EEDP_UDS_520 (0x02)
+#define MPI3_EEDP_UDS_4080 (0x03)
+#define MPI3_EEDP_UDS_4088 (0x04)
+#define MPI3_EEDP_UDS_4096 (0x05)
+#define MPI3_EEDP_UDS_4104 (0x06)
+#define MPI3_EEDP_UDS_4160 (0x07)
+struct mpi3_request_header {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 function_dependent;
+};
+
+struct mpi3_default_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+};
+
+#define MPI3_HOST_TAG_INVALID (0xffff)
+#define MPI3_FUNCTION_IOC_FACTS (0x01)
+#define MPI3_FUNCTION_IOC_INIT (0x02)
+#define MPI3_FUNCTION_PORT_ENABLE (0x03)
+#define MPI3_FUNCTION_EVENT_NOTIFICATION (0x04)
+#define MPI3_FUNCTION_EVENT_ACK (0x05)
+#define MPI3_FUNCTION_CI_DOWNLOAD (0x06)
+#define MPI3_FUNCTION_CI_UPLOAD (0x07)
+#define MPI3_FUNCTION_IO_UNIT_CONTROL (0x08)
+#define MPI3_FUNCTION_PERSISTENT_EVENT_LOG (0x09)
+#define MPI3_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_FUNCTION_CONFIG (0x10)
+#define MPI3_FUNCTION_SCSI_IO (0x20)
+#define MPI3_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_FUNCTION_NVME_ENCAPSULATED (0x24)
+#define MPI3_FUNCTION_TARGET_ASSIST (0x30)
+#define MPI3_FUNCTION_TARGET_STATUS_SEND (0x31)
+#define MPI3_FUNCTION_TARGET_MODE_ABORT (0x32)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_BASE (0x33)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_LIST (0x34)
+#define MPI3_FUNCTION_CREATE_REQUEST_QUEUE (0x70)
+#define MPI3_FUNCTION_DELETE_REQUEST_QUEUE (0x71)
+#define MPI3_FUNCTION_CREATE_REPLY_QUEUE (0x72)
+#define MPI3_FUNCTION_DELETE_REPLY_QUEUE (0x73)
+#define MPI3_FUNCTION_TOOLBOX (0x80)
+#define MPI3_FUNCTION_DIAG_BUFFER_POST (0x81)
+#define MPI3_FUNCTION_DIAG_BUFFER_MANAGE (0x82)
+#define MPI3_FUNCTION_DIAG_BUFFER_UPLOAD (0x83)
+#define MPI3_FUNCTION_MIN_IOC_USE_ONLY (0xc0)
+#define MPI3_FUNCTION_MAX_IOC_USE_ONLY (0xef)
+#define MPI3_FUNCTION_MIN_PRODUCT_SPECIFIC (0xf0)
+#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xff)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
+#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_SUCCESS (0x0000)
+#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI3_IOCSTATUS_BUSY (0x0002)
+#define MPI3_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI3_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
+#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
+#define MPI3_IOCSTATUS_FAILURE (0x001f)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI3_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI3_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+#define MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI3_IOCSTATUS_SCSI_TM_NOT_SUPPORTED (0x0041)
+#define MPI3_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI3_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI3_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004a)
+#define MPI3_IOCSTATUS_SCSI_IOC_TERMINATED (0x004b)
+#define MPI3_IOCSTATUS_SCSI_EXT_TERMINATED (0x004c)
+#define MPI3_IOCSTATUS_EEDP_GUARD_ERROR (0x004d)
+#define MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004e)
+#define MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004f)
+#define MPI3_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI3_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI3_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI3_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI3_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006a)
+#define MPI3_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006d)
+#define MPI3_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006e)
+#define MPI3_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006f)
+#define MPI3_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI3_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+#define MPI3_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI3_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+#define MPI3_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00a0)
+#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
+#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
+#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
+#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
+#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
+#define MPI3_IOCSTATUS_INVALID_REPLY_QUEUE_ID (0x0f03)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_DELETION (0x0f04)
+#define MPI3_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
new file mode 100644
index 000000000..de6914d57
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -0,0 +1,1399 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3MR_H_INCLUDED
+#define MPI3MR_H_INCLUDED
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "mpi/mpi30_transport.h"
+#include "mpi/mpi30_cnfg.h"
+#include "mpi/mpi30_image.h"
+#include "mpi/mpi30_init.h"
+#include "mpi/mpi30_ioc.h"
+#include "mpi/mpi30_sas.h"
+#include "mpi/mpi30_pci.h"
+#include "mpi3mr_debug.h"
+
+/* Global list and lock for storing multiple adapters managed by the driver */
+extern spinlock_t mrioc_list_lock;
+extern struct list_head mrioc_list;
+extern int prot_mask;
+extern atomic64_t event_counter;
+
+#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
+#define MPI3MR_DRIVER_RELDATE "08-September-2022"
+
+#define MPI3MR_DRIVER_NAME "mpi3mr"
+#define MPI3MR_DRIVER_LICENSE "GPL"
+#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>"
+#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver"
+
+#define MPI3MR_NAME_LENGTH 32
+#define IOCNAME "%s: "
+
+#define MPI3MR_MAX_SECTORS 2048
+
+/* Definitions for internal SGL and Chain SGL buffers */
+#define MPI3MR_PAGE_SIZE_4K 4096
+#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
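With the 16-byte struct mpi3_sge_common (8-byte address, 4-byte length, 3 reserved bytes, 1 flags byte), this works out to 256 SGEs per 4K page.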
+
+/* Definitions for MAX values for shost */
+#define MPI3MR_MAX_CMDS_LUN 128
+#define MPI3MR_MAX_CDB_LENGTH 32
+
+/* Admin queue management definitions */
+#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
+#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
+
+/* Operational queue management definitions */
+#define MPI3MR_OP_REQ_Q_QD 512
+#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD4K 4096
+#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
+#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
+#define MPI3MR_MAX_SEG_LIST_SIZE 4096
+
+/* Reserved Host Tag definitions */
+#define MPI3MR_HOSTTAG_INVALID 0xFFFF
+#define MPI3MR_HOSTTAG_INITCMDS 1
+#define MPI3MR_HOSTTAG_BSG_CMDS 2
+#define MPI3MR_HOSTTAG_PEL_ABORT 3
+#define MPI3MR_HOSTTAG_PEL_WAIT 4
+#define MPI3MR_HOSTTAG_BLK_TMS 5
+#define MPI3MR_HOSTTAG_CFG_CMDS 6
+#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7
+
+#define MPI3MR_NUM_DEVRMCMD 16
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
+ MPI3MR_NUM_DEVRMCMD - 1)
+
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_NUM_EVTACKCMD 4
+#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
+#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
+ MPI3MR_NUM_EVTACKCMD - 1)
+
+/* Reduced resource count definition for crash kernel */
+#define MPI3MR_HOST_IOS_KDUMP 128
+
+/* command/controller interaction timeout definitions in seconds */
+#define MPI3MR_INTADMCMD_TIMEOUT 60
+#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_PORTENABLE_POLL_INTERVAL 5
+#define MPI3MR_ABORTTM_TIMEOUT 60
+#define MPI3MR_RESETTM_TIMEOUT 60
+#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
+#define MPI3MR_TSUPDATE_INTERVAL 900
+#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
+#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
+#define MPI3MR_RESET_ACK_TIMEOUT 30
+
+#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */
+
+#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */
+
+#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10
+
+#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
+#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
+
+/* Internal admin command state definitions */
+#define MPI3MR_CMD_NOTUSED 0x8000
+#define MPI3MR_CMD_COMPLETE 0x0001
+#define MPI3MR_CMD_PENDING 0x0002
+#define MPI3MR_CMD_REPLY_VALID 0x0004
+#define MPI3MR_CMD_RESET 0x0008
+
+/* Definitions for Event replies and sense buffer allocated per controller */
+#define MPI3MR_NUM_EVT_REPLIES 64
+#define MPI3MR_SENSE_BUF_SZ 256
+#define MPI3MR_SENSEBUF_FACTOR 3
+#define MPI3MR_CHAINBUF_FACTOR 3
+#define MPI3MR_CHAINBUFDIX_FACTOR 2
+
+/* Invalid target device handle */
+#define MPI3MR_INVALID_DEV_HANDLE 0xFFFF
+
+/* Controller Reset related definitions */
+#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5
+#define MPI3MR_MAX_RESET_RETRY_COUNT 3
+
+/* ResponseCode definitions */
+#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
+#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
+ MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
+
+#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+#define MPI3MR_DEFAULT_PGSZEXP (12)
+
+/* Command retry count definitions */
+#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+#define MPI3MR_PEL_RETRY_COUNT 3
+
+/* Default target device queue depth */
+#define MPI3MR_DEFAULT_SDEV_QD 32
+
+/* Definitions for Threaded IRQ poll */
+#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
+
+/* Definitions for the controller security status */
+#define MPI3MR_CTLR_SECURITY_STATUS_MASK 0x0C
+#define MPI3MR_CTLR_SECURE_DBG_STATUS_MASK 0x02
+
+#define MPI3MR_INVALID_DEVICE 0x00
+#define MPI3MR_CONFIG_SECURE_DEVICE 0x04
+#define MPI3MR_HARD_SECURE_DEVICE 0x08
+#define MPI3MR_TAMPERED_DEVICE 0x0C
+
+/* SGE Flag definition */
+#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
+ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
+ MPI3_SGE_FLAGS_END_OF_LIST)
+
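A sketch of populating one simple system SGE with this combined flag value, mirroring the shape of the driver's internal add-SGE helper (the function name here is illustrative):

/* Sketch: describe one DMA buffer with a simple, end-of-list system SGE. */
static void build_simple_sge(struct mpi3_sge_common *sge,
			     dma_addr_t dma_addr, u32 length)
{
	sge->address = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(length);
	sge->flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
}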
+/* MSI Index from Reply Queue Index */
+#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset)
+
+/*
+ * Maximum data transfer size definitions for management
+ * application commands
+ */
+#define MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024)
+#define MPI3MR_MAX_APP_XFER_SEGMENTS 512
+/*
+ * 2048 sectors are for data buffers and an additional 512 sectors
+ * are for other buffers
+ */
+#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+
+/**
+ * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
+ * Encapsulated commands.
+ *
+ * @base_addr: Physical address
+ * @length: SGE length
+ * @rsvd: Reserved
+ * @rsvd1: Reserved
+ * @sgl_type: sgl type
+ */
+struct mpi3mr_nvme_pt_sge {
+ u64 base_addr;
+ u32 length;
+ u16 rsvd;
+ u8 rsvd1;
+ u8 sgl_type;
+};
+
+/**
+ * struct mpi3mr_buf_map - local structure to track the kernel
+ * and user buffers associated with a BSG structure.
+ *
+ * @bsg_buf: BSG buffer virtual address
+ * @bsg_buf_len: BSG buffer length
+ * @kern_buf: Kernel buffer virtual address
+ * @kern_buf_len: Kernel buffer length
+ * @kern_buf_dma: Kernel buffer DMA address
+ * @data_dir: Data direction.
+ */
+struct mpi3mr_buf_map {
+ void *bsg_buf;
+ u32 bsg_buf_len;
+ void *kern_buf;
+ u32 kern_buf_len;
+ dma_addr_t kern_buf_dma;
+ u8 data_dir;
+};
+
+/* IOC State definitions */
+enum mpi3mr_iocstate {
+ MRIOC_STATE_READY = 1,
+ MRIOC_STATE_RESET,
+ MRIOC_STATE_FAULT,
+ MRIOC_STATE_BECOMING_READY,
+ MRIOC_STATE_RESET_REQUESTED,
+ MRIOC_STATE_UNRECOVERABLE,
+};
+
+/* Reset reason code definitions */
+enum mpi3mr_reset_reason {
+ MPI3MR_RESET_FROM_BRINGUP = 1,
+ MPI3MR_RESET_FROM_FAULT_WATCH = 2,
+ MPI3MR_RESET_FROM_APP = 3,
+ MPI3MR_RESET_FROM_EH_HOS = 4,
+ MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
+ MPI3MR_RESET_FROM_APP_TIMEOUT = 6,
+ MPI3MR_RESET_FROM_MUR_FAILURE = 7,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
+ MPI3MR_RESET_FROM_PE_TIMEOUT = 10,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT = 11,
+ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT = 12,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT = 13,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT = 14,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT = 15,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT = 16,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT = 17,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT = 18,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT = 19,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER = 20,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
+ MPI3MR_RESET_FROM_SYSFS = 23,
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+ MPI3MR_RESET_FROM_FIRMWARE = 27,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
+};
+
+/* Queue type definitions */
+enum queue_type {
+ MPI3MR_DEFAULT_QUEUE = 0,
+ MPI3MR_POLL_QUEUE,
+};
+
+/**
+ * struct mpi3mr_compimg_ver - replica of component image
+ * version defined in mpi30_image.h in host endianness
+ *
+ * @build_num: Build number
+ * @cust_id: Customer ID
+ * @ph_minor: Phase minor version
+ * @ph_major: Phase major version
+ * @gen_minor: Gen minor version
+ * @gen_major: Gen major version
+ */
+struct mpi3mr_compimg_ver {
+ u16 build_num;
+ u16 cust_id;
+ u8 ph_minor;
+ u8 ph_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+/**
+ * struct mpi3mr_ioc_facts - replica of the IOC facts data
+ * defined in mpi30_ioc.h in host endianness
+ *
+ */
+struct mpi3mr_ioc_facts {
+ u32 ioc_capabilities;
+ struct mpi3mr_compimg_ver fw_ver;
+ u32 mpi_version;
+ u16 max_reqs;
+ u16 product_id;
+ u16 op_req_sz;
+ u16 reply_sz;
+ u16 exceptions;
+ u16 max_perids;
+ u16 max_pds;
+ u16 max_sasexpanders;
+ u16 max_sasinitiators;
+ u16 max_enclosures;
+ u16 max_pcie_switches;
+ u16 max_nvme;
+ u16 max_vds;
+ u16 max_hpds;
+ u16 max_advhpds;
+ u16 max_raid_pds;
+ u16 min_devhandle;
+ u16 max_devhandle;
+ u16 max_op_req_q;
+ u16 max_op_reply_q;
+ u16 shutdown_timeout;
+ u8 ioc_num;
+ u8 who_init;
+ u16 max_msix_vectors;
+ u8 personality;
+ u8 dma_mask;
+ u8 protocol_flags;
+ u8 sge_mod_mask;
+ u8 sge_mod_value;
+ u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
+};
+
+/**
+ * struct segments - memory descriptor structure to store
+ * virtual and dma addresses for operational queue segments.
+ *
+ * @segment: virtual address
+ * @segment_dma: dma address
+ */
+struct segments {
+ void *segment;
+ dma_addr_t segment_dma;
+};
+
+/**
+ * struct op_req_qinfo - Operational Request Queue Information
+ *
+ * @ci: consumer index
+ * @pi: producer index
+ * @num_requests: Maximum number of entries in the queue
+ * @qid: Queue Id starting from 1
+ * @reply_qid: Associated reply queue Id
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_lock: Concurrent queue access lock
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ */
+struct op_req_qinfo {
+ u16 ci;
+ u16 pi;
+ u16 num_requests;
+ u16 qid;
+ u16 reply_qid;
+ u16 num_segments;
+ u16 segment_qd;
+ spinlock_t q_lock;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+};
+
+/**
+ * struct op_reply_qinfo - Operational Reply Queue Information
+ *
+ * @ci: consumer index
+ * @qid: Queue Id starting from 1
+ * @num_replies: Maximum number of entries in the queue
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ * @ephase: Expected phase identifier for the reply queue
+ * @pend_ios: Number of IOs pending in HW for this queue
+ * @enable_irq_poll: Flag to indicate polling is enabled
+ * @in_use: Queue is handled by poll/ISR
+ * @qtype: Type of queue (types defined in enum queue_type)
+ */
+struct op_reply_qinfo {
+ u16 ci;
+ u16 qid;
+ u16 num_replies;
+ u16 num_segments;
+ u16 segment_qd;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+ u8 ephase;
+ atomic_t pend_ios;
+ bool enable_irq_poll;
+ atomic_t in_use;
+ enum queue_type qtype;
+};
+
+/**
+ * struct mpi3mr_intr_info - Interrupt cookie information
+ *
+ * @mrioc: Adapter instance reference
+ * @os_irq: irq number
+ * @msix_index: MSIx index
+ * @op_reply_q: Associated operational reply queue
+ * @name: Dev name for the irq claiming device
+ */
+struct mpi3mr_intr_info {
+ struct mpi3mr_ioc *mrioc;
+ int os_irq;
+ u16 msix_index;
+ struct op_reply_qinfo *op_reply_q;
+ char name[MPI3MR_NAME_LENGTH];
+};
+
+/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
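+/*
+ * Note on @qd_reduction above: the value is in units of 10%, so a
+ * value of N nominally corresponds to an N * 10% cut from @fw_qd when
+ * deriving @modified_qd (illustrative reading of the fields; the
+ * exact computation lives in the driver's throttling code).
+ */
+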
+/* HBA port flags */
+#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+
+/**
+ * struct mpi3mr_hba_port - HBA's port information
+ * @port_id: Port number
+ * @flags: HBA port flags
+ */
+struct mpi3mr_hba_port {
+ struct list_head list;
+ u8 port_id;
+ u8 flags;
+};
+
+/**
+ * struct mpi3mr_sas_port - Internal SAS port information
+ * @port_list: List of ports belonging to a SAS node
+ * @num_phys: Number of phys associated with port
+ * @marked_responding: used while refreshing the sas ports
+ * @lowest_phy: lowest phy ID of current sas port
+ * @phy_mask: phy_mask of current sas port
+ * @hba_port: HBA port entry
+ * @remote_identify: Attached device identification
+ * @rphy: SAS transport layer rphy object
+ * @port: SAS transport layer port object
+ * @phy_list: mpi3mr_sas_phy objects belonging to this port
+ */
+struct mpi3mr_sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ u8 marked_responding;
+ int lowest_phy;
+ u32 phy_mask;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct mpi3mr_sas_phy - Internal SAS Phy information
+ * @port_siblings: List of phys belonging to a port
+ * @identify: Phy identification
+ * @remote_identify: Attached device identification
+ * @phy: SAS transport layer Phy object
+ * @phy_id: Unique phy id within a port
+ * @handle: Firmware device handle for this phy
+ * @attached_handle: Firmware device handle for attached device
+ * @phy_belongs_to_port: Flag to indicate phy belongs to port
+ * @hba_port: HBA port entry
+ */
+struct mpi3mr_sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ struct mpi3mr_hba_port *hba_port;
+};
+
+/**
+ * struct mpi3mr_sas_node - SAS host/expander information
+ * @list: List of sas nodes in a controller
+ * @parent_dev: Parent device class
+ * @num_phys: Number of phys belonging to sas_node
+ * @sas_address: SAS address of sas_node
+ * @handle: Firmware device handle for this sas_host/expander
+ * @sas_address_parent: SAS address of parent expander or host
+ * @enclosure_handle: Firmware handle of enclosure of this node
+ * @enclosure_logical_id: Enclosure logical identifier
+ * @device_info: Capabilities of this sas_host/expander
+ * @non_responding: used to refresh the expander devices during reset
+ * @host_node: Flag to indicate this is a host_node
+ * @hba_port: HBA port entry
+ * @phy: A list of phys that make up this sas_host/expander
+ * @sas_port_list: List of internal ports of this node
+ * @rphy: sas_rphy object of this expander node
+ */
+struct mpi3mr_sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 non_responding;
+ u8 host_node;
+ struct mpi3mr_hba_port *hba_port;
+ struct mpi3mr_sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct mpi3mr_enclosure_node - enclosure information
+ * @list: List of enclosures
+ * @pg0: Enclosure page 0
+ */
+struct mpi3mr_enclosure_node {
+ struct list_head list;
+ struct mpi3_enclosure_page0 pg0;
+};
+
+/**
+ * struct tgt_dev_sas_sata - SAS/SATA device specific
+ * information cached from firmware given data
+ *
+ * @sas_address: World wide unique SAS address
+ * @sas_address_parent: Sas address of parent expander or host
+ * @dev_info: Device information bits
+ * @phy_id: Phy identifier provided in device page 0
+ * @attached_phy_id: Attached phy identifier provided in device page 0
+ * @sas_transport_attached: Is this device exposed to transport
+ * @pend_sas_rphy_add: Flag to check device is in process of add
+ * @hba_port: HBA port entry
+ * @rphy: SAS transport layer rphy object
+ */
+struct tgt_dev_sas_sata {
+ u64 sas_address;
+ u64 sas_address_parent;
+ u16 dev_info;
+ u8 phy_id;
+ u8 attached_phy_id;
+ u8 sas_transport_attached;
+ u8 pend_sas_rphy_add;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct tgt_dev_pcie - PCIe device specific information cached
+ * from firmware given data
+ *
+ * @mdts: Maximum data transfer size
+ * @capb: Device capabilities
+ * @pgsz: Device page size
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
+ * @dev_info: Device information bits
+ */
+struct tgt_dev_pcie {
+ u32 mdts;
+ u16 capb;
+ u8 pgsz;
+ u8 abort_to;
+ u8 reset_to;
+ u16 dev_info;
+};
+
+/**
+ * struct tgt_dev_vd - virtual device specific information
+ * cached from firmware given data
+ *
+ * @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @tg_high: High limit to turn on throttling in 512 byte blocks
+ * @tg_low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
+ */
+struct tgt_dev_vd {
+ u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
+};
+
+/**
+ * union _form_spec_inf - union of device specific information
+ */
+union _form_spec_inf {
+ struct tgt_dev_sas_sata sas_sata_inf;
+ struct tgt_dev_pcie pcie_inf;
+ struct tgt_dev_vd vd_inf;
+};
+
+/**
+ * struct mpi3mr_tgt_dev - target device data structure
+ *
+ * @list: List pointer
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @parent_handle: FW parent device handle
+ * @slot: Slot number
+ * @encl_handle: FW enclosure handle
+ * @perst_id: FW assigned Persistent ID
+ * @devpg0_flag: Device Page0 flag
+ * @dev_type: SAS/SATA/PCIE device type
+ * @is_hidden: Should be exposed to upper layers or not
+ * @host_exposed: Already exposed to host or not
+ * @io_unit_port: IO Unit port ID
+ * @non_stl: Is this device not to be attached with SAS TL
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @q_depth: Device specific Queue Depth
+ * @wwid: World wide ID
+ * @enclosure_logical_id: Enclosure logical identifier
+ * @dev_spec: Device type specific information
+ * @ref_count: Reference count
+ */
+struct mpi3mr_tgt_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 parent_handle;
+ u16 slot;
+ u16 encl_handle;
+ u16 perst_id;
+ u16 devpg0_flag;
+ u8 dev_type;
+ u8 is_hidden;
+ u8 host_exposed;
+ u8 io_unit_port;
+ u8 non_stl;
+ u8 io_throttle_enabled;
+ u16 q_depth;
+ u64 wwid;
+ u64 enclosure_logical_id;
+ union _form_spec_inf dev_spec;
+ struct kref ref_count;
+};
+
+/**
+ * mpi3mr_tgtdev_get - k reference incrementor
+ * @s: Target device reference
+ *
+ * Increment target device reference count.
+ */
+static inline void mpi3mr_tgtdev_get(struct mpi3mr_tgt_dev *s)
+{
+ kref_get(&s->ref_count);
+}
+
+/**
+ * mpi3mr_free_tgtdev - target device memory deallocator
+ * @r: k reference pointer of the target device
+ *
+ * Free target device memory when no reference.
+ */
+static inline void mpi3mr_free_tgtdev(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_tgt_dev, ref_count));
+}
+
+/**
+ * mpi3mr_tgtdev_put - k reference decrementor
+ * @s: Target device reference
+ *
+ * Decrement target device reference count.
+ */
+static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
+{
+ kref_put(&s->ref_count, mpi3mr_free_tgtdev);
+}
+
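+/*
+ * Illustrative lookup/use pattern for the helpers above (sketch
+ * only, mirroring callers such as mpi3mr_build_nvme_prp() in
+ * mpi3mr_app.c):
+ *
+ *	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ *	if (tgtdev) {
+ *		... use tgtdev ...
+ *		mpi3mr_tgtdev_put(tgtdev);
+ *	}
+ *
+ * mpi3mr_get_tgtdev_by_handle() takes a reference on behalf of the
+ * caller, which must be dropped with mpi3mr_tgtdev_put() once the
+ * device is no longer needed.
+ */
+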
+/**
+ * struct mpi3mr_stgt_priv_data - SCSI target private structure
+ *
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @perst_id: FW assigned Persistent ID
+ * @num_luns: Number of Logical Units
+ * @block_io: I/O blocked to the device or not
+ * @dev_removed: Device removed in the Firmware
+ * @dev_removedelay: Device is waiting to be removed in FW
+ * @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
+ * @tgt_dev: Internal target device pointer
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
+ */
+struct mpi3mr_stgt_priv_data {
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 perst_id;
+ u32 num_luns;
+ atomic_t block_io;
+ u8 dev_removed;
+ u8 dev_removedelay;
+ u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ u32 pend_count;
+};
+
+/**
+ * struct mpi3mr_sdev_priv_data - SCSI device private structure
+ *
+ * @tgt_priv_data: Scsi_target private data pointer
+ * @lun_id: LUN ID of the device
+ * @ncq_prio_enable: NCQ priority enable for SATA device
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
+ */
+struct mpi3mr_sdev_priv_data {
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ u32 lun_id;
+ u8 ncq_prio_enable;
+ u32 pend_count;
+};
+
+/**
+ * struct mpi3mr_drv_cmd - Internal command tracker
+ *
+ * @mutex: Command mutex
+ * @done: Completion used for wakeup
+ * @reply: Firmware reply for internal commands
+ * @sensebuf: Sensebuf for SCSI IO commands
+ * @iou_rc: IO Unit control reason code
+ * @state: Command State
+ * @dev_handle: Firmware handle for device specific commands
+ * @ioc_status: IOC status from the firmware
+ * @ioc_loginfo: IOC log info from the firmware
+ * @is_waiting: Is the command issued in block mode
+ * @is_sense: Is Sense data present
+ * @retry_count: Retry count for retriable commands
+ * @host_tag: Host tag used by the command
+ * @callback: Callback for non blocking commands
+ */
+struct mpi3mr_drv_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ u8 *sensebuf;
+ u8 iou_rc;
+ u16 state;
+ u16 dev_handle;
+ u16 ioc_status;
+ u32 ioc_loginfo;
+ u8 is_waiting;
+ u8 is_sense;
+ u8 retry_count;
+ u16 host_tag;
+
+ void (*callback)(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+};
+
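+/*
+ * Typical blocking use of a struct mpi3mr_drv_cmd tracker (sketch
+ * only, mirroring mpi3mr_bsg_pel_abort() in mpi3mr_app.c; cmd stands
+ * for any of the trackers embedded in struct mpi3mr_ioc):
+ *
+ *	mutex_lock(&cmd->mutex);
+ *	cmd->state = MPI3MR_CMD_PENDING;
+ *	cmd->is_waiting = 1;
+ *	init_completion(&cmd->done);
+ *	... post the request to the admin queue ...
+ *	wait_for_completion_timeout(&cmd->done, timeout * HZ);
+ *	... inspect cmd->state, cmd->ioc_status and cmd->ioc_loginfo ...
+ *	cmd->state = MPI3MR_CMD_NOTUSED;
+ *	mutex_unlock(&cmd->mutex);
+ */
+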
+/**
+ * struct dma_memory_desc - memory descriptor structure to store
+ * virtual address, dma address and size for any generic dma
+ * memory allocations in the driver.
+ *
+ * @size: buffer size
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct dma_memory_desc {
+ u32 size;
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct chain_element - memory descriptor structure to store
+ * virtual and dma addresses for chain elements.
+ *
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct chain_element {
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct scmd_priv - SCSI command private data
+ *
+ * @host_tag: Host tag specific to operational queue
+ * @in_lld_scope: Command in LLD scope or not
+ * @meta_sg_valid: DIX command with meta data SGL or not
+ * @scmd: SCSI Command pointer
+ * @req_q_idx: Operational request queue index
+ * @chain_idx: Chain frame index
+ * @meta_chain_idx: Chain frame index of meta data SGL
+ * @mpi3mr_scsiio_req: MPI SCSI IO request
+ */
+struct scmd_priv {
+ u16 host_tag;
+ u8 in_lld_scope;
+ u8 meta_sg_valid;
+ struct scsi_cmnd *scmd;
+ u16 req_q_idx;
+ int chain_idx;
+ int meta_chain_idx;
+ u8 mpi3mr_scsiio_req[MPI3MR_ADMIN_REQ_FRAME_SZ];
+};
+
+/**
+ * struct mpi3mr_ioc - Adapter anchor structure stored in shost
+ * private data
+ *
+ * @list: List pointer
+ * @pdev: PCI device pointer
+ * @shost: Scsi_Host pointer
+ * @id: Controller ID
+ * @cpu_count: Number of online CPUs
+ * @enable_segqueue: Flag to enable segmented operational queues
+ * @irqpoll_sleep: usleep unit used in threaded isr irqpoll
+ * @name: Controller ASCII name
+ * @driver_name: Driver ASCII name
+ * @sysif_regs: System interface registers virtual address
+ * @sysif_regs_phys: System interface registers physical address
+ * @bars: PCI BARS
+ * @dma_mask: DMA mask
+ * @msix_count: Number of MSIX vectors used
+ * @intr_enabled: Is interrupts enabled
+ * @num_admin_req: Number of admin requests
+ * @admin_req_q_sz: Admin request queue size
+ * @admin_req_pi: Admin request queue producer index
+ * @admin_req_ci: Admin request queue consumer index
+ * @admin_req_base: Admin request queue base virtual address
+ * @admin_req_dma: Admin request queue base dma address
+ * @admin_req_lock: Admin queue access lock
+ * @num_admin_replies: Number of admin replies
+ * @admin_reply_q_sz: Admin reply queue size
+ * @admin_reply_ci: Admin reply queue consumer index
+ * @admin_reply_ephase: Admin reply queue expected phase
+ * @admin_reply_base: Admin reply queue base virtual address
+ * @admin_reply_dma: Admin reply queue base dma address
+ * @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @ready_timeout: Controller ready timeout
+ * @intr_info: Interrupt cookie pointer
+ * @intr_info_count: Number of interrupt cookies
+ * @is_intr_info_set: Flag to indicate intr info is setup
+ * @num_queues: Number of operational queues
+ * @num_op_req_q: Number of operational request queues
+ * @req_qinfo: Operational request queue info pointer
+ * @num_op_reply_q: Number of operational reply queues
+ * @op_reply_qinfo: Operational reply queue info pointer
+ * @init_cmds: Command tracker for initialization commands
+ * @cfg_cmds: Command tracker for configuration requests
+ * @facts: Cached IOC facts data
+ * @op_reply_desc_sz: Operational reply descriptor size
+ * @num_reply_bufs: Number of reply buffers allocated
+ * @reply_buf_pool: Reply buffer pool
+ * @reply_buf: Reply buffer base virtual address
+ * @reply_buf_dma: Reply buffer DMA address
+ * @reply_buf_dma_max_address: Reply DMA address max limit
+ * @reply_free_qsz: Reply free queue size
+ * @reply_sz: Reply frame size
+ * @reply_free_q_pool: Reply free queue pool
+ * @reply_free_q: Reply free queue base virtual address
+ * @reply_free_q_dma: Reply free queue base DMA address
+ * @reply_free_queue_lock: Reply free queue lock
+ * @reply_free_queue_host_index: Reply free queue host index
+ * @num_sense_bufs: Number of sense buffers
+ * @sense_buf_pool: Sense buffer pool
+ * @sense_buf: Sense buffer base virtual address
+ * @sense_buf_dma: Sense buffer base DMA address
+ * @sense_buf_q_sz: Sense buffer queue size
+ * @sense_buf_q_pool: Sense buffer queue pool
+ * @sense_buf_q: Sense buffer queue virtual address
+ * @sense_buf_q_dma: Sense buffer queue DMA address
+ * @sbq_lock: Sense buffer queue lock
+ * @sbq_host_index: Sense buffer queue host index
+ * @event_masks: Event mask bitmap
+ * @fwevt_worker_name: Firmware event worker thread name
+ * @fwevt_worker_thread: Firmware event worker thread
+ * @fwevt_lock: Firmware event lock
+ * @fwevt_list: Firmware event list
+ * @watchdog_work_q_name: Fault watchdog worker thread name
+ * @watchdog_work_q: Fault watchdog worker thread
+ * @watchdog_work: Fault watchdog work
+ * @watchdog_lock: Fault watchdog lock
+ * @is_driver_loading: Is driver still loading
+ * @scan_started: Async scan started
+ * @scan_failed: Async scan failed
+ * @stop_drv_processing: Stop all command processing
+ * @device_refresh_on: Don't process the events until devices are refreshed
+ * @max_host_ios: Maximum host I/O count
+ * @tgtdev_lock: Lock to protect the target device list
+ * @tgtdev_list: List of target devices managed by the driver
+ * @chain_buf_count: Chain buffer count
+ * @chain_buf_pool: Chain buffer pool
+ * @chain_sgl_list: Chain SGL list
+ * @chain_bitmap: Chain buffer allocator bitmap
+ * @chain_buf_lock: Chain buffer list lock
+ * @bsg_cmds: Command tracker for BSG command
+ * @host_tm_cmds: Command tracker for task management commands
+ * @dev_rmhs_cmds: Command tracker for device removal commands
+ * @evtack_cmds: Command tracker for event ack commands
+ * @devrem_bitmap: Device removal bitmap
+ * @dev_handle_bitmap_bits: Number of bits in device handle bitmap
+ * @removepend_bitmap: Remove pending bitmap
+ * @delayed_rmhs_list: Delayed device removal list
+ * @evtack_cmds_bitmap: Event Ack bitmap
+ * @delayed_evtack_cmds_list: Delayed event acknowledgment list
+ * @ts_update_counter: Timestamp update counter
+ * @reset_in_progress: Reset in progress flag
+ * @unrecoverable: Controller unrecoverable flag
+ * @prev_reset_result: Result of previous reset
+ * @reset_mutex: Controller reset mutex
+ * @reset_waitq: Controller reset wait queue
+ * @prepare_for_reset: Prepare for reset event received
+ * @prepare_for_reset_timeout_counter: Prepare for reset timeout
+ * @prp_list_virt: NVMe encapsulated PRP list virtual base
+ * @prp_list_dma: NVMe encapsulated PRP list DMA
+ * @prp_sz: NVME encapsulated PRP list size
+ * @diagsave_timeout: Diagnostic information save timeout
+ * @logging_level: Controller debug logging level
+ * @flush_io_count: I/O count to flush after reset
+ * @current_event: Firmware event currently in process
+ * @driver_info: Driver, Kernel, OS information to firmware
+ * @change_count: Topology change count
+ * @pel_enabled: Persistent Event Log (PEL) enabled or not
+ * @pel_abort_requested: PEL abort is requested or not
+ * @pel_class: PEL Class identifier
+ * @pel_locale: PEL Locale identifier
+ * @pel_cmds: Command tracker for PEL wait command
+ * @pel_abort_cmd: Command tracker for PEL abort command
+ * @pel_newest_seqnum: Newest PEL sequence number
+ * @pel_seqnum_virt: PEL sequence number virtual address
+ * @pel_seqnum_dma: PEL sequence number DMA address
+ * @pel_seqnum_sz: PEL sequence number size
+ * @op_reply_q_offset: Operational reply queue offset with MSIx
+ * @default_qcount: Total Default queues
+ * @active_poll_qcount: Currently active poll queue count
+ * @requested_poll_qcount: User requested poll queue count
+ * @bsg_dev: BSG device structure
+ * @bsg_queue: Request queue for BSG device
+ * @stop_bsgs: Stop BSG request flag
+ * @logdata_buf: Circular buffer to store log data entries
+ * @logdata_buf_idx: Index of entry in buffer to store
+ * @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
+ * @cfg_page: Default memory for configuration pages
+ * @cfg_page_dma: Configuration page DMA address
+ * @cfg_page_sz: Default configuration page memory size
+ * @sas_transport_enabled: SAS transport enabled or not
+ * @scsi_device_channel: Channel ID for SCSI devices
+ * @transport_cmds: Command tracker for SAS transport commands
+ * @sas_hba: SAS node for the controller
+ * @sas_expander_list: SAS node list of expanders
+ * @sas_node_lock: Lock to protect SAS node list
+ * @hba_port_table_list: List of HBA Ports
+ * @enclosure_list: List of Enclosure objects
+ */
+struct mpi3mr_ioc {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ bool enable_segqueue;
+ u32 irqpoll_sleep;
+
+ char name[MPI3MR_NAME_LENGTH];
+ char driver_name[MPI3MR_NAME_LENGTH];
+
+ volatile struct mpi3_sysif_registers __iomem *sysif_regs;
+ resource_size_t sysif_regs_phys;
+ int bars;
+ u64 dma_mask;
+
+ u16 msix_count;
+ u8 intr_enabled;
+
+ u16 num_admin_req;
+ u32 admin_req_q_sz;
+ u16 admin_req_pi;
+ u16 admin_req_ci;
+ void *admin_req_base;
+ dma_addr_t admin_req_dma;
+ spinlock_t admin_req_lock;
+
+ u16 num_admin_replies;
+ u32 admin_reply_q_sz;
+ u16 admin_reply_ci;
+ u8 admin_reply_ephase;
+ void *admin_reply_base;
+ dma_addr_t admin_reply_dma;
+ atomic_t admin_reply_q_in_use;
+
+ u32 ready_timeout;
+
+ struct mpi3mr_intr_info *intr_info;
+ u16 intr_info_count;
+ bool is_intr_info_set;
+
+ u16 num_queues;
+ u16 num_op_req_q;
+ struct op_req_qinfo *req_qinfo;
+
+ u16 num_op_reply_q;
+ struct op_reply_qinfo *op_reply_qinfo;
+
+ struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_drv_cmd cfg_cmds;
+ struct mpi3mr_ioc_facts facts;
+ u16 op_reply_desc_sz;
+
+ u32 num_reply_bufs;
+ struct dma_pool *reply_buf_pool;
+ u8 *reply_buf;
+ dma_addr_t reply_buf_dma;
+ dma_addr_t reply_buf_dma_max_address;
+
+ u16 reply_free_qsz;
+ u16 reply_sz;
+ struct dma_pool *reply_free_q_pool;
+ __le64 *reply_free_q;
+ dma_addr_t reply_free_q_dma;
+ spinlock_t reply_free_queue_lock;
+ u32 reply_free_queue_host_index;
+
+ u32 num_sense_bufs;
+ struct dma_pool *sense_buf_pool;
+ u8 *sense_buf;
+ dma_addr_t sense_buf_dma;
+
+ u16 sense_buf_q_sz;
+ struct dma_pool *sense_buf_q_pool;
+ __le64 *sense_buf_q;
+ dma_addr_t sense_buf_q_dma;
+ spinlock_t sbq_lock;
+ u32 sbq_host_index;
+ u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ char fwevt_worker_name[MPI3MR_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ u8 is_driver_loading;
+ u8 scan_started;
+ u16 scan_failed;
+ u8 stop_drv_processing;
+ u8 device_refresh_on;
+
+ u16 max_host_ios;
+ spinlock_t tgtdev_lock;
+ struct list_head tgtdev_list;
+
+ u32 chain_buf_count;
+ struct dma_pool *chain_buf_pool;
+ struct chain_element *chain_sgl_list;
+ void *chain_bitmap;
+ spinlock_t chain_buf_lock;
+
+ struct mpi3mr_drv_cmd bsg_cmds;
+ struct mpi3mr_drv_cmd host_tm_cmds;
+ struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
+ void *devrem_bitmap;
+ u16 dev_handle_bitmap_bits;
+ void *removepend_bitmap;
+ struct list_head delayed_rmhs_list;
+ void *evtack_cmds_bitmap;
+ struct list_head delayed_evtack_cmds_list;
+
+ u32 ts_update_counter;
+ u8 reset_in_progress;
+ u8 unrecoverable;
+ int prev_reset_result;
+ struct mutex reset_mutex;
+ wait_queue_head_t reset_waitq;
+
+ u8 prepare_for_reset;
+ u16 prepare_for_reset_timeout_counter;
+
+ void *prp_list_virt;
+ dma_addr_t prp_list_dma;
+ u32 prp_sz;
+
+ u16 diagsave_timeout;
+ int logging_level;
+ u16 flush_io_count;
+
+ struct mpi3mr_fwevt *current_event;
+ struct mpi3_driver_info_layout driver_info;
+ u16 change_count;
+
+ u8 pel_enabled;
+ u8 pel_abort_requested;
+ u8 pel_class;
+ u16 pel_locale;
+ struct mpi3mr_drv_cmd pel_cmds;
+ struct mpi3mr_drv_cmd pel_abort_cmd;
+
+ u32 pel_newest_seqnum;
+ void *pel_seqnum_virt;
+ dma_addr_t pel_seqnum_dma;
+ u32 pel_seqnum_sz;
+
+ u16 op_reply_q_offset;
+ u16 default_qcount;
+ u16 active_poll_qcount;
+ u16 requested_poll_qcount;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
+ u8 stop_bsgs;
+ u8 *logdata_buf;
+ u16 logdata_buf_idx;
+ u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
+
+ void *cfg_page;
+ dma_addr_t cfg_page_dma;
+ u16 cfg_page_sz;
+
+ u8 sas_transport_enabled;
+ u8 scsi_device_channel;
+ struct mpi3mr_drv_cmd transport_cmds;
+ struct mpi3mr_sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head hba_port_table_list;
+ struct list_head enclosure_list;
+};
+
+/**
+ * struct mpi3mr_fwevt - Firmware event structure.
+ *
+ * @list: list head
+ * @work: Work structure
+ * @mrioc: Adapter instance reference
+ * @event_id: MPI3 firmware event ID
+ * @send_ack: Event acknowledgment required or not
+ * @process_evt: Bottomhalf processing required or not
+ * @evt_ctx: Event context to send in Ack
+ * @event_data_size: size of the event data in bytes
+ * @pending_at_sml: waiting for device add/remove API to complete
+ * @discard: discard this event
+ * @ref_count: kref count
+ * @event_data: Actual MPI3 event data
+ */
+struct mpi3mr_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct mpi3mr_ioc *mrioc;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ u16 event_data_size;
+ bool pending_at_sml;
+ bool discard;
+ struct kref ref_count;
+ char event_data[] __aligned(4);
+};
+
+/**
+ * struct delayed_dev_rmhs_node - Delayed device removal node
+ *
+ * @list: list head
+ * @handle: Device handle
+ * @iou_rc: IO Unit Control Reason Code
+ */
+struct delayed_dev_rmhs_node {
+ struct list_head list;
+ u16 handle;
+ u8 iou_rc;
+};
+
+/**
+ * struct delayed_evt_ack_node - Delayed event ack node
+ * @list: list head
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ */
+struct delayed_evt_ack_node {
+ struct list_head list;
+ u8 event;
+ u32 event_ctx;
+};
+
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset);
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *opreqq, u8 *req);
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr);
+void mpi3mr_build_zero_len_sge(void *paddr);
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma);
+
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply);
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc,
+ u64 *reply_dma, u16 qidx);
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc);
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
+
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump);
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
+
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx);
+
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q);
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc);
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scsi_cmnd *scmd);
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size);
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+extern const struct attribute_group *mpi3mr_host_groups[];
+extern const struct attribute_group *mpi3mr_dev_groups[];
+
+extern struct sas_function_template mpi3mr_transport_functions;
+extern struct scsi_transport_template *mpi3mr_transport_template;
+
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz);
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+
+u8 mpi3mr_is_expander_device(u16 device_info);
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port);
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id);
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc);
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port);
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy);
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add);
+void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc);
+void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc);
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
+#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
new file mode 100644
index 000000000..8c662d087
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -0,0 +1,1868 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/bsg-lib.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+
+/**
+ * mpi3mr_bsg_pel_abort - sends PEL abort request
+ * @mrioc: Adapter instance reference
+ *
+ * This function sends PEL abort request to the firmware through
+ * admin request queue.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_pel_req_action_abort pel_abort_req;
+ struct mpi3_pel_reply *pel_reply;
+ int retval = 0;
+ u16 pe_log_status;
+
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ return -1;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ return -1;
+ }
+
+ memset(&pel_abort_req, 0, sizeof(pel_abort_req));
+ mutex_lock(&mrioc->pel_abort_cmd.mutex);
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return -1;
+ }
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_abort_cmd.is_waiting = 1;
+ mrioc->pel_abort_cmd.callback = NULL;
+ pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
+ pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
+ pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+
+ mrioc->pel_abort_requested = 1;
+ init_completion(&mrioc->pel_abort_cmd.done);
+ retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
+ sizeof(pel_abort_req), 0);
+ if (retval) {
+ retval = -1;
+ dprint_bsg_err(mrioc, "%s: admin request post failed\n",
+ __func__);
+ mrioc->pel_abort_requested = 0;
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->pel_abort_cmd.is_waiting = 0;
+ dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, (mrioc->pel_abort_cmd.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->pel_abort_cmd.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
+ pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, pel_status(0x%04x)\n",
+ __func__, pe_log_status);
+ retval = -1;
+ }
+ }
+
+out_unlock:
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return retval;
+}
+
+/**
+ * mpi3mr_bsg_verify_adapter - verify adapter number is valid
+ * @ioc_number: Adapter number
+ *
+ * This function returns the adapter instance pointer for the given
+ * adapter number. If the adapter number does not match any adapter
+ * in the driver's adapter list, NULL is returned.
+ *
+ * Return: adapter instance reference
+ */
+static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+
+ spin_lock(&mrioc_list_lock);
+ list_for_each_entry(mrioc, &mrioc_list, list) {
+ if (mrioc->id == ioc_number) {
+ spin_unlock(&mrioc_list_lock);
+ return mrioc;
+ }
+ }
+ spin_unlock(&mrioc_list_lock);
+ return NULL;
+}
+
+/**
+ * mpi3mr_enable_logdata - Handler for log data enable
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function enables log data caching in the driver if not
+ * already enabled and returns the maximum number of log data
+ * entries that can be cached in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_logdata_enable logdata_enable;
+
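+ /*
+ * Each cached entry holds the event data portion of an event
+ * notification reply (reply size minus the fixed reply header;
+ * the 4 subtracted bytes drop the placeholder event_data word)
+ * prefixed with the BSG log data entry header, as computed
+ * below.
+ */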
+ if (!mrioc->logdata_buf) {
+ mrioc->logdata_entry_sz =
+ (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
+ + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
+ mrioc->logdata_buf_idx = 0;
+ mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
+ mrioc->logdata_entry_sz, GFP_KERNEL);
+
+ if (!mrioc->logdata_buf)
+ return -ENOMEM;
+ }
+
+ memset(&logdata_enable, 0, sizeof(logdata_enable));
+ logdata_enable.max_entries =
+ MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &logdata_enable, sizeof(logdata_enable));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_get_logdata - Handler for get log data
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function copies the log data entries to the user buffer
+ * when log caching is enabled in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
+
+ if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
+ return -EINVAL;
+
+ num_entries = job->request_payload.payload_len / entry_sz;
+ if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
+ num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ sz = num_entries * entry_sz;
+
+ if (job->request_payload.payload_len >= sz) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mrioc->logdata_buf, sz);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function is the handler for the PEL enable driver command.
+ * It validates the application given class and locale and, if
+ * required, aborts the existing PEL wait request and/or issues a
+ * new PEL wait request to the firmware and returns.
+ *
+ * Return: 0 on success and proper error codes on failure.
+ */
+static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_bsg_out_pel_enable pel_enable;
+ u8 issue_pel_wait;
+ u8 tmp_class;
+ u16 tmp_locale;
+
+ if (job->request_payload.payload_len != sizeof(pel_enable)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return rval;
+ }
+
+ if (mrioc->unrecoverable) {
+ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ return -EAGAIN;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &pel_enable, sizeof(pel_enable));
+
+ if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
+ dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
+ __func__, pel_enable.pel_class);
+ rval = 0;
+ goto out;
+ }
+ if (!mrioc->pel_enabled)
+ issue_pel_wait = 1;
+ else {
+ if ((mrioc->pel_class <= pel_enable.pel_class) &&
+ !((mrioc->pel_locale & pel_enable.pel_locale) ^
+ pel_enable.pel_locale)) {
+ issue_pel_wait = 0;
+ rval = 0;
+ } else {
+ pel_enable.pel_locale |= mrioc->pel_locale;
+
+ if (mrioc->pel_class < pel_enable.pel_class)
+ pel_enable.pel_class = mrioc->pel_class;
+
+ rval = mpi3mr_bsg_pel_abort(mrioc);
+ if (rval) {
+ dprint_bsg_err(mrioc,
+ "%s: pel_abort failed, status(%ld)\n",
+ __func__, rval);
+ goto out;
+ }
+ issue_pel_wait = 1;
+ }
+ }
+ if (issue_pel_wait) {
+ tmp_class = mrioc->pel_class;
+ tmp_locale = mrioc->pel_locale;
+ mrioc->pel_class = pel_enable.pel_class;
+ mrioc->pel_locale = pel_enable.pel_locale;
+ mrioc->pel_enabled = 1;
+ rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
+ if (rval) {
+ mrioc->pel_class = tmp_class;
+ mrioc->pel_locale = tmp_locale;
+ mrioc->pel_enabled = 0;
+ dprint_bsg_err(mrioc,
+ "%s: pel get sequence number failed, status(%ld)\n",
+ __func__, rval);
+ }
+ }
+
+out:
+ return rval;
+}
+
+/**
+ * mpi3mr_get_all_tgt_info - Get all target information
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the driver managed target devices' device
+ * handle, persistent ID, bus ID and target ID to the user
+ * provided buffer for the specific controller. This function
+ * also provides the number of devices managed by the driver for
+ * the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ u16 num_devices = 0, i = 0, size;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_device_map_info *devmap_info = NULL;
+ struct mpi3mr_all_tgt_info *alltgt_info = NULL;
+ uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
+
+ if (job->request_payload.payload_len < sizeof(u32)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ num_devices++;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ if ((job->request_payload.payload_len <= sizeof(u64)) ||
+ list_empty(&mrioc->tgtdev_list)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &num_devices, sizeof(num_devices));
+ return 0;
+ }
+
+ kern_entrylen = num_devices * sizeof(*devmap_info);
+ size = sizeof(u64) + kern_entrylen;
+ alltgt_info = kzalloc(size, GFP_KERNEL);
+ if (!alltgt_info)
+ return -ENOMEM;
+
+ devmap_info = alltgt_info->dmi;
+ memset((u8 *)devmap_info, 0xFF, kern_entrylen);
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (i < num_devices) {
+ devmap_info[i].handle = tgtdev->dev_handle;
+ devmap_info[i].perst_id = tgtdev->perst_id;
+ if (tgtdev->host_exposed && tgtdev->starget) {
+ devmap_info[i].target_id = tgtdev->starget->id;
+ devmap_info[i].bus_id =
+ tgtdev->starget->channel;
+ }
+ i++;
+ }
+ }
+ num_devices = i;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ alltgt_info->num_devices = num_devices;
+
+ usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
+ sizeof(*devmap_info);
+ usr_entrylen *= sizeof(*devmap_info);
+ min_entrylen = min(usr_entrylen, kern_entrylen);
+
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ alltgt_info, (min_entrylen + sizeof(u64)));
+ kfree(alltgt_info);
+ return 0;
+}
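+
+/*
+ * Layout of the buffer produced by mpi3mr_get_all_tgt_info() above:
+ * the first sizeof(u64) bytes carry the device count, followed by
+ * one struct mpi3mr_device_map_info entry per reported device,
+ * truncated to what fits in the user supplied payload length.
+ */
+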
+/**
+ * mpi3mr_get_change_count - Get topology change count
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the topology change count, reported through
+ * events and cached in the driver, to the user provided buffer for
+ * the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_change_count chgcnt;
+
+ memset(&chgcnt, 0, sizeof(chgcnt));
+ chgcnt.change_count = mrioc->change_count;
+ if (job->request_payload.payload_len >= sizeof(chgcnt)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &chgcnt, sizeof(chgcnt));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_adp_reset - Issue controller reset
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function identifies the user provided reset type, issues
+ * the appropriate reset to the controller, waits for it to
+ * complete and reinitializes the controller before returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ u8 save_snapdump;
+ struct mpi3mr_bsg_adp_reset adpreset;
+
+ if (job->request_payload.payload_len !=
+ sizeof(adpreset)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpreset, sizeof(adpreset));
+
+ switch (adpreset.reset_type) {
+ case MPI3MR_BSG_ADPRESET_SOFT:
+ save_snapdump = 0;
+ break;
+ case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
+ save_snapdump = 1;
+ break;
+ default:
+ dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
+ __func__, adpreset.reset_type);
+ goto out;
+ }
+
+ rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
+ save_snapdump);
+
+ if (rval)
+ dprint_bsg_err(mrioc,
+ "%s: reset handler returned error(%ld) for reset type %d\n",
+ __func__, rval, adpreset.reset_type);
+out:
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function provides adapter information for the given
+ * controller
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ enum mpi3mr_iocstate ioc_state;
+ struct mpi3mr_bsg_in_adpinfo adpinfo;
+
+ memset(&adpinfo, 0, sizeof(adpinfo));
+ adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
+ adpinfo.pci_dev_id = mrioc->pdev->device;
+ adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
+ adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
+ adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
+ adpinfo.pci_bus = mrioc->pdev->bus->number;
+ adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
+ adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
+ adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
+ adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
+ sizeof(adpinfo.driver_info));
+
+ if (job->request_payload.payload_len >= sizeof(adpinfo)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpinfo, sizeof(adpinfo));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_process_drv_cmds - Driver Command handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for driver commands;
+ * it does basic validation of the buffer, identifies the
+ * opcode and switches to the correct sub handler.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
+
+ bsg_req = job->request;
+ drvrcmd = &bsg_req->cmd.drvrcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
+ rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
+ return rval;
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
+ return -ERESTARTSYS;
+
+ switch (drvrcmd->opcode) {
+ case MPI3MR_DRVBSG_OPCODE_ADPRESET:
+ rval = mpi3mr_bsg_adp_reset(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
+ rval = mpi3mr_get_all_tgt_info(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
+ rval = mpi3mr_get_change_count(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
+ rval = mpi3mr_enable_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
+ rval = mpi3mr_get_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_PELENABLE:
+ rval = mpi3mr_bsg_pel_enable(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
+ default:
+ pr_err("%s: unsupported driver command opcode %d\n",
+ MPI3MR_DRIVER_NAME, drvrcmd->opcode);
+ break;
+ }
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mpi_req: MPI request
+ * @sgl_offset: offset to start sgl in the MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ * @is_rmc: Does the buffer list have a management command buffer
+ * @is_rmr: Does the buffer list have a management response buffer
+ * @num_datasges: Number of data buffers in the list
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given MPI request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
+ u8 is_rmr, u8 num_datasges)
+{
+ u8 *sgl = (mpi_req + sgl_offset), count = 0;
+ struct mpi3_mgmt_passthrough_request *rmgmt_req =
+ (struct mpi3_mgmt_passthrough_request *)mpi_req;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u8 sgl_flags, sgl_flags_last;
+
+ sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
+ sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+
+ if (is_rmc) {
+ mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
+ drv_buf_iter++;
+ count++;
+ if (is_rmr) {
+ mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ drv_buf_iter++;
+ count++;
+ } else {
+ mpi3mr_build_zero_len_sge(
+ &rmgmt_req->response_sgl);
+ }
+ }
+ if (!num_datasges) {
+ mpi3mr_build_zero_len_sge(sgl);
+ return;
+ }
+ for (; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (num_datasges == 1 || !is_rmc)
+ mpi3mr_add_sg_single(sgl, sgl_flags_last,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ else
+ mpi3mr_add_sg_single(sgl, sgl_flags,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ sgl += sizeof(struct mpi3_sge_common);
+ num_datasges--;
+ }
+}
+
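+/*
+ * Summary of the SGE layout built above: for a management command
+ * (is_rmc) the first buffer is placed in the request's command SGL
+ * and the optional response buffer in its response SGL (or a zero
+ * length SGE), with any data SGEs appended inside the command buffer
+ * past the BSG payload; otherwise data SGEs are built at @sgl_offset
+ * in the MPI request, the last one flagged END_OF_LIST.
+ */
+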
+/**
+ * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ *
+ * This function returns the type of the data format specified
+ * in the user provided NVMe command within the NVMe encapsulated
+ * request.
+ *
+ * Return: Data format of the NVMe command (PRP/SGL etc)
+ */
+static unsigned int mpi3mr_get_nvme_data_fmt(
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request)
+{
+ u8 format = 0;
+
+ format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
+ return format;
+}
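+
+/*
+ * The bits extracted above are bits 15:14 of NVMe command dword 0,
+ * the PSDT field of the NVMe base specification: 0 selects PRPs and
+ * the non-zero encodings select SGL variants; callers dispatch to
+ * mpi3mr_build_nvme_prp() or mpi3mr_build_nvme_sgl() accordingly.
+ */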
+
+/**
+ * mpi3mr_build_nvme_sgl - SGL constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given NVMe encapsulated request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ struct mpi3mr_nvme_pt_sge *nvme_sgl;
+ u64 sgl_ptr;
+ u8 count;
+ size_t length = 0;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any sgl.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+ if (!length)
+ return 0;
+
+ if (sgl_ptr & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: SGL address collides with SGE modifier\n",
+ __func__);
+ return -1;
+ }
+
+ sgl_ptr &= ~sgemod_mask;
+ sgl_ptr |= sgemod_val;
+ nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
+ ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
+ memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
+ nvme_sgl->base_addr = sgl_ptr;
+ nvme_sgl->length = length;
+ return 0;
+}
+
+/**
+ * mpi3mr_build_nvme_prp - PRP constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in SGL
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as PRP entries in the given NVMe encapsulated
+ * request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ int prp_size = MPI3MR_NVME_PRP_SIZE;
+ __le64 *prp_entry, *prp1_entry, *prp2_entry;
+ __le64 *prp_page;
+ dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+ u32 offset, entry_len, dev_pgsz;
+ u32 page_mask_result, page_mask;
+ size_t length = 0;
+ u8 count;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+ u16 dev_handle = nvme_encap_request->dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev) {
+ dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
+ __func__, dev_handle);
+ return -1;
+ }
+
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
+ dprint_bsg_err(mrioc,
+ "%s: NVMe device page size is zero for handle 0x%04x\n",
+ __func__, dev_handle);
+ mpi3mr_tgtdev_put(tgtdev);
+ return -1;
+ }
+
+ dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
+ mpi3mr_tgtdev_put(tgtdev);
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any PRP.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ dma_addr = drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+
+ if (!length)
+ return 0;
+
+ mrioc->prp_sz = 0;
+ mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
+
+ if (!mrioc->prp_list_virt)
+ return -1;
+ mrioc->prp_sz = dev_pgsz;
+
+ /*
+ * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+ * PRP1 is located at a 24 byte offset from the start of the NVMe
+ * command. Then set the current PRP entry pointer to PRP1.
+ */
+ prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP1_OFFSET);
+ prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP2_OFFSET);
+ prp_entry = prp1_entry;
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory.
+ */
+ prp_page = (__le64 *)mrioc->prp_list_virt;
+ prp_page_dma = mrioc->prp_list_dma;
+
+ /*
+ * Check if we are within 1 entry of a page boundary; we don't
+ * want our first entry to be a PRP list entry.
+ */
+ page_mask = dev_pgsz - 1;
+ page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+ if (!page_mask_result) {
+ dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
+ __func__);
+ goto err_out;
+ }
+
+ /*
+ * Set PRP physical pointer, which initially points to the current PRP
+ * DMA memory page.
+ */
+ prp_entry_dma = prp_page_dma;
+
+ /* Loop while the length is not zero. */
+ while (length) {
+ page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+ if (!page_mask_result && (length > dev_pgsz)) {
+ dprint_bsg_err(mrioc,
+ "%s: single PRP page is not sufficient\n",
+ __func__);
+ goto err_out;
+ }
+
+ /* Need to handle if entry will be part of a page. */
+ offset = dma_addr & page_mask;
+ entry_len = dev_pgsz - offset;
+
+ if (prp_entry == prp1_entry) {
+ /*
+ * Must fill in the first PRP pointer (PRP1) before
+ * moving on.
+ */
+ *prp1_entry = cpu_to_le64(dma_addr);
+ if (*prp1_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP1 address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp1_entry &= ~sgemod_mask;
+ *prp1_entry |= sgemod_val;
+
+ /*
+ * Now point to the second PRP entry within the
+ * command (PRP2).
+ */
+ prp_entry = prp2_entry;
+ } else if (prp_entry == prp2_entry) {
+ /*
+ * Should the PRP2 entry be a PRP List pointer or just
+ * a regular PRP pointer? If there is more than one
+ * more page of data, must use a PRP List pointer.
+ */
+ if (length > dev_pgsz) {
+ /*
+ * PRP2 will contain a PRP List pointer because
+ * more PRP's are needed with this command. The
+ * list will start at the beginning of the
+ * contiguous buffer.
+ */
+ *prp2_entry = cpu_to_le64(prp_entry_dma);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP list address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+
+ /*
+ * The next PRP Entry will be the start of the
+ * first PRP List.
+ */
+ prp_entry = prp_page;
+ continue;
+ } else {
+ /*
+ * After this, the PRP Entries are complete.
+ * This command uses 2 PRP's and no PRP list.
+ */
+ *prp2_entry = cpu_to_le64(dma_addr);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP2 collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+ }
+ } else {
+ /*
+ * Put entry in list and bump the addresses.
+ *
+ * After PRP1 and PRP2 are filled in, this will fill in
+ * all remaining PRP entries in a PRP List, one per
+ * each time through the loop.
+ */
+ *prp_entry = cpu_to_le64(dma_addr);
+ if (*prp_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp_entry &= ~sgemod_mask;
+ *prp_entry |= sgemod_val;
+ prp_entry++;
+ prp_entry_dma += prp_size;
+ }
+
+ /*
+ * Bump the phys address of the command's data buffer by the
+ * entry_len.
+ */
+ dma_addr += entry_len;
+
+ /* decrement length accounting for last partial page. */
+ if (entry_len > length)
+ length = 0;
+ else
+ length -= entry_len;
+ }
+ return 0;
+err_out:
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+ return -1;
+}
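+
+/*
+ * Worked example (illustrative, assuming a 4 KiB NVMe device page
+ * size): for a 12 KiB transfer starting at a page-aligned dma_addr,
+ * the loop above fills PRP1 with dma_addr and, because more than one
+ * page then remains, points PRP2 at the allocated PRP list; the list
+ * receives two entries, dma_addr + 0x1000 and dma_addr + 0x2000,
+ * each masked and tagged with the controller's SGE modifier value.
+ */
+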
+/**
+ * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for the MPI pass-through
+ * command. It performs basic validation of the input data buffers,
+ * identifies the given buffer types and the MPI command, allocates
+ * DMAable memory for the user given buffers, constructs the SGL
+ * properly and passes the command to the firmware.
+ *
+ * Once the MPI command is completed, the driver copies the data,
+ * reply and sense information, if any, to the user provided
+ * buffers. If the command times out, a controller reset is issued
+ * prior to returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_ioc *mrioc = NULL;
+ u8 *mpi_req = NULL, *sense_buff_k = NULL;
+ u8 mpi_msg_size = 0;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_mptcmd *karg;
+ struct mpi3mr_buf_entry *buf_entries = NULL;
+ struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
+ u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
+ u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
+ u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
+ struct mpi3_request_header *mpi_header = NULL;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_scsi_task_mgmt_request *tm_req;
+ u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
+ u16 dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
+ struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
+ u32 din_size = 0, dout_size = 0;
+ u8 *din_buf = NULL, *dout_buf = NULL;
+ u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
+
+ bsg_req = job->request;
+ karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
+ karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
+
+ mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
+ if (!mpi_req)
+ return -ENOMEM;
+ mpi_header = (struct mpi3_request_header *)mpi_req;
+
+ bufcnt = karg->buf_entry_list.num_of_entries;
+ drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
+ if (!drv_bufs) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ dout_buf = kzalloc(job->request_payload.payload_len,
+ GFP_KERNEL);
+ if (!dout_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ din_buf = kzalloc(job->reply_payload.payload_len,
+ GFP_KERNEL);
+ if (!din_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ dout_buf, job->request_payload.payload_len);
+
+ buf_entries = karg->buf_entry_list.buf_entry;
+ sgl_din_iter = din_buf;
+ sgl_dout_iter = dout_buf;
+ drv_buf_iter = drv_bufs;
+
+ for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
+ if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ switch (buf_entries->buf_type) {
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ is_rmcb = 1;
+ if (count != 0)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ is_rmrb = 1;
+ if (count != 1 || !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_IN:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ din_cnt++;
+ din_size += drv_buf_iter->bsg_buf_len;
+ if ((din_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_OUT:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ dout_cnt++;
+ dout_size += drv_buf_iter->bsg_buf_len;
+ if ((dout_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpirep_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ erb_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpi_msg_size = buf_entries->buf_len;
+ if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
+ (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
+ dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
+ break;
+ default:
+ invalid_be = 1;
+ break;
+ }
+ if (invalid_be) {
+ dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter->bsg_buf = sgl_iter;
+ drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
+ }
+ if (!is_rmcb && (dout_cnt || din_cnt)) {
+ sg_entries = dout_cnt + din_cnt;
+ if (((mpi_msg_size) + (sg_entries *
+ sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid message size passed\n",
+ __func__, __LINE__);
+ rval = -EINVAL;
+ goto out;
+ }
+ }
+ if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
+ __func__, __LINE__, mpi_header->function, din_size);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
+ __func__, __LINE__, mpi_header->function, dout_size);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+
+ drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
+ if (is_rmcb && !count)
+ drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
+ sizeof(struct mpi3_sge_common));
+
+ if (!drv_buf_iter->kern_buf_len)
+ continue;
+
+ drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
+ GFP_KERNEL);
+ if (!drv_buf_iter->kern_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
+ }
+ }
+
+ if (erb_offset != 0xFF) {
+ sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
+ if (!sense_buff_k) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
+ rval = -ERESTARTSYS;
+ goto out;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
+ rval = -EAGAIN;
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->unrecoverable) {
+ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ __func__);
+ rval = -EFAULT;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_fmt = mpi3mr_get_nvme_data_fmt(
+ (struct mpi3_nvme_encapsulated_request *)mpi_req);
+ if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
+ if (mpi3mr_build_nvme_prp(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -ENOMEM;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
+ nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
+ if (mpi3mr_build_nvme_sgl(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ dprint_bsg_err(mrioc,
+ "%s:invalid NVMe command format\n", __func__);
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
+ drv_bufs, bufcnt, is_rmcb, is_rmrb,
+ (dout_cnt + din_cnt));
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
+ tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
+ if (tm_req->task_type !=
+ MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ dev_handle = tm_req->dev_handle;
+ block_io = 1;
+ }
+ }
+ if (block_io) {
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+ stgt_priv = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ atomic_inc(&stgt_priv->block_io);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->bsg_cmds.is_waiting = 1;
+ mrioc->bsg_cmds.callback = NULL;
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = sense_buff_k;
+ memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
+ mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
+ dprint_bsg_info(mrioc,
+ "%s: posting bsg request to the controller\n", __func__);
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ init_completion(&mrioc->bsg_cmds.done);
+ rval = mpi3mr_admin_request_post(mrioc, mpi_req,
+ MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
+
+ if (rval) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ dprint_bsg_err(mrioc,
+ "%s: posting bsg request is failed\n", __func__);
+ rval = -EAGAIN;
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->bsg_cmds.done,
+ (karg->timeout * HZ));
+ if (block_io && stgt_priv)
+ atomic_dec(&stgt_priv->block_io);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ rval = -EAGAIN;
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
+ goto out_unlock;
+ dprint_bsg_err(mrioc,
+ "%s: bsg request timedout after %d seconds\n", __func__,
+ karg->timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function ==
+ MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
+ (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
+ mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mpi_header->function_dependent, 0,
+ MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
+ &mrioc->host_tm_cmds, &resp_code, NULL);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
+ !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
+ goto out_unlock;
+ }
+ dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
+
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+
+ if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_info(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->bsg_cmds.ioc_loginfo);
+ }
+
+ if ((mpirep_offset != 0xFF) &&
+ drv_bufs[mpirep_offset].bsg_buf_len) {
+ drv_buf_iter = &drv_bufs[mpirep_offset];
+ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+ mrioc->reply_sz);
+ bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+
+ if (!bsg_reply_buf) {
+ rval = -ENOMEM;
+ goto out_unlock;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
+ memcpy(bsg_reply_buf->reply_buf,
+ mrioc->bsg_cmds.reply, mrioc->reply_sz);
+ } else {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
+ status_desc = (struct mpi3_status_reply_descriptor *)
+ bsg_reply_buf->reply_buf;
+ status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
+ status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
+ }
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
+ }
+
+ if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
+ mrioc->bsg_cmds.is_sense) {
+ drv_buf_iter = &drv_bufs[erb_offset];
+ tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf,
+ drv_buf_iter->kern_buf, tmplen);
+ }
+ }
+
+out_unlock:
+ if (din_buf) {
+ *reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ din_buf, job->reply_payload.payload_len);
+ }
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = NULL;
+ mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+out:
+ kfree(sense_buff_k);
+ kfree(dout_buf);
+ kfree(din_buf);
+ kfree(mpi_req);
+ if (drv_bufs) {
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
+ dma_free_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_dma);
+ }
+ kfree(drv_bufs);
+ }
+ kfree(bsg_reply_buf);
+ return rval;
+}
+
+/**
+ * mpi3mr_app_save_logdata - Save Log Data events
+ * @mrioc: Adapter instance reference
+ * @event_data: event data associated with log data event
+ * @event_data_size: event data size to copy
+ *
+ * If log data event caching is enabled by the applications,
+ * then this function saves the log data in the circular queue
+ * and sends the async signal SIGIO to indicate there is an async
+ * event from the firmware to the event monitoring applications.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size)
+{
+ u32 index = mrioc->logdata_buf_idx, sz;
+ struct mpi3mr_logdata_entry *entry;
+
+ if (!(mrioc->logdata_buf))
+ return;
+
+ entry = (struct mpi3mr_logdata_entry *)
+ (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
+ entry->valid_entry = 1;
+ sz = min(mrioc->logdata_entry_sz, event_data_size);
+ memcpy(entry->data, event_data, sz);
+ mrioc->logdata_buf_idx =
+ ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
+ atomic64_inc(&event_counter);
+}
+
+/**
+ * mpi3mr_bsg_request - bsg request entry point
+ * @job: BSG job reference
+ *
+ * This is the driver's entry point for bsg requests.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static int mpi3mr_bsg_request(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ unsigned int reply_payload_rcv_len = 0;
+ struct mpi3mr_bsg_packet *bsg_req = job->request;
+
+ switch (bsg_req->cmd_type) {
+ case MPI3MR_DRV_CMD:
+ rval = mpi3mr_bsg_process_drv_cmds(job);
+ break;
+ case MPI3MR_MPT_CMD:
+ rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+ break;
+ default:
+ pr_err("%s: unsupported BSG command(0x%08x)\n",
+ MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
+ break;
+ }
+
+ bsg_job_done(job, rval, reply_payload_rcv_len);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver unload and all
+ * bsg resources allocated during load will be freed.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+
+ if (!mrioc->bsg_queue)
+ return;
+
+ bsg_remove_queue(mrioc->bsg_queue);
+ mrioc->bsg_queue = NULL;
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+/**
+ * mpi3mr_bsg_node_release - release bsg device node
+ * @dev: bsg device node
+ *
+ * Decrements the bsg device's parent reference count.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * mpi3mr_bsg_init - registration with bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver load and it will
+ * register the driver with the bsg layer.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ struct device *parent = &mrioc->shost->shost_gendev;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = mpi3mr_bsg_node_release;
+
+ dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
+
+ if (device_add(bsg_dev)) {
+ ioc_err(mrioc, "%s: bsg device add failed\n",
+ dev_name(bsg_dev));
+ put_device(bsg_dev);
+ return;
+ }
+
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
+ mpi3mr_bsg_request, NULL, 0);
+ if (IS_ERR(mrioc->bsg_queue)) {
+ ioc_err(mrioc, "%s: bsg registration failed\n",
+ dev_name(bsg_dev));
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+ return;
+ }
+
+ blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
+ blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
+}
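+
+/*
+ * With the bsg node registered above, a management application can
+ * reach the controller through /dev/bsg/mpi3mrctl<id> using the
+ * sg_io_v4 interface; a minimal user-space sketch (the device path
+ * and the omitted request/payload setup are illustrative only):
+ *
+ *	struct sg_io_v4 io = {
+ *		.guard = 'Q',
+ *		.protocol = BSG_PROTOCOL_SCSI,
+ *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
+ *	};
+ *	int fd = open("/dev/bsg/mpi3mrctl0", O_RDWR);
+ *
+ *	ioctl(fd, SG_IO, &io);
+ */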
+
+/**
+ * version_fw_show - SysFS callback for firmware version read
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware version
+ */
+static ssize_t
+version_fw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+}
+static DEVICE_ATTR_RO(version_fw);
+
+/**
+ * fw_queue_depth_show - SysFS callback for firmware max cmds
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware max commands
+ */
+static ssize_t
+fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
+}
+static DEVICE_ATTR_RO(fw_queue_depth);
+
+/**
+ * op_req_q_count_show - SysFS callback for request queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying request queue count
+ */
+static ssize_t
+op_req_q_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
+}
+static DEVICE_ATTR_RO(op_req_q_count);
+
+/**
+ * reply_queue_count_show - SysFS callback for reply queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying reply queue count
+ */
+static ssize_t
+reply_queue_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
+}
+static DEVICE_ATTR_RO(reply_queue_count);
+
+/**
+ * logging_level_show - Show controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * A sysfs 'read/write' shost attribute, to show the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+logging_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
+}
+
+/**
+ * logging_level_store - Change controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ * @count: size of the buffer
+ *
+ * A sysfs 'read/write' shost attribute, to change the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: strlen() return
+ */
+static ssize_t
+logging_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int val = 0;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ mrioc->logging_level = val;
+ ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(logging_level);
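+
+/*
+ * For example (host number assumed), BSG related debug messages can
+ * be enabled at runtime with:
+ *
+ *	echo 0x18000 > /sys/class/scsi_host/host0/logging_level
+ *
+ * which corresponds to MPI3_DEBUG_BSG_ERROR | MPI3_DEBUG_BSG_INFO.
+ */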
+
+/**
+ * adp_state_show() - SysFS callback for adapter state show
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying adapter state
+ */
+static ssize_t
+adp_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ enum mpi3mr_iocstate ioc_state;
+ uint8_t adp_state;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ return sysfs_emit(buf, "%u\n", adp_state);
+}
+static DEVICE_ATTR_RO(adp_state);
+
+static struct attribute *mpi3mr_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_op_req_q_count.attr,
+ &dev_attr_reply_queue_count.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_adp_state.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_host_attr_group = {
+ .attrs = mpi3mr_host_attrs
+};
+
+const struct attribute_group *mpi3mr_host_groups[] = {
+ &mpi3mr_host_attr_group,
+ NULL,
+};
+
+
+/*
+ * SCSI Device attributes under sysfs
+ */
+
+/**
+ * sas_address_show - SysFS callback for dev SAS address display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying SAS address of the
+ * specific SAS/SATA end device.
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
+ return 0;
+ return sysfs_emit(buf, "0x%016llx\n",
+ (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
+}
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * device_handle_show - SysFS callback for device handle display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware internal
+ * device handle of the specific device.
+ */
+static ssize_t
+device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
+}
+static DEVICE_ATTR_RO(device_handle);
+
+/**
+ * persistent_id_show - SysFS callback for persistent ID display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying the persistent ID of
+ * the specific device.
+ */
+static ssize_t
+persistent_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
+}
+static DEVICE_ATTR_RO(persistent_id);
+
+static struct attribute *mpi3mr_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_device_handle.attr,
+ &dev_attr_persistent_id.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_dev_attr_group = {
+ .attrs = mpi3mr_dev_attrs
+};
+
+const struct attribute_group *mpi3mr_dev_groups[] = {
+ &mpi3mr_dev_attr_group,
+ NULL,
+};
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
new file mode 100644
index 000000000..ee6edd832
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3SAS_DEBUG_H_INCLUDED
+
+#define MPI3SAS_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+
+#define MPI3_DEBUG_EVENT 0x00000001
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
+#define MPI3_DEBUG_INIT 0x00000004
+#define MPI3_DEBUG_EXIT 0x00000008
+#define MPI3_DEBUG_TM 0x00000010
+#define MPI3_DEBUG_RESET 0x00000020
+#define MPI3_DEBUG_SCSI_ERROR 0x00000040
+#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_CFG_ERROR 0x00000100
+#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200
+#define MPI3_DEBUG_BSG_ERROR 0x00008000
+#define MPI3_DEBUG_BSG_INFO 0x00010000
+#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG_CFG_INFO 0x00040000
+#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000
+#define MPI3_DEBUG 0x01000000
+#define MPI3_DEBUG_SG 0x02000000
+
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+#define dprint(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_th(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_bh(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_init(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_INIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_exit(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EXIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_tm(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TM) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reply(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_REPLY) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reset(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_RESET) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
+ do { \
+ if (ioc->logging_level & LOG_LEVEL) \
+ scsi_print_command(SCMD); \
+ } while (0)
+
+
+#define dprint_bsg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_bsg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+#define dprint_transport_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_transport_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * dprint_dump - print contents of a memory buffer
+ * @req: Pointer to a memory buffer
+ * @sz: Memory buffer size
+ * @name_string: Name string to identify the buffer type
+ */
+static inline void
+dprint_dump(void *req, int sz, const char *name_string)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ sz = sz / 4;
+ if (name_string)
+ pr_info("%s:\n\t", name_string);
+ else
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
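+
+/*
+ * Example usage, mirroring the calls made from the BSG path in this
+ * driver:
+ *
+ *	dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, "bsg_mpi3_req");
+ */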
+
+/**
+ * dprint_dump_req - print message frame contents
+ * @req: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+dprint_dump_req(void *req, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+
+#endif /* MPI3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
new file mode 100644
index 000000000..d2c7de804
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -0,0 +1,5816 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static int
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data);
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ writeq(b, addr);
+}
+#else
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ __u64 data_out = b;
+
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+}
+#endif
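+
+/*
+ * On targets without a native writeq(), the fallback above issues
+ * the low dword first and the high dword second, matching the
+ * io-64-nonatomic-lo-hi.h convention included above; the two writes
+ * are not atomic, which the controller registers written this way
+ * are assumed to tolerate.
+ */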
+
+static inline bool
+mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
+{
+ u16 pi, ci, max_entries;
+ bool is_qfull = false;
+
+ pi = op_req_q->pi;
+ ci = READ_ONCE(op_req_q->ci);
+ max_entries = op_req_q->num_requests;
+
+ if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
+ is_qfull = true;
+
+ return is_qfull;
+}
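+
+/*
+ * Worked example: with num_requests == 8, the check above reports
+ * full when pi == 6 and ci == 7 (ci == pi + 1) or when pi == 7 and
+ * ci == 0 (the wrap case); one slot is deliberately left unused so
+ * that a full queue is distinguishable from an empty one.
+ */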
+
+static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
+{
+ u16 i, max_vectors;
+
+ max_vectors = mrioc->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(mrioc->pdev, i));
+}
+
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 0;
+ mpi3mr_sync_irqs(mrioc);
+}
+
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 1;
+}
+
+static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (!mrioc->intr_info)
+ return;
+
+ for (i = 0; i < mrioc->intr_info_count; i++)
+ free_irq(pci_irq_vector(mrioc->pdev, i),
+ (mrioc->intr_info + i));
+
+ kfree(mrioc->intr_info);
+ mrioc->intr_info = NULL;
+ mrioc->intr_info_count = 0;
+ mrioc->is_intr_info_set = false;
+ pci_free_irq_vectors(mrioc->pdev);
+}
+
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr)
+{
+ struct mpi3_sge_common *sgel = paddr;
+
+ sgel->flags = flags;
+ sgel->length = cpu_to_le32(length);
+ sgel->address = cpu_to_le64(dma_addr);
+}
+
+void mpi3mr_build_zero_len_sge(void *paddr)
+{
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
+}
+
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ if ((phys_addr < mrioc->reply_buf_dma) ||
+ (phys_addr > mrioc->reply_buf_dma_max_address))
+ return NULL;
+
+ return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
+}
+
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
+}
+
+static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
+ u64 reply_dma)
+{
+ u32 old_idx = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
+ old_idx = mrioc->reply_free_queue_host_index;
+ mrioc->reply_free_queue_host_index = (
+ (mrioc->reply_free_queue_host_index ==
+ (mrioc->reply_free_qsz - 1)) ? 0 :
+ (mrioc->reply_free_queue_host_index + 1));
+ mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+ spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
+}
+
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma)
+{
+ u32 old_idx = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sbq_lock, flags);
+ old_idx = mrioc->sbq_host_index;
+ mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
+ (mrioc->sense_buf_q_sz - 1)) ? 0 :
+ (mrioc->sbq_host_index + 1));
+ mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+ spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
+}
+
+static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ event = event_reply->event;
+
+ switch (event) {
+ case MPI3_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI3_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI3_EVENT_GPIO_INTERRUPT:
+ desc = "GPIO Interrupt";
+ break;
+ case MPI3_EVENT_CABLE_MGMT:
+ desc = "Cable Management";
+ break;
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ desc = "Energy Pack Change";
+ break;
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ struct mpi3_event_data_device_status_change *event_data =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+ ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
+ event_data->dev_handle, event_data->reason_code);
+ return;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ {
+ struct mpi3_event_data_sas_discovery *event_data =
+ (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
+ ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
+ (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop",
+ le32_to_cpu(event_data->discovery_status));
+ return;
+ }
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
+ desc = "SAS Notify Primitive";
+ break;
+ case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "Enclosure Device Status Change";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ desc = "Enclosure Added";
+ break;
+ case MPI3_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI3_EVENT_SAS_PHY_COUNTER:
+ desc = "SAS PHY Counter";
+ break;
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ desc = "SAS Device Discovery Error";
+ break;
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
+ break;
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ {
+ struct mpi3_event_data_pcie_enumeration *event_data =
+ (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
+ ioc_info(mrioc, "PCIE Enumeration: (%s)",
+ (event_data->reason_code ==
+ MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
+ if (event_data->enumeration_status)
+ ioc_info(mrioc, "enumeration_status(0x%08x)\n",
+ le32_to_cpu(event_data->enumeration_status));
+ return;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ desc = "Prepare For Reset";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ ioc_info(mrioc, "%s\n", desc);
+}
+
+static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply *def_reply)
+{
+ struct mpi3_event_notification_reply *event_reply =
+ (struct mpi3_event_notification_reply *)def_reply;
+
+ mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
+ mpi3mr_print_event_data(mrioc, event_reply);
+ mpi3mr_os_handle_events(mrioc, event_reply);
+}
+
+static struct mpi3mr_drv_cmd *
+mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
+ struct mpi3_default_reply *def_reply)
+{
+ u16 idx;
+
+ switch (host_tag) {
+ case MPI3MR_HOSTTAG_INITCMDS:
+ return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_CFG_CMDS:
+ return &mrioc->cfg_cmds;
+ case MPI3MR_HOSTTAG_BSG_CMDS:
+ return &mrioc->bsg_cmds;
+ case MPI3MR_HOSTTAG_BLK_TMS:
+ return &mrioc->host_tm_cmds;
+ case MPI3MR_HOSTTAG_PEL_ABORT:
+ return &mrioc->pel_abort_cmd;
+ case MPI3MR_HOSTTAG_PEL_WAIT:
+ return &mrioc->pel_cmds;
+ case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
+ return &mrioc->transport_cmds;
+ case MPI3MR_HOSTTAG_INVALID:
+ if (def_reply && def_reply->function ==
+ MPI3_FUNCTION_EVENT_NOTIFICATION)
+ mpi3mr_handle_events(mrioc, def_reply);
+ return NULL;
+ default:
+ break;
+ }
+ if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ return &mrioc->dev_rmhs_cmds[idx];
+ }
+
+ if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ return &mrioc->evtack_cmds[idx];
+ }
+
+ return NULL;
+}
+
+static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_address_reply_descriptor *addr_desc;
+ struct mpi3_success_reply_descriptor *success_desc;
+ struct mpi3_default_reply *def_reply = NULL;
+ struct mpi3mr_drv_cmd *cmdptr = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply;
+ u8 *sense_buf = NULL;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
+ if (!def_reply)
+ goto out;
+ host_tag = le16_to_cpu(def_reply->host_tag);
+ ioc_status = le16_to_cpu(def_reply->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
+ scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+
+ cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
+ if (cmdptr) {
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_COMPLETE;
+ cmdptr->ioc_loginfo = ioc_loginfo;
+ cmdptr->ioc_status = ioc_status;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (def_reply) {
+ cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
+ memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
+ mrioc->reply_sz);
+ }
+ if (sense_buf && cmdptr->sensebuf) {
+ cmdptr->is_sense = 1;
+ memcpy(cmdptr->sensebuf, sense_buf,
+ MPI3MR_SENSE_BUF_SZ);
+ }
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+ }
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+{
+ u32 exp_phase = mrioc->admin_reply_ephase;
+ u32 admin_reply_ci = mrioc->admin_reply_ci;
+ u32 num_admin_replies = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ return 0;
+
+ reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&mrioc->admin_reply_q_in_use);
+ return 0;
+ }
+
+ do {
+ if (mrioc->unrecoverable)
+ break;
+
+ mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
+ mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_admin_replies++;
+ if (++admin_reply_ci == mrioc->num_admin_replies) {
+ admin_reply_ci = 0;
+ exp_phase ^= 1;
+ }
+ reply_desc =
+ (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ } while (1);
+
+ writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ mrioc->admin_reply_ci = admin_reply_ci;
+ mrioc->admin_reply_ephase = exp_phase;
+ atomic_dec(&mrioc->admin_reply_q_in_use);
+
+ return num_admin_replies;
+}
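+
+/*
+ * The expected-phase (ephase) handling above is the usual phase-bit
+ * scheme: the firmware writes each new descriptor with the current
+ * phase value, and the host toggles its expected phase each time the
+ * consumer index wraps; e.g. with num_admin_replies == 64, ephase
+ * flips from 1 to 0 when admin_reply_ci wraps from 63 back to 0.
+ */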
+
+/**
+ * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
+ * queue's consumer index from operational reply descriptor queue.
+ * @op_reply_q: op_reply_qinfo object
+ * @reply_ci: operational reply descriptor's queue consumer index
+ *
+ * Return: reply descriptor frame address
+ */
+static inline struct mpi3_default_reply_descriptor *
+mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
+{
+ void *segment_base_addr;
+ struct segments *segments = op_reply_q->q_segments;
+ struct mpi3_default_reply_descriptor *reply_desc = NULL;
+
+ segment_base_addr =
+ segments[reply_ci / op_reply_q->segment_qd].segment;
+ reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
+ (reply_ci % op_reply_q->segment_qd);
+ return reply_desc;
+}
+
+/**
+ * mpi3mr_process_op_reply_q - Operational reply queue handler
+ * @mrioc: Adapter instance reference
+ * @op_reply_q: Operational reply queue info
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already being processed or has no new
+ * entries, or the number of reply descriptors processed.
+ */
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q)
+{
+ struct op_req_qinfo *op_req_q;
+ u32 exp_phase;
+ u32 reply_ci;
+ u32 num_op_reply = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+ u16 req_q_idx = 0, reply_qidx;
+
+ reply_qidx = op_reply_q->qid - 1;
+
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
+ exp_phase = op_reply_q->ephase;
+ reply_ci = op_reply_q->ci;
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
+ return 0;
+ }
+
+ do {
+ if (mrioc->unrecoverable)
+ break;
+
+ req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
+ op_req_q = &mrioc->req_qinfo[req_q_idx];
+
+ WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
+ mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
+ reply_qidx);
+ atomic_dec(&op_reply_q->pend_ios);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_op_reply++;
+
+ if (++reply_ci == op_reply_q->num_replies) {
+ reply_ci = 0;
+ exp_phase ^= 1;
+ }
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+#ifndef CONFIG_PREEMPT_RT
+ /*
+ * Exit the completion loop to avoid CPU lockup;
+ * ensure remaining completions happen from the threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ op_reply_q->enable_irq_poll = true;
+ break;
+ }
+#endif
+ } while (1);
+
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ op_reply_q->ci = reply_ci;
+ op_reply_q->ephase = exp_phase;
+
+ atomic_dec(&op_reply_q->in_use);
+ return num_op_reply;
+}
+
+/**
+ * mpi3mr_blk_mq_poll - Operational reply queue handler
+ * @shost: SCSI Host reference
+ * @queue_num: Request queue number (from the OS perspective, the
+ * hardware context number)
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already being processed or has no new
+ * entries, or the number of reply descriptors processed.
+ */
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct mpi3mr_ioc *mrioc;
+
+ mrioc = (struct mpi3mr_ioc *)shost->hostdata;
+
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
+ mrioc->unrecoverable))
+ return 0;
+
+ num_entries = mpi3mr_process_op_reply_q(mrioc,
+ &mrioc->op_reply_qinfo[queue_num]);
+
+ return num_entries;
+}
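+
+/*
+ * mpi3mr_blk_mq_poll() is wired up for the polled hardware contexts
+ * reserved through the poll_queues module parameter above; e.g.
+ * loading the driver with poll_queues=4 (an illustrative value)
+ * lets io_uring IOPOLL submissions complete without interrupts.
+ */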
+
+static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_admin_replies = 0, num_op_reply = 0;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+
+ if (!mrioc->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+
+ if (!midx)
+ num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
+
+ if (num_admin_replies || num_op_reply)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+#ifndef CONFIG_PREEMPT_RT
+
+static irqreturn_t mpi3mr_isr(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ int ret;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ /* Call primary ISR routine */
+ ret = mpi3mr_isr_primary(irq, privdata);
+
+ /*
+ * If more IOs are expected, schedule IRQ polling thread.
+ * Otherwise exit from ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
+ disable_irq_nosync(intr_info->os_irq);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mpi3mr_isr_poll - Reply queue polling routine
+ * @irq: IRQ
+ * @privdata: Interrupt info
+ *
+ * Poll for pending I/O completions in a loop until no pending
+ * I/Os are present or controller queue depth I/Os are processed.
+ *
+ * Return: IRQ_NONE or IRQ_HANDLED
+ */
+static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IOs completions */
+ do {
+ if (!mrioc->intr_enabled || mrioc->unrecoverable)
+ break;
+
+ if (!midx)
+ mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
+
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(intr_info->os_irq);
+
+ return IRQ_HANDLED;
+}
+
+#endif
+
+/**
+ * mpi3mr_request_irq - Request IRQ and register ISR
+ * @mrioc: Adapter instance reference
+ * @index: IRQ vector index
+ *
+ * Request a threaded IRQ with the primary ISR as the handler
+ * and the polling routine as the threaded handler.
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
+ int retval = 0;
+
+ intr_info->mrioc = mrioc;
+ intr_info->msix_index = index;
+ intr_info->op_reply_q = NULL;
+
+ snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
+ mrioc->driver_name, mrioc->id, index);
+
+#ifndef CONFIG_PREEMPT_RT
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
+ mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+ NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
+ if (retval) {
+ ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ intr_info->os_irq = pci_irq_vector(pdev, index);
+ return retval;
+}
+
+static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
+{
+ if (!mrioc->requested_poll_qcount)
+ return;
+
+ /* Reserved for Admin and Default Queue */
+ if (max_vectors > 2 &&
+ (mrioc->requested_poll_qcount < max_vectors - 2)) {
+ ioc_info(mrioc,
+ "enabled polled queues (%d) msix (%d)\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ } else {
+ ioc_info(mrioc,
+ "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ mrioc->requested_poll_qcount = 0;
+ }
+}
+
+/**
+ * mpi3mr_setup_isr - Setup ISR for the controller
+ * @mrioc: Adapter instance reference
+ * @setup_one: Request one IRQ or more
+ *
+ * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ int max_vectors, min_vec;
+ int retval;
+ int i;
+ struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+
+ if (mrioc->is_intr_info_set)
+ return 0;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (setup_one || reset_devices) {
+ max_vectors = 1;
+ retval = pci_alloc_irq_vectors(mrioc->pdev,
+ 1, max_vectors, irq_flags);
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else {
+ max_vectors =
+ min_t(int, mrioc->cpu_count + 1 +
+ mrioc->requested_poll_qcount, mrioc->msix_count);
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
+ ioc_info(mrioc,
+		    "MSI-X vectors supported: %d, number of cores: %d,",
+ mrioc->msix_count, mrioc->cpu_count);
+ ioc_info(mrioc,
+		    "MSI-X vectors requested: %d, poll queues: %d\n",
+ max_vectors, mrioc->requested_poll_qcount);
+
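+		/*
+		 * pre_vectors keeps MSI-X 0 (admin/default queue) and
+		 * post_vectors keeps the polled queue vectors out of the
+		 * managed affinity spread
+		 */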
+ desc.post_vectors = mrioc->requested_poll_qcount;
+ min_vec = desc.pre_vectors + desc.post_vectors;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ min_vec, max_vectors, irq_flags, &desc);
+
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ /*
+	 * If only one MSI-X vector is allocated, then MSI-X 0 is shared
+	 * between the admin queue and an operational queue
+ */
+ if (retval == min_vec)
+ mrioc->op_reply_q_offset = 0;
+ else if (retval != (max_vectors)) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ }
+
+ max_vectors = retval;
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
+ }
+
+	mrioc->intr_info = kcalloc(max_vectors, sizeof(struct mpi3mr_intr_info),
+	    GFP_KERNEL);
+ if (!mrioc->intr_info) {
+ retval = -ENOMEM;
+ pci_free_irq_vectors(mrioc->pdev);
+ goto out_failed;
+ }
+ for (i = 0; i < max_vectors; i++) {
+ retval = mpi3mr_request_irq(mrioc, i);
+ if (retval) {
+ mrioc->intr_info_count = i;
+ goto out_failed;
+ }
+ }
+ if (reset_devices || !setup_one)
+ mrioc->is_intr_info_set = true;
+ mrioc->intr_info_count = max_vectors;
+ mpi3mr_ioc_enable_intr(mrioc);
+ return 0;
+
+out_failed:
+ mpi3mr_cleanup_isr(mrioc);
+
+ return retval;
+}
+
+static const struct {
+ enum mpi3mr_iocstate value;
+ char *name;
+} mrioc_states[] = {
+ { MRIOC_STATE_READY, "ready" },
+ { MRIOC_STATE_FAULT, "fault" },
+ { MRIOC_STATE_RESET, "reset" },
+ { MRIOC_STATE_BECOMING_READY, "becoming ready" },
+ { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
+ { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
+};
+
+static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
+ if (mrioc_states[i].value == mrioc_state) {
+ name = mrioc_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset reason to name mapper structure */
+static const struct {
+ enum mpi3mr_reset_reason value;
+ char *name;
+} mpi3mr_reset_reason_codes[] = {
+ { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
+ { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
+ { MPI3MR_RESET_FROM_APP, "application invocation" },
+ { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
+ { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
+ { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
+ { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
+ { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
+ { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
+ { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
+ { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
+ { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
+ { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
+	{
+		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
+		"create reply queue timeout"
+	},
+	{
+		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
+		"create request queue timeout"
+	},
+ { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
+ { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
+ { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
+ { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
+ {
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER,
+ "component image activation timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
+ "get package version timeout"
+ },
+ { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
+ { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+ { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
+ { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
+};
+
+/**
+ * mpi3mr_reset_rc_name - get reset reason code name
+ * @reason_code: reset reason code value
+ *
+ * Map a reset reason code to a NULL terminated ASCII string
+ *
+ * Return: name corresponding to reset reason value or NULL.
+ */
+static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
+ if (mpi3mr_reset_reason_codes[i].value == reason_code) {
+ name = mpi3mr_reset_reason_codes[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset type to name mapper structure */
+static const struct {
+ u16 reset_type;
+ char *name;
+} mpi3mr_reset_types[] = {
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
+};
+
+/**
+ * mpi3mr_reset_type_name - get reset type name
+ * @reset_type: reset type value
+ *
+ * Map a reset type to a NULL terminated ASCII string
+ *
+ * Return: name corresponding to reset type value or NULL.
+ */
+static const char *mpi3mr_reset_type_name(u16 reset_type)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
+ if (mpi3mr_reset_types[i].reset_type == reset_type) {
+ name = mpi3mr_reset_types[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/**
+ * mpi3mr_print_fault_info - Display fault information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller fault information if there is a
+ * controller fault.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, code, code1, code2, code3;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ code = readl(&mrioc->sysif_regs->fault);
+ code1 = readl(&mrioc->sysif_regs->fault_info[0]);
+ code2 = readl(&mrioc->sysif_regs->fault_info[1]);
+ code3 = readl(&mrioc->sysif_regs->fault_info[2]);
+
+ ioc_info(mrioc,
+ "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
+ code, code1, code2, code3);
+ }
+}
+
+/**
+ * mpi3mr_get_iocstate - Get IOC State
+ * @mrioc: Adapter instance reference
+ *
+ * Return a proper IOC state enum based on the IOC status, the
+ * IOC configuration and the unrecoverable state of the controller.
+ *
+ * Return: Current IOC state.
+ */
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, ioc_config;
+ u8 ready, enabled;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->unrecoverable)
+ return MRIOC_STATE_UNRECOVERABLE;
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
+ return MRIOC_STATE_FAULT;
+
+ ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
+ enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
+
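+	/*
+	 * ready/enabled decode: 1/1 ready, 0/0 reset,
+	 * 0/1 becoming ready, 1/0 reset requested
+	 */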
+ if (ready && enabled)
+ return MRIOC_STATE_READY;
+ if ((!ready) && (!enabled))
+ return MRIOC_STATE_RESET;
+ if ((!ready) && (enabled))
+ return MRIOC_STATE_BECOMING_READY;
+
+ return MRIOC_STATE_RESET_REQUESTED;
+}
+
+/**
+ * mpi3mr_clear_reset_history - clear reset history
+ * @mrioc: Adapter instance reference
+ *
+ * Write the reset history bit in IOC status to clear the bit,
+ * if it is already set.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ writel(ioc_status, &mrioc->sysif_regs->ioc_status);
+}
+
+/**
+ * mpi3mr_issue_and_process_mur - Message unit Reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * Issue a message unit reset to the controller and wait for it
+ * to complete.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ u32 ioc_config, timeout, ioc_status;
+ int retval = -1;
+
+	ioc_info(mrioc, "Issuing Message unit Reset (MUR)\n");
+	if (mrioc->unrecoverable) {
+		ioc_info(mrioc, "IOC is unrecoverable, MUR not issued\n");
+ return retval;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
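+	/* the reset ack timeout is in seconds; poll in 100ms slices */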
+ timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
+ mpi3mr_clear_reset_history(mrioc);
+ break;
+ }
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ retval = 0;
+
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status, ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
+ * during reset/resume
+ * @mrioc: Adapter instance reference
+ *
+ * Return zero if the new IOCFacts parameters value is compatible with
+ * older values else return -EPERM
+ */
+static int
+mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+{
+ void *removepend_bitmap;
+
+ if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+ ioc_err(mrioc,
+ "cannot increase reply size from %d to %d\n",
+ mrioc->reply_sz, mrioc->facts.reply_sz);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational reply queues from %d to %d\n",
+ mrioc->num_op_reply_q,
+ mrioc->facts.max_op_reply_q);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational request queues from %d to %d\n",
+ mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
+ return -EPERM;
+ }
+
+ if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ ioc_err(mrioc,
+ "critical error: multipath capability is enabled at the\n"
+ "\tcontroller while sas transport support is enabled at the\n"
+ "\tdriver, please reboot the system or reload the driver\n");
+
+ if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
+ removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
+ GFP_KERNEL);
+ if (!removepend_bitmap) {
+ ioc_err(mrioc,
+ "failed to increase removepend_bitmap bits from %d to %d\n",
+ mrioc->dev_handle_bitmap_bits,
+ mrioc->facts.max_devhandle);
+ return -EPERM;
+ }
+ bitmap_free(mrioc->removepend_bitmap);
+ mrioc->removepend_bitmap = removepend_bitmap;
+ ioc_info(mrioc,
+ "increased bits of dev_handle_bitmap from %d to %d\n",
+ mrioc->dev_handle_bitmap_bits,
+ mrioc->facts.max_devhandle);
+ mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_bring_ioc_ready - Bring controller to ready state
+ * @mrioc: Adapter instance reference
+ *
+ * Set Enable IOC bit in IOC configuration register and wait for
+ * the controller to become ready.
+ *
+ * Return: 0 on success, appropriate error on failure.
+ */
+static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status, timeout, host_diagnostic;
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ ioc_status, ioc_config, base_info);
+
+	/* the timeout value is in units of 2 seconds; convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "controller is in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present while waiting to reset\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "controller is in %s state after waiting to reset\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (retval)
+ ioc_err(mrioc,
+ "message unit reset failed with error %d current state %s\n",
+ retval, mpi3mr_iocstate_name(ioc_state));
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ mpi3mr_print_fault_info(mrioc);
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present at the bringup\n");
+ goto out_device_not_present;
+ }
+ msleep(100);
+ } while (--timeout);
+ }
+ mpi3mr_print_fault_info(mrioc);
+ ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "soft reset failed with error %d\n", retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ ioc_err(mrioc,
+ "cannot bring controller to reset state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ goto out_failed;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup admin queues: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "bringing controller to ready state\n");
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc,
+ "successfully transitioned to %s state\n",
+ mpi3mr_iocstate_name(ioc_state));
+ return 0;
+ }
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present at the bringup\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+ msleep(100);
+ } while (--timeout);
+
+out_failed:
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_err(mrioc,
+ "failed to bring to ready state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+out_device_not_present:
+ return retval;
+}
+
+/**
+ * mpi3mr_soft_reset_success - Check whether soft reset succeeded
+ * @ioc_status: IOC status register value
+ * @ioc_config: IOC config register value
+ *
+ * Check whether the soft reset is successful or not based on
+ * IOC status and IOC config register values.
+ *
+ * Return: True when the soft reset is successful, false otherwise.
+ */
+static inline bool
+mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
+{
+	return !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+		 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC));
+}
+
+/**
+ * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
+ * @mrioc: Adapter reference
+ * @ioc_status: IOC status register value
+ *
+ * Check whether the controller hit the diag reset fault code.
+ *
+ * Return: True when there is a diag fault, false otherwise.
+ */
+static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
+ u32 ioc_status)
+{
+ u32 fault;
+
+ if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
+ return false;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * mpi3mr_set_diagsave - Set diag save bit for snapdump
+ * @mrioc: Adapter reference
+ *
+ * Set diag save bit in IOC configuration register to enable
+ * snapdump.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+}
+
+/**
+ * mpi3mr_issue_reset - Issue reset to the controller
+ * @mrioc: Adapter reference
+ * @reset_type: Reset type
+ * @reset_reason: Reset reason code
+ *
+ * Unlock the host diagnostic register and write the specific
+ * reset type to it, then wait for reset acknowledgment from
+ * the controller. If unlocking fails, retry for a predefined
+ * number of times.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
+ u32 reset_reason)
+{
+ int retval = -1;
+ u8 unlock_retry_count = 0;
+ u32 host_diagnostic, ioc_status, ioc_config;
+ u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
+
+ if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
+ (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
+ return retval;
+ if (mrioc->unrecoverable)
+ return retval;
+ if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
+ retval = 0;
+ return retval;
+ }
+
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+
+ mpi3mr_clear_reset_history(mrioc);
+ do {
+ ioc_info(mrioc,
+ "Write magic sequence to unlock host diag register (retry=%d)\n",
+ ++unlock_retry_count);
+ if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
+ ioc_err(mrioc,
+ "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ host_diagnostic);
+ mrioc->unrecoverable = 1;
+ return retval;
+ }
+
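+		/*
+		 * flush any partial key sequence, then write the six magic
+		 * keys needed to set the diag write enable bit
+		 */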
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
+ &mrioc->sysif_regs->write_sequence);
+ usleep_range(1000, 1100);
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ ioc_info(mrioc,
+ "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
+ unlock_retry_count, host_diagnostic);
+ } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
+
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ writel(host_diagnostic | reset_type,
+ &mrioc->sysif_regs->host_diagnostic);
+ switch (reset_type) {
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ && mpi3mr_soft_reset_success(ioc_status, ioc_config)
+ ) {
+ mpi3mr_clear_reset_history(mrioc);
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ break;
+ default:
+ break;
+ }
+
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_info(mrioc,
+	    "ioc_status/ioc_config after %s reset is (0x%x)/(0x%x)\n",
+	    (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+ if (retval)
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_admin_request_post - Post request to admin queue
+ * @mrioc: Adapter reference
+ * @admin_req: MPI3 request
+ * @admin_req_sz: Request size
+ * @ignore_reset: Ignore reset in process
+ *
+ * Post the MPI3 request into the admin request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset)
+{
+ u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
+ int retval = 0;
+ unsigned long flags;
+ u8 *areq_entry;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&mrioc->admin_req_lock, flags);
+ areq_pi = mrioc->admin_req_pi;
+ areq_ci = mrioc->admin_req_ci;
+ max_entries = mrioc->num_admin_req;
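+	/*
+	 * the queue is full when the PI is one slot behind the CI; one
+	 * entry always stays unused to distinguish full from empty
+	 */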
+ if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
+ (areq_pi == (max_entries - 1)))) {
+ ioc_err(mrioc, "AdminReqQ full condition detected\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ if (!ignore_reset && mrioc->reset_in_progress) {
+ ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ areq_entry = (u8 *)mrioc->admin_req_base +
+ (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
+
+ if (++areq_pi == max_entries)
+ areq_pi = 0;
+ mrioc->admin_req_pi = areq_pi;
+
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+
+out:
+ spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
+
+ return retval;
+}
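+
+/*
+ * Illustrative usage sketch (not part of the driver logic): internal
+ * commands post a request frame and wait on init_cmds.done, as the
+ * init-time helpers below do, e.g.
+ *
+ *	init_completion(&mrioc->init_cmds.done);
+ *	retval = mpi3mr_admin_request_post(mrioc, &req, sizeof(req), 0);
+ *	if (!retval)
+ *		wait_for_completion_timeout(&mrioc->init_cmds.done,
+ *		    MPI3MR_INTADMCMD_TIMEOUT * HZ);
+ *
+ * here "req" stands for any MPI3 request frame prepared by the caller.
+ */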
+
+/**
+ * mpi3mr_free_op_req_q_segments - free request memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational request queue index
+ *
+ * Free memory segments allocated for operational request queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->req_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+ if (mrioc->req_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->req_qinfo[q_idx].q_segment_list,
+ mrioc->req_qinfo[q_idx].q_segment_list_dma);
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->req_qinfo[q_idx].segment_qd *
+ mrioc->facts.op_req_sz;
+
+ for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+ kfree(mrioc->req_qinfo[q_idx].q_segments);
+ mrioc->req_qinfo[q_idx].q_segments = NULL;
+ mrioc->req_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_free_op_reply_q_segments - free reply memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational reply queue index
+ *
+ * Free memory segments allocated for operational reply queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->op_reply_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+ if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
+ mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->op_reply_qinfo[q_idx].segment_qd *
+ mrioc->op_reply_desc_sz;
+
+ for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+
+ kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
+ mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
+ mrioc->op_reply_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_delete_op_reply_q - delete operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Delete the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_delete_reply_queue_request delq_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (!reply_qid) {
+ retval = -1;
+		ioc_err(mrioc, "Issue DelRepQ: called with invalid reply queue ID\n");
+ goto out;
+ }
+
+	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE)
+		mrioc->default_qcount--;
+	else
+		mrioc->active_poll_qcount--;
+
+ memset(&delq_req, 0, sizeof(delq_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
+ delq_req.queue_id = cpu_to_le16(reply_qid);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
+ 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "delete reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ mrioc->intr_info[midx].op_reply_q = NULL;
+
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Allocate segmented memory pools for operational reply
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_reply_q->segment_qd =
+ MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
+
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+
+ op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_reply_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
+ } else {
+ op_reply_q->segment_qd = op_reply_q->num_replies;
+ size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
+ }
+
+ op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
+ op_reply_q->segment_qd);
+
+ op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_reply_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
+ * @mrioc: Adapter instance reference
+ * @qidx: request queue index
+ *
+ * Allocate segmented memory pools for operational request
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_req_q->segment_qd =
+ MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
+
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+
+ op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_req_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
+
+ } else {
+ op_req_q->segment_qd = op_req_q->num_requests;
+ size = op_req_q->num_requests * mrioc->facts.op_req_sz;
+ }
+
+ op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
+ op_req_q->segment_qd);
+
+ op_req_q->q_segments = kcalloc(op_req_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_req_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_create_op_reply_q - create operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Create the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_create_reply_queue_request create_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
+ reply_qid);
+
+ return retval;
+ }
+
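+	/* queue IDs are 1 based; qid 0 denotes an unused queue */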
+ reply_qid = qidx + 1;
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
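+	/* revision 0 controllers use the 4K reply queue depth */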
+ if (!mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ op_reply_q->ci = 0;
+ op_reply_q->ephase = 1;
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
+
+ if (!op_reply_q->q_segments) {
+ retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
+ if (retval) {
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
+ create_req.queue_id = cpu_to_le16(reply_qid);
+
+ if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
+ op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
+ else
+ op_reply_q->qtype = MPI3MR_POLL_QUEUE;
+
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
+ create_req.flags =
+ MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index =
+ cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ } else {
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
+ ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
+ reply_qid, midx);
+ if (!mrioc->active_poll_qcount)
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev,
+ mrioc->intr_info_count - 1));
+ }
+
+ if (mrioc->enable_segqueue) {
+ create_req.flags |=
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segments[0].segment_dma);
+
+ create_req.size = cpu_to_le16(op_reply_q->num_replies);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "create reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_reply_q->qid = reply_qid;
+ if (midx < mrioc->intr_info_count)
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE)
+		mrioc->default_qcount++;
+	else
+		mrioc->active_poll_qcount++;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_req_q - create operational request queue
+ * @mrioc: Adapter instance reference
+ * @idx: operational request queue index
+ * @reply_qid: Reply queue ID
+ *
+ * Create the operational request queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
+ u16 reply_qid)
+{
+ struct mpi3_create_request_queue_request create_req;
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
+ int retval = 0;
+ u16 req_qid = 0;
+
+ req_qid = op_req_q->qid;
+
+ if (req_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
+ req_qid);
+
+ return retval;
+ }
+ req_qid = idx + 1;
+
+ op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
+ op_req_q->ci = 0;
+ op_req_q->pi = 0;
+ op_req_q->reply_qid = reply_qid;
+ spin_lock_init(&op_req_q->q_lock);
+
+ if (!op_req_q->q_segments) {
+ retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
+ if (retval) {
+ mpi3mr_free_op_req_q_segments(mrioc, idx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
+ create_req.queue_id = cpu_to_le16(req_qid);
+ if (mrioc->enable_segqueue) {
+ create_req.flags =
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segments[0].segment_dma);
+ create_req.reply_queue_id = cpu_to_le16(reply_qid);
+ create_req.size = cpu_to_le16(op_req_q->num_requests);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "create request queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_req_q->qid = req_qid;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_queues - create operational queue pairs
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for operational queue meta data and call
+ * create request and reply queue functions.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+
+ num_queues = min_t(int, mrioc->facts.max_op_reply_q,
+ mrioc->facts.max_op_req_q);
+
+ msix_count_op_q =
+ mrioc->intr_info_count - mrioc->op_reply_q_offset;
+ if (!mrioc->num_queues)
+ mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
+ /*
+	 * During a reset, set num_queues to the number of queues
+	 * that were in use before the reset.
+ */
+ num_queues = mrioc->num_op_reply_q ?
+ mrioc->num_op_reply_q : mrioc->num_queues;
+ ioc_info(mrioc, "trying to create %d operational queue pairs\n",
+ num_queues);
+
+ if (!mrioc->req_qinfo) {
+ mrioc->req_qinfo = kcalloc(num_queues,
+ sizeof(struct op_req_qinfo), GFP_KERNEL);
+ if (!mrioc->req_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+
+		mrioc->op_reply_qinfo = kcalloc(num_queues,
+		    sizeof(struct op_reply_qinfo), GFP_KERNEL);
+ if (!mrioc->op_reply_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
+ if (mrioc->enable_segqueue)
+ ioc_info(mrioc,
+ "allocating operational queues through segmented queues\n");
+
+ for (i = 0; i < num_queues; i++) {
+ if (mpi3mr_create_op_reply_q(mrioc, i)) {
+ ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
+ break;
+ }
+ if (mpi3mr_create_op_req_q(mrioc, i,
+ mrioc->op_reply_qinfo[i].qid)) {
+ ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
+ mpi3mr_delete_op_reply_q(mrioc, i);
+ break;
+ }
+ }
+
+ if (i == 0) {
+		/* not even one queue was created successfully */
+ retval = -1;
+ goto out_failed;
+ }
+ mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
+ ioc_info(mrioc,
+	    "successfully created %d operational queue pairs (default/polled) = (%d/%d)\n",
+ mrioc->num_op_reply_q, mrioc->default_qcount,
+ mrioc->active_poll_qcount);
+
+ return retval;
+out_failed:
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_op_request_post - Post request to operational queue
+ * @mrioc: Adapter reference
+ * @op_req_q: Operational request queue info
+ * @req: MPI3 request
+ *
+ * Post the MPI3 request into the operational request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *op_req_q, u8 *req)
+{
+ u16 pi = 0, max_entries, reply_qidx = 0, midx;
+ int retval = 0;
+ unsigned long flags;
+ u8 *req_entry;
+ void *segment_base_addr;
+ u16 req_sz = mrioc->facts.op_req_sz;
+ struct segments *segments = op_req_q->q_segments;
+
+ reply_qidx = op_req_q->reply_qid - 1;
+
+ if (mrioc->unrecoverable)
+ return -EFAULT;
+
+ spin_lock_irqsave(&op_req_q->q_lock, flags);
+ pi = op_req_q->pi;
+ max_entries = op_req_q->num_requests;
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
+ reply_qidx, mrioc->op_reply_q_offset);
+ mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "OpReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
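+	/*
+	 * locate the request slot: pi / segment_qd selects the segment,
+	 * pi % segment_qd selects the frame within that segment
+	 */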
+ segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
+ req_entry = (u8 *)segment_base_addr +
+ ((pi % op_req_q->segment_qd) * req_sz);
+
+ memset(req_entry, 0, req_sz);
+ memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
+
+ if (++pi == max_entries)
+ pi = 0;
+ op_req_q->pi = pi;
+
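+	/*
+	 * if the pending I/O count on the reply queue crosses the trigger
+	 * threshold, hand further completions to the IRQ poll thread
+	 */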
+#ifndef CONFIG_PREEMPT_RT
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+ atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
+
+ writel(op_req_q->pi,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
+
+out:
+ spin_unlock_irqrestore(&op_req_q->q_lock, flags);
+ return retval;
+}
+
+/**
+ * mpi3mr_check_rh_fault_ioc - check reset history and fault
+ * controller
+ * @mrioc: Adapter instance reference
+ * @reason_code: reason code for the fault.
+ *
+ * This routine will save the snapdump and fault the controller with
+ * the given reason code if it is not already in the fault state or
+ * asynchronously reset. This is used to handle initialization time
+ * faults/resets/timeouts, as in those cases an immediate soft reset
+ * invocation is not required.
+ *
+ * Return: None.
+ */
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
+{
+ u32 ioc_status, host_diagnostic, timeout;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "controller is unrecoverable\n");
+ return;
+ }
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present\n");
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ return;
+ }
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ reason_code);
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+}
+
+/**
+ * mpi3mr_sync_timestamp - Issue time stamp sync request
+ * @mrioc: Adapter reference
+ *
+ * Issue an IO unit control MPI request to synchronize the
+ * firmware timestamp with the host time.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
+{
+ ktime_t current_time;
+ struct mpi3_iounit_control_request iou_ctrl;
+ int retval = 0;
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
+ current_time = ktime_get_real();
+ iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
+ sizeof(iou_ctrl), 0);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
+ mrioc->init_cmds.is_waiting = 0;
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_print_pkg_ver - display controller fw package version
+ * @mrioc: Adapter reference
+ *
+ * Retrieve firmware package version from the component image
+ * header of the controller flash and display it.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ci_upload_request ci_upload;
+ int retval = -1;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ struct mpi3_ci_manifest_mpi *manifest;
+ u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&ci_upload, 0, sizeof(ci_upload));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "sending get package version failed due to command in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
+ ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
+ ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
+ ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
+ ci_upload.segment_size = cpu_to_le32(data_len);
+
+ mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
+ data_dma);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
+ sizeof(ci_upload), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting get package version failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "get package version timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ == MPI3_IOCSTATUS_SUCCESS) {
+ manifest = (struct mpi3_ci_manifest_mpi *) data;
+ if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
+ ioc_info(mrioc,
+ "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
+ manifest->package_version.gen_major,
+ manifest->package_version.gen_minor,
+ manifest->package_version.phase_major,
+ manifest->package_version.phase_minor,
+ manifest->package_version.customer_id,
+ manifest->package_version.build_num);
+ }
+ }
+ retval = 0;
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data,
+ data_dma);
+ return retval;
+}
+
+/**
+ * mpi3mr_watchdog_work - watchdog thread to monitor faults
+ * @work: work struct
+ *
+ * Watchdog work executed periodically (at a 1 second interval)
+ * to monitor for firmware faults and to issue a periodic
+ * timestamp sync to the firmware.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_watchdog_work(struct work_struct *work)
+{
+ struct mpi3mr_ioc *mrioc =
+ container_of(work, struct mpi3mr_ioc, watchdog_work.work);
+ unsigned long flags;
+ enum mpi3mr_iocstate ioc_state;
+ u32 fault, host_diagnostic, ioc_status;
+ u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
+ ioc_err(mrioc, "watchdog could not detect the controller\n");
+ mrioc->unrecoverable = 1;
+ }
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc,
+ "flush pending commands for unrecoverable controller\n");
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ return;
+ }
+
+ if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ mrioc->ts_update_counter = 0;
+ mpi3mr_sync_timestamp(mrioc);
+ }
+
+ if ((mrioc->prepare_for_reset) &&
+ ((mrioc->prepare_for_reset_timeout_counter++) >=
+ MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
+ return;
+ }
+
+	/* check for the fault state every second and issue a soft reset */
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_FAULT)
+ goto schedule_work;
+
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "diag save in progress\n");
+ }
+ if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ }
+
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
+ case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+ ioc_warn(mrioc,
+ "controller requires system power cycle, marking controller as unrecoverable\n");
+ mrioc->unrecoverable = 1;
+ goto schedule_work;
+ case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+ goto schedule_work;
+ case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ break;
+ default:
+ break;
+ }
+ mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
+ return;
+
+schedule_work:
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ return;
+}
+
+/**
+ * mpi3mr_start_watchdog - Start watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Create and start the watchdog thread to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ if (mrioc->watchdog_work_q)
+ return;
+
+ INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
+ snprintf(mrioc->watchdog_work_q_name,
+ sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
+ mrioc->id);
+ mrioc->watchdog_work_q =
+ create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ if (!mrioc->watchdog_work_q) {
+ ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
+ return;
+ }
+
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+}
+
+/**
+ * mpi3mr_stop_watchdog - Stop watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Stop the watchdog thread created to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ wq = mrioc->watchdog_work_q;
+ mrioc->watchdog_work_q = NULL;
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpi3mr_setup_admin_qpair - Setup admin queue pair
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for admin queue pair if required and register
+ * the admin queue with the controller.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 num_admin_entries = 0;
+
+ mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
+ mrioc->num_admin_req = mrioc->admin_req_q_sz /
+ MPI3MR_ADMIN_REQ_FRAME_SZ;
+ mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
+ mrioc->admin_req_base = NULL;
+
+ mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
+ mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
+ MPI3MR_ADMIN_REPLY_FRAME_SZ;
+ mrioc->admin_reply_ci = 0;
+ mrioc->admin_reply_ephase = 1;
+ mrioc->admin_reply_base = NULL;
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
+
+ if (!mrioc->admin_req_base) {
+ mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
+
+ if (!mrioc->admin_req_base) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
+ GFP_KERNEL);
+
+ if (!mrioc->admin_reply_base) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
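+	/*
+	 * Both admin queue depths are programmed in one register write:
+	 * the reply queue depth goes in the upper 16 bits and the request
+	 * queue depth in the lower 16 bits of num_entries.
+	 */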
+ num_admin_entries = (mrioc->num_admin_replies << 16) |
+ (mrioc->num_admin_req);
+ writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
+ mpi3mr_writeq(mrioc->admin_req_dma,
+ &mrioc->sysif_regs->admin_request_queue_address);
+ mpi3mr_writeq(mrioc->admin_reply_dma,
+ &mrioc->sysif_regs->admin_reply_queue_address);
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+ writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ return retval;
+
+out_failed:
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_issue_iocfacts - Send IOC Facts
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Issue IOC Facts MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ struct mpi3_ioc_facts_request iocfacts_req;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*facts_data);
+ int retval = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+
+ if (!data) {
+ retval = -1;
+ goto out;
+ }
+
+ memset(&iocfacts_req, 0, sizeof(iocfacts_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
+
+ mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
+ data_dma);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
+ sizeof(iocfacts_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "ioc_facts timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ memcpy(facts_data, (u8 *)data, data_len);
+ mpi3mr_process_factsdata(mrioc, facts_data);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_reset_dma_mask - Check and set the DMA mask
+ * @mrioc: Adapter instance reference
+ *
+ * Check whether the new DMA mask requested through IOCFacts by
+ * firmware needs to be set; if so, set it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ int r;
+ u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
+
+ if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
+ return 0;
+
+ ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
+ mrioc->dma_mask, facts_dma_mask);
+
+ r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
+ if (r) {
+ ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
+ facts_dma_mask, r);
+ return r;
+ }
+ mrioc->dma_mask = facts_dma_mask;
+ return r;
+}
+
+/**
+ * mpi3mr_process_factsdata - Process IOC facts data
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Convert IOC facts data into CPU endianness and cache it in
+ * the driver.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ u32 ioc_config, req_sz, facts_flags;
+
+ if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
+ (sizeof(*facts_data) / 4)) {
+ ioc_warn(mrioc,
+ "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
+ sizeof(*facts_data),
+ le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
+ }
+
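+	/*
+	 * The operational request/reply entry sizes are encoded as log2
+	 * values in the IOC configuration register, hence the shifts below.
+	 */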
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
+ if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
+ ioc_err(mrioc,
+ "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
+ req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
+ }
+
+ memset(&mrioc->facts, 0, sizeof(mrioc->facts));
+
+ facts_flags = le32_to_cpu(facts_data->flags);
+ mrioc->facts.op_req_sz = req_sz;
+ mrioc->op_reply_desc_sz = 1 << ((ioc_config &
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
+
+ mrioc->facts.ioc_num = facts_data->ioc_number;
+ mrioc->facts.who_init = facts_data->who_init;
+ mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
+ mrioc->facts.personality = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.protocol_flags = facts_data->protocol_flags;
+ mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
+ mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
+ mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
+ mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
+ mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
+ mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
+ mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
+ mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
+ mrioc->facts.max_pcie_switches =
+ le16_to_cpu(facts_data->max_pcie_switches);
+ mrioc->facts.max_sasexpanders =
+ le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_sasinitiators =
+ le16_to_cpu(facts_data->max_sas_initiators);
+ mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
+ mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
+ mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
+ mrioc->facts.max_op_req_q =
+ le16_to_cpu(facts_data->max_operational_request_queues);
+ mrioc->facts.max_op_reply_q =
+ le16_to_cpu(facts_data->max_operational_reply_queues);
+ mrioc->facts.ioc_capabilities =
+ le32_to_cpu(facts_data->ioc_capabilities);
+ mrioc->facts.fw_ver.build_num =
+ le16_to_cpu(facts_data->fw_version.build_num);
+ mrioc->facts.fw_ver.cust_id =
+ le16_to_cpu(facts_data->fw_version.customer_id);
+ mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
+ mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
+ mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
+ mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
+ mrioc->msix_count = min_t(int, mrioc->msix_count,
+ mrioc->facts.max_msix_vectors);
+ mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
+ mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
+ mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
+ mrioc->facts.shutdown_timeout =
+ le16_to_cpu(facts_data->shutdown_timeout);
+
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+	/* store in 512-byte block count; the facts value is in 4 KiB units (4096 / 512 = 2 * 4) */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1MB + 1K to disable throttle */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
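+	/* the high/low watermarks are reported in MiB; convert to 512-byte blocks (1 MiB = 2 * 1024 blocks) */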
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
+ ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
+ mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
+ mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
+ ioc_info(mrioc,
+ "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
+ mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
+ mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
+ ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
+ mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
+ mrioc->facts.sge_mod_shift);
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ mrioc->facts.dma_mask, (facts_flags &
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
+}
+
+/**
+ * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate and initialize the reply free buffers, sense
+ * buffers, reply free queue and sense buffer queue.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+
+ if (mrioc->init_cmds.reply)
+ return retval;
+
+ mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->init_cmds.reply)
+ goto out_failed;
+
+ mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->bsg_cmds.reply)
+ goto out_failed;
+
+ mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->transport_cmds.reply)
+ goto out_failed;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->dev_rmhs_cmds[i].reply)
+ goto out_failed;
+ }
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->host_tm_cmds.reply)
+ goto out_failed;
+
+ mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_cmds.reply)
+ goto out_failed;
+
+ mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_abort_cmd.reply)
+ goto out_failed;
+
+ mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
+ mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
+ GFP_KERNEL);
+ if (!mrioc->removepend_bitmap)
+ goto out_failed;
+
+ mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
+ if (!mrioc->devrem_bitmap)
+ goto out_failed;
+
+ mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds_bitmap)
+ goto out_failed;
+
+ mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
+ mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
+ mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
+ mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
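+	/*
+	 * The queue depths are one greater than the buffer counts: the
+	 * extra slot is written as a zero terminator when the queues are
+	 * initialized, so a fully populated circular queue never looks empty.
+	 */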
+
+ /* reply buffer pool, 16 byte align */
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
+ mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->reply_buf_pool) {
+ ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
+ &mrioc->reply_buf_dma);
+ if (!mrioc->reply_buf)
+ goto out_failed;
+
+ mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
+
+ /* reply free queue, 8 byte align */
+ sz = mrioc->reply_free_qsz * 8;
+ mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->reply_free_q_pool) {
+ ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
+ GFP_KERNEL, &mrioc->reply_free_q_dma);
+ if (!mrioc->reply_free_q)
+ goto out_failed;
+
+ /* sense buffer pool, 4 byte align */
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
+ mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
+ &mrioc->pdev->dev, sz, 4, 0);
+ if (!mrioc->sense_buf_pool) {
+ ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
+ &mrioc->sense_buf_dma);
+ if (!mrioc->sense_buf)
+ goto out_failed;
+
+ /* sense buffer queue, 8 byte align */
+ sz = mrioc->sense_buf_q_sz * 8;
+ mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->sense_buf_q_pool) {
+ ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
+ GFP_KERNEL, &mrioc->sense_buf_q_dma);
+ if (!mrioc->sense_buf_q)
+ goto out_failed;
+
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_initialize_reply_sbuf_queues - initialize reply and sense
+ * buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Helper function to initialize reply and sense buffers along
+ * with some debug prints.
+ *
+ * Return: None.
+ */
+static void mpi3mr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
+{
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
+ ioc_info(mrioc,
+ "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
+ (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
+ sz = mrioc->reply_free_qsz * 8;
+ ioc_info(mrioc,
+ "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
+ (unsigned long long)mrioc->reply_free_q_dma);
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
+ ioc_info(mrioc,
+ "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
+ (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
+ sz = mrioc->sense_buf_q_sz * 8;
+ ioc_info(mrioc,
+ "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
+ (unsigned long long)mrioc->sense_buf_q_dma);
+
+ /* initialize Reply buffer Queue */
+ for (i = 0, phy_addr = mrioc->reply_buf_dma;
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
+ mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
+ mrioc->reply_free_q[i] = cpu_to_le64(0);
+
+ /* initialize Sense Buffer Queue */
+ for (i = 0, phy_addr = mrioc->sense_buf_dma;
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
+ mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
+ mrioc->sense_buf_q[i] = cpu_to_le64(0);
+}
+
+/**
+ * mpi3mr_issue_iocinit - Send IOC Init
+ * @mrioc: Adapter instance reference
+ *
+ * Issue IOC Init MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ioc_init_request iocinit_req;
+ struct mpi3_driver_info_layout *drv_info;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*drv_info);
+ int retval = 0;
+ ktime_t current_time;
+
+ drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!drv_info) {
+ retval = -1;
+ goto out;
+ }
+	mpi3mr_initialize_reply_sbuf_queues(mrioc);
+
+ drv_info->information_length = cpu_to_le32(data_len);
+ strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
+ strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
+ strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
+ strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
+ strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
+ strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
+ sizeof(drv_info->driver_release_date));
+ drv_info->driver_capabilities = 0;
+ memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
+ sizeof(mrioc->driver_info));
+
+ memset(&iocinit_req, 0, sizeof(iocinit_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
+ iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
+ iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
+ iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
+ iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
+ iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
+ iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
+ iocinit_req.reply_free_queue_address =
+ cpu_to_le64(mrioc->reply_free_q_dma);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
+ iocinit_req.sense_buffer_free_queue_depth =
+ cpu_to_le16(mrioc->sense_buf_q_sz);
+ iocinit_req.sense_buffer_free_queue_address =
+ cpu_to_le64(mrioc->sense_buf_q_dma);
+ iocinit_req.driver_information_address = cpu_to_le64(data_dma);
+
+ current_time = ktime_get_real();
+ iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
+ sizeof(iocinit_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
+ ioc_err(mrioc, "ioc_init timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
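+	/*
+	 * Publish the initial producer indices so the firmware sees every
+	 * reply buffer and sense buffer as available for use.
+	 */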
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (drv_info)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
+ data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_unmask_events - Unmask events in event mask bitmap
+ * @mrioc: Adapter instance reference
+ * @event: MPI event ID
+ *
+ * Unmask the specific event by clearing its bit in the event_mask
+ * bitmap.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
+{
+ u32 desired_event;
+ u8 word;
+
+ if (event >= 128)
+ return;
+
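+	/* the 128 event bits are spread across four 32-bit mask words */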
+ desired_event = (1 << (event % 32));
+ word = event / 32;
+
+ mrioc->event_masks[word] &= ~desired_event;
+}
+
+/**
+ * mpi3mr_issue_event_notification - Send event notification
+ * @mrioc: Adapter instance reference
+ *
+ * Issue event notification MPI request through admin queue and
+ * wait for the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_event_notification_request evtnotify_req;
+ int retval = 0;
+ u8 i;
+
+ memset(&evtnotify_req, 0, sizeof(evtnotify_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ evtnotify_req.event_masks[i] =
+ cpu_to_le32(mrioc->event_masks[i]);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
+ sizeof(evtnotify_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "event notification timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_event_ack - Process event acknowledgment
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ *
+ * Send event acknowledgment through admin queue and wait for
+ * it to complete.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_chain_bufs - Allocate chain buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate chain buffers and set a bitmap to indicate free
+ * chain buffers. Chain buffers are used to pass the SGE
+ * information along with MPI3 SCSI IO requests for host I/O.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ u16 num_chains;
+
+ if (mrioc->chain_sgl_list)
+ return retval;
+
+ num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
+
+ if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION))
+ num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
+
+ mrioc->chain_buf_count = num_chains;
+ sz = sizeof(struct chain_element) * num_chains;
+ mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->chain_sgl_list)
+ goto out_failed;
+
+ sz = MPI3MR_PAGE_SIZE_4K;
+ mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->chain_buf_pool) {
+ ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ mrioc->chain_sgl_list[i].addr =
+ dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
+ &mrioc->chain_sgl_list[i].dma_addr);
+
+ if (!mrioc->chain_sgl_list[i].addr)
+ goto out_failed;
+ }
+ mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
+ if (!mrioc->chain_bitmap)
+ goto out_failed;
+ return retval;
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_port_enable_complete - Mark port enable complete
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Callback for the asynchronous port enable request; sets the
+ * driver command state to indicate the port enable request is complete.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ drv_cmd->callback = NULL;
+ mrioc->scan_started = 0;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ else
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+}
+
+/**
+ * mpi3mr_issue_port_enable - Issue Port Enable
+ * @mrioc: Adapter instance reference
+ * @async: Flag to wait for completion or not
+ *
+ * Issue Port Enable MPI request through the admin queue and, if
+ * the async flag is not set, wait for the completion of the port
+ * enable or a timeout.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
+{
+ struct mpi3_port_enable_request pe_req;
+ int retval = 0;
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
+ memset(&pe_req, 0, sizeof(pe_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ if (async) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
+ } else {
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ init_completion(&mrioc->init_cmds.done);
+ }
+ pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
+
+ retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
+ goto out_unlock;
+ }
+ if (async) {
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "port enable timed out\n");
+ retval = -1;
+ mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/* Protocol type to name mapper structure */
+static const struct {
+ u8 protocol;
+ char *name;
+} mpi3mr_protocols[] = {
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
+ { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
+};
+
+/* Capability to name mapper structure */
+static const struct {
+ u32 capability;
+ char *name;
+} mpi3mr_capabilities[] = {
+ { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
+};
+
+/**
+ * mpi3mr_print_ioc_info - Display controller information
+ * @mrioc: Adapter instance reference
+ *
+ * Display controller personality, capabilities, supported
+ * protocols, etc.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
+{
+ int i = 0, bytes_written = 0;
+ char personality[16];
+ char protocol[50] = {0};
+ char capabilities[100] = {0};
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ switch (mrioc->facts.personality) {
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
+ strncpy(personality, "Enhanced HBA", sizeof(personality));
+ break;
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
+ strncpy(personality, "RAID", sizeof(personality));
+ break;
+ default:
+ strncpy(personality, "Unknown", sizeof(personality));
+ break;
+ }
+
+ ioc_info(mrioc, "Running in %s Personality", personality);
+
+ ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_protocols[i].protocol) {
+ bytes_written += scnprintf(protocol + bytes_written,
+ sizeof(protocol) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
+ mpi3mr_protocols[i].name);
+ }
+ }
+
+ bytes_written = 0;
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
+		if (mrioc->facts.ioc_capabilities &
+ mpi3mr_capabilities[i].capability) {
+ bytes_written += scnprintf(capabilities + bytes_written,
+ sizeof(capabilities) - bytes_written, "%s%s",
+ bytes_written ? "," : "",
+ mpi3mr_capabilities[i].name);
+ }
+ }
+
+ ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
+ protocol, capabilities);
+}
+
+/**
+ * mpi3mr_cleanup_resources - Free PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Unmap PCI device memory and disable PCI device.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (mrioc->sysif_regs) {
+ iounmap((void __iomem *)mrioc->sysif_regs);
+ mrioc->sysif_regs = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ if (mrioc->bars)
+ pci_release_selected_regions(pdev, mrioc->bars);
+ pci_disable_device(pdev);
+ }
+}
+
+/**
+ * mpi3mr_setup_resources - Enable PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Enable PCI device memory, MSI-x registers and set DMA mask.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ u32 memap_sz = 0;
+ int i, retval = 0, capb = 0;
+ u16 message_control;
+	/* prefer a 64-bit DMA mask when the platform requires more than 32 bits and dma_addr_t can hold it */
+	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
+	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
+	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(mrioc, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!capb) {
+ ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_request_selected_regions(pdev, mrioc->bars,
+ mrioc->driver_name)) {
+ ioc_err(mrioc, "pci_request_selected_regions: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
+ memap_sz = pci_resource_len(pdev, i);
+ mrioc->sysif_regs =
+ ioremap(mrioc->sysif_regs_phys, memap_sz);
+ break;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (retval) {
+ if (dma_mask != DMA_BIT_MASK(32)) {
+ ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
+ dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_mask_and_coherent(&pdev->dev,
+ dma_mask);
+ }
+ if (retval) {
+ mrioc->dma_mask = 0;
+ ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
+ goto out_failed;
+ }
+ }
+ mrioc->dma_mask = dma_mask;
+
+ if (!mrioc->sysif_regs) {
+ ioc_err(mrioc,
+ "Unable to map adapter memory or resource not found\n");
+ retval = -EINVAL;
+ goto out_failed;
+ }
+
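+	/*
+	 * The MSI-X Message Control register (capability offset + 2)
+	 * encodes the MSI-X table size as N - 1 in its low bits.
+	 */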
+ pci_read_config_word(pdev, capb + 2, &message_control);
+ mrioc->msix_count = (message_control & 0x3FF) + 1;
+
+ pci_save_state(pdev);
+
+ pci_set_drvdata(pdev, mrioc->shost);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ (unsigned long long)mrioc->sysif_regs_phys,
+ mrioc->sysif_regs, memap_sz);
+ ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
+ mrioc->msix_count);
+
+ if (!reset_devices && poll_queues > 0)
+ mrioc->requested_poll_qcount = min_t(int, poll_queues,
+ mrioc->msix_count - 2);
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_resources(mrioc);
+ return retval;
+}
+
+/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sending the appropriate event mask bitmap through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
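+	/* mask every event first, then unmask only the ones the driver handles */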
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
+
+/**
+ * mpi3mr_init_ioc - Initialize the controller
+ * @mrioc: Adapter instance reference
+ *
+ * This is the controller initialization routine, executed either
+ * after soft reset or from the PCI probe callback.
+ * Set up the required resources, memory map the controller
+ * registers, create admin and operational reply queue pairs,
+ * allocate required memory for reply pool, sense buffer pool,
+ * issue IOC init request to the firmware, unmask the events and
+ * issue port enable to discover SAS/SATA/NVMe devices and RAID
+ * volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
+
+retry_init:
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ mrioc->sas_transport_enabled = 1;
+ mrioc->scsi_device_channel = 1;
+ mrioc->shost->max_channel = 1;
+ mrioc->shost->transportt = mpi3mr_transport_template;
+ }
+
+ mrioc->reply_sz = mrioc->facts.reply_sz;
+
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ if (!mrioc->cfg_page) {
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (!mrioc->init_cmds.reply) {
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
+ }
+ }
+
+ if (!mrioc->chain_sgl_list) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+ }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
+ goto out_failed;
+ }
+
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ }
+
+ retval = mpi3mr_enable_events(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to enable events %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "controller initialization completed successfully\n");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
+ retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+ retval = -1;
+out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_reinit_ioc - Re-Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @is_resume: Called from resume or reset path
+ *
+ * This is the controller re-initialization routine, executed from
+ * the soft reset handler or resume callback. Creates
+ * operational reply queue pairs, allocates required memory for the
+ * reply pool and sense buffer pool, issues IOC init request to the
+ * firmware, unmasks the events and issues port enable to discover
+ * SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+ u32 pe_timeout, ioc_status;
+
+retry_init:
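+	/* the completion is polled, so express the overall timeout as a count of poll intervals */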
+ pe_timeout =
+ (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
+
+ dprint_reset(mrioc, "bringing up the controller to ready state\n");
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to bring to ready state\n");
+ goto out_failed_noretry;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up single ISR\n");
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup ISR\n");
+ goto out_failed_noretry;
+ }
+ } else
+ mpi3mr_ioc_enable_intr(mrioc);
+
+ dprint_reset(mrioc, "getting ioc_facts\n");
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "failed to get ioc_facts\n");
+ goto out_failed;
+ }
+
+ dprint_reset(mrioc, "validating ioc_facts\n");
+ retval = mpi3mr_revalidate_factsdata(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
+ goto out_failed_noretry;
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ dprint_reset(mrioc, "sending ioc_init\n");
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to send ioc_init\n");
+ goto out_failed;
+ }
+
+ dprint_reset(mrioc, "getting package version\n");
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
+ goto out_failed;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up multiple ISR\n");
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "failed to re-setup ISR\n");
+ goto out_failed_noretry;
+ }
+ }
+
+ dprint_reset(mrioc, "creating operational queue pairs\n");
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to create operational queue pairs\n");
+ goto out_failed;
+ }
+
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
+ mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ retval = -1;
+ goto out_failed_noretry;
+ }
+
+ dprint_reset(mrioc, "enabling events\n");
+ retval = mpi3mr_enable_events(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to enable events\n");
+ goto out_failed;
+ }
+
+ mrioc->device_refresh_on = 1;
+ mpi3mr_add_event_wait_for_device_refresh(mrioc);
+
+ ioc_info(mrioc, "sending port enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "failed to issue port enable\n");
+ goto out_failed;
+ }
+ do {
+ ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ break;
+ if (!pci_device_is_present(mrioc->pdev))
+ mrioc->unrecoverable = 1;
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ }
+ } while (--pe_timeout);
+
+ if (!pe_timeout) {
+ ioc_err(mrioc, "port enable timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ } else if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable completed successfully\n");
+
+ ioc_info(mrioc, "controller %s completed successfully\n",
+ (is_resume)?"resume":"re-initialization");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
+ (is_resume)?"resume":"re-initialization", retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+ retval = -1;
+out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational reply queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_reply_q->q_segments)
+ return;
+
+ size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational request queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_req_q->q_segments)
+ return;
+
+ size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_buffers - memset memory for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Clear all the memory allocated for a controller, typically
+ * called post reset to reuse the memory allocated during the
+ * controller init.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_throttle_group_info *tg;
+
+ mrioc->change_count = 0;
+ mrioc->active_poll_qcount = 0;
+ mrioc->default_qcount = 0;
+ if (mrioc->admin_req_base)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+ atomic_set(&mrioc->admin_reply_q_in_use, 0);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->bsg_cmds.reply, 0,
+ sizeof(*mrioc->bsg_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ memset(mrioc->pel_cmds.reply, 0,
+ sizeof(*mrioc->pel_cmds.reply));
+ memset(mrioc->pel_abort_cmd.reply, 0,
+ sizeof(*mrioc->pel_abort_cmd.reply));
+ memset(mrioc->transport_cmds.reply, 0,
+ sizeof(*mrioc->transport_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ memset(mrioc->evtack_cmds[i].reply, 0,
+ sizeof(*mrioc->evtack_cmds[i].reply));
+ bitmap_clear(mrioc->removepend_bitmap, 0,
+ mrioc->dev_handle_bitmap_bits);
+ bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
+ bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
+ MPI3MR_NUM_EVTACKCMD);
+ }
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ mrioc->op_reply_qinfo[i].qid = 0;
+ mrioc->op_reply_qinfo[i].ci = 0;
+ mrioc->op_reply_qinfo[i].num_replies = 0;
+ mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
+ mpi3mr_memset_op_reply_q_buffers(mrioc, i);
+
+ mrioc->req_qinfo[i].ci = 0;
+ mrioc->req_qinfo[i].pi = 0;
+ mrioc->req_qinfo[i].num_requests = 0;
+ mrioc->req_qinfo[i].qid = 0;
+ mrioc->req_qinfo[i].reply_qid = 0;
+ spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+ mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ }
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
+}
+
+/**
+ * mpi3mr_free_mem - Free memory allocated for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Free all the memory allocated for a controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_intr_info *intr_info;
+
+ mpi3mr_free_enclosure_list(mrioc);
+
+ if (mrioc->sense_buf_pool) {
+ if (mrioc->sense_buf)
+ dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
+ mrioc->sense_buf_dma);
+ dma_pool_destroy(mrioc->sense_buf_pool);
+ mrioc->sense_buf = NULL;
+ mrioc->sense_buf_pool = NULL;
+ }
+ if (mrioc->sense_buf_q_pool) {
+ if (mrioc->sense_buf_q)
+ dma_pool_free(mrioc->sense_buf_q_pool,
+ mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
+ dma_pool_destroy(mrioc->sense_buf_q_pool);
+ mrioc->sense_buf_q = NULL;
+ mrioc->sense_buf_q_pool = NULL;
+ }
+
+ if (mrioc->reply_buf_pool) {
+ if (mrioc->reply_buf)
+ dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
+ mrioc->reply_buf_dma);
+ dma_pool_destroy(mrioc->reply_buf_pool);
+ mrioc->reply_buf = NULL;
+ mrioc->reply_buf_pool = NULL;
+ }
+ if (mrioc->reply_free_q_pool) {
+ if (mrioc->reply_free_q)
+ dma_pool_free(mrioc->reply_free_q_pool,
+ mrioc->reply_free_q, mrioc->reply_free_q_dma);
+ dma_pool_destroy(mrioc->reply_free_q_pool);
+ mrioc->reply_free_q = NULL;
+ mrioc->reply_free_q_pool = NULL;
+ }
+
+ for (i = 0; i < mrioc->num_op_req_q; i++)
+ mpi3mr_free_op_req_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ mpi3mr_free_op_reply_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->intr_info_count; i++) {
+ intr_info = mrioc->intr_info + i;
+ intr_info->op_reply_q = NULL;
+ }
+
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+ mrioc->num_op_req_q = 0;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+ mrioc->num_op_reply_q = 0;
+
+ kfree(mrioc->init_cmds.reply);
+ mrioc->init_cmds.reply = NULL;
+
+ kfree(mrioc->bsg_cmds.reply);
+ mrioc->bsg_cmds.reply = NULL;
+
+ kfree(mrioc->host_tm_cmds.reply);
+ mrioc->host_tm_cmds.reply = NULL;
+
+ kfree(mrioc->pel_cmds.reply);
+ mrioc->pel_cmds.reply = NULL;
+
+ kfree(mrioc->pel_abort_cmd.reply);
+ mrioc->pel_abort_cmd.reply = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ kfree(mrioc->evtack_cmds[i].reply);
+ mrioc->evtack_cmds[i].reply = NULL;
+ }
+
+ bitmap_free(mrioc->removepend_bitmap);
+ mrioc->removepend_bitmap = NULL;
+
+ bitmap_free(mrioc->devrem_bitmap);
+ mrioc->devrem_bitmap = NULL;
+
+ bitmap_free(mrioc->evtack_cmds_bitmap);
+ mrioc->evtack_cmds_bitmap = NULL;
+
+ bitmap_free(mrioc->chain_bitmap);
+ mrioc->chain_bitmap = NULL;
+
+ kfree(mrioc->transport_cmds.reply);
+ mrioc->transport_cmds.reply = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ kfree(mrioc->dev_rmhs_cmds[i].reply);
+ mrioc->dev_rmhs_cmds[i].reply = NULL;
+ }
+
+ if (mrioc->chain_buf_pool) {
+ for (i = 0; i < mrioc->chain_buf_count; i++) {
+ if (mrioc->chain_sgl_list[i].addr) {
+ dma_pool_free(mrioc->chain_buf_pool,
+ mrioc->chain_sgl_list[i].addr,
+ mrioc->chain_sgl_list[i].dma_addr);
+ mrioc->chain_sgl_list[i].addr = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->chain_buf_pool);
+ mrioc->chain_buf_pool = NULL;
+ }
+
+ kfree(mrioc->chain_sgl_list);
+ mrioc->chain_sgl_list = NULL;
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+ if (mrioc->cfg_page) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+ mrioc->cfg_page, mrioc->cfg_page_dma);
+ mrioc->cfg_page = NULL;
+ }
+ if (mrioc->pel_seqnum_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+ mrioc->pel_seqnum_virt = NULL;
+ }
+
+ kfree(mrioc->throttle_groups);
+ mrioc->throttle_groups = NULL;
+
+ kfree(mrioc->logdata_buf);
+ mrioc->logdata_buf = NULL;
+
+}
+
+/**
+ * mpi3mr_issue_ioc_shutdown - shutdown controller
+ * @mrioc: Adapter instance reference
+ *
+ * Send shutdown notification to the controller and wait up to the
+ * shutdown timeout for it to complete.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status;
+ u8 retval = 1;
+ u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
+
+ ioc_info(mrioc, "Issuing shutdown Notification\n");
+ if (mrioc->unrecoverable) {
+ ioc_warn(mrioc,
+ "IOC is unrecoverable shutdown is not issued\n");
+ return;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
+ ioc_info(mrioc, "shutdown already in progress\n");
+ return;
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
+
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->facts.shutdown_timeout)
+ timeout = mrioc->facts.shutdown_timeout * 10;
+
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (retval) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
+ ioc_warn(mrioc,
+ "shutdown still in progress after timeout\n");
+ }
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+}
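+
+/*
+ * Illustrative timing note (not driver code): the wait loop above
+ * polls in 100 ms steps, so the timeout counter is ten times the
+ * timeout in seconds. For example, assuming the firmware reports a
+ * shutdown_timeout of 120 seconds:
+ *
+ *	timeout = 120 * 10;	(1200 iterations)
+ *	1200 * 100 ms		(up to 120 seconds of polling)
+ */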
+
+/**
+ * mpi3mr_cleanup_ioc - Cleanup controller
+ * @mrioc: Adapter instance reference
+ *
+ * Controller cleanup handler: a message unit reset or soft reset
+ * and a shutdown notification are issued to the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ dprint_exit(mrioc, "cleaning up the controller\n");
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
+ (ioc_state == MRIOC_STATE_READY)) {
+ if (mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP))
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_MUR_FAILURE);
+ mpi3mr_issue_ioc_shutdown(mrioc);
+ }
+ dprint_exit(mrioc, "controller cleanup completed\n");
+}
+
+/**
+ * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
+ * @mrioc: Adapter instance reference
+ * @cmdptr: Internal command tracker
+ *
+ * Complete an internal driver command with its state indicating
+ * that it was completed due to a reset.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *cmdptr)
+{
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_RESET;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_flush_drv_cmds - Flush internal driver commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all internal driver commands after a reset.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_drv_cmd *cmdptr;
+ u8 i;
+
+ cmdptr = &mrioc->init_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->bsg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->host_tm_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ cmdptr = &mrioc->dev_rmhs_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ cmdptr = &mrioc->evtack_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+
+ cmdptr = &mrioc->pel_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->pel_abort_cmd;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->transport_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+}
+
+/**
+ * mpi3mr_pel_wait_post - Issue PEL Wait
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue a PEL wait MPI request through the admin queue and return.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_req_action_wait pel_wait;
+
+ mrioc->pel_abort_requested = false;
+
+ memset(&pel_wait, 0, sizeof(pel_wait));
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_pel_wait_complete;
+ drv_cmd->ioc_status = 0;
+ drv_cmd->ioc_loginfo = 0;
+ pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_wait.action = MPI3_PEL_ACTION_WAIT;
+ pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
+ pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
+ pel_wait.class = cpu_to_le16(mrioc->pel_class);
+ pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
+ dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
+ mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
+
+ if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
+ dprint_bsg_err(mrioc,
+ "Issuing PELWait: Admin post failed\n");
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ mrioc->pel_enabled = false;
+ }
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue a PEL get sequence number MPI request through the admin
+ * queue and return.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ int retval = 0;
+
+ memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
+ mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_cmds.is_waiting = 0;
+ mrioc->pel_cmds.ioc_status = 0;
+ mrioc->pel_cmds.ioc_loginfo = 0;
+ mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
+ pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
+ mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
+ mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
+
+ retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
+ sizeof(pel_getseq_req), 0);
+ if (retval) {
+ if (drv_cmd) {
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ }
+ mrioc->pel_enabled = false;
+ }
+
+ return retval;
+}
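+
+/*
+ * Note on the PEL request cycle implemented by the handlers around
+ * here: the driver keeps one PEL wait request outstanding in the
+ * firmware. When it completes with a new log entry, the completion
+ * handler posts a get-sequence-number request, whose completion
+ * handler advances pel_newest_seqnum and re-posts the PEL wait:
+ *
+ *	mpi3mr_pel_wait_post() -> mpi3mr_pel_wait_complete() ->
+ *	mpi3mr_pel_get_seqnum_post() ->
+ *	mpi3mr_pel_get_seqnum_complete() -> mpi3mr_pel_wait_post() ...
+ */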
+
+/**
+ * mpi3mr_pel_wait_complete - PELWait Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the callback handler for the PEL wait request. The
+ * firmware completes a PEL wait request when it is aborted or
+ * when a new PEL entry is available. This sends an AEN to the
+ * application and, if the completion is not due to a PEL abort,
+ * issues a request for the next PEL sequence number.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ u16 ioc_status, pe_log_status;
+ bool do_retry = false;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ __func__, ioc_status, drv_cmd->ioc_loginfo);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
+ (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
+ ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
+ __func__, pe_log_status);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to pel_log_status(0x%04x)\n",
+ pe_log_status);
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+ return;
+ }
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ atomic64_inc(&event_counter);
+ if (!mrioc->pel_abort_requested) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the callback handler for the PEL get sequence number
+ * request; on success, a new PEL wait request is issued to the
+ * firmware from here.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ struct mpi3_pel_seq *pel_seqnum_virt;
+ u16 ioc_status;
+ bool do_retry = false;
+
+ pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
+ le16_to_cpu(pel_reply->pe_log_status));
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
+ return;
+ }
+
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
+ drv_cmd->retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_soft_reset_handler - Reset the controller
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ * @snapdump: Flag to generate snapdump in firmware or not
+ *
+ * This is the handler for recovering the controller by issuing a
+ * soft reset or a diag fault reset. This is a blocking function;
+ * while one reset is executing, any other requested resets are
+ * blocked. All BSG requests will be blocked during the reset. If
+ * the controller reset is successful then the controller will be
+ * reinitialized, otherwise the controller will be marked as not
+ * recoverable.
+ *
+ * If the snapdump bit is set, the controller is issued a diag
+ * fault reset so that the firmware can create a snapdump; after
+ * that the firmware raises an F000 fault and the driver issues a
+ * soft reset to recover from it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump)
+{
+ int retval = 0, i;
+ unsigned long flags;
+ u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+
+ /* Block the reset handler while a diag save is in progress */
+ dprint_reset(mrioc,
+ "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
+ mrioc->diagsave_timeout);
+ while (mrioc->diagsave_timeout)
+ ssleep(1);
+ /*
+ * Block new resets until the currently executing one is finished and
+ * return the status of the existing reset for all blocked resets
+ */
+ dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
+ if (!mutex_trylock(&mrioc->reset_mutex)) {
+ ioc_info(mrioc,
+ "controller reset triggered by %s is blocked due to another reset in progress\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ do {
+ ssleep(1);
+ } while (mrioc->reset_in_progress == 1);
+ ioc_info(mrioc,
+ "returning previous reset result(%d) for the reset triggered by %s\n",
+ mrioc->prev_reset_result,
+ mpi3mr_reset_rc_name(reset_reason));
+ return mrioc->prev_reset_result;
+ }
+ ioc_info(mrioc, "controller reset is triggered by %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+
+ mrioc->device_refresh_on = 0;
+ mrioc->reset_in_progress = 1;
+ mrioc->stop_bsgs = 1;
+ mrioc->prev_reset_result = -1;
+
+ if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
+ (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ dprint_reset(mrioc, "soft_reset_handler: masking events\n");
+ mpi3mr_issue_event_notification(mrioc);
+ }
+
+ mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (snapdump) {
+ mpi3mr_set_diagsave(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ if (!retval) {
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+ }
+ }
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
+ goto out;
+ }
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
+
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+ bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
+ bitmap_clear(mrioc->removepend_bitmap, 0,
+ mrioc->dev_handle_bitmap_bits);
+ bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
+ mpi3mr_flush_host_io(mrioc);
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+
+ if (mrioc->prepare_for_reset) {
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ mpi3mr_memset_buffers(mrioc);
+ retval = mpi3mr_reinit_ioc(mrioc, 0);
+ if (retval) {
+ pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
+ mrioc->name, reset_reason);
+ goto out;
+ }
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
+
+out:
+ if (!retval) {
+ mrioc->diagsave_timeout = 0;
+ mrioc->reset_in_progress = 0;
+ mrioc->pel_abort_requested = 0;
+ if (mrioc->pel_enabled) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ mrioc->device_refresh_on = 0;
+
+ mrioc->ts_update_counter = 0;
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ mrioc->stop_bsgs = 0;
+ if (mrioc->pel_enabled)
+ atomic64_inc(&event_counter);
+ } else {
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->device_refresh_on = 0;
+ mrioc->unrecoverable = 1;
+ mrioc->reset_in_progress = 0;
+ retval = -1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+ mrioc->prev_reset_result = retval;
+ mutex_unlock(&mrioc->reset_mutex);
+ ioc_info(mrioc, "controller reset is %s\n",
+ ((retval == 0) ? "successful" : "failed"));
+ return retval;
+}
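+
+/*
+ * Illustrative caller sketch (not driver code): a recovery path that
+ * wants a snapdump before the reset would invoke the handler as below,
+ * where the reason code is one of the MPI3MR_RESET_FROM_* values and
+ * the last argument is the snapdump flag:
+ *
+ *	if (mpi3mr_soft_reset_handler(mrioc,
+ *	    MPI3MR_RESET_FROM_FAULT_WATCH, 1))
+ *		ioc_err(mrioc, "controller is unrecoverable\n");
+ */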
+
+/**
+ * mpi3mr_free_config_dma_memory - free memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: memory descriptor structure
+ *
+ * Check whether the size of the buffer specified by the memory
+ * descriptor is greater than the default config page size; if
+ * so, free the memory pointed to by the descriptor.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+}
+
+/**
+ * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: Memory descriptor to hold dma memory info
+ *
+ * This function allocates new DMA-able memory or provides the
+ * default config page DMA memory, based on the memory size
+ * described by the descriptor.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->size > mrioc->cfg_page_sz) {
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+ if (!mem_desc->addr)
+ return -ENOMEM;
+ } else {
+ mem_desc->addr = mrioc->cfg_page;
+ mem_desc->dma_addr = mrioc->cfg_page_dma;
+ memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+ }
+ return 0;
+}
+
+/**
+ * mpi3mr_post_cfg_req - Issue config requests and wait
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting an MPI3 configuration request
+ * to the firmware. This blocks until the request completes or
+ * the timeout (in seconds) expires; if the request times out,
+ * this function faults the controller with the proper reason
+ * code.
+ *
+ * On successful completion of the request, this function returns
+ * the appropriate ioc status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->cfg_cmds.mutex);
+ if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending config request failed due to command in use\n");
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+ goto out;
+ }
+ mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->cfg_cmds.is_waiting = 1;
+ mrioc->cfg_cmds.callback = NULL;
+ mrioc->cfg_cmds.ioc_status = 0;
+ mrioc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
+ cfg_req->function = MPI3_FUNCTION_CONFIG;
+
+ init_completion(&mrioc->cfg_cmds.done);
+ dprint_cfg_info(mrioc, "posting config request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
+ "mpi3_cfg_req");
+ retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting config request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
+ if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ ioc_err(mrioc, "config request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_cfg_err(mrioc,
+ "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
+
+out_unlock:
+ mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_cfg_req - config page request processor
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @cfg_hdr: Configuration page header
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ * @cfg_buf: Memory pointer to copy config page or header
+ * @cfg_buf_sz: Size of the memory to get config page or header
+ *
+ * This is the handler for config page read, write, and config
+ * page header read operations.
+ *
+ * This function expects the cfg_req to be populated with the
+ * page type, page number, and action for a header read, and
+ * additionally with the page address for all other operations.
+ *
+ * The cfg_hdr can be passed as NULL when reading the header; for
+ * page read/write operations the cfg_hdr must point to a valid
+ * configuration page header.
+ *
+ * This allocates DMA-able memory based on the size of the config
+ * buffer and sets up the SGE of the cfg_req.
+ *
+ * For write actions, the config page data has to be passed in
+ * the cfg_buf and the size of the data has to be given in
+ * cfg_buf_sz.
+ *
+ * For read/header actions, on successful completion of the
+ * request with a successful ioc_status, the data is copied into
+ * cfg_buf, limited to the minimum of the actual page size and
+ * cfg_buf_sz.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req,
+ struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
+ void *cfg_buf, u32 cfg_buf_sz)
+{
+ struct dma_memory_desc mem_desc;
+ int retval = -1;
+ u8 invalid_action = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
+
+ if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
+ mem_desc.size = sizeof(struct mpi3_config_page_header);
+ else {
+ if (!cfg_hdr) {
+ ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number);
+ goto out;
+ }
+ switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
+ case MPI3_CONFIG_PAGEATTR_READ_ONLY:
+ if (cfg_req->action
+ != MPI3_CONFIG_ACTION_READ_CURRENT)
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
+ if ((cfg_req->action ==
+ MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
+ (cfg_req->action ==
+ MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_PERSISTENT:
+ default:
+ break;
+ }
+ if (invalid_action) {
+ ioc_err(mrioc,
+ "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number, cfg_hdr->page_attribute);
+ goto out;
+ }
+ mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
+ cfg_req->page_length = cfg_hdr->page_length;
+ cfg_req->page_version = cfg_hdr->page_version;
+ }
+ if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+ goto out;
+
+ mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ mem_desc.dma_addr);
+
+ if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
+ (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer to be written\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+ if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
+ goto out;
+
+ retval = 0;
+ if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer read\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+out:
+ mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
+ return retval;
+}
+
+/**
+ * mpi3mr_cfg_get_dev_pg0 - Read current device page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @dev_pg0: Pointer to return device page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific
+ * device page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(dev_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "device page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
+ (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
+ ioc_err(mrioc, "device page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
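+
+/*
+ * Illustrative caller sketch (not driver code): since this routine
+ * does not interpret ioc_status, a caller reading device page0 by
+ * handle is expected to check it, e.g. (assuming a valid dev_handle):
+ *
+ *	struct mpi3_device_page0 dev_pg0;
+ *	u16 ioc_status = 0;
+ *
+ *	if (!mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ *	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, dev_handle) &&
+ *	    (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ *		... use dev_pg0 ...
+ */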
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg0: Pointer to return SAS Phy page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Phy page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas phy page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg1: Pointer to return SAS Phy page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Phy page1. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas phy page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg0: Pointer to return SAS Expander page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
+ ioc_err(mrioc, "expander page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg1: Pointer to return SAS Expander page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for a config page read of a specific SAS
+ * Expander page1. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
+ ioc_err(mrioc, "expander page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @encl_pg0: Pointer to return Enclosure page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for a config page read of a specific
+ * Enclosure page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(encl_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "enclosure page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
+ (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
+ ioc_err(mrioc, "enclosure page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page0. This routine checks ioc_status to decide whether the
+ * page read succeeded.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page0 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page0 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page write of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the
+ * page write succeeded. This modifies both the current and the
+ * persistent page.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write current failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
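+
+/*
+ * Illustrative read-modify-write sketch (not driver code): the get/set
+ * pair above is typically used together, reading the current page,
+ * changing a field and writing it back (allocation sized from the page
+ * header in real callers, error handling elided):
+ *
+ *	struct mpi3_sas_io_unit_page1 *pg1;
+ *	u16 pg_sz = sizeof(*pg1);
+ *
+ *	pg1 = kzalloc(pg_sz, GFP_KERNEL);
+ *	if (pg1 && !mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, pg_sz)) {
+ *		... modify pg1 fields ...
+ *		mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, pg_sz);
+ *	}
+ *	kfree(pg1);
+ */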
+
+/**
+ * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
+ * @mrioc: Adapter instance reference
+ * @driver_pg1: Pointer to return Driver page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for a config page read of the Driver
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(driver_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "driver page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
+ ioc_err(mrioc, "driver page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
new file mode 100644
index 000000000..85f5b349c
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -0,0 +1,5360 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+/* global driver scope variables */
+LIST_HEAD(mrioc_list);
+DEFINE_SPINLOCK(mrioc_list_lock);
+static int mrioc_ids;
+static int warn_non_secure_ctlr;
+atomic64_t event_counter;
+
+MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
+MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
+MODULE_VERSION(MPI3MR_DRIVER_VERSION);
+
+/* Module parameters */
+int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
+
+static int prot_guard_mask = 3;
+module_param(prot_guard_mask, int, 0);
+MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
+static int logging_level;
+module_param(logging_level, int, 0);
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+/* Forward declarations */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
+#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+
+/**
+ * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Calculate the host tag based on the block tag for a given scmd.
+ *
+ * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
+ */
+static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag;
+ u16 host_tag, hw_queue;
+
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
+ if (hw_queue >= mrioc->num_op_reply_q)
+ return MPI3MR_HOSTTAG_INVALID;
+ host_tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+ if (WARN_ON(host_tag >= mrioc->max_host_ios))
+ return MPI3MR_HOSTTAG_INVALID;
+
+ priv = scsi_cmd_priv(scmd);
+ /* host_tag 0 is invalid, hence incrementing by 1 */
+ priv->host_tag = host_tag + 1;
+ priv->scmd = scmd;
+ priv->in_lld_scope = 1;
+ priv->req_q_idx = hw_queue;
+ priv->meta_chain_idx = -1;
+ priv->chain_idx = -1;
+ priv->meta_sg_valid = 0;
+ return priv->host_tag;
+}
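+
+/*
+ * Illustrative tag math (not driver code): the block layer packs the
+ * hardware queue index into the upper bits of the unique tag, so a
+ * request on hardware queue 2 with block tag 5 works out as:
+ *
+ *	unique_tag = (2 << BLK_MQ_UNIQUE_TAG_BITS) | 5;
+ *	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);	(2)
+ *	host_tag = blk_mq_unique_tag_to_tag(unique_tag) + 1;	(6)
+ *
+ * The +1 keeps host_tag 0 reserved as invalid.
+ */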
+
+/**
+ * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
+ * @mrioc: Adapter instance reference
+ * @host_tag: Host tag
+ * @qidx: Operational queue index
+ *
+ * Identify the block tag from the host tag and queue index, and
+ * retrieve the associated SCSI command using scsi_host_find_tag().
+ *
+ * Return: SCSI command reference or NULL.
+ */
+static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
+ struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
+{
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag = host_tag - 1;
+
+ if (WARN_ON(host_tag > mrioc->max_host_ios))
+ goto out;
+
+ unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
+
+ scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ scmd = NULL;
+ }
+out:
+ return scmd;
+}
+
+/**
+ * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Invalidate the SCSI command private data to mark that the
+ * command is no longer in LLD scope.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+
+ priv = scsi_cmd_priv(scmd);
+
+ if (WARN_ON(priv->in_lld_scope == 0))
+ return;
+ priv->host_tag = MPI3MR_HOSTTAG_INVALID;
+ priv->req_q_idx = 0xFFFF;
+ priv->scmd = NULL;
+ priv->in_lld_scope = 0;
+ priv->meta_sg_valid = 0;
+ if (priv->chain_idx >= 0) {
+ clear_bit(priv->chain_idx, mrioc->chain_bitmap);
+ priv->chain_idx = -1;
+ }
+ if (priv->meta_chain_idx >= 0) {
+ clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
+ priv->meta_chain_idx = -1;
+ }
+}
+
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
+static void mpi3mr_fwevt_worker(struct work_struct *work);
+
+/**
+ * mpi3mr_fwevt_free - firmware event memory deallocator
+ * @r: kref pointer of the firmware event
+ *
+ * Free the firmware event memory when there are no more
+ * references.
+ */
+static void mpi3mr_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
+}
+
+/**
+ * mpi3mr_fwevt_get - kref increment helper
+ * @fwevt: Firmware event reference
+ *
+ * Increment the firmware event reference count.
+ */
+static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+/**
+ * mpi3mr_fwevt_put - kref decrement helper
+ * @fwevt: Firmware event reference
+ *
+ * Decrement the firmware event reference count.
+ */
+static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
+}
+
+/**
+ * mpi3mr_alloc_fwevt - Allocate firmware event
+ * @len: length of firmware event data to allocate
+ *
+ * Allocate a firmware event of the required length and
+ * initialize its reference counter.
+ *
+ * Return: firmware event reference.
+ */
+static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
+{
+ struct mpi3mr_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
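+
+/*
+ * Note on fwevt reference counting (as used by the helpers above and
+ * below): kref_init() starts the count at 1, and
+ * mpi3mr_fwevt_add_to_list() takes one extra reference for the list
+ * and one for the work queue. Matching puts are issued when the event
+ * leaves the list, when its work is cancelled or finishes processing,
+ * and to drop the initial reference, bringing the count to zero so
+ * mpi3mr_fwevt_free() runs.
+ */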
+
+/**
+ * mpi3mr_fwevt_add_to_list - Add firmware event to the list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Add the given firmware event to the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!mrioc->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ mpi3mr_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &mrioc->fwevt_list);
+ INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ mpi3mr_fwevt_get(fwevt);
+ queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_del_from_list - Delete firmware event from list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Delete the given firmware event from the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
+ * @mrioc: Adapter instance reference
+ *
+ * Dequeue a firmware event from the firmware event list.
+ *
+ * Return: firmware event.
+ */
+static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
+ struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&mrioc->fwevt_list)) {
+ fwevt = list_first_entry(&mrioc->fwevt_list,
+ struct mpi3mr_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+/**
+ * mpi3mr_cancel_work - cancel firmware event
+ * @fwevt: fwevt object which needs to be canceled
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
+{
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+}
+
+/**
+ * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all pending firmware events from the firmware event
+ * list.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
+ !mrioc->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
+ mpi3mr_cancel_work(fwevt);
+
+ if (mrioc->current_event) {
+ fwevt = mrioc->current_event;
+ /*
+ * Don't call the cancel_work_sync() API for the
+ * fwevt work if the controller reset gets called
+ * as part of processing the same fwevt work, or
+ * when the worker thread is waiting for device
+ * add/remove APIs to complete.
+ * Otherwise we will see a deadlock.
+ */
+ if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
+ fwevt->discard = 1;
+ return;
+ }
+
+ mpi3mr_cancel_work(fwevt);
+ }
+}
+
+/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Queues a synthetically generated driver event to the event
+ * worker thread; the driver event will be used to reduce the QD
+ * of all VDs in the TG from the worker thread.
+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttling and
+ * the QD has not been restored through a device info change event,
+ * then don't queue further reduction events.
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
+ }
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
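+
+/*
+ * Illustrative arithmetic (not driver code): the reduced queue depth
+ * is qd_reduction tenths of the firmware queue depth, floored at 8.
+ * For example, assuming tg->fw_qd = 128 and tg->qd_reduction = 3:
+ *
+ *	modified_qd = max_t(u16, (128 * 3) / 10, 8);	(= 38)
+ */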
+
+/**
+ * mpi3mr_invalidate_devhandles - Invalidate device handles
+ * @mrioc: Adapter instance reference
+ *
+ * Invalidate the device handles in the target device structures.
+ * Called after a reset, prior to reinitializing the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
+ if (tgtdev->host_exposed)
+ atomic_set(&tgt_priv->block_io, 1);
+ }
+ }
+}
+
+/**
+ * mpi3mr_print_scmd - print individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ *
+ * Print the SCSI command details if it is in LLD scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
+ __func__, priv->host_tag, priv->req_q_idx + 1);
+ scsi_print_command(scmd);
+ }
+
+out:
+	return true;
+}
+
+/**
+ * mpi3mr_flush_scmd - Flush individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ *
+ * Return the SCSI command to the upper layers if it is in LLD
+ * scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_flush_scmd(struct request *rq, void *data)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ if (priv->meta_sg_valid)
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->result = DID_RESET << 16;
+ scsi_print_command(scmd);
+ scsi_done(scmd);
+ mrioc->flush_io_count++;
+ }
+
+out:
+	return true;
+}
+
+/**
+ * mpi3mr_count_dev_pending - Count commands pending for a LUN
+ * @rq: Block request
+ * @data: SCSI device reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host. If the command is pending in the LLD for the specific
+ * device (LUN), the device specific pending I/O counter is
+ * updated in the device structure.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
+{
+ struct scsi_device *sdev = (struct scsi_device *)data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device == sdev)
+ sdev_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_count_tgt_pending - Count commands pending for target
+ * @rq: Block request
+ * @data: SCSI target reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host. If the command is pending in the LLD for the specific
+ * target, the target specific pending I/O counter is updated in
+ * the target structure.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
+{
+ struct scsi_target *starget = (struct scsi_target *)data;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device && (scsi_target(scmd->device) == starget))
+ stgt_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_flush_host_io - Flush host I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all of the pending I/Os by calling
+ * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ mrioc->flush_io_count = 0;
+ ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
+ mrioc->flush_io_count);
+}
+
+/**
+ * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
+ * @mrioc: Adapter instance reference
+ *
+ * This function waits for currently running IO poll threads to
+ * exit and then flushes all host I/Os and any internal pending
+ * cmds. This is executed after the controller is marked as
+ * unrecoverable.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+ int i;
+
+ if (!mrioc->unrecoverable)
+ return;
+
+ if (mrioc->op_reply_qinfo) {
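+		/*
+		 * Busy-wait for any I/O poll threads still processing
+		 * an operational reply queue to exit before clearing
+		 * its pending I/O count.
+		 */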
+ for (i = 0; i < mrioc->num_queues; i++) {
+ while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
+ udelay(500);
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ }
+ }
+ mrioc->flush_io_count = 0;
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+}
+
+/**
+ * mpi3mr_alloc_tgtdev - target device allocator
+ *
+ * Allocate a target device instance and initialize the
+ * reference count.
+ *
+ * Return: target device instance.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
+ if (!tgtdev)
+ return NULL;
+ kref_init(&tgtdev->ref_count);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Add the target device to the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ mpi3mr_tgtdev_get(tgtdev);
+ INIT_LIST_HEAD(&tgtdev->list);
+ list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Remove the target device from the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve the target device from the device
+ * handle. Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->dev_handle == handle)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve the target device from the device
+ * handle. Lock version.
+ *
+ * Return: Target device reference.
+ */
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve the target device from the persistent
+ * ID. Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->perst_id == persist_id)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve the target device from the persistent
+ * ID. Lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
+ * @mrioc: Adapter instance reference
+ * @tgt_priv: Target private data
+ *
+ * Accessor to return the target device from the target private
+ * data. Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
+ struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ tgtdev = tgt_priv->tgt_dev;
+ if (tgtdev)
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set io_divert flag for each device associated
+ * with the given throttle group with the given value.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_print_device_event_notice - print notice related to post processing of
+ * device event after controller reset.
+ *
+ * @mrioc: Adapter instance reference
+ * @device_add: true for device add event and false for device removal event
+ *
+ * Return: None.
+ */
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add)
+{
+ ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
+ (device_add ? "addition" : "removal"));
+ ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
+ ioc_notice(mrioc, "are matched with attached devices for correctness\n");
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device structure
+ *
+ * Checks whether the device is exposed to the upper layers and,
+ * if it is, removes the device from the upper layers by calling
+ * scsi_remove_target().
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ atomic_set(&tgt_priv->block_io, 0);
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc,
+ false);
+ return;
+ }
+ }
+ }
+ } else
+ mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
+
+ ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
+ * @mrioc: Adapter instance reference
+ * @perst_id: Persistent ID of the device
+ *
+ * Checks whether the device can be exposed to the upper layers
+ * and, if it is not already exposed, exposes the device by
+ * calling scsi_scan_target().
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
+ u16 perst_id)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (!tgtdev) {
+ retval = -1;
+ goto out;
+ }
+ if (tgtdev->is_hidden || tgtdev->host_exposed) {
+ retval = -1;
+ goto out;
+ }
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ tgtdev->host_exposed = 1;
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_scan_target(&mrioc->shost->shost_gendev,
+ mrioc->scsi_device_channel, tgtdev->perst_id,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ if (!tgtdev->starget)
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc, true);
+ goto out;
+ }
+ }
+ } else
+ mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_change_queue_depth - Change QD callback handler
+ * @sdev: SCSI device reference
+ * @q_depth: Queue depth
+ *
+ * Validate and limit QD and call scsi_change_queue_depth.
+ *
+ * Return: return value of scsi_change_queue_depth
+ */
+static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
+ int q_depth)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ int retval = 0;
+
+ if (!sdev->tagged_supported)
+ q_depth = 1;
+ if (q_depth > shost->can_queue)
+ q_depth = shost->can_queue;
+ else if (!q_depth)
+ q_depth = MPI3MR_DEFAULT_SDEV_QD;
+ retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_update_sdev - Update SCSI device information
+ * @sdev: SCSI device reference
+ * @data: target device reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the target specific information into each
+ * SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = (struct mpi3mr_tgt_dev *)data;
+ if (!tgtdev)
+ return;
+
+ mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+		/* The block layer hw sector size = 512 */
+ if ((tgtdev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any
+ * missing devices during reset and remove them from the upper
+ * layers, or expose any newly detected device to the upper
+ * layers.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
+ tgtdev->perst_id);
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+ !tgtdev->is_hidden) {
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc,
+ tgtdev->perst_id);
+ else if (tgtdev->starget)
+ starget_for_each_device(tgtdev->starget,
+ (void *)tgtdev, mpi3mr_update_sdev);
+ }
+ }
+}
+
+/**
+ * mpi3mr_update_tgtdev - Update cached target device information
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device internal structure
+ * @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
+ *
+ * Update the information from the device page0 into the driver
+ * cached target device structure.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
+{
+ u16 flags = 0;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ u8 prot_mask = 0;
+
+ tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->io_unit_port = dev_pg0->io_unit_port;
+ tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
+ tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
+ tgtdev->slot = le16_to_cpu(dev_pg0->slot);
+ tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
+ tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+ tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
+
+ if (tgtdev->encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ tgtdev->encl_handle);
+ if (enclosure_dev)
+ tgtdev->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+
+ flags = tgtdev->devpg0_flag;
+
+ tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+
+	if (is_added)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
+ scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
+ scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
+		if (is_added)
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+
+ switch (dev_pg0->access_status) {
+ case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI3_DEVICE0_ASTATUS_PREPARE:
+ case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
+ case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
+ break;
+ default:
+ tgtdev->is_hidden = 1;
+ break;
+ }
+
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_SAS_SATA:
+ {
+ struct mpi3_device0_sas_sata_format *sasinf =
+ &dev_pg0->device_specific.sas_sata_format;
+ u16 dev_info = le16_to_cpu(sasinf->device_info);
+
+ tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
+ tgtdev->dev_spec.sas_sata_inf.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
+ tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
+ sasinf->attached_phy_identifier;
+ if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
+ tgtdev->is_hidden = 1;
+ else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
+ MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
+ tgtdev->is_hidden = 1;
+
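+		/*
+		 * Virtual direct-attached devices and devices with no
+		 * parent handle are not exposed through the SAS
+		 * transport layer; mark them non_stl.
+		 */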
+ if (((tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
+ && (tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
+ (tgtdev->parent_handle == 0xFFFF))
+ tgtdev->non_stl = 1;
+ if (tgtdev->dev_spec.sas_sata_inf.hba_port)
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
+ dev_pg0->io_unit_port;
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ {
+ struct mpi3_device0_pcie_format *pcieinf =
+ &dev_pg0->device_specific.pcie_format;
+ u16 dev_info = le16_to_cpu(pcieinf->device_info);
+
+ tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
+ tgtdev->dev_spec.pcie_inf.capb =
+ le32_to_cpu(pcieinf->capabilities);
+ tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
+ /* 2^12 = 4096 */
+ tgtdev->dev_spec.pcie_inf.pgsz = 12;
+ if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
+ tgtdev->dev_spec.pcie_inf.mdts =
+ le32_to_cpu(pcieinf->maximum_data_transfer_size);
+ tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
+ tgtdev->dev_spec.pcie_inf.reset_to =
+ max_t(u8, pcieinf->controller_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ tgtdev->dev_spec.pcie_inf.abort_to =
+ max_t(u8, pcieinf->nvme_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ }
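+		/* cap the device's max data transfer size at 1 MiB */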
+ if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
+ tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
+ if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
+ tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
+ if (!mrioc->shost)
+ break;
+ prot_mask = scsi_host_get_prot(mrioc->shost);
+ if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
+ scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
+ ioc_info(mrioc,
+ "%s : Disabling DIX0 prot capability\n", __func__);
+ ioc_info(mrioc,
+ "because HBA does not support DIX0 operation on NVME drives\n");
+ }
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_VD:
+ {
+ struct mpi3_device0_vd_format *vdinf =
+ &dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
+
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
+ if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
+ tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
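+		/*
+		 * Scale the firmware-reported throttle limits by 2048;
+		 * this converts each firmware unit (2048 512-byte
+		 * blocks, i.e. 1 MiB) into the block granularity used
+		 * by the driver's I/O throttling accounting.
+		 */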
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+			if (is_added)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event information.
+ *
+ * Process the Device Status Change event and, based on the
+ * device's new information, either expose the device to the
+ * upper layers or remove it from the upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ u16 dev_handle = 0;
+ u8 uhide = 0, delete = 0, cleanup = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)fwevt->event_data;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+ ioc_info(mrioc,
+ "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
+ __func__, dev_handle, evtdata->reason_code);
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
+ uhide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ cleanup = 1;
+ break;
+ default:
+ ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
+ evtdata->reason_code);
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (uhide) {
+ tgtdev->is_hidden = 0;
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ if (delete)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ }
+ if (cleanup) {
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: New device page0
+ *
+ * Process the Device Info Change event and, based on the
+ * device's new information, either expose the device to the
+ * upper layers, remove it from the upper layers, or update its
+ * details.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 dev_handle = 0, perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ ioc_info(mrioc,
+ "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
+ __func__, dev_handle, perst_id);
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
+ if (!tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ if (tgtdev->is_hidden && tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
+ starget_for_each_device(tgtdev->starget, (void *)tgtdev,
+ mpi3mr_update_sdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_free_enclosure_list - release enclosures
+ * @mrioc: Adapter instance reference
+ *
+ * Free memory allocated during enclosure add.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &mrioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
+ * mpi3mr_enclosure_find_by_handle - enclosure search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the enclosure
+ *
+ * This searches for the enclosure device based on handle and
+ * returns the enclosure object.
+ *
+ * Return: Enclosure object reference or NULL
+ */
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
+
+ list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
+ * @mrioc: Adapter instance reference
+ * @encl_pg0: Enclosure page 0.
+ * @is_added: Added event or not
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
+{
+ char *reason_str = NULL;
+
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ if (is_added)
+ reason_str = "enclosure added";
+ else
+ reason_str = "enclosure dev status changed";
+
+ ioc_info(mrioc,
+ "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
+ reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
+ (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
+ ioc_info(mrioc,
+ "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
+ le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
+ le16_to_cpu(encl_pg0->flags),
+ ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the Enclosure device status or
+ * Enclosure add events if logging is enabled and add or remove
+ * the enclosure from the controller's internal list of
+ * enclosures.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ struct mpi3_enclosure_page0 *encl_pg0;
+ u16 encl_handle;
+ u8 added, present;
+
+ encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
+ added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
+ mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
+
+ encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
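+	/* extract the enclosure-device-present field from the flags word */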
+ present = ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
+
+ if (encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ encl_handle);
+ if (!enclosure_dev && present) {
+ enclosure_dev =
+ kzalloc(sizeof(struct mpi3mr_enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev)
+ return;
+ list_add_tail(&enclosure_dev->list,
+ &mrioc->enclosure_list);
+ }
+ if (enclosure_dev) {
+ if (!present) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ } else
+ memcpy(&enclosure_dev->pg0, encl_pg0,
+ sizeof(enclosure_dev->pg0));
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_debug - SASTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: SAS topology change list event data
+ *
+ * Prints information about the SAS topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_sas_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u8 reason_code, phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->exp_status) {
+ case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :sas topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->expander_dev_handle),
+ event_data->io_unit_port,
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_phy_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ phy_number = event_data->start_phy_num + i;
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
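+		/* new link rate in the high nibble, previous in the low */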
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ ioc_info(mrioc,
+ "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, phy_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the SAS topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_sas_topology_change_list *event_data =
+ (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ u64 exp_sas_address = 0, parent_sas_address = 0;
+ struct mpi3mr_hba_port *hba_port = NULL;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_sas_node *sas_expander = NULL;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate, parent_phy_number;
+
+ mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ if (mrioc->sas_transport_enabled) {
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc,
+ event_data->io_unit_port);
+ if (le16_to_cpu(event_data->expander_dev_handle)) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
+ le16_to_cpu(event_data->expander_dev_handle));
+ if (sas_expander) {
+ exp_sas_address = sas_expander->sas_address;
+ hba_port = sas_expander->hba_port;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ parent_sas_address = exp_sas_address;
+ } else
+ parent_sas_address = mrioc->sas_hba.sas_address;
+ }
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ {
+ if (!mrioc->sas_transport_enabled || tgtdev->non_stl
+ || tgtdev->is_hidden)
+ break;
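+			/* new link rate in the high nibble, previous in the low */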
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ if (link_rate == prev_link_rate)
+ break;
+ if (!parent_sas_address)
+ break;
+ parent_phy_number = event_data->start_phy_num + i;
+ mpi3mr_update_links(mrioc, parent_sas_address, handle,
+ parent_phy_number, link_rate, hba_port);
+ break;
+ }
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ if (mrioc->sas_transport_enabled && (event_data->exp_status ==
+ MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
+ if (sas_expander)
+ mpi3mr_expander_remove(mrioc, exp_sas_address,
+ hba_port);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: PCIe topology change list event data
+ *
+ * Prints information about the PCIe topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_pcie_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->switch_status) {
+ case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->switch_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_port_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ port_number = event_data->start_port_num + i;
+ reason_code = event_data->port_entry[i].port_status;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->port_entry[i].current_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->port_entry[i].previous_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ ioc_info(mrioc,
+ "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, port_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the PCIe topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_pcie_topology_change_list *event_data =
+ (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->port_entry[i].port_status;
+
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_logdata_evt_bh - Log data event bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Extracts the event data and calls the application interfacing
+ * function to process the event further.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
+ fwevt->event_data_size);
+}
+
+/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the QD of each SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Identifies the firmware event and calls the corresponding
+ * bottom half handler, then sends an event acknowledgment if
+ * required.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_device_page0 *dev_pg0 = NULL;
+ u16 perst_id, handle, dev_info;
+ struct mpi3_device0_sas_sata_format *sasinf = NULL;
+
+ mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+ mrioc->current_event = fwevt;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ if (mrioc->unrecoverable) {
+ dprint_event_bh(mrioc,
+ "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
+ fwevt->event_id);
+ goto out;
+ }
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ switch (fwevt->event_id) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ handle = le16_to_cpu(dev_pg0->dev_handle);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ else if (mrioc->sas_transport_enabled &&
+ (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ sasinf = &dev_pg0->device_specific.sas_sata_format;
+ dev_info = le16_to_cpu(sasinf->device_info);
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_is_expander_device(dev_info))
+ mpi3mr_expander_add(mrioc, handle);
+ }
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
+ break;
+ }
+
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_LOG_DATA:
+ {
+ mpi3mr_logdata_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
+ case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
+ {
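+		/*
+		 * Wait for the in-progress post-reset device refresh
+		 * to complete before scanning for missing or newly
+		 * added devices.
+		 */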
+ while (mrioc->device_refresh_on)
+ msleep(500);
+
+ dprint_event_bh(mrioc,
+ "scan for non responding and newly added devices after soft reset started\n");
+ if (mrioc->sas_transport_enabled) {
+ mpi3mr_refresh_sas_ports(mrioc);
+ mpi3mr_refresh_expanders(mrioc);
+ }
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ ioc_info(mrioc,
+ "scan for non responding and newly added devices after soft reset completed\n");
+ break;
+ }
+ default:
+ break;
+ }
+
+evt_ack:
+ if (fwevt->send_ack)
+ mpi3mr_process_event_ack(mrioc, fwevt->event_id,
+ fwevt->evt_ctx);
+out:
+ /* Put fwevt reference count to neutralize kref_init increment */
+ mpi3mr_fwevt_put(fwevt);
+ mrioc->current_event = NULL;
+}
+
+/**
+ * mpi3mr_fwevt_worker - Firmware event worker
+ * @work: Work struct containing firmware event
+ *
+ * Extracts the firmware event and calls mpi3mr_fwevt_bh.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_worker(struct work_struct *work)
+{
+ struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
+ work);
+ mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+}
+
+/**
+ * mpi3mr_create_tgtdev - Create and add a target device
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: Device Page 0 data
+ *
+ * If the device specified by the device page 0 data is not
+ * present in the driver's internal list, allocate memory for
+ * the device, populate the data and add it to the list; else
+ * update the device data. The key is the persistent ID.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
+ return retval;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (tgtdev) {
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ tgtdev = mpi3mr_alloc_tgtdev();
+ if (!tgtdev)
+ return -ENOMEM;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
+ mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush pending commands in the delayed lists due to a
+ * controller reset or driver removal as a cleanup.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
+{
+ struct delayed_dev_rmhs_node *_rmhs_node;
+ struct delayed_evt_ack_node *_evtack_node;
+
+ dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
+ while (!list_empty(&mrioc->delayed_rmhs_list)) {
+ _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ list_del(&_rmhs_node->list);
+ kfree(_rmhs_node);
+ }
+ dprint_reset(mrioc, "flushing delayed event ack commands\n");
+ while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ list_del(&_evtack_node->list);
+ kfree(_evtack_node);
+ }
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or retries the removal handshake sequence
+ * based on the IOU control request IOC status.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
+ __func__, drv_cmd->dev_handle,
+ drv_cmd->retry_count);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
+ drv_cmd, drv_cmd->iou_rc);
+ return;
+ }
+ ioc_err(mrioc,
+ "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ } else {
+ ioc_info(mrioc,
+ "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
+ }
+
+ if (!list_empty(&mrioc->delayed_rmhs_list)) {
+ delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ drv_cmd->dev_handle = delayed_dev_rmhs->handle;
+ drv_cmd->retry_count = 0;
+ drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
+ drv_cmd->iou_rc);
+ list_del(&delayed_dev_rmhs->list);
+ kfree(delayed_dev_rmhs);
+ return;
+ }
+
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list, or issues an IO unit control request
+ * as part of the device removal or hidden acknowledgment
+ * handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_iounit_control_request iou_ctrl;
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (tm_reply)
+ pr_info(IOCNAME
+ "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+
+ pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, cmd_idx);
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
+ iou_ctrl.operation = drv_cmd->iou_rc;
+ iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
+ iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
+ 1);
+ if (retval) {
+ pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
+ mrioc->name);
+ goto clear_drv_cmd;
+ }
+
+ return;
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ * @cmdparam: Internal command tracker
+ * @iou_rc: IO unit reason code
+ *
+ * Issues a target reset TM to the firmware or adds it to a pend
+ * list as part of device removal or hidden acknowledgment
+ * handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ int retval = 0;
+ u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ u8 retrycount = 5;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd)
+ goto issue_cmd;
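+	/*
+	 * Reserve a free device-removal command slot without a lock:
+	 * find_first_zero_bit() may race with other contexts, so
+	 * confirm the slot with test_and_set_bit() and retry a few
+	 * times before falling back to the delayed list.
+	 */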
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
+ MPI3MR_NUM_DEVRMCMD);
+ if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
+ if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
+ delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
+ GFP_ATOMIC);
+ if (!delayed_dev_rmhs)
+ return;
+ INIT_LIST_HEAD(&delayed_dev_rmhs->list);
+ delayed_dev_rmhs->handle = handle;
+ delayed_dev_rmhs->iou_rc = iou_rc;
+ list_add_tail(&delayed_dev_rmhs->list,
+ &mrioc->delayed_rmhs_list);
+ ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
+ __func__, handle);
+ return;
+ }
+ drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
+
+issue_cmd:
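+	/* recover the devrem command bitmap index from the host tag */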
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ ioc_info(mrioc,
+ "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
+ __func__, handle, cmd_idx);
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
+ drv_cmd->dev_handle = handle;
+ drv_cmd->iou_rc = iou_rc;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ set_bit(handle, mrioc->removepend_bitmap);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
+ __func__);
+ goto out_failed;
+ }
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_complete_evt_ack - event ack request completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the completion handler for the non-blocking event
+ * acknowledgment sent to the firmware; it issues any pending
+ * event acknowledgment request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_event_th(mrioc,
+ "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ drv_cmd->ioc_loginfo);
+ }
+
+ if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ delayed_evtack =
+ list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
+ delayed_evtack->event_ctx);
+ list_del(&delayed_evtack->list);
+ kfree(delayed_evtack);
+ return;
+ }
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_send_event_ack - Issue event acknowledgment request
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event id
+ * @cmdparam: Internal command tracker
+ * @event_ctx: event context
+ *
+ * Issues an event acknowledgment request to the firmware if
+ * there is a free command available to send the event ack,
+ * else adds it to a pend list so that it is processed upon
+ * completion of a prior event acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+ u8 retrycount = 5;
+ u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd) {
+ dprint_event_th(mrioc,
+ "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ goto issue_cmd;
+ }
+ dprint_event_th(mrioc,
+ "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
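+	/*
+	 * Reserve a free event-ack command slot without a lock:
+	 * find_first_zero_bit() may race with other contexts, so
+	 * confirm the slot with test_and_set_bit() and retry a few
+	 * times before deferring the ack to the delayed list.
+	 */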
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
+ MPI3MR_NUM_EVTACKCMD);
+ if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
+ if (!test_and_set_bit(cmd_idx,
+ mrioc->evtack_cmds_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
+ delayed_evtack = kzalloc(sizeof(*delayed_evtack),
+ GFP_ATOMIC);
+ if (!delayed_evtack)
+ return;
+ INIT_LIST_HEAD(&delayed_evtack->list);
+ delayed_evtack->event = event;
+ delayed_evtack->event_ctx = event_ctx;
+ list_add_tail(&delayed_evtack->list,
+ &mrioc->delayed_evtack_cmds_list);
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
+ event, event_ctx);
+ return;
+ }
+ drv_cmd = &mrioc->evtack_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ dprint_event_th(mrioc,
+ "sending event ack failed due to command in use\n");
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_complete_evt_ack;
+ evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ dprint_event_th(mrioc,
+ "posting event ack request is failed\n");
+ goto out_failed;
+ }
+
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
+ event, event_ctx);
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware, with reason as
+ * remove, for PCIe devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_pcie_topology_change_list *topo_evt =
+ (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->port_entry[i].port_status;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware, with reason as
+ * remove, for SAS/SATA devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_sas_topology_change_list *topo_evt =
+ (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O
+ * to the device, unblocks I/O to the device, or starts the
+ * device removal handshake with the firmware, with reason as
+ * remove/hide acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 dev_handle = 0;
+ u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
+ block = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ hide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ remove = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
+ ublock = 1;
+ break;
+ default:
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (hide)
+ tgtdev->is_hidden = hide;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ if (block)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ if (delete)
+ scsi_tgt_priv_data->dev_removed = 1;
+ if (ublock)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ }
+ if (remove)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ if (hide)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_HIDDEN_ACK);
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Blocks or unblocks host-level I/O based on the reason code.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_prepare_for_reset *evtdata =
+ (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
+
+ if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
+ dprint_event_th(mrioc,
+ "prepare for reset event top half with rc=start\n");
+ if (mrioc->prepare_for_reset)
+ return;
+ mrioc->prepare_for_reset = 1;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
+ dprint_event_th(mrioc,
+ "prepare for reset top half with rc=abort\n");
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ le32_to_cpu(event_reply->event_context));
+}
+
+/**
+ * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies the new shutdown timeout value and updates it.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_energy_pack_change *evtdata =
+ (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
+ u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
+
+ if (!shutdown_timeout) {
+ ioc_warn(mrioc,
+ "%s :Invalid Shutdown Timeout received = %d\n",
+ __func__, shutdown_timeout);
+ return;
+ }
+
+ ioc_info(mrioc,
+ "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
+ __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
+ mrioc->facts.shutdown_timeout = shutdown_timeout;
+}
+
+/**
+ * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays cable management event details.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_cable_management *evtdata =
+ (struct mpi3_event_data_cable_management *)event_reply->event_data;
+
+ switch (evtdata->status) {
+ case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
+ {
+ ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
+ "Devices connected to this cable are not detected.\n"
+ "This cable requires %d mW of power.\n",
+ evtdata->receptacle_id,
+ le32_to_cpu(evtdata->active_cable_power_requirement));
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
+ {
+ ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
+ evtdata->receptacle_id);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
+ * @mrioc: Adapter instance reference
+ *
+ * Add driver specific event to make sure that the driver won't process the
+ * events until all the devices are refreshed during soft reset.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ fwevt = mpi3mr_alloc_fwevt(0);
+ if (!fwevt) {
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for event(0x%02x)\n",
+ MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
+ return;
+ }
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = 0;
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
+ * mpi3mr_os_handle_events - Firmware event handler
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies whether the event has to be handled and
+ * acknowledged, and either processes the event in the top half
+ * and/or schedules a bottom half through mpi3mr_fwevt_worker.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 evt_type, sz;
+ struct mpi3mr_fwevt *fwevt = NULL;
+ bool ack_req = 0, process_evt_bh = 0;
+
+ if (mrioc->stop_drv_processing)
+ return;
+
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ ack_req = 1;
+
+ evt_type = event_reply->event;
+
+ switch (evt_type) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
+ ioc_err(mrioc,
+ "%s :Failed to add device in the device add event\n",
+ __func__);
+ else
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ process_evt_bh = 1;
+ mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_sastopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ {
+ mpi3mr_preparereset_evt_th(mrioc, event_reply);
+ ack_req = 0;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ case MPI3_EVENT_LOG_DATA:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ {
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ {
+ mpi3mr_energypackchg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT:
+ {
+ mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ break;
+ default:
+ ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
+ __func__, evt_type);
+ break;
+ }
+ if (process_evt_bh || ack_req) {
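+ /* event_data_length is in dwords, so the byte count is length * 4 */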
+ sz = event_reply->event_data_length * 4;
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
+ __func__, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, event_reply->event_data, sz);
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = evt_type;
+ fwevt->send_ack = ack_req;
+ fwevt->process_evt = process_evt_bh;
+ fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+ }
+}
+
+/**
+ * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * Identifies the protection information flags from the SCSI
+ * command and set appropriate flags in the MPI3 SCSI IO
+ * request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ u16 eedp_flags = 0;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+
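+ /*
+ * Map the SCSI layer protection operation to the equivalent
+ * MPI3 EEDP operation: strip maps to check-and-remove, insert
+ * to insert, and pass to check (or check-and-regenerate for IP
+ * checksum writes). The meta SGL is marked valid whenever the
+ * host buffer carries protection information.
+ */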
+ switch (prot_op) {
+ case SCSI_PROT_NORMAL:
+ return;
+ case SCSI_PROT_READ_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_READ_PASS:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
+ scsiio_req->sgl[0].eedp.application_tag_translation_mask =
+ 0xffff;
+ } else
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ default:
+ return;
+ }
+
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
+
+ if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
+ eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
+
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
+ MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
+ }
+
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+
+ eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+
+ switch (scsi_prot_interval(scmd)) {
+ case 512:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
+ break;
+ case 520:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
+ break;
+ case 4080:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
+ break;
+ case 4088:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
+ break;
+ case 4096:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
+ break;
+ case 4104:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
+ break;
+ case 4160:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
+ break;
+ default:
+ break;
+ }
+
+ scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
+ scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
+}
+
+/**
+ * mpi3mr_build_sense_buffer - Map sense information
+ * @desc: Sense type
+ * @buf: Sense buffer to populate
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ * Maps the given sense information into either descriptor or
+ * fixed format sense data.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
+ u8 asc, u8 ascq)
+{
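+ /*
+ * Two sense data layouts per SPC: descriptor format (response
+ * code 0x72) carries key/ASC/ASCQ in bytes 1-3, while fixed
+ * format (response code 0x70) places the key in byte 2, the
+ * additional sense length (0xa) in byte 7 and ASC/ASCQ in
+ * bytes 12-13.
+ */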
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+
+/**
+ * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
+ * @scmd: SCSI command reference
+ * @ioc_status: status of MPI3 request
+ *
+ * Maps the EEDP error status of the SCSI IO request to sense
+ * data.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
+ u16 ioc_status)
+{
+ u8 ascq = 0;
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, ascq);
+ scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * mpi3mr_process_op_reply_desc - reply descriptor handler
+ * @mrioc: Adapter instance reference
+ * @reply_desc: Operational reply descriptor
+ * @reply_dma: placeholder for the reply DMA address
+ * @qidx: Operational queue index
+ *
+ * Processes the operational reply descriptor and identifies the
+ * descriptor type. Based on the descriptor, maps the MPI3
+ * request status to a SCSI command status and calls the
+ * scsi_done callback.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc = NULL;
+ struct mpi3_address_reply_descriptor *addr_desc = NULL;
+ struct mpi3_success_reply_descriptor *success_desc = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u8 *sense_buf = NULL;
+ u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
+ u32 xfer_count = 0, sense_count = 0, resp_data = 0;
+ u16 dev_handle = 0xFFFF;
+ struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
+ *reply_dma);
+ if (!scsi_reply) {
+ panic("%s: scsi_reply is NULL, this shouldn't happen\n",
+ mrioc->name);
+ goto out;
+ }
+ host_tag = le16_to_cpu(scsi_reply->host_tag);
+ ioc_status = le16_to_cpu(scsi_reply->ioc_status);
+ scsi_status = scsi_reply->scsi_status;
+ scsi_state = scsi_reply->scsi_state;
+ dev_handle = le16_to_cpu(scsi_reply->dev_handle);
+ sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
+ xfer_count = le32_to_cpu(scsi_reply->transfer_count);
+ sense_count = le32_to_cpu(scsi_reply->sense_count);
+ resp_data = le32_to_cpu(scsi_reply->response_data);
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
+ panic("%s: Ran out of sense buffers\n", mrioc->name);
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+ scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
+ if (!scmd) {
+ panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
+ mrioc->name, host_tag);
+ goto out;
+ }
+ priv = scsi_cmd_priv(scmd);
+
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
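+ /*
+ * I/O throttling bookkeeping: on completion of a large I/O to a
+ * throttle enabled device, the pending data counters (per
+ * controller and per throttle group) are decremented and I/O
+ * diversion is switched off once they fall below their low
+ * watermarks.
+ */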
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
+ if (success_desc) {
+ scmd->result = DID_OK << 16;
+ goto out_success;
+ }
+
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
+ if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
+ xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
+ scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
+ ioc_status = MPI3_IOCSTATUS_SUCCESS;
+
+ if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
+ sense_buf) {
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
+
+ memcpy(scmd->sense_buffer, sense_buf, sz);
+ }
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_BUSY:
+ case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_count == 0) || (scmd->underflow > xfer_count))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
+ break;
+ if (xfer_count < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ fallthrough;
+ case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI3_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ mpi3mr_map_eedp_error(scmd, ioc_status);
+ break;
+ case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FUNCTION:
+ case MPI3_IOCSTATUS_INVALID_SGL:
+ case MPI3_IOCSTATUS_INTERNAL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FIELD:
+ case MPI3_IOCSTATUS_INVALID_STATE:
+ case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+
+ if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
+ (scmd->cmnd[0] != ATA_16) &&
+ mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
+ ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
+ scmd->result);
+ scsi_print_command(scmd);
+ ioc_info(mrioc,
+ "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
+ __func__, dev_handle, ioc_status, ioc_loginfo,
+ priv->req_q_idx + 1);
+ ioc_info(mrioc,
+ " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
+ host_tag, scsi_state, scsi_status, xfer_count, resp_data);
+ if (sense_buf) {
+ scsi_normalize_sense(sense_buf, sense_count, &sshdr);
+ ioc_info(mrioc,
+ "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
+ __func__, sense_count, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
+ }
+ }
+out_success:
+ if (priv->meta_sg_valid) {
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ }
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scsi_done(scmd);
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+/**
+ * mpi3mr_get_chain_idx - get free chain buffer index
+ * @mrioc: Adapter instance reference
+ *
+ * Try to get a free chain buffer index from the free pool.
+ *
+ * Return: -1 on failure or the free chain buffer index
+ */
+static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
+{
+ u8 retry_count = 5;
+ int cmd_idx = -1;
+
+ do {
+ spin_lock(&mrioc->chain_buf_lock);
+ cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
+ mrioc->chain_buf_count);
+ if (cmd_idx < mrioc->chain_buf_count) {
+ set_bit(cmd_idx, mrioc->chain_bitmap);
+ spin_unlock(&mrioc->chain_buf_lock);
+ break;
+ }
+ spin_unlock(&mrioc->chain_buf_lock);
+ cmd_idx = -1;
+ } while (retry_count--);
+ return cmd_idx;
+}
+
+/**
+ * mpi3mr_prepare_sg_scmd - build scatter gather list
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function maps the SCSI command's data and protection
+ * SGEs to MPI request SGEs. If required, an additional 4K chain
+ * buffer is used to send the SGEs.
+ *
+ * Return: 0 on success, -ENOMEM on dma_map_sg failure
+ */
+static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_length;
+ int sges_left, chain_idx;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 last_chain_sgl_flags;
+ struct chain_element *chain_req;
+ struct scmd_priv *priv = NULL;
+ u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
+ MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
+
+ priv = scsi_cmd_priv(scmd);
+
+ simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI3_SGE_FLAGS_END_OF_LIST;
+ last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+
+ if (meta_sg)
+ sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
+ else
+ sg_local = &scsiio_req->sgl;
+
+ if (!scsiio_req->data_length && !meta_sg) {
+ mpi3mr_build_zero_len_sge(sg_local);
+ return 0;
+ }
+
+ if (meta_sg) {
+ sg_scmd = scsi_prot_sglist(scmd);
+ sges_left = dma_map_sg(&mrioc->pdev->dev,
+ scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd),
+ scmd->sc_data_direction);
+ priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ } else {
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ }
+
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+ if (sges_left > MPI3MR_SG_DEPTH) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map returned unsupported sge count %d!\n",
+ sges_left);
+ return -ENOMEM;
+ }
+
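+ /*
+ * Number of inline SGEs that fit in the request frame after the
+ * fixed SCSI IO header; entries may be reserved below for the
+ * EEDP SGE and/or the meta SGL.
+ */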
+ sges_in_segment = (mrioc->facts.op_req_sz -
+ offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
+
+ if (scsiio_req->sgl[0].eedp.flags ==
+ MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_in_segment--;
+ /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
+ }
+
+ if (scsiio_req->msg_flags ==
+ MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
+ sges_in_segment--;
+ /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
+ }
+
+ if (meta_sg)
+ sges_in_segment = 1;
+
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_idx = mpi3mr_get_chain_idx(mrioc);
+ if (chain_idx < 0)
+ return -1;
+ chain_req = &mrioc->chain_sgl_list[chain_idx];
+ if (meta_sg)
+ priv->meta_chain_idx = chain_idx;
+ else
+ priv->chain_idx = chain_idx;
+
+ chain = chain_req->addr;
+ chain_dma = chain_req->dma_addr;
+ sges_in_segment = sges_left;
+ chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
+
+ mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
+ chain_length, chain_dma);
+
+ sg_local = chain;
+
+fill_in_last_segment:
+ while (sges_left > 0) {
+ if (sges_left == 1)
+ mpi3mr_add_sg_single(sg_local,
+ simple_sgl_flags_last, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function calls mpi3mr_prepare_sg_scmd for constructing
+ * both data SGEs and protection information SGEs in the MPI
+ * format from the SCSI command as appropriate.
+ *
+ * Return: return value of mpi3mr_prepare_sg_scmd.
+ */
+static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ int ret;
+
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ if (ret)
+ return ret;
+
+ if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
+ /* There is a valid meta sg */
+ scsiio_req->flags |=
+ cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ }
+
+ return ret;
+}
+
+/**
+ * mpi3mr_tm_response_name - get TM response as a string
+ * @resp_code: TM response code
+ *
+ * Convert known task management response code as a readable
+ * string.
+ *
+ * Return: response code string.
+ */
+static const char *mpi3mr_tm_response_name(u8 resp_code)
+{
+ const char *desc;
+
+ switch (resp_code) {
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
+ desc = "invalid LUN";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
+ desc = "task management request denied by NVMe device";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ return desc;
+}
+
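+/**
+ * mpi3mr_poll_pend_io_completions - poll I/O completion queues
+ * @mrioc: Adapter instance reference
+ *
+ * Processes outstanding reply descriptors on all operational
+ * reply queues of the given adapter.
+ *
+ * Return: Nothing
+ */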
+inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ int num_of_reply_queues =
+ mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
+
+ for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
+ mpi3mr_process_op_reply_q(mrioc,
+ mrioc->intr_info[i].op_reply_q);
+}
+
+/**
+ * mpi3mr_issue_tm - Issue Task Management request
+ * @mrioc: Adapter instance reference
+ * @tm_type: Task Management type
+ * @handle: Device handle
+ * @lun: lun ID
+ * @htag: Host tag of the TM request
+ * @timeout: TM timeout value
+ * @drv_cmd: Internal command tracker
+ * @resp_code: Response code place holder
+ * @scmd: SCSI command
+ *
+ * Issues a Task Management request to the controller for the
+ * specified target, LUN and command, waits for its completion,
+ * and checks the TM response. Recovers from a TM timeout by
+ * issuing a controller reset.
+ *
+ * Return: 0 on success, non-zero on errors
+ */
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scsi_cmnd *scmd)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct scmd_priv *cmd_priv = NULL;
+ struct scsi_device *sdev = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+
+ ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
+ __func__, tm_type, handle);
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
+ __func__);
+ goto out;
+ }
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ mutex_lock(&drv_cmd->mutex);
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 1;
+ drv_cmd->callback = NULL;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = tm_type;
+ tm_req.host_tag = cpu_to_le16(htag);
+
+ int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+
+ if (scmd) {
+ sdev = scmd->device;
+ sdev_priv_data = sdev->hostdata;
+ scsi_tgt_priv_data = ((sdev_priv_data) ?
+ sdev_priv_data->tgt_priv_data : NULL);
+ } else {
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ }
+
+ if (scsi_tgt_priv_data)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+
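+ /*
+ * For PCIe (NVMe) devices prefer the timeout reported by the
+ * device, when available: abort timeout for a command specific
+ * TM and reset timeout otherwise.
+ */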
+ if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
+ if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
+ timeout = tgtdev->dev_spec.pcie_inf.abort_to;
+ else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
+ timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ }
+
+ init_completion(&drv_cmd->done);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
+
+ if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
+ drv_cmd->is_waiting = 0;
+ retval = -1;
+ if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
+ dprint_tm(mrioc,
+ "task management request timed out after %ld seconds\n",
+ timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_TM)
+ dprint_dump_req(&tm_req, sizeof(tm_req)/4);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ }
+ goto out_unlock;
+ }
+
+ if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
+ dprint_tm(mrioc, "invalid task management reply message\n");
+ retval = -1;
+ goto out_unlock;
+ }
+
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ switch (drv_cmd->ioc_status) {
+ case MPI3_IOCSTATUS_SUCCESS:
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
+ break;
+ default:
+ dprint_tm(mrioc,
+ "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ switch (*resp_code) {
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
+ break;
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
+ if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ dprint_tm(mrioc,
+ "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
+ tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count),
+ mpi3mr_tm_response_name(*resp_code), *resp_code);
+
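+ /*
+ * On a successful TM, flush completions so that the commands
+ * terminated by the firmware are returned to the SCSI layer
+ * before the pending counts are recomputed below.
+ */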
+ if (!retval) {
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_process_admin_reply_q(mrioc);
+ }
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!scsi_tgt_priv_data)
+ break;
+ scsi_tgt_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_tgt_pending,
+ (void *)scsi_tgt_priv_data->starget);
+ break;
+ case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!sdev_priv_data)
+ break;
+ sdev_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_dev_pending, (void *)sdev);
+ break;
+ default:
+ break;
+ }
+
+out_unlock:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&drv_cmd->mutex);
+ if (scsi_tgt_priv_data)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_bios_param - BIOS param callback
+ * @sdev: SCSI device reference
+ * @bdev: Block device reference
+ * @capacity: Capacity in logical sectors
+ * @params: Parameter array
+ *
+ * Computes the BIOS disk parameters: heads/sectors/cylinders.
+ *
+ * Return: 0 always
+ */
+static int mpi3mr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
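+ /*
+ * Legacy BIOS geometry heuristic: disks of 1GB or larger
+ * (0x200000 512-byte sectors) use 255 heads and 63 sectors per
+ * track, so cylinders = capacity / (255 * 63).
+ */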
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+ return 0;
+}
+
+/**
+ * mpi3mr_map_queues - Map queues callback handler
+ * @shost: SCSI host reference
+ *
+ * Maps default and poll queues.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int i, qoff, offset;
+ struct blk_mq_queue_map *map = NULL;
+
+ offset = mrioc->op_reply_q_offset;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = mrioc->default_qcount;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = mrioc->active_poll_qcount;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+ * The poll queue(s) doesn't have an IRQ (and hence IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+}
+
+/**
+ * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
+ * @mrioc: Adapter instance reference
+ *
+ * Calculate the pending I/Os for the controller and return.
+ *
+ * Return: Number of pending I/Os
+ */
+static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ uint pend_ios = 0;
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
+ return pend_ios;
+}
+
+/**
+ * mpi3mr_print_pending_host_io - print pending I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Print number of pending I/Os and each I/O details prior to
+ * reset for debug purpose.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
+ __func__, mpi3mr_get_fw_pending_ios(mrioc));
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_print_scmd, (void *)mrioc);
+}
+
+/**
+ * mpi3mr_wait_for_host_io - block for I/Os to complete
+ * @mrioc: Adapter instance reference
+ * @timeout: timeout in seconds
+ *
+ * Waits for the pending I/Os of the given adapter to complete
+ * or for the timeout to expire.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
+{
+ enum mpi3mr_iocstate iocstate;
+ int i = 0;
+
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ return;
+
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ return;
+ ioc_info(mrioc,
+ "%s :Waiting for %d seconds prior to reset for %d I/O\n",
+ __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
+
+ for (i = 0; i < timeout; i++) {
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ break;
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ break;
+ msleep(1000);
+ }
+
+ ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
+ mpi3mr_get_fw_pending_ios(mrioc));
+}
+
+/**
+ * mpi3mr_eh_host_reset - Host reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a controller reset if the scmd is for a physical
+ * device. If the scmd is for a RAID volume, waits for
+ * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether there are
+ * any pending I/Os prior to issuing the reset to the controller.
+ *
+ * Return: SUCCESS on successful reset, else FAILED
+ */
+static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
+ int retval = FAILED, ret;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_type = stgt_priv_data->dev_type;
+ }
+
+ if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
+ mpi3mr_wait_for_host_io(mrioc,
+ MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+ if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+ retval = SUCCESS;
+ goto out;
+ }
+ }
+
+ mpi3mr_print_pending_host_io(mrioc);
+ ret = mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EH_HOS, 1);
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Host reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_target_reset - Target reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a target reset Task Management request, verifies the
+ * scmd is terminated successfully, and returns the status
+ * accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Target Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target Reset is issued to handle(0x%04x)\n",
+ dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (stgt_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target has %d pending commands, target reset is failed\n",
+ mrioc->name, stgt_priv_data->pend_count);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target reset is %s for scmd(%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_dev_reset - Device reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a LUN reset Task Management request, verifies the scmd
+ * is terminated successfully, and returns the status
+ * accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (sdev_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device has %d pending commands, device(LUN) reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_scan_start - Scan start callback handler
+ * @shost: SCSI host reference
+ *
+ * Issue port enable request asynchronously.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_scan_start(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ mrioc->scan_started = 1;
+ ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
+ if (mpi3mr_issue_port_enable(mrioc, 1)) {
+ ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
+ mrioc->scan_started = 0;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ }
+}
+
+/**
+ * mpi3mr_scan_finished - Scan finished callback handler
+ * @shost: SCSI host reference
+ * @time: Jiffies from the scan start
+ *
+ * Checks whether the port enable is completed, timed out or
+ * failed, and sets the scan status accordingly after taking any
+ * required recovery action.
+ *
+ * Return: 1 on scan finished or timed out, 0 for in progress
+ */
+static int mpi3mr_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+ u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ ioc_err(mrioc, "port enable failed due to fault or reset\n");
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ }
+
+ if (time >= (pe_timeout * HZ)) {
+ ioc_err(mrioc, "port enable failed due to time out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ }
+
+ if (mrioc->scan_started)
+ return 0;
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable is successfully completed\n");
+
+ mpi3mr_start_watchdog(mrioc);
+ mrioc->is_driver_loading = 0;
+ mrioc->stop_bsgs = 0;
+ return 1;
+}
+
+/**
+ * mpi3mr_slave_destroy - Slave destroy callback handler
+ * @sdev: SCSI device reference
+ *
+ * Clean up and free the per device (LUN) private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ unsigned long flags;
+ struct scsi_target *starget;
+ struct sas_rphy *rphy = NULL;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ scsi_tgt_priv_data->num_luns--;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
+ if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
+ tgt_dev->starget = NULL;
+ if (tgt_dev)
+ mpi3mr_tgtdev_put(tgt_dev);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_target_destroy - Target destroy callback handler
+ * @starget: SCSI target reference
+ *
+ * Clean up and free the per target private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+
+ if (!starget->hostdata)
+ return;
+
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
+ if (tgt_dev && (tgt_dev->starget == starget) &&
+ (tgt_dev->perst_id == starget->id))
+ tgt_dev->starget = NULL;
+ if (tgt_dev) {
+ scsi_tgt_priv_data->tgt_dev = NULL;
+ scsi_tgt_priv_data->perst_id = 0;
+ mpi3mr_tgtdev_put(tgt_dev);
+ mpi3mr_tgtdev_put(tgt_dev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_slave_configure - Slave configure callback handler
+ * @sdev: SCSI device reference
+ *
+ * Configure queue depth, max hardware sectors and virt boundary
+ * as required.
+ *
+ * Return: 0 always.
+ */
+static int mpi3mr_slave_configure(struct scsi_device *sdev)
+{
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ unsigned long flags;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (!tgt_dev)
+ return -ENXIO;
+
+ mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+
+ sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
+
+ switch (tgt_dev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ /*The block layer hw sector size = 512*/
+ if ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ }
+ break;
+ default:
+ break;
+ }
+
+ mpi3mr_tgtdev_put(tgt_dev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_slave_alloc - Slave alloc callback handler
+ * @sdev: SCSI device reference
+ *
+ * Allocate per device (LUN) private data and initialize it.
+ *
+ * Return: 0 on success -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
+ struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
+ unsigned long flags;
+ struct scsi_target *starget;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
+ if (tgt_dev) {
+ if (tgt_dev->starget == NULL)
+ tgt_dev->starget = starget;
+ mpi3mr_tgtdev_put(tgt_dev);
+ retval = 0;
+ } else {
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return -ENXIO;
+ }
+
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
+ if (!scsi_dev_priv_data)
+ return -ENOMEM;
+
+ scsi_dev_priv_data->lun_id = sdev->lun;
+ scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
+ sdev->hostdata = scsi_dev_priv_data;
+
+ scsi_tgt_priv_data->num_luns++;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_target_alloc - Target alloc callback handler
+ * @starget: SCSI target reference
+ *
+ * Allocate per target private data and initialize it.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+ struct sas_rphy *rphy = NULL;
+ bool update_stgt_priv_data = false;
+
+ scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
+ if (!scsi_tgt_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = scsi_tgt_priv_data;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ if (starget->channel == mrioc->scsi_device_channel) {
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden)
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ } else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ }
+
+ if (update_stgt_priv_data) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_return_unmap - Whether an unmap is allowed
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI Command reference
+ *
+ * The controller hardware cannot handle certain unmap commands
+ * for NVMe drives. This routine checks for those, completes the
+ * SCSI command with the proper status and sense data, and
+ * returns true.
+ *
+ * Return: TRUE for a disallowed unmap, FALSE otherwise.
+ */
+static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char *buf;
+ u16 param_len, desc_len, trunc_param_len;
+
+ trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
+
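+	/*
+	 * On non-zero PCI revisions the hardware only requires the
+	 * parameter list length to be truncated to a 16-byte multiple
+	 * past the 8-byte header; no further validation is needed.
+	 */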
+ if (mrioc->pdev->revision) {
+ if ((param_len > 24) && ((param_len - 8) & 0xF)) {
+ trunc_param_len -= (param_len - 8) & 0xF;
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ dprint_scsi_err(mrioc,
+ "truncating param_len from (%d) to (%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ }
+ return false;
+ }
+
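+	/* PCI revision 0 parts: validate the unmap parameter list fully */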
+ if (!param_len) {
+ ioc_warn(mrioc,
+ "%s: cdb received with zero parameter length\n",
+ __func__);
+ scsi_print_command(scmd);
+ scmd->result = DID_OK << 16;
+ scsi_done(scmd);
+ return true;
+ }
+
+ if (param_len < 24) {
+ ioc_warn(mrioc,
+ "%s: cdb received with invalid param_len: %d\n",
+ __func__, param_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scsi_done(scmd);
+ return true;
+ }
+ if (param_len != scsi_bufflen(scmd)) {
+ ioc_warn(mrioc,
+ "%s: cdb received with param_len: %d bufflen: %d\n",
+ __func__, param_len, scsi_bufflen(scmd));
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scsi_done(scmd);
+ return true;
+ }
+ buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+ if (!buf) {
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x55, 0x03);
+ scsi_done(scmd);
+ return true;
+ }
+ scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
+ desc_len = get_unaligned_be16(&buf[2]);
+
+ if (desc_len < 16) {
+ ioc_warn(mrioc,
+ "%s: Invalid descriptor length in param list: %d\n",
+ __func__, desc_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x26, 0);
+ scsi_done(scmd);
+ kfree(buf);
+ return true;
+ }
+
+ if (param_len > (desc_len + 8)) {
+ trunc_param_len = desc_len + 8;
+ scsi_print_command(scmd);
+ dprint_scsi_err(mrioc,
+ "truncating param_len(%d) to desc_len+8(%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ scsi_print_command(scmd);
+ }
+
+ kfree(buf);
+ return false;
+}
+
+/**
+ * mpi3mr_allow_scmd_to_fw - Whether a command is allowed during shutdown
+ * @scmd: SCSI Command reference
+ *
+ * Checks whether a cdb is allowed during shutdown or not.
+ *
+ * Return: TRUE for allowed commands, FALSE otherwise.
+ */
+
+inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * mpi3mr_qcmd - I/O request dispatcher
+ * @shost: SCSI Host reference
+ * @scmd: SCSI Command reference
+ *
+ * Issues the SCSI Command as an MPI3 request.
+ *
+ * Return: 0 on successful queueing of the request or if the
+ * request is completed with failure.
+ * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
+ * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
+ */
+static int mpi3mr_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *scmd_priv_data = NULL;
+ struct mpi3_scsi_io_request *scsiio_req = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+ int retval = 0;
+ u16 dev_handle;
+ u16 host_tag;
+ u32 scsiio_flags = 0, data_len_blks = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ int iprio_class;
+ u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+
+ if (mrioc->unrecoverable) {
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->stop_drv_processing &&
+ !(mpi3mr_allow_scmd_to_fw(scmd))) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->reset_in_progress) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ dev_handle = stgt_priv_data->dev_handle;
+ if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ if (stgt_priv_data->dev_removed) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ is_pcie_dev = 1;
+ if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
+ (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
+ mpi3mr_check_return_unmap(mrioc, scmd))
+ goto out;
+
+ host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
+ if (host_tag == MPI3MR_HOSTTAG_INVALID) {
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
+ else
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
+
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
+
+ if (sdev_priv_data->ncq_prio_enable) {
+ iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (iprio_class == IOPRIO_CLASS_RT)
+ scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
+ }
+
+ if (scmd->cmd_len > 16)
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
+
+ scmd_priv_data = scsi_cmd_priv(scmd);
+ memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
+ scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
+ scsiio_req->host_tag = cpu_to_le16(host_tag);
+
+ mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->dev_handle = cpu_to_le16(dev_handle);
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+ int_to_scsilun(sdev_priv_data->lun_id,
+ (struct scsi_lun *)scsiio_req->lun);
+
+ if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
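+	/* transfer length in 512-byte blocks, used for throttling accounting */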
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+
+ if (mpi3mr_op_request_post(mrioc, op_req_q,
+ scmd_priv_data->mpi3mr_scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+static struct scsi_host_template mpi3mr_driver_template = {
+ .module = THIS_MODULE,
+ .name = "MPI3 Storage Controller",
+ .proc_name = MPI3MR_DRIVER_NAME,
+ .queuecommand = mpi3mr_qcmd,
+ .target_alloc = mpi3mr_target_alloc,
+ .slave_alloc = mpi3mr_slave_alloc,
+ .slave_configure = mpi3mr_slave_configure,
+ .target_destroy = mpi3mr_target_destroy,
+ .slave_destroy = mpi3mr_slave_destroy,
+ .scan_finished = mpi3mr_scan_finished,
+ .scan_start = mpi3mr_scan_start,
+ .change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_device_reset_handler = mpi3mr_eh_dev_reset,
+ .eh_target_reset_handler = mpi3mr_eh_target_reset,
+ .eh_host_reset_handler = mpi3mr_eh_host_reset,
+ .bios_param = mpi3mr_bios_param,
+ .map_queues = mpi3mr_map_queues,
+ .mq_poll = mpi3mr_blk_mq_poll,
+ .no_write_same = 1,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPI3MR_SG_DEPTH,
+	/* max xfer supported is 1M (2K in 512-byte sectors) */
+ .max_sectors = 2048,
+ .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .max_segment_size = 0xffffffff,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scmd_priv),
+ .shost_groups = mpi3mr_host_groups,
+ .sdev_groups = mpi3mr_dev_groups,
+};
+
+/**
+ * mpi3mr_init_drv_cmd - Initialize internal command tracker
+ * @cmdptr: Internal command tracker
+ * @host_tag: Host tag used for the specific command
+ *
+ * Initialize the internal command tracker structure with
+ * specified host tag.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
+ u16 host_tag)
+{
+ mutex_init(&cmdptr->mutex);
+ cmdptr->reply = NULL;
+ cmdptr->state = MPI3MR_CMD_NOTUSED;
+ cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ cmdptr->host_tag = host_tag;
+}
+
+/**
+ * osintfc_mrioc_security_status - Check controller security status
+ * @pdev: PCI device instance
+ *
+ * Read the Device Serial Number capability from PCI config
+ * space and decide whether the controller is secure or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+osintfc_mrioc_security_status(struct pci_dev *pdev)
+{
+ u32 cap_data;
+ int base;
+ u32 ctlr_status;
+ u32 debug_status;
+ int retval = 0;
+
+ base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!base) {
+ dev_err(&pdev->dev,
+ "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
+ return -1;
+ }
+
+ pci_read_config_dword(pdev, base + 4, &cap_data);
+
+ debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
+ ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
+
+ switch (ctlr_status) {
+ case MPI3MR_INVALID_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ case MPI3MR_CONFIG_SECURE_DEVICE:
+ if (!debug_status)
+ dev_info(&pdev->dev,
+ "%s: Config secure ctlr is detected\n",
+ __func__);
+ break;
+ case MPI3MR_HARD_SECURE_DEVICE:
+ break;
+ case MPI3MR_TAMPERED_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ if (!retval && debug_status) {
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine. Checks the security status
+ * of the controller and, if it is invalid or tampered, returns
+ * from the probe without initializing the controller. Otherwise,
+ * allocates the per adapter instance through shost_priv,
+ * initializes controller specific data structures, initializes
+ * the controller hardware, and adds the shost to the SCSI
+ * subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+
+static int
+mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct Scsi_Host *shost = NULL;
+ int retval = 0, i;
+
+ if (osintfc_mrioc_security_status(pdev)) {
+ warn_non_secure_ctlr = 1;
+ return 1; /* For Invalid and Tampered device */
+ }
+
+ shost = scsi_host_alloc(&mpi3mr_driver_template,
+ sizeof(struct mpi3mr_ioc));
+ if (!shost) {
+ retval = -ENODEV;
+ goto shost_failed;
+ }
+
+ mrioc = shost_priv(shost);
+ mrioc->id = mrioc_ids++;
+ sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
+ sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
+ INIT_LIST_HEAD(&mrioc->list);
+ spin_lock(&mrioc_list_lock);
+ list_add_tail(&mrioc->list, &mrioc_list);
+ spin_unlock(&mrioc_list_lock);
+
+ spin_lock_init(&mrioc->admin_req_lock);
+ spin_lock_init(&mrioc->reply_free_queue_lock);
+ spin_lock_init(&mrioc->sbq_lock);
+ spin_lock_init(&mrioc->fwevt_lock);
+ spin_lock_init(&mrioc->tgtdev_lock);
+ spin_lock_init(&mrioc->watchdog_lock);
+ spin_lock_init(&mrioc->chain_buf_lock);
+ spin_lock_init(&mrioc->sas_node_lock);
+
+ INIT_LIST_HEAD(&mrioc->fwevt_list);
+ INIT_LIST_HEAD(&mrioc->tgtdev_list);
+ INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+ INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
+ INIT_LIST_HEAD(&mrioc->sas_expander_list);
+ INIT_LIST_HEAD(&mrioc->hba_port_table_list);
+ INIT_LIST_HEAD(&mrioc->enclosure_list);
+
+ mutex_init(&mrioc->reset_mutex);
+ mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
+ mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+ mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
+ MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
+ MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
+
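+	/* segmented operational queues are enabled only on non-zero PCI revisions */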
+ if (pdev->revision)
+ mrioc->enable_segqueue = true;
+
+ init_waitqueue_head(&mrioc->reset_waitq);
+ mrioc->logging_level = logging_level;
+ mrioc->shost = shost;
+ mrioc->pdev = pdev;
+ mrioc->stop_bsgs = 1;
+
+ /* init shost parameters */
+ shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
+ shost->max_lun = -1;
+ shost->unique_id = mrioc->id;
+
+ shost->max_channel = 0;
+ shost->max_id = 0xFFFFFFFF;
+
+ shost->host_tagset = 1;
+
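+	/* if no prot_mask was specified (prot_mask < 0), default to DIF types 1-3 */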
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else {
+ prot_mask = SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION;
+ scsi_host_set_prot(shost, prot_mask);
+ }
+
+ ioc_info(mrioc,
+ "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
+ __func__,
+ (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+
+ if (prot_guard_mask)
+ scsi_host_set_guard(shost, (prot_guard_mask & 3));
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
+ mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
+ mrioc->fwevt_worker_name, 0);
+ if (!mrioc->fwevt_worker_thread) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto fwevtthread_failed;
+ }
+
+ mrioc->is_driver_loading = 1;
+ mrioc->cpu_count = num_online_cpus();
+ if (mpi3mr_setup_resources(mrioc)) {
+ ioc_err(mrioc, "setup resources failed\n");
+ retval = -ENODEV;
+ goto resource_alloc_failed;
+ }
+ if (mpi3mr_init_ioc(mrioc)) {
+ ioc_err(mrioc, "initializing IOC failed\n");
+ retval = -ENODEV;
+ goto init_ioc_failed;
+ }
+
+ shost->nr_hw_queues = mrioc->num_op_reply_q;
+ if (mrioc->active_poll_qcount)
+ shost->nr_maps = 3;
+
+ shost->can_queue = mrioc->max_host_ios;
+ shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->max_id = mrioc->facts.max_perids + 1;
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ scsi_scan_host(shost);
+ mpi3mr_bsg_init(mrioc);
+ return retval;
+
+addhost_failed:
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+init_ioc_failed:
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+resource_alloc_failed:
+ destroy_workqueue(mrioc->fwevt_worker_thread);
+fwevtthread_failed:
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+/**
+ * mpi3mr_remove - PCI remove callback
+ * @pdev: PCI device instance
+ *
+ * Clean up the IOC by issuing MUR and shutdown notification.
+ * Free up all memory and resources associated with the
+ * controller and target devices, and unregister the shost.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_hba_port *port, *hba_port_next;
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+
+ mpi3mr_bsg_exit(mrioc);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ if (mrioc->sas_transport_enabled)
+ sas_remove_host(shost);
+ else
+ scsi_remove_host(shost);
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+
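+	/*
+	 * Drop sas_node_lock across each expander removal as the
+	 * removal path may sleep.
+	 */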
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ port, port->port_id);
+ list_del(&port->list);
+ kfree(port);
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->sas_hba.num_phys) {
+ kfree(mrioc->sas_hba.phy);
+ mrioc->sas_hba.phy = NULL;
+ mrioc->sas_hba.num_phys = 0;
+ }
+
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+
+ scsi_host_put(shost);
+}
+
+/**
+ * mpi3mr_shutdown - PCI shutdown callback
+ * @pdev: PCI device instance
+ *
+ * Free up all memory and resources associated with the
+ * controller.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+}
+
+/**
+ * mpi3mr_suspend - PCI power management suspend callback
+ * @dev: Device struct
+ *
+ * Change the power state to the given value and clean up the
+ * IOC by issuing MUR and shutdown notification.
+ *
+ * Return: 0 always.
+ */
+static int __maybe_unused
+mpi3mr_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ scsi_block_requests(shost);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
+ mpi3mr_cleanup_resources(mrioc);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_resume - PCI power management resume callback
+ * @dev: Device struct
+ *
+ * Restore the power state to D0, reinitialize the controller,
+ * and resume I/O operations to the target devices.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int __maybe_unused
+mpi3mr_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ mrioc->pdev = pdev;
+ mrioc->cpu_count = num_online_cpus();
+ r = mpi3mr_setup_resources(mrioc);
+ if (r) {
+ ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
+ __func__, r);
+ return r;
+ }
+
+ mrioc->stop_drv_processing = 0;
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+ mpi3mr_memset_buffers(mrioc);
+ r = mpi3mr_reinit_ioc(mrioc, 1);
+ if (r) {
+ ioc_err(mrioc, "resuming controller failed[%d]\n", r);
+ return r;
+ }
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
+ scsi_unblock_requests(shost);
+ mrioc->device_refresh_on = 0;
+ mpi3mr_start_watchdog(mrioc);
+
+ return 0;
+}
+
+static const struct pci_device_id mpi3mr_pci_id_table[] = {
+ {
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+
+static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
+
+static struct pci_driver mpi3mr_pci_driver = {
+ .name = MPI3MR_DRIVER_NAME,
+ .id_table = mpi3mr_pci_id_table,
+ .probe = mpi3mr_probe,
+ .remove = mpi3mr_remove,
+ .shutdown = mpi3mr_shutdown,
+ .driver.pm = &mpi3mr_pm_ops,
+};
+
+static ssize_t event_counter_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
+}
+static DRIVER_ATTR_RO(event_counter);
+
+static int __init mpi3mr_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ mpi3mr_transport_template =
+ sas_attach_transport(&mpi3mr_transport_functions);
+ if (!mpi3mr_transport_template) {
+ pr_err("%s failed to load due to sas transport attach failure\n",
+ MPI3MR_DRIVER_NAME);
+ return -ENODEV;
+ }
+
+ ret_val = pci_register_driver(&mpi3mr_pci_driver);
+ if (ret_val) {
+ pr_err("%s failed to load due to pci register driver failure\n",
+ MPI3MR_DRIVER_NAME);
+ goto err_pci_reg_fail;
+ }
+
+ ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
+ if (ret_val)
+ goto err_event_counter;
+
+ return ret_val;
+
+err_event_counter:
+ pci_unregister_driver(&mpi3mr_pci_driver);
+
+err_pci_reg_fail:
+ sas_release_transport(mpi3mr_transport_template);
+ return ret_val;
+}
+
+static void __exit mpi3mr_exit(void)
+{
+ if (warn_non_secure_ctlr)
+ pr_warn(
+ "Unloading %s version %s while managing a non secure controller\n",
+ MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
+ else
+ pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ driver_remove_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
+ pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
+}
+
+module_init(mpi3mr_init);
+module_exit(mpi3mr_exit);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
new file mode 100644
index 000000000..5748bd936
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -0,0 +1,3291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+/**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+ * @request: Properly populated MPI3 request
+ * @request_sz: Size of the MPI3 request
+ * @reply: Pointer to return MPI3 reply
+ * @reply_sz: Size of the MPI3 reply buffer
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting MPI3 requests from the SAS
+ * transport layer that uses transport command infrastructure.
+ * This blocks for the completion of the request for timeout
+ * seconds and, if the request times out, this function faults
+ * the controller with a proper reason code.
+ *
+ * On successful completion of the request this function returns
+ * appropriate ioc status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request,
+ u16 request_sz, void *reply, u16 reply_sz, int timeout,
+ u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->transport_cmds.mutex);
+ if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending transport request failed due to command in use\n");
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+ goto out;
+ }
+ mrioc->transport_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->transport_cmds.is_waiting = 1;
+ mrioc->transport_cmds.callback = NULL;
+ mrioc->transport_cmds.ioc_status = 0;
+ mrioc->transport_cmds.ioc_loginfo = 0;
+
+ init_completion(&mrioc->transport_cmds.done);
+ dprint_cfg_info(mrioc, "posting transport request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)
+ dprint_dump(request, request_sz, "transport_req");
+ retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1);
+ if (retval) {
+ ioc_err(mrioc, "posting transport request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->transport_cmds.done,
+ (timeout * HZ));
+ if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT);
+ ioc_err(mrioc, "transport request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->transport_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_transport_err(mrioc,
+ "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->transport_cmds.ioc_loginfo);
+
+ if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID))
+ memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz);
+
+out_unlock:
+ mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mpi3mr_report_manufacture - obtain SMP report_manufacture
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander device
+ * @edev: SAS transport layer sas_expander_device object
+ * @port_id: ID of the HBA port
+ *
+ * Fills in the sas_expander_device with manufacturing info.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc = 0;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u8 *tmp;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
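+	/* one coherent buffer: the SMP request at the start, the reply right after */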
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev,
+ data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ manufacture_reply = data_out + data_out_sz;
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) port_id;
+ mpi_request.sas_address = cpu_to_le64(sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n",
+ (unsigned long long)sas_address, port_id);
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "report manufacturer SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "report manufacturer - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct rep_manu_reply)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
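+		/* component_id is big-endian in the SMP reply; assemble into host order */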
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ return rc;
+}
+
+/**
+ * __mpi3mr_expander_find_by_handle - expander search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the expander
+ *
+ * Context: The caller should acquire sas_node_lock
+ *
+ * This searches for expander device based on handle, then
+ * returns the sas_node object.
+ *
+ * Return: Expander sas_node object reference or NULL
+ */
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpi3mr_is_expander_device - Whether the device is an expander
+ * @device_info: Bitfield providing information about the device
+ *
+ * Return: 1 if the device is an expander device, else 0.
+ */
+u8 mpi3mr_is_expander_device(u16 device_info)
+{
+ if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * mpi3mr_get_sas_address - retrieve sas_address for handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @sas_address: Address to hold sas address
+ *
+ * This function issues a device page0 read for the given device
+ * handle, retrieves the SAS address, and returns it.
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle,
+ u64 *sas_address)
+{
+ struct mpi3_device_page0 dev_pg0;
+ u16 ioc_status;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ *sas_address = 0;
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (le16_to_cpu(dev_pg0.flags) &
+ MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE)
+ *sas_address = mrioc->sas_hba.sas_address;
+ else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) {
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ *sas_address = le64_to_cpu(sasinf->sas_address);
+ } else {
+ ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n",
+ __func__, dev_pg0.device_form);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device using the sas address and
+ * hba port pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device using the sas address and
+ * hba port pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Context: This function acquires tgtdev_lock and releases it
+ * before returning the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ if (!hba_port)
+ goto out;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+out:
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_device_by_sas_address - remove the device
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device using the sas address and
+ * hba port pointer and removes it from the OS.
+ *
+ * Return: None
+ */
+static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ u8 was_on_tgtdev_list = 0;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc,
+ sas_address, hba_port);
+ if (tgtdev) {
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ was_on_tgtdev_list = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (was_on_tgtdev_list) {
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @rphy: SAS transport layer rphy object
+ *
+ * This searches for the target device using the sas address and
+ * rphy pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_expander_find_by_sas_address - sas expander search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander
+ * @hba_port: HBA port entry
+ *
+ * Return: A valid SAS expander node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander, *r = NULL;
+
+ if (!hba_port)
+ goto out;
+
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if ((sas_expander->sas_address != sas_address) ||
+ (sas_expander->hba_port != hba_port))
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * __mpi3mr_sas_node_find_by_sas_address - sas node search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander or sas host
+ * @hba_port: HBA port entry
+ * Context: Caller should acquire mrioc->sas_node_lock.
+ *
+ * If the SAS address indicates the device is directly attached
+ * to the controller (the controller's SAS address), the SAS
+ * node associated with the controller is returned. Otherwise,
+ * the SAS address and hba port are used to identify the exact
+ * expander and the associated sas_node object is returned. If
+ * there is no match, NULL is returned.
+ *
+ * Return: A valid SAS node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+
+ if (mrioc->sas_hba.sas_address == sas_address)
+ return &mrioc->sas_hba;
+ return mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+}
+
+/**
+ * mpi3mr_parent_present - Is parent present for a phy
+ * @mrioc: Adapter instance reference
+ * @phy: SAS transport layer phy object
+ *
+ * Return: 0 if parent is present else non-zero
+ */
+static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy)
+{
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ if (__mpi3mr_sas_node_find_by_sas_address(mrioc,
+ phy->identify.sas_address,
+ hba_port) == NULL) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return 0;
+}
+
+/**
+ * mpi3mr_convert_phy_link_rate - Convert MPI3 link rate to SAS form
+ * @link_rate: link rate as defined in the MPI header
+ *
+ * Convert link_rate from mpi format into sas_transport layer
+ * form.
+ *
+ * Return: A valid SAS transport layer defined link rate
+ */
+static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI3_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ default:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * mpi3mr_delete_sas_phy - Remove a single phy from port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mr_sas_phy->phy_id);
+
+ list_del(&mr_sas_phy->port_siblings);
+ mr_sas_port->num_phys--;
+ mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
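+	/* if the removed phy was the lowest, recompute it; ffs() is 1-based */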
+ if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * mpi3mr_add_sas_phy - Add a single phy to a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mr_sas_phy->phy_id);
+
+ list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+ if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * mpi3mr_add_phy_to_an_existing_port - add phy to existing port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ * @sas_address: SAS address of the device/expander the phy
+ * needs to be added to
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_port *mr_sas_port;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ if (!hba_port)
+ return;
+
+ list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy == mr_sas_phy)
+ return;
+ }
+ mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
+ return;
+ }
+}
+
+/**
+ * mpi3mr_delete_sas_port - Helper function for removing a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port;
+ enum sas_device_type device_type =
+ mr_sas_port->remote_identify.device_type;
+
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ if (device_type == SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc, sas_address,
+ hba_port);
+
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc, sas_address, hba_port);
+}
+
+/**
+ * mpi3mr_del_phy_from_an_existing_port - Delete phy from a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy != mr_sas_phy)
+ continue;
+ if ((mr_sas_port->num_phys == 1) &&
+ !mrioc->reset_in_progress)
+ mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+ else
+ mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+ mr_sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * mpi3mr_sas_port_sanity_check - sanity check while adding port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @sas_address: SAS address of device/expander
+ * @hba_port: HBA port entry
+ *
+ * Verifies whether the phys attached to a device with the given
+ * SAS address already belong to an existing sas port and, if
+ * so, removes those phys from the sas port.
+ *
+ * Return: None.
+ */
+static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ sas_address) || (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ if (mr_sas_node->phy[i].phy_belongs_to_port == 1)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ mr_sas_node, &mr_sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpi3mr_set_identify - set identify for phys and end devices
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @identify: SAS transport layer's identify info
+ *
+ * Populates sas identify info for a specific device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct sas_identify *identify)
+{
+
+ struct mpi3_device_page0 device_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ u16 device_info;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
+ sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ sasinf = &device_pg0.device_specific.sas_sata_format;
+ device_info = le16_to_cpu(sasinf->device_info);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sasinf->sas_address);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sasinf->phy_num;
+
+ /* device_type */
+ switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) {
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+	/* MPI3.0 doesn't have a define for SATA initiator, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+
+ /* target_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+	/* MPI3.0 doesn't have a define for STP target, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET)
+ identify->target_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_host_phy - report sas_host phy to SAS transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @phy_pg0: SAS phy page 0
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
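+	/*
+	 * hw_link_rate and programmed_link_rate pack the minimum rate
+	 * in the low nibble and the maximum rate in the high nibble.
+	 */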
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_expander_phy - report expander phy to transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @expander_pg1: SAS Expander page 1
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy,
+ struct mpi3_sas_expander_page1 expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_hba_port - alloc hba port object
+ * @mrioc: Adapter instance reference
+ * @port_id: Port number
+ *
+ * Allocates memory for an hba port object, initializes it and adds
+ * it to the mrioc's hba_port_table_list.
+ *
+ * Return: Pointer to the hba port object on success, NULL on
+ * allocation failure.
+ */
+static struct mpi3mr_hba_port *
+mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+{
+ struct mpi3mr_hba_port *hba_port;
+
+ hba_port = kzalloc(sizeof(struct mpi3mr_hba_port),
+ GFP_KERNEL);
+ if (!hba_port)
+ return NULL;
+ hba_port->port_id = port_id;
+ ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
+ return hba_port;
+}
+
+/**
+ * mpi3mr_get_hba_port_by_id - find hba port by id
+ * @mrioc: Adapter instance reference
+ * @port_id: Port ID to search
+ *
+ * Return: mpi3mr_hba_port reference for the matched port,
+ * NULL if no matching non-dirty port is found.
+ */
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id)
+{
+ struct mpi3mr_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY)
+ continue;
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * mpi3mr_update_links - refresh SAS phy link changes
+ * @mrioc: Adapter instance reference
+ * @sas_address_parent: SAS address of parent expander or host
+ * @handle: Firmware device handle of attached device
+ * @phy_number: Phy number
+ * @link_rate: New link rate
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port)
+{
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct mpi3mr_sas_phy *mr_sas_phy;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ mr_sas_phy = &mr_sas_node->phy[phy_number];
+ mr_sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) {
+ mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_phy->remote_identify);
+ mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node,
+ mr_sas_phy, mr_sas_phy->remote_identify.sas_address,
+ hba_port);
+ } else
+ memset(&mr_sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mr_sas_phy->phy)
+ mr_sas_phy->phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(link_rate);
+
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_phy->phy->dev,
+ "refresh: parent sas_address(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ (unsigned long long)sas_address_parent,
+ link_rate, phy_number, handle, (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+}
+
+/**
+ * mpi3mr_sas_host_refresh - refresh sas host object contents
+ * @mrioc: Adapter instance reference
+ *
+ * This function refreshes the controller's phy information and
+ * updates the SAS transport layer with the updated information.
+ * It is executed for each device addition or device info change
+ * event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u8 link_rate;
+ u16 sz, port_id, attached_handle;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+
+ dprint_transport_info(mrioc,
+ "updating handles for sas_host(0x%016llx)\n",
+ (unsigned long long)mrioc->sas_hba.sas_address);
+
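+	/*
+	 * sas_io_unit page0 ends in a flexible array of per-phy data, so
+	 * the read size is the fixed header (up to phy_data) plus one
+	 * mpi3_sas_io_unit0_phy_data element per host phy.
+	 */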
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ link_rate =
+ sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4;
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address,
+ attached_handle, i, link_rate,
+ mrioc->sas_hba.phy[i].hba_port);
+ }
+ out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_host_add - create sas host object
+ * @mrioc: Adapter instance reference
+ *
+ * This function creates the controller's phy information and
+ * updates the SAS transport layer with the updated information.
+ * It is executed for the first device addition or device info
+ * change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u16 sz, num_phys = 1, port_id, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_enclosure_page0 encl_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ num_phys = sas_io_unit_pg0->num_phys;
+ kfree(sas_io_unit_pg0);
+
+ mrioc->sas_hba.host_node = 1;
+ INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
+ mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
+ mrioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!mrioc->sas_hba.phy)
+ return;
+
+ mrioc->sas_hba.num_phys = num_phys;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ mrioc->sas_hba.phy[i].phy_id = i;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i],
+ phy_pg0, mrioc->sas_hba.parent_dev);
+ }
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__,
+ __func__);
+ goto out;
+ }
+ mrioc->sas_hba.enclosure_handle =
+ le16_to_cpu(dev_pg0.enclosure_handle);
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ mrioc->sas_hba.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ ioc_info(mrioc,
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ mrioc->sas_hba.handle,
+ (unsigned long long) mrioc->sas_hba.sas_address,
+ mrioc->sas_hba.num_phys);
+
+ if (mrioc->sas_hba.enclosure_handle) {
+ if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+ &encl_pg0, sizeof(encl_pg0),
+ MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.enclosure_handle)) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ mrioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(encl_pg0.enclosure_logical_id);
+ }
+
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the attached device
+ * @sas_address_parent: sas address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * This function creates a new sas port object for the given end
+ * device matching the sas address and hba_port, adds it to the
+ * sas_node's sas_port_list and exposes the attached sas device
+ * to the SAS transport layer through sas_rphy_add.
+ *
+ * Return: A valid mpi3mr_sas_port reference or NULL on failure.
+ */
+static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy, *next;
+ struct mpi3mr_sas_port *mr_sas_port;
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct sas_rphy *rphy;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ int i;
+ struct sas_port *port;
+
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL);
+ if (!mr_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&mr_sas_port->port_list);
+ INIT_LIST_HEAD(&mr_sas_port->phy_list);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!mr_sas_node) {
+ ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n",
+ __func__, (unsigned long long)sas_address_parent);
+ goto out_fail;
+ }
+
+ if ((mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_port->remote_identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->hba_port = hba_port;
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << i);
+ }
+
+ if (!mr_sas_port->num_phys) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
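+	/*
+	 * ffs() returns the 1-based index of the first set bit; e.g. a
+	 * phy_mask of 0x6 (phys 1 and 2) yields lowest_phy = 1.
+	 */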
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+
+ if (!tgtdev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1;
+ }
+
+ if (!mr_sas_node->parent_dev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(mr_sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&port->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ sas_port_add_phy(port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+ mr_sas_phy->hba_port = hba_port;
+ }
+
+ mr_sas_port->port = port;
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ rphy = sas_end_device_alloc(port);
+ tgtdev->dev_spec.sas_sata_inf.rphy = rphy;
+ } else {
+ rphy = sas_expander_alloc(port,
+ mr_sas_port->remote_identify.device_type);
+ }
+ rphy->identify = mr_sas_port->remote_identify;
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ }
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0;
+ tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ dev_info(&rphy->dev,
+ "%s: added: handle(0x%04x), sas_address(0x%016llx)\n",
+ __func__, handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address);
+
+ mr_sas_port->rphy = rphy;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, true);
+ }
+
+ /* fill in report manufacture */
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_report_manufacture(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+
+ return mr_sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list,
+ port_siblings)
+ list_del(&mr_sas_phy->port_siblings);
+ kfree(mr_sas_port);
+ return NULL;
+}
+
+/**
+ * mpi3mr_sas_port_remove - remove port from the list
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of attached device
+ * @sas_address_parent: SAS address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * Removes the matching sas port object from the parent sas node's
+ * sas_port_list and frees the associated memory.
+ *
+ * Return: None
+ */
+static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+ unsigned long flags;
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_node *mr_sas_node;
+ u8 found = 0;
+ struct mpi3mr_sas_phy *mr_sas_phy, *next_phy;
+ struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ found = 1;
+ list_del(&mr_sas_port->port_list);
+ goto out;
+ }
+
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ if (mr_sas_node->host_node) {
+ list_for_each_entry_safe(srch_port, hba_port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (srch_port != hba_port)
+ continue;
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ srch_port, srch_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ break;
+ }
+ }
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if (mr_sas_node->phy[i].remote_identify.sas_address ==
+ sas_address)
+ memset(&mr_sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ list_for_each_entry_safe(mr_sas_phy, next_phy,
+ &mr_sas_port->phy_list, port_siblings) {
+ if ((!mrioc->stop_drv_processing) &&
+ (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ mr_sas_phy->phy_belongs_to_port = 0;
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete_phy(mr_sas_port->port,
+ mr_sas_phy->phy);
+ list_del(&mr_sas_phy->port_siblings);
+ }
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete(mr_sas_port->port);
+ ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, false);
+ }
+
+ kfree(mr_sas_port);
+}
+
+/**
+ * struct host_port - host port details
+ * @sas_address: SAS Address of the attached device
+ * @phy_mask: phy mask of host port
+ * @handle: Device Handle of attached device
+ * @iounit_port_id: port ID
+ * @used: set when the host port has been matched with a sas port
+ * @lowest_phy: lowest phy ID of host port
+ */
+struct host_port {
+ u64 sas_address;
+ u32 phy_mask;
+ u16 handle;
+ u8 iounit_port_id;
+ u8 used;
+ u8 lowest_phy;
+};
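+
+/*
+ * host_port entries are rebuilt from sas_io_unit page0 after a
+ * controller reset and matched against the pre-reset sas ports in
+ * mpi3mr_refresh_sas_ports(): first by lowest phy, then by any
+ * overlapping phy, and finally by sas address alone.
+ */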
+
+/**
+ * mpi3mr_update_mr_sas_port - update sas port objects during reset
+ * @mrioc: Adapter instance reference
+ * @h_port: host_port object
+ * @mr_sas_port: sas_port objects which needs to be updated
+ *
+ * Update the port ID of the sas port object. Also add any phys
+ * that were newly added to the current sas port and remove any
+ * phys that were moved out of it.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy;
+ u32 phy_mask_xor;
+ u64 phys_to_be_added, phys_to_be_removed;
+ int i;
+
+ h_port->used = 1;
+ mr_sas_port->marked_responding = 1;
+
+ dev_info(&mr_sas_port->port->dev,
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
+ h_port->iounit_port_id, h_port->phy_mask);
+
+ mr_sas_port->hba_port->port_id = h_port->iounit_port_id;
+ mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY;
+
+	/* Get the bitmaps of newly added and removed phys */
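+	/*
+	 * For example, an old phy_mask of 0x3 (phys 0-1) and a new
+	 * phy_mask of 0x6 (phys 1-2) give phy_mask_xor = 0x5, so phy 2
+	 * (0x4) is to be added and phy 0 (0x1) is to be removed.
+	 */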
+ phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask;
+ phys_to_be_added = h_port->phy_mask & phy_mask_xor;
+ phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor;
+
+	/*
+	 * Register these new phys with the current mr_sas_port's port.
+	 * If any of these phys were previously registered with another
+	 * port, delete them from that port first.
+	 */
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ mpi3mr_add_phy_to_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ /* Delete the phys which are not part of current mr_sas_port's port. */
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ }
+}
+
+/**
+ * mpi3mr_refresh_sas_ports - update host's sas ports during reset
+ * @mrioc: Adapter instance reference
+ *
+ * Update the host's sas ports during reset by checking whether
+ * the sas ports are still intact. Add/remove phys if any hba phys
+ * were moved into or out of a sas port. Also update the
+ * io_unit_port if it changed during the reset.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
+{
+ struct host_port h_port[32];
+ int i, j, found, host_port_count = 0, port_idx;
+ u16 sz, attached_handle, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ struct mpi3mr_sas_port *mr_sas_port;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+	/* Build a fresh host port table from the attached device handles */
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (!attached_handle)
+ continue;
+ found = 0;
+ for (j = 0; j < host_port_count; j++) {
+ if (h_port[j].handle == attached_handle) {
+ h_port[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ attached_handle))) {
+ dprint_reset(mrioc,
+ "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ attached_handle, __FILE__, __LINE__, __func__);
+ continue;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ ioc_status, attached_handle,
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+
+ port_idx = host_port_count;
+ h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address);
+ h_port[port_idx].handle = attached_handle;
+ h_port[port_idx].phy_mask = (1 << i);
+ h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ h_port[port_idx].lowest_phy = sasinf->phy_num;
+ h_port[port_idx].used = 0;
+ host_port_count++;
+ }
+
+ if (!host_port_count)
+ goto out;
+
+ if (mrioc->logging_level & MPI3_DEBUG_RESET) {
+ ioc_info(mrioc, "Host port details before reset\n");
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ mr_sas_port->hba_port->port_id,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
+ }
+ mr_sas_port = NULL;
+ ioc_info(mrioc, "Host port details after reset\n");
+ for (i = 0; i < host_port_count; i++) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ h_port[i].iounit_port_id, h_port[i].sas_address,
+ h_port[i].phy_mask, h_port[i].lowest_phy);
+ }
+ }
+
+ /* mark all host sas port entries as dirty */
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ mr_sas_port->marked_responding = 0;
+ mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY;
+ }
+
+ /* First check for matching lowest phy */
+ for (i = 0; i < host_port_count; i++) {
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+	/* In case the lowest phy got enabled or disabled during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].phy_mask & mr_sas_port->phy_mask) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+	/* In case an expander cable was removed and reconnected to another HBA port during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_refresh_expanders - Refresh expander device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any expander
+ * devices that went missing during the reset and remove them from
+ * the upper layers, or to expose any newly detected expander
+ * devices to the upper layers.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ u16 ioc_status, handle;
+ u64 sas_address;
+ int i;
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ sas_expander->non_responding = 1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ sas_expander = NULL;
+
+ handle = 0xffff;
+
+	/* Search for responding expander devices and add any that were newly added */
+ while (true) {
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(struct mpi3_sas_expander_page0),
+ MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ dprint_reset(mrioc,
+ "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n",
+ ioc_status, handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ handle = le16_to_cpu(expander_pg0.dev_handle);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port);
+
+ if (!hba_port) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!sas_expander) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ sas_expander->non_responding = 0;
+ if (sas_expander->handle == handle)
+ continue;
+
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ }
+
+	/*
+	 * Delete the non-responding expander devices and the
+	 * corresponding hba_port if the non-responding expander
+	 * device's parent device is a host node.
+	 */
+ sas_expander = NULL;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ if (sas_expander->non_responding) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_node_add - insert an expander into the list.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander sas node
+ * Context: This function will acquire sas_node_lock.
+ *
+ * Adds the new object to the mrioc->sas_expander_list.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &mrioc->sas_expander_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_add - Create expander object
+ * @mrioc: Adapter instance reference
+ * @handle: Expander firmware device handle
+ *
+ * This function creates an expander object, stores it in the
+ * sas_expander_list and exposes it to the SAS transport
+ * layer.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_enclosure_node *enclosure_dev;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ u16 ioc_status, parent_handle, temp_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ u8 port_id, link_rate;
+ struct mpi3mr_sas_port *mr_sas_port = NULL;
+ struct mpi3mr_hba_port *hba_port;
+ u32 phynum_handle;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle);
+ if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent)
+ != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ port_id = expander_pg0.io_unit_port;
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (sas_address_parent != mrioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = mpi3mr_expander_add(mrioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ } else {
+			/*
+			 * When a parent expander is present, update its phys
+			 * where the child expander is connected with the link
+			 * speed, attached dev handle and sas address.
+			 */
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle =
+ (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ parent_handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc,
+ &ioc_status, &expander_pg1,
+ sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ temp_handle = le16_to_cpu(
+ expander_pg1.attached_dev_handle);
+ if (temp_handle != handle)
+ continue;
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ mpi3mr_update_links(mrioc, sas_address_parent,
+ handle, i, link_rate, hba_port);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
+ GFP_KERNEL);
+ if (!sas_expander)
+ return -1;
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.num_phys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+ sas_expander->hba_port = hba_port;
+
+ ioc_info(mrioc,
+ "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys) {
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent,
+ sas_expander->hba_port);
+ if (!mr_sas_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mr_sas_port->rphy->dev;
+ sas_expander->rphy = mr_sas_port->rphy;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].hba_port = hba_port;
+
+ if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i],
+ expander_pg1, sas_expander->parent_dev))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ mpi3mr_enclosure_find_by_handle(mrioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+ }
+
+ mpi3mr_expander_node_add(mrioc, sas_expander);
+ return 0;
+
+out_fail:
+
+ if (mr_sas_port)
+ mpi3mr_sas_port_remove(mrioc,
+ sas_expander->sas_address,
+ sas_address_parent, sas_expander->hba_port);
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpi3mr_expander_node_remove - recursive removal of expander.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander device object
+ *
+ * Removes the expander object from the sas_expander_list, frees
+ * the associated memory and removes it from the SAS TL. If one of
+ * the attached devices is an expander, it is removed recursively
+ * too.
+ *
+ * Return: None.
+ */
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ unsigned long flags;
+ u8 port_id;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mr_sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (mrioc->reset_in_progress)
+ return;
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ else if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ port_id = sas_expander->hba_port->port_id;
+ mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent, sas_expander->hba_port);
+
+ ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address, port_id);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * mpi3mr_expander_remove - Remove expander object
+ * @mrioc: Adapter instance reference
+ * @sas_address: Remove expander sas_address
+ * @hba_port: HBA port reference
+ *
+ * This function removes the expander object stored in
+ * mrioc->sas_expander_list from the SAS TL by calling
+ * mpi3mr_expander_node_remove().
+ *
+ * Return: None
+ */
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (sas_expander)
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+}
+
+/**
+ * mpi3mr_get_sas_negotiated_logical_linkrate - get linkrate
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function identifies whether the target device is attached
+ * directly or through an expander and issues the sas phy page0 or
+ * expander phy page1 accordingly to get the link rate. If reading
+ * the pages fails, a link rate of 1.5 is returned.
+ *
+ * Return: logical link rate.
+ */
+static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u32 phynum_handle;
+ u16 ioc_status;
+
+ phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) {
+ phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT)
+ | tgtdev->parent_handle);
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+out:
+ return link_rate;
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function exposes the target device to the SAS transport
+ * layer after preparing the host phys and setting up the link
+ * rate.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ int retval = 0;
+ u8 link_rate, parent_phy_number;
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+ u8 port_id;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return -1;
+
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle,
+ &sas_address_parent) != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent;
+
+ parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ port_id = tgtdev->io_unit_port;
+
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port;
+
+ link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+
+ mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
+ parent_phy_number, link_rate, hba_port);
+
+ tgtdev->host_exposed = 1;
+ if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ sas_address_parent, hba_port)) {
+ retval = -1;
+ } else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
+ mpi3mr_sas_port_remove(mrioc, sas_address,
+ sas_address_parent, hba_port);
+ retval = -1;
+ }
+ if (retval) {
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+ tgtdev->host_exposed = 0;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function removes the target device from the SAS transport layer.
+ *
+ * Return: None.
+ */
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return;
+
+ hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port;
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent;
+ mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ hba_port);
+ tgtdev->host_exposed = 0;
+ tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+}
+
+/**
+ * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy
+ * @phy: SAS transport layer phy object
+ *
+ * Return: Port ID of the HBA port if valid, else 0xFF
+ */
+static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ if (hba_port)
+ port_id = hba_port->port_id;
+
+ return port_id;
+}
+
+/**
+ * mpi3mr_get_port_id_by_rphy - Get Port number from SAS rphy
+ *
+ * @mrioc: Adapter instance reference
+ * @rphy: SAS transport layer remote phy object
+ *
+ * Retrieves the ID of the HBA port to which the device pointed
+ * by the rphy object is attached.
+ *
+ * Return: Valid port ID on success, else 0xFF.
+ */
+static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list,
+ list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->hba_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
+ port_id =
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ }
+ return port_id;
+}
+
+static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+
+ return shost_priv(shost);
+}
+
+static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+	u8 request_length; /* 0x02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
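+
+/*
+ * Note: SMP frames are big-endian on the wire, hence the __be32 reply
+ * counters above and the be32_to_cpu() conversions before the values
+ * are reported through the SAS transport class.
+ */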
+
+/**
+ * mpi3mr_get_expander_phy_error_log - return expander phy error counters
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma, data_in_dma;
+ u32 data_out_sz, data_in_sz, sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_error_log_request);
+ data_in_sz = sizeof(struct phy_error_log_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_error_log_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy error log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log - function_result(%d)\n",
+ phy_error_log_reply->function_result);
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_linkerrors - return phy error counters
+ * @phy: The SAS transport layer phy object
+ *
+ * This function retrieves the phy error log information of the
+ * HBA or expander to which the phy belongs.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_phy_page1 phy_pg1;
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_get_expander_phy_error_log(mrioc, phy);
+
+ memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1));
+ /* get hba phy error logs */
+ if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1,
+ sizeof(struct mpi3_sas_phy_page1),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.running_disparity_error_count);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.loss_dword_synch_count);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.phy_reset_problem_count);
+ return 0;
+}
+
+/**
+ * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID
+ * @rphy: The SAS transport layer remote phy object
+ * @identifier: Enclosure identifier to be returned
+ *
+ * Returns the enclosure id for the device pointed to by the
+ * remote phy object.
+ *
+ * Return: 0 on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ *identifier =
+ tgtdev->enclosure_logical_id;
+ rc = 0;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_bay_identifier - Get bay ID
+ * @rphy: The SAS transport layer remote phy object
+ *
+ * Returns the slot id for the device pointed to by the remote
+ * phy object.
+ *
+ * Return: Valid slot ID on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ rc = tgtdev->slot;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+	u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
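+
+/*
+ * Standard SMP PHY CONTROL phy operation codes from the SAS
+ * specification; mpi3mr_transport_phy_reset() selects LINK_RESET or
+ * HARD_RESET based on the hard_reset flag passed in by the SAS
+ * transport layer.
+ */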
+
+/**
+ * mpi3mr_expander_phy_control - expander phy control
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ * @phy_operation: The phy operation to be executed
+ *
+ * Issues SMP passthru phy control request to execute a specific
+ * phy operation for a given expander device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u16 sz;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_control_request);
+ data_in_sz = sizeof(struct phy_control_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_control_reply = data_out + data_out_sz;
+
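+	/* default to -EINVAL; rc becomes 0 only on a complete, valid reply */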
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy control SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+ dprint_transport_info(mrioc,
+ "phy control - function_result(%d)\n",
+ phy_control_reply->function_result);
+ rc = 0;
+ }
+ out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
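+
+/*
+ * Illustrative note (not part of the driver): the function above serves
+ * both SMP directions from a single coherent allocation,
+ *
+ *	data_out_dma                    data_out_dma + data_out_sz
+ *	|<---- phy_control_request ---->|<---- phy_control_reply ---->|
+ *
+ * so one dma_alloc_coherent()/dma_free_coherent() pair covers the
+ * request and the reply, and data_in_dma is just an offset into it.
+ */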
+
+/**
+ * mpi3mr_transport_phy_reset - Reset a given phy
+ * @phy: The SAS transport layer phy object
+ * @hard_reset: Flag to indicate the type of reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_iounit_control_request mpi_request;
+ struct mpi3_iounit_control_reply mpi_reply;
+ u16 request_sz = sizeof(struct mpi3_iounit_control_request);
+ u16 reply_sz = sizeof(struct mpi3_iounit_control_reply);
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, request_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL;
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] =
+ (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET :
+ MPI3_CTRL_ACTION_LINK_RESET);
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] =
+ phy->number;
+
+ dprint_transport_info(mrioc,
+ "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ hard_reset);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "phy reset request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+out:
+ return rc;
+}
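+
+/*
+ * Illustrative note (not part of the driver): phy_reset dispatches on
+ * who owns the phy. Expander phys are reset through the SMP passthrough
+ * helper above, while HBA phys are reset with an MPI3 IO Unit Control
+ * request, since the controller owns those phys directly. The SAS
+ * transport class typically invokes this callback when user space
+ * writes to a phy's "link_reset" or "hard_reset" sysfs attribute.
+ */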
+
+/**
+ * mpi3mr_transport_phy_enable - enable/disable phys
+ * @phy: The SAS transport layer phy object
+ * @enable: Enable the phy when non-zero, disable it otherwise
+ *
+ * This function enables/disables a given phy by executing the
+ * required configuration page changes or an expander phy control
+ * command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ u16 sz;
+ int rc = 0;
+ int i, discovery_active;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+	for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].port_flags &
+ MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) {
+ ioc_err(mrioc,
+ "discovery is active on port = %d, phy = %d\n"
+ "\tunable to enable/disable phys, try again later!\n",
+ sas_io_unit_pg0->phy_data[i].io_unit_port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_io_unit_pg1);
+ kfree(sas_io_unit_pg0);
+ return rc;
+}
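+
+/*
+ * Illustrative note (not part of the driver): enabling or disabling an
+ * HBA phy above is a read-modify-write of SAS IO Unit page 1: read the
+ * current per-phy flags, set or clear
+ * MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE, write the page back, and issue
+ * a link reset on enable so the phy renegotiates.
+ */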
+
+/**
+ * mpi3mr_transport_phy_speed - set phy min/max speed
+ * @phy: The SAS transport layer phy object
+ * @rates: Rates defined as in sas_phy_linkrates
+ *
+ * This function sets the link rates given in the rates argument
+ * for the given phy by executing the required configuration page
+ * changes or an expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u16 sz, ioc_status;
+ int rc = 0;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate =
+ (rates->minimum_linkrate + (rates->maximum_linkrate << 4));
+
+ if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS)) {
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate &
+ MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK)
+ >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ }
+
+out:
+ kfree(sas_io_unit_pg1);
+ return rc;
+}
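+
+/*
+ * Illustrative note (not part of the driver): max_min_link_rate packs
+ * both programmed rates into a single byte, minimum in the low nibble
+ * and maximum in the high nibble. For example, with
+ * min = SAS_LINK_RATE_3_0_GBPS (0x9) and max = SAS_LINK_RATE_12_0_GBPS
+ * (0xb):
+ *
+ *	0x9 + (0xb << 4) == 0xb9
+ */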
+
+/**
+ * mpi3mr_map_smp_buffer - map BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address holder
+ * @dma_len: Mapped DMA buffer length.
+ * @p: Virtual address holder
+ *
+ * This function maps the DMAable buffer
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+ /* Check if the request is split across multiple segments */
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+
+ return 0;
+}
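+
+/*
+ * Illustrative note (not part of the driver): the SMP passthrough
+ * request carries exactly one simple SGE per direction, so a BSG
+ * payload that already sits in a single segment can be DMA-mapped in
+ * place, while a multi-segment payload is bounced through one coherent
+ * allocation. A non-NULL *p marks the bounce case for the caller and
+ * for mpi3mr_unmap_smp_buffer().
+ */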
+
+/**
+ * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address to be unmapped
+ * @p: Virtual address
+ *
+ * This function unmaps the DMAable buffer
+ */
+static void
+mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t dma_addr, void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * mpi3mr_transport_smp_handler - handler for smp passthru
+ * @job: BSG job reference
+ * @shost: SCSI host object reference
+ * @rphy: SAS transport rphy object pointing to the expander
+ *
+ * This is used primarily by smp_utils for sending SMP
+ * commands to the expanders attached to the controller.
+ */
+static void
+mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ int rc;
+ void *psge;
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t dma_len_in;
+ size_t dma_len_out;
+ unsigned int reslen = 0;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ &dma_addr_out, &dma_len_out, &addr_out);
+ if (rc)
+ goto out;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ &dma_addr_in, &dma_len_in, &addr_in);
+ if (rc)
+ goto unmap_out;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy);
+ mpi_request.sas_address = ((rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(mrioc->sas_hba.sas_address));
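+
+	/*
+	 * Both SGE lengths below drop 4 bytes from the mapped payload,
+	 * presumably because the 4-byte SMP CRC is generated and checked
+	 * by the controller firmware rather than carried in host memory.
+	 */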
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in);
+
+ dprint_transport_info(mrioc, "sending SMP request\n");
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto unmap_in;
+
+ dprint_transport_info(mrioc,
+ "SMP request completed with ioc_status(0x%04x)\n", ioc_status);
+
+ dprint_transport_info(mrioc,
+ "SMP request - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ memcpy(job->reply, &mpi_reply, reply_sz);
+ job->reply_len = reply_sz;
+ reslen = le16_to_cpu(mpi_reply.response_data_length);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+
+ rc = 0;
+unmap_in:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ dma_addr_in, addr_in);
+unmap_out:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ dma_addr_out, addr_out);
+out:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template mpi3mr_transport_functions = {
+ .get_linkerrors = mpi3mr_transport_get_linkerrors,
+ .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier,
+ .get_bay_identifier = mpi3mr_transport_get_bay_identifier,
+ .phy_reset = mpi3mr_transport_phy_reset,
+ .phy_enable = mpi3mr_transport_phy_enable,
+ .set_phy_speed = mpi3mr_transport_phy_speed,
+ .smp_handler = mpi3mr_transport_smp_handler,
+};
+
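+/*
+ * Illustrative sketch (standard SAS transport usage, not shown in this
+ * file): the template below is expected to be set up during module init
+ * and torn down on exit, roughly:
+ *
+ *	mpi3mr_transport_template =
+ *		sas_attach_transport(&mpi3mr_transport_functions);
+ *	...
+ *	sas_release_transport(mpi3mr_transport_template);
+ */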
+struct scsi_transport_template *mpi3mr_transport_template;